/*	$NetBSD: if_wm.c,v 1.777 2023/05/11 07:27:09 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.777 2023/05/11 07:27:09 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_if_wm.h"
#endif

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/interrupt.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/pcq.h>
#include <sys/queue.h>
#include <sys/rndsource.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define	WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK
#endif

#define	DPRINTF(sc, x, y)			\
	do {					\
		if ((sc)->sc_debug & (x))	\
			printf y;		\
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
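
/*
 * Note the extra parentheses around DPRINTF()'s third argument: it is
 * pasted verbatim after "printf", so the entire printf argument list
 * must be supplied as one parenthesized group.  A hypothetical call,
 * for illustration only:
 *
 *	DPRINTF(sc, WM_DEBUG_LINK,
 *	    ("%s: link is up\n", device_xname(sc->sc_dev)));
 */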

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If a mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
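
/*
 * Because the ring and job-queue sizes are powers of two, the "next
 * index" macros above can wrap with a simple mask instead of a modulo.
 * For example, with WM_NTXDESC(txq) == 4096,
 * WM_NEXTTX(txq, 4095) == (4096 & 4095) == 0, wrapping back to the
 * start of the ring.
 */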

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
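
/*
 * The "room for 50 jumbo packets" figure above follows from the buffer
 * arithmetic: a ~9k jumbo frame spread over 2k (MCLBYTES) buffers needs
 * 5 descriptors, and 256 / 5 = 51, i.e. roughly 50 full-sized jumbo
 * packets per ring.
 */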

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;
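
/*
 * The unions above let the same descriptor memory be viewed through
 * whichever descriptor layout the chip generation uses; judging from
 * the accessor macros below, the layout actually in effect is selected
 * at runtime via txq_descsize/rxq_descsize and the txq_descs,
 * txq_nq_descs (and rxq_*) aliases.
 */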

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map.  For packets which fill more than one buffer, we
 * chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
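
/*
 * swfwphysem[] appears to be indexed by the MAC's function ID
 * (sc_funcid below, 0 to 3) to pick the SW/FW semaphore bit guarding
 * the corresponding PHY, e.g. swfwphysem[0] == SWFW_PHY0_SM for
 * function 0.
 */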

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};
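
/*
 * wm_82580_rxpbs_table[] evidently maps the 82580's RXPBS size field to
 * the Rx packet buffer size in KB; see wm_rxpbs_adjust_82580() below.
 */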

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)			\
	evcnt_detach(&(q)->qname##_ev_##evname)
#endif /* WM_EVENT_COUNTERS */
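
/*
 * To illustrate the token pasting above: WM_Q_EVCNT_DEFINE(txq, ipsum)
 * declares txq_ipsum_evcnt_name[] and struct evcnt txq_ev_ipsum, and
 * WM_Q_EVCNT_ATTACH() then formats a name such as "txq00ipsum" into
 * the buffer and attaches the counter under it.  The ## operator is
 * inert inside a string literal, so the buffer is sized from the
 * literal "qname##XX##evname", which is long enough for the queue and
 * event names used here.
 */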

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by
	 * multiple CPUs.  This pcq intermediates them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	bool wmq_wq_enqueued;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};
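
/*
 * A minimal sketch (not a real code path) of how the phy ops are meant
 * to be used: acquire is marked warn_unused_result, so callers must
 * check it before touching the PHY.
 *
 *	uint16_t val;
 *	if (sc->phy.acquire(sc) == 0) {
 *		sc->phy.readreg_locked(sc->sc_dev, 1, MII_BMCR, &val);
 *		sc->phy.release(sc);
 *	}
 */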

struct wm_nvmop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to
					 *   sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to
					 * sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* >= WM_T_82542_2_1 */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */

	struct evcnt sc_ev_crcerrs;	/* CRC Error */
	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
	struct evcnt sc_ev_symerrc;	/* Symbol Error */
	struct evcnt sc_ev_rxerrc;	/* Receive Error */
	struct evcnt sc_ev_mpc;		/* Missed Packets */
	struct evcnt sc_ev_scc;		/* Single Collision */
	struct evcnt sc_ev_ecol;	/* Excessive Collision */
	struct evcnt sc_ev_mcc;		/* Multiple Collision */
	struct evcnt sc_ev_latecol;	/* Late Collision */
	struct evcnt sc_ev_colc;	/* Collision */
	struct evcnt sc_ev_cbtmpc;	/* Circuit Breaker Tx Mng. Packet */
	struct evcnt sc_ev_dc;		/* Defer */
	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
	struct evcnt sc_ev_sec;		/* Sequence Error */

	/* Old */
	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
	/* New */
	struct evcnt sc_ev_htdpmc;	/* Host Tx Discarded Pkts by MAC */

	struct evcnt sc_ev_rlec;	/* Receive Length Error */
	struct evcnt sc_ev_cbrdpc;	/* Circuit Breaker Rx Dropped Packet */
	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
	struct evcnt sc_ev_ruc;		/* Rx Undersize */
	struct evcnt sc_ev_rfc;		/* Rx Fragment */
	struct evcnt sc_ev_roc;		/* Rx Oversize */
	struct evcnt sc_ev_rjc;		/* Rx Jabber */
	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
	struct evcnt sc_ev_tor;		/* Total Octets Rx */
	struct evcnt sc_ev_tot;		/* Total Octets Tx */
	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 bytes) */
	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx */
	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */

	/* Old */
	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
	/* New */
	struct evcnt sc_ev_cbrmpc;	/* Circuit Breaker Rx Mng. Packet */

	struct evcnt sc_ev_iac;		/* Interrupt Assertion */

	/* Old */
	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
	struct evcnt sc_ev_ictxact;	/* Intr. Cause Tx Abs Timer Expire */
	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
	/*
	 * sc_ev_rxdmtc is shared between the "Intr. Cause" and
	 * non-"Intr. Cause" registers.
	 */
	struct evcnt sc_ev_rxdmtc;	/* (Intr. Cause) Rx Desc Min Thresh */
	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
	/* New */
	struct evcnt sc_ev_rpthc;	/* Rx Packets To Host */
	struct evcnt sc_ev_debug1;	/* Debug Counter 1 */
	struct evcnt sc_ev_debug2;	/* Debug Counter 2 */
	struct evcnt sc_ev_debug3;	/* Debug Counter 3 */
	struct evcnt sc_ev_hgptc;	/* Host Good Packets TX */
	struct evcnt sc_ev_debug4;	/* Debug Counter 4 */
	struct evcnt sc_ev_htcbdpc;	/* Host Tx Circuit Breaker Drp. Pkts */
	struct evcnt sc_ev_hgorc;	/* Host Good Octets Rx */
	struct evcnt sc_ev_hgotc;	/* Host Good Octets Tx */
	struct evcnt sc_ev_lenerrs;	/* Length Error */
	struct evcnt sc_ev_tlpic;	/* EEE Tx LPI */
	struct evcnt sc_ev_rlpic;	/* EEE Rx LPI */
	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */
	struct evcnt sc_ev_scvpc;	/* SerDes/SGMII Code Violation Pkt. */
	struct evcnt sc_ev_hrmpc;	/* Header Redirection Missed Packet */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;

	struct workqueue *sc_reset_wq;
	struct work sc_reset_work;
	volatile unsigned sc_reset_pending;

	bool sc_dying;

#ifdef WM_DEBUG
	uint32_t sc_debug;
	bool sc_trigger_reset;
#endif
};

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
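
/*
 * WM_RXCHAIN_LINK() appends through the tail pointer, so (illustrative
 * only) after WM_RXCHAIN_RESET(rxq), linking m1 sets rxq_head = m1, and
 * linking m2 then sets m1->m_next = m2 and rxq_tail = m2, keeping
 * rxq_tailp aimed at the last mbuf's m_next for the next append.
 */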

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif
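
/*
 * Note that the relaxed load/store pair above is not an atomic
 * read-modify-write; it only guarantees that 64-bit counter values are
 * never torn.  That is presumably acceptable here because each counter
 * is updated from its own queue context, and readers merely need an
 * untorn value.
 */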

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
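
/*
 * The _LO/_HI macros above split a descriptor's bus address into the
 * two 32-bit halves the chip's base-address registers expect.  For
 * example, a 64-bit address 0x123456000 yields LO 0x23456000 and HI
 * 0x1; on systems with a 32-bit bus_addr_t, HI is always 0.
 */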

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static bool	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_update_stats(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
static void	wm_handle_reset_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Reading with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Locking, NVM type detection, checksum validation and reading */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);  /* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1478 "82567LF-2 LAN Controller",
1479 WM_T_ICH10, WMP_F_COPPER },
1480 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1481 "82567LM-3 LAN Controller",
1482 WM_T_ICH10, WMP_F_COPPER },
1483 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1484 "82567LF-3 LAN Controller",
1485 WM_T_ICH10, WMP_F_COPPER },
1486 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
1487 "82567V-2 LAN Controller",
1488 WM_T_ICH10, WMP_F_COPPER },
1489 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V,
1490 "82567V-3? LAN Controller",
1491 WM_T_ICH10, WMP_F_COPPER },
1492 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE,
1493 "HANKSVILLE LAN Controller",
1494 WM_T_ICH10, WMP_F_COPPER },
1495 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
1496 "PCH LAN (82577LM) Controller",
1497 WM_T_PCH, WMP_F_COPPER },
1498 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
1499 "PCH LAN (82577LC) Controller",
1500 WM_T_PCH, WMP_F_COPPER },
1501 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
1502 "PCH LAN (82578DM) Controller",
1503 WM_T_PCH, WMP_F_COPPER },
1504 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
1505 "PCH LAN (82578DC) Controller",
1506 WM_T_PCH, WMP_F_COPPER },
1507 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
1508 "PCH2 LAN (82579LM) Controller",
1509 WM_T_PCH2, WMP_F_COPPER },
1510 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
1511 "PCH2 LAN (82579V) Controller",
1512 WM_T_PCH2, WMP_F_COPPER },
1513 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
1514 "82575EB dual-1000baseT Ethernet",
1515 WM_T_82575, WMP_F_COPPER },
1516 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1517 "82575EB dual-1000baseX Ethernet (SERDES)",
1518 WM_T_82575, WMP_F_SERDES },
1519 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1520 "82575GB quad-1000baseT Ethernet",
1521 WM_T_82575, WMP_F_COPPER },
1522 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1523 "82575GB quad-1000baseT Ethernet (PM)",
1524 WM_T_82575, WMP_F_COPPER },
1525 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
1526 "82576 1000BaseT Ethernet",
1527 WM_T_82576, WMP_F_COPPER },
1528 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
1529 "82576 1000BaseX Ethernet",
1530 WM_T_82576, WMP_F_FIBER },
1531
1532 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
1533 "82576 gigabit Ethernet (SERDES)",
1534 WM_T_82576, WMP_F_SERDES },
1535
1536 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1537 "82576 quad-1000BaseT Ethernet",
1538 WM_T_82576, WMP_F_COPPER },
1539
1540 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1541 "82576 Gigabit ET2 Quad Port Server Adapter",
1542 WM_T_82576, WMP_F_COPPER },
1543
1544 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
1545 "82576 gigabit Ethernet",
1546 WM_T_82576, WMP_F_COPPER },
1547
1548 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
1549 "82576 gigabit Ethernet (SERDES)",
1550 WM_T_82576, WMP_F_SERDES },
1551 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1552 "82576 quad-gigabit Ethernet (SERDES)",
1553 WM_T_82576, WMP_F_SERDES },
1554
1555 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
1556 "82580 1000BaseT Ethernet",
1557 WM_T_82580, WMP_F_COPPER },
1558 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
1559 "82580 1000BaseX Ethernet",
1560 WM_T_82580, WMP_F_FIBER },
1561
1562 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
1563 "82580 1000BaseT Ethernet (SERDES)",
1564 WM_T_82580, WMP_F_SERDES },
1565
1566 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
1567 "82580 gigabit Ethernet (SGMII)",
1568 WM_T_82580, WMP_F_COPPER },
1569 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1570 "82580 dual-1000BaseT Ethernet",
1571 WM_T_82580, WMP_F_COPPER },
1572
1573 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1574 "82580 quad-1000BaseX Ethernet",
1575 WM_T_82580, WMP_F_FIBER },
1576
1577 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1578 "DH89XXCC Gigabit Ethernet (SGMII)",
1579 WM_T_82580, WMP_F_COPPER },
1580
1581 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1582 "DH89XXCC Gigabit Ethernet (SERDES)",
1583 WM_T_82580, WMP_F_SERDES },
1584
1585 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1586 "DH89XXCC 1000BASE-KX Ethernet",
1587 WM_T_82580, WMP_F_SERDES },
1588
1589 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1590 "DH89XXCC Gigabit Ethernet (SFP)",
1591 WM_T_82580, WMP_F_SERDES },
1592
1593 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
1594 "I350 Gigabit Network Connection",
1595 WM_T_I350, WMP_F_COPPER },
1596
1597 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
1598 "I350 Gigabit Fiber Network Connection",
1599 WM_T_I350, WMP_F_FIBER },
1600
1601 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
1602 "I350 Gigabit Backplane Connection",
1603 WM_T_I350, WMP_F_SERDES },
1604
1605 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_DA4,
1606 "I350 Quad Port Gigabit Ethernet",
1607 WM_T_I350, WMP_F_SERDES },
1608
1609 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
1610 "I350 Gigabit Connection",
1611 WM_T_I350, WMP_F_COPPER },
1612
1613 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_1000KX,
1614 "I354 Gigabit Ethernet (KX)",
1615 WM_T_I354, WMP_F_SERDES },
1616
1617 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
1618 "I354 Gigabit Ethernet (SGMII)",
1619 WM_T_I354, WMP_F_COPPER },
1620
1621 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_25GBE,
1622 "I354 Gigabit Ethernet (2.5G)",
1623 WM_T_I354, WMP_F_COPPER },
1624
1625 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
1626 "I210-T1 Ethernet Server Adapter",
1627 WM_T_I210, WMP_F_COPPER },
1628
1629 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1630 "I210 Ethernet (Copper OEM)",
1631 WM_T_I210, WMP_F_COPPER },
1632
1633 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
1634 "I210 Ethernet (Copper IT)",
1635 WM_T_I210, WMP_F_COPPER },
1636
1637 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1638 "I210 Ethernet (Copper, FLASH less)",
1639 WM_T_I210, WMP_F_COPPER },
1640
1641 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
1642 "I210 Gigabit Ethernet (Fiber)",
1643 WM_T_I210, WMP_F_FIBER },
1644
1645 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
1646 "I210 Gigabit Ethernet (SERDES)",
1647 WM_T_I210, WMP_F_SERDES },
1648
1649 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1650 "I210 Gigabit Ethernet (SERDES, FLASH less)",
1651 WM_T_I210, WMP_F_SERDES },
1652
1653 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
1654 "I210 Gigabit Ethernet (SGMII)",
1655 WM_T_I210, WMP_F_COPPER },
1656
1657 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII_WOF,
1658 "I210 Gigabit Ethernet (SGMII, FLASH less)",
1659 WM_T_I210, WMP_F_COPPER },
1660
1661 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
1662 "I211 Ethernet (COPPER)",
1663 WM_T_I211, WMP_F_COPPER },
1664 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
1665 "I217 V Ethernet Connection",
1666 WM_T_PCH_LPT, WMP_F_COPPER },
1667 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
1668 "I217 LM Ethernet Connection",
1669 WM_T_PCH_LPT, WMP_F_COPPER },
1670 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
1671 "I218 V Ethernet Connection",
1672 WM_T_PCH_LPT, WMP_F_COPPER },
1673 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2,
1674 "I218 V Ethernet Connection",
1675 WM_T_PCH_LPT, WMP_F_COPPER },
1676 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3,
1677 "I218 V Ethernet Connection",
1678 WM_T_PCH_LPT, WMP_F_COPPER },
1679 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
1680 "I218 LM Ethernet Connection",
1681 WM_T_PCH_LPT, WMP_F_COPPER },
1682 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2,
1683 "I218 LM Ethernet Connection",
1684 WM_T_PCH_LPT, WMP_F_COPPER },
1685 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3,
1686 "I218 LM Ethernet Connection",
1687 WM_T_PCH_LPT, WMP_F_COPPER },
1688 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM,
1689 "I219 LM Ethernet Connection",
1690 WM_T_PCH_SPT, WMP_F_COPPER },
1691 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM2,
1692 "I219 LM (2) Ethernet Connection",
1693 WM_T_PCH_SPT, WMP_F_COPPER },
1694 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM3,
1695 "I219 LM (3) Ethernet Connection",
1696 WM_T_PCH_SPT, WMP_F_COPPER },
1697 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM4,
1698 "I219 LM (4) Ethernet Connection",
1699 WM_T_PCH_SPT, WMP_F_COPPER },
1700 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM5,
1701 "I219 LM (5) Ethernet Connection",
1702 WM_T_PCH_SPT, WMP_F_COPPER },
1703 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM6,
1704 "I219 LM (6) Ethernet Connection",
1705 WM_T_PCH_CNP, WMP_F_COPPER },
1706 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM7,
1707 "I219 LM (7) Ethernet Connection",
1708 WM_T_PCH_CNP, WMP_F_COPPER },
1709 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM8,
1710 "I219 LM (8) Ethernet Connection",
1711 WM_T_PCH_CNP, WMP_F_COPPER },
1712 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM9,
1713 "I219 LM (9) Ethernet Connection",
1714 WM_T_PCH_CNP, WMP_F_COPPER },
1715 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM10,
1716 "I219 LM (10) Ethernet Connection",
1717 WM_T_PCH_CNP, WMP_F_COPPER },
1718 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM11,
1719 "I219 LM (11) Ethernet Connection",
1720 WM_T_PCH_CNP, WMP_F_COPPER },
1721 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM12,
1722 "I219 LM (12) Ethernet Connection",
1723 WM_T_PCH_SPT, WMP_F_COPPER },
1724 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM13,
1725 "I219 LM (13) Ethernet Connection",
1726 WM_T_PCH_CNP, WMP_F_COPPER },
1727 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM14,
1728 "I219 LM (14) Ethernet Connection",
1729 WM_T_PCH_CNP, WMP_F_COPPER },
1730 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM15,
1731 "I219 LM (15) Ethernet Connection",
1732 WM_T_PCH_CNP, WMP_F_COPPER },
1733 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM16,
1734 "I219 LM (16) Ethernet Connection",
1735 WM_T_PCH_CNP, WMP_F_COPPER },
1736 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM17,
1737 "I219 LM (17) Ethernet Connection",
1738 WM_T_PCH_CNP, WMP_F_COPPER },
1739 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM18,
1740 "I219 LM (18) Ethernet Connection",
1741 WM_T_PCH_CNP, WMP_F_COPPER },
1742 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM19,
1743 "I219 LM (19) Ethernet Connection",
1744 WM_T_PCH_CNP, WMP_F_COPPER },
1745 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V,
1746 "I219 V Ethernet Connection",
1747 WM_T_PCH_SPT, WMP_F_COPPER },
1748 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2,
1749 "I219 V (2) Ethernet Connection",
1750 WM_T_PCH_SPT, WMP_F_COPPER },
1751 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V4,
1752 "I219 V (4) Ethernet Connection",
1753 WM_T_PCH_SPT, WMP_F_COPPER },
1754 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V5,
1755 "I219 V (5) Ethernet Connection",
1756 WM_T_PCH_SPT, WMP_F_COPPER },
1757 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V6,
1758 "I219 V (6) Ethernet Connection",
1759 WM_T_PCH_CNP, WMP_F_COPPER },
1760 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V7,
1761 "I219 V (7) Ethernet Connection",
1762 WM_T_PCH_CNP, WMP_F_COPPER },
1763 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V8,
1764 "I219 V (8) Ethernet Connection",
1765 WM_T_PCH_CNP, WMP_F_COPPER },
1766 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V9,
1767 "I219 V (9) Ethernet Connection",
1768 WM_T_PCH_CNP, WMP_F_COPPER },
1769 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V10,
1770 "I219 V (10) Ethernet Connection",
1771 WM_T_PCH_CNP, WMP_F_COPPER },
1772 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V11,
1773 "I219 V (11) Ethernet Connection",
1774 WM_T_PCH_CNP, WMP_F_COPPER },
1775 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V12,
1776 "I219 V (12) Ethernet Connection",
1777 WM_T_PCH_SPT, WMP_F_COPPER },
1778 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V13,
1779 "I219 V (13) Ethernet Connection",
1780 WM_T_PCH_CNP, WMP_F_COPPER },
1781 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V14,
1782 "I219 V (14) Ethernet Connection",
1783 WM_T_PCH_CNP, WMP_F_COPPER },
1784 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V15,
1785 "I219 V (15) Ethernet Connection",
1786 WM_T_PCH_CNP, WMP_F_COPPER },
1787 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V16,
1788 "I219 V (16) Ethernet Connection",
1789 WM_T_PCH_CNP, WMP_F_COPPER },
1790 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V17,
1791 "I219 V (17) Ethernet Connection",
1792 WM_T_PCH_CNP, WMP_F_COPPER },
1793 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V18,
1794 "I219 V (18) Ethernet Connection",
1795 WM_T_PCH_CNP, WMP_F_COPPER },
1796 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V19,
1797 "I219 V (19) Ethernet Connection",
1798 WM_T_PCH_CNP, WMP_F_COPPER },
1799 { 0, 0,
1800 NULL,
1801 0, 0 },
1802 };
1803
1804 /*
1805 * Register read/write functions.
1806 * Other than CSR_{READ|WRITE}().
1807 */
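/*
 * A short orientation note: the I/O-space accessors below use the
 * indirect access window in the I/O BAR. The register offset is written
 * to the address register at offset 0 and the value is then transferred
 * through the data register at offset 4 (what Intel's documentation
 * calls the IOADDR/IODATA pair).
 */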
1808
1809 #if 0 /* Not currently used */
1810 static inline uint32_t
1811 wm_io_read(struct wm_softc *sc, int reg)
1812 {
1813
1814 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1815 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1816 }
1817 #endif
1818
1819 static inline void
1820 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1821 {
1822
1823 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1824 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1825 }
1826
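/*
 * Write an 8-bit value to a controller register that sits behind an
 * address/data register pair (e.g. SCTL on the 82575), then poll until
 * the hardware indicates that the write has completed.
 */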
1827 static inline void
1828 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1829 uint32_t data)
1830 {
1831 uint32_t regval;
1832 int i;
1833
1834 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1835
1836 CSR_WRITE(sc, reg, regval);
1837
1838 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1839 delay(5);
1840 if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1841 break;
1842 }
1843 if (i == SCTL_CTL_POLL_TIMEOUT) {
1844 aprint_error("%s: WARNING:"
1845 " i82575 reg 0x%08x setup did not indicate ready\n",
1846 device_xname(sc->sc_dev), reg);
1847 }
1848 }
1849
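/*
 * Split a bus address into the little-endian low/high 32-bit halves of
 * a wiseman-style descriptor address field.
 */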
1850 static inline void
1851 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1852 {
1853 wa->wa_low = htole32(BUS_ADDR_LO32(v));
1854 wa->wa_high = htole32(BUS_ADDR_HI32(v));
1855 }
1856
1857 /*
1858 * Descriptor sync/init functions.
1859 */
1860 static inline void
1861 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1862 {
1863 struct wm_softc *sc = txq->txq_sc;
1864
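	/*
	 * A worked example (assuming a 256-descriptor ring): syncing 8
	 * descriptors starting at index 252 is split into two calls, one
	 * covering descriptors 252-255 and another covering 0-3.
	 */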
1865 /* If it will wrap around, sync to the end of the ring. */
1866 if ((start + num) > WM_NTXDESC(txq)) {
1867 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1868 WM_CDTXOFF(txq, start), txq->txq_descsize *
1869 (WM_NTXDESC(txq) - start), ops);
1870 num -= (WM_NTXDESC(txq) - start);
1871 start = 0;
1872 }
1873
1874 /* Now sync whatever is left. */
1875 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1876 WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1877 }
1878
1879 static inline void
1880 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1881 {
1882 struct wm_softc *sc = rxq->rxq_sc;
1883
1884 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1885 WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1886 }
1887
1888 static inline void
1889 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1890 {
1891 struct wm_softc *sc = rxq->rxq_sc;
1892 struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1893 struct mbuf *m = rxs->rxs_mbuf;
1894
1895 /*
1896 * Note: We scoot the packet forward 2 bytes in the buffer
1897 * so that the payload after the Ethernet header is aligned
1898 * to a 4-byte boundary.
1899	 *
1900 * XXX BRAINDAMAGE ALERT!
1901 * The stupid chip uses the same size for every buffer, which
1902 * is set in the Receive Control register. We are using the 2K
1903 * size option, but what we REALLY want is (2K - 2)! For this
1904 * reason, we can't "scoot" packets longer than the standard
1905 * Ethernet MTU. On strict-alignment platforms, if the total
1906 * size exceeds (2K - 2) we set align_tweak to 0 and let
1907 * the upper layer copy the headers.
1908 */
1909 m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1910
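	/*
	 * Three receive descriptor formats are handled here: the 82574's
	 * extended descriptors, the "new queue" descriptors used by the
	 * 82575 and later, and the legacy wiseman descriptors.
	 */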
1911 if (sc->sc_type == WM_T_82574) {
1912 ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1913 rxd->erx_data.erxd_addr =
1914 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1915 rxd->erx_data.erxd_dd = 0;
1916 } else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1917 nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1918
1919 rxd->nqrx_data.nrxd_paddr =
1920 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1921 /* Currently, split header is not supported. */
1922 rxd->nqrx_data.nrxd_haddr = 0;
1923 } else {
1924 wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1925
1926 wm_set_dma_addr(&rxd->wrx_addr,
1927 rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1928 rxd->wrx_len = 0;
1929 rxd->wrx_cksum = 0;
1930 rxd->wrx_status = 0;
1931 rxd->wrx_errors = 0;
1932 rxd->wrx_special = 0;
1933 }
1934 wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1935
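	/* Hand the descriptor to the hardware by advancing the receive tail. */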
1936 CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1937 }
1938
1939 /*
1940 * Device driver interface functions and commonly used functions.
1941 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1942 */
1943
1944 /* Lookup supported device table */
1945 static const struct wm_product *
1946 wm_lookup(const struct pci_attach_args *pa)
1947 {
1948 const struct wm_product *wmp;
1949
1950 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1951 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1952 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1953 return wmp;
1954 }
1955 return NULL;
1956 }
1957
1958 /* The match function (ca_match) */
1959 static int
1960 wm_match(device_t parent, cfdata_t cf, void *aux)
1961 {
1962 struct pci_attach_args *pa = aux;
1963
1964 if (wm_lookup(pa) != NULL)
1965 return 1;
1966
1967 return 0;
1968 }
1969
1970 /* The attach function (ca_attach) */
1971 static void
1972 wm_attach(device_t parent, device_t self, void *aux)
1973 {
1974 struct wm_softc *sc = device_private(self);
1975 struct pci_attach_args *pa = aux;
1976 prop_dictionary_t dict;
1977 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1978 pci_chipset_tag_t pc = pa->pa_pc;
1979 int counts[PCI_INTR_TYPE_SIZE];
1980 pci_intr_type_t max_type;
1981 const char *eetype, *xname;
1982 bus_space_tag_t memt;
1983 bus_space_handle_t memh;
1984 bus_size_t memsize;
1985 int memh_valid;
1986 int i, error;
1987 const struct wm_product *wmp;
1988 prop_data_t ea;
1989 prop_number_t pn;
1990 uint8_t enaddr[ETHER_ADDR_LEN];
1991 char buf[256];
1992 char wqname[MAXCOMLEN];
1993 uint16_t cfg1, cfg2, swdpin, nvmword;
1994 pcireg_t preg, memtype;
1995 uint16_t eeprom_data, apme_mask;
1996 bool force_clear_smbi;
1997 uint32_t link_mode;
1998 uint32_t reg;
1999
2000 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
2001 sc->sc_debug = WM_DEBUG_DEFAULT;
2002 #endif
2003 sc->sc_dev = self;
2004 callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
2005 callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
2006 sc->sc_core_stopping = false;
2007
2008 wmp = wm_lookup(pa);
2009 #ifdef DIAGNOSTIC
2010 if (wmp == NULL) {
2011 printf("\n");
2012 panic("wm_attach: impossible");
2013 }
2014 #endif
2015 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
2016
2017 sc->sc_pc = pa->pa_pc;
2018 sc->sc_pcitag = pa->pa_tag;
2019
2020 if (pci_dma64_available(pa)) {
2021 aprint_verbose(", 64-bit DMA");
2022 sc->sc_dmat = pa->pa_dmat64;
2023 } else {
2024 aprint_verbose(", 32-bit DMA");
2025 sc->sc_dmat = pa->pa_dmat;
2026 }
2027
2028 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
2029 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
2030 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
2031
2032 sc->sc_type = wmp->wmp_type;
2033
2034 /* Set default function pointers */
2035 sc->phy.acquire = sc->nvm.acquire = wm_get_null;
2036 sc->phy.release = sc->nvm.release = wm_put_null;
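	/* PHY reset settle time: 100us on the 82571 and newer, else 10ms */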
2037 sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
2038
2039 if (sc->sc_type < WM_T_82543) {
2040 if (sc->sc_rev < 2) {
2041 aprint_error_dev(sc->sc_dev,
2042 "i82542 must be at least rev. 2\n");
2043 return;
2044 }
2045 if (sc->sc_rev < 3)
2046 sc->sc_type = WM_T_82542_2_0;
2047 }
2048
2049 /*
2050 * Disable MSI for Errata:
2051 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
2052 *
2053 * 82544: Errata 25
2054 * 82540: Errata 6 (easy to reproduce device timeout)
2055 * 82545: Errata 4 (easy to reproduce device timeout)
2056 * 82546: Errata 26 (easy to reproduce device timeout)
2057 * 82541: Errata 7 (easy to reproduce device timeout)
2058 *
2059 * "Byte Enables 2 and 3 are not set on MSI writes"
2060 *
2061 * 82571 & 82572: Errata 63
2062 */
2063 if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
2064 || (sc->sc_type == WM_T_82572))
2065 pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
2066
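	/* The 82575 and newer use the advanced ("new queue") descriptor format */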
2067 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2068 || (sc->sc_type == WM_T_82580)
2069 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
2070 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
2071 sc->sc_flags |= WM_F_NEWQUEUE;
2072
2073 /* Set device properties (mactype) */
2074 dict = device_properties(sc->sc_dev);
2075 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
2076
2077 /*
2078 	 * Map the device. All devices support memory-mapped access,
2079 * and it is really required for normal operation.
2080 */
2081 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
2082 switch (memtype) {
2083 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
2084 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
2085 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
2086 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
2087 break;
2088 default:
2089 memh_valid = 0;
2090 break;
2091 }
2092
2093 if (memh_valid) {
2094 sc->sc_st = memt;
2095 sc->sc_sh = memh;
2096 sc->sc_ss = memsize;
2097 } else {
2098 aprint_error_dev(sc->sc_dev,
2099 "unable to map device registers\n");
2100 return;
2101 }
2102
2103 /*
2104 * In addition, i82544 and later support I/O mapped indirect
2105 * register access. It is not desirable (nor supported in
2106 * this driver) to use it for normal operation, though it is
2107 * required to work around bugs in some chip versions.
2108 */
2109 switch (sc->sc_type) {
2110 case WM_T_82544:
2111 case WM_T_82541:
2112 case WM_T_82541_2:
2113 case WM_T_82547:
2114 case WM_T_82547_2:
2115 /* First we have to find the I/O BAR. */
2116 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
2117 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
2118 if (memtype == PCI_MAPREG_TYPE_IO)
2119 break;
2120 if (PCI_MAPREG_MEM_TYPE(memtype) ==
2121 PCI_MAPREG_MEM_TYPE_64BIT)
2122 i += 4; /* skip high bits, too */
2123 }
2124 if (i < PCI_MAPREG_END) {
2125 /*
2126 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
2127 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
2128 			 * That's no problem, because those newer chips don't
2129 			 * have this bug.
2130 			 *
2131 			 * The i8254x apparently doesn't respond when the
2132 			 * I/O BAR is 0, which looks as if it hasn't been
2133 			 * configured.
2134 */
2135 preg = pci_conf_read(pc, pa->pa_tag, i);
2136 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
2137 aprint_error_dev(sc->sc_dev,
2138 "WARNING: I/O BAR at zero.\n");
2139 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
2140 0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
2141 == 0) {
2142 sc->sc_flags |= WM_F_IOH_VALID;
2143 } else
2144 aprint_error_dev(sc->sc_dev,
2145 "WARNING: unable to map I/O space\n");
2146 }
2147 break;
2148 default:
2149 break;
2150 }
2151
2152 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
2153 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2154 preg |= PCI_COMMAND_MASTER_ENABLE;
2155 if (sc->sc_type < WM_T_82542_2_1)
2156 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
2157 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
2158
2159 /* Power up chip */
2160 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
2161 && error != EOPNOTSUPP) {
2162 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
2163 return;
2164 }
2165
2166 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
2167 /*
2168 	 * To save interrupt resources, don't use MSI-X if we can use
2169 	 * only one queue.
2170 */
2171 if (sc->sc_nqueues > 1) {
2172 max_type = PCI_INTR_TYPE_MSIX;
2173 /*
2174 		 * The 82583 has an MSI-X capability in its PCI configuration
2175 		 * space, but it doesn't actually support it. At least the
2176 		 * documentation doesn't say anything about MSI-X.
2177 */
2178 counts[PCI_INTR_TYPE_MSIX]
2179 = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
2180 } else {
2181 max_type = PCI_INTR_TYPE_MSI;
2182 counts[PCI_INTR_TYPE_MSIX] = 0;
2183 }
2184
2185 /* Allocation settings */
2186 counts[PCI_INTR_TYPE_MSI] = 1;
2187 counts[PCI_INTR_TYPE_INTX] = 1;
2188 /* overridden by disable flags */
2189 if (wm_disable_msi != 0) {
2190 counts[PCI_INTR_TYPE_MSI] = 0;
2191 if (wm_disable_msix != 0) {
2192 max_type = PCI_INTR_TYPE_INTX;
2193 counts[PCI_INTR_TYPE_MSIX] = 0;
2194 }
2195 } else if (wm_disable_msix != 0) {
2196 max_type = PCI_INTR_TYPE_MSI;
2197 counts[PCI_INTR_TYPE_MSIX] = 0;
2198 }
2199
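	/*
	 * Interrupt allocation fallback ladder: try MSI-X first (when
	 * multiqueue is usable), fall back to MSI if MSI-X setup fails,
	 * and finally fall back to INTx if MSI setup fails.
	 */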
2200 alloc_retry:
2201 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
2202 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
2203 return;
2204 }
2205
2206 if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
2207 error = wm_setup_msix(sc);
2208 if (error) {
2209 pci_intr_release(pc, sc->sc_intrs,
2210 counts[PCI_INTR_TYPE_MSIX]);
2211
2212 /* Setup for MSI: Disable MSI-X */
2213 max_type = PCI_INTR_TYPE_MSI;
2214 counts[PCI_INTR_TYPE_MSI] = 1;
2215 counts[PCI_INTR_TYPE_INTX] = 1;
2216 goto alloc_retry;
2217 }
2218 } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
2219 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */
2220 error = wm_setup_legacy(sc);
2221 if (error) {
2222 pci_intr_release(sc->sc_pc, sc->sc_intrs,
2223 counts[PCI_INTR_TYPE_MSI]);
2224
2225 /* The next try is for INTx: Disable MSI */
2226 max_type = PCI_INTR_TYPE_INTX;
2227 counts[PCI_INTR_TYPE_INTX] = 1;
2228 goto alloc_retry;
2229 }
2230 } else {
2231 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */
2232 error = wm_setup_legacy(sc);
2233 if (error) {
2234 pci_intr_release(sc->sc_pc, sc->sc_intrs,
2235 counts[PCI_INTR_TYPE_INTX]);
2236 return;
2237 }
2238 }
2239
2240 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
2241 error = workqueue_create(&sc->sc_queue_wq, wqname,
2242 wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
2243 WQ_PERCPU | WQ_MPSAFE);
2244 if (error) {
2245 aprint_error_dev(sc->sc_dev,
2246 "unable to create TxRx workqueue\n");
2247 goto out;
2248 }
2249
2250 snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
2251 error = workqueue_create(&sc->sc_reset_wq, wqname,
2252 wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
2253 WQ_MPSAFE);
2254 if (error) {
2255 workqueue_destroy(sc->sc_queue_wq);
2256 aprint_error_dev(sc->sc_dev,
2257 "unable to create reset workqueue\n");
2258 goto out;
2259 }
2260
2261 /*
2262 * Check the function ID (unit number of the chip).
2263 */
2264 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
2265 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
2266 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2267 || (sc->sc_type == WM_T_82580)
2268 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2269 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
2270 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
2271 else
2272 sc->sc_funcid = 0;
2273
2274 /*
2275 * Determine a few things about the bus we're connected to.
2276 */
2277 if (sc->sc_type < WM_T_82543) {
2278 /* We don't really know the bus characteristics here. */
2279 sc->sc_bus_speed = 33;
2280 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
2281 /*
2282 		 * CSA (Communication Streaming Architecture) is about as
2283 		 * fast as a 32-bit 66MHz PCI bus.
2284 */
2285 sc->sc_flags |= WM_F_CSA;
2286 sc->sc_bus_speed = 66;
2287 aprint_verbose_dev(sc->sc_dev,
2288 "Communication Streaming Architecture\n");
2289 if (sc->sc_type == WM_T_82547) {
2290 callout_init(&sc->sc_txfifo_ch, CALLOUT_MPSAFE);
2291 callout_setfunc(&sc->sc_txfifo_ch,
2292 wm_82547_txfifo_stall, sc);
2293 aprint_verbose_dev(sc->sc_dev,
2294 "using 82547 Tx FIFO stall work-around\n");
2295 }
2296 } else if (sc->sc_type >= WM_T_82571) {
2297 sc->sc_flags |= WM_F_PCIE;
2298 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
2299 && (sc->sc_type != WM_T_ICH10)
2300 && (sc->sc_type != WM_T_PCH)
2301 && (sc->sc_type != WM_T_PCH2)
2302 && (sc->sc_type != WM_T_PCH_LPT)
2303 && (sc->sc_type != WM_T_PCH_SPT)
2304 && (sc->sc_type != WM_T_PCH_CNP)) {
2305 /* ICH* and PCH* have no PCIe capability registers */
2306 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2307 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
2308 NULL) == 0)
2309 aprint_error_dev(sc->sc_dev,
2310 "unable to find PCIe capability\n");
2311 }
2312 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
2313 } else {
2314 reg = CSR_READ(sc, WMREG_STATUS);
2315 if (reg & STATUS_BUS64)
2316 sc->sc_flags |= WM_F_BUS64;
2317 if ((reg & STATUS_PCIX_MODE) != 0) {
2318 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
2319
2320 sc->sc_flags |= WM_F_PCIX;
2321 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2322 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
2323 aprint_error_dev(sc->sc_dev,
2324 "unable to find PCIX capability\n");
2325 else if (sc->sc_type != WM_T_82545_3 &&
2326 sc->sc_type != WM_T_82546_3) {
2327 /*
2328 * Work around a problem caused by the BIOS
2329 * setting the max memory read byte count
2330 * incorrectly.
2331 */
2332 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
2333 sc->sc_pcixe_capoff + PCIX_CMD);
2334 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2335 sc->sc_pcixe_capoff + PCIX_STATUS);
2336
2337 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2338 PCIX_CMD_BYTECNT_SHIFT;
2339 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2340 PCIX_STATUS_MAXB_SHIFT;
2341 if (bytecnt > maxb) {
2342 aprint_verbose_dev(sc->sc_dev,
2343 "resetting PCI-X MMRBC: %d -> %d\n",
2344 512 << bytecnt, 512 << maxb);
2345 pcix_cmd = (pcix_cmd &
2346 ~PCIX_CMD_BYTECNT_MASK) |
2347 (maxb << PCIX_CMD_BYTECNT_SHIFT);
2348 pci_conf_write(pa->pa_pc, pa->pa_tag,
2349 sc->sc_pcixe_capoff + PCIX_CMD,
2350 pcix_cmd);
2351 }
2352 }
2353 }
2354 /*
2355 * The quad port adapter is special; it has a PCIX-PCIX
2356 * bridge on the board, and can run the secondary bus at
2357 * a higher speed.
2358 */
2359 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2360 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2361 : 66;
2362 } else if (sc->sc_flags & WM_F_PCIX) {
2363 switch (reg & STATUS_PCIXSPD_MASK) {
2364 case STATUS_PCIXSPD_50_66:
2365 sc->sc_bus_speed = 66;
2366 break;
2367 case STATUS_PCIXSPD_66_100:
2368 sc->sc_bus_speed = 100;
2369 break;
2370 case STATUS_PCIXSPD_100_133:
2371 sc->sc_bus_speed = 133;
2372 break;
2373 default:
2374 aprint_error_dev(sc->sc_dev,
2375 "unknown PCIXSPD %d; assuming 66MHz\n",
2376 reg & STATUS_PCIXSPD_MASK);
2377 sc->sc_bus_speed = 66;
2378 break;
2379 }
2380 } else
2381 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2382 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2383 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2384 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2385 }
2386
2387 	/* Clear interesting stat counters (they are read-to-clear) */
2388 CSR_READ(sc, WMREG_COLC);
2389 CSR_READ(sc, WMREG_RXERRC);
2390
2391 if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2392 || (sc->sc_type >= WM_T_ICH8))
2393 sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2394 if (sc->sc_type >= WM_T_ICH8)
2395 sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2396
2397 /* Set PHY, NVM mutex related stuff */
2398 switch (sc->sc_type) {
2399 case WM_T_82542_2_0:
2400 case WM_T_82542_2_1:
2401 case WM_T_82543:
2402 case WM_T_82544:
2403 /* Microwire */
2404 sc->nvm.read = wm_nvm_read_uwire;
2405 sc->sc_nvm_wordsize = 64;
2406 sc->sc_nvm_addrbits = 6;
2407 break;
2408 case WM_T_82540:
2409 case WM_T_82545:
2410 case WM_T_82545_3:
2411 case WM_T_82546:
2412 case WM_T_82546_3:
2413 /* Microwire */
2414 sc->nvm.read = wm_nvm_read_uwire;
2415 reg = CSR_READ(sc, WMREG_EECD);
2416 if (reg & EECD_EE_SIZE) {
2417 sc->sc_nvm_wordsize = 256;
2418 sc->sc_nvm_addrbits = 8;
2419 } else {
2420 sc->sc_nvm_wordsize = 64;
2421 sc->sc_nvm_addrbits = 6;
2422 }
2423 sc->sc_flags |= WM_F_LOCK_EECD;
2424 sc->nvm.acquire = wm_get_eecd;
2425 sc->nvm.release = wm_put_eecd;
2426 break;
2427 case WM_T_82541:
2428 case WM_T_82541_2:
2429 case WM_T_82547:
2430 case WM_T_82547_2:
2431 reg = CSR_READ(sc, WMREG_EECD);
2432 /*
2433 		 * wm_nvm_set_addrbits_size_eecd() itself accesses the SPI EEPROM
2434 		 * only on the 8254[17], so set flags and functions before calling it.
2435 */
2436 sc->sc_flags |= WM_F_LOCK_EECD;
2437 sc->nvm.acquire = wm_get_eecd;
2438 sc->nvm.release = wm_put_eecd;
2439 if (reg & EECD_EE_TYPE) {
2440 /* SPI */
2441 sc->nvm.read = wm_nvm_read_spi;
2442 sc->sc_flags |= WM_F_EEPROM_SPI;
2443 wm_nvm_set_addrbits_size_eecd(sc);
2444 } else {
2445 /* Microwire */
2446 sc->nvm.read = wm_nvm_read_uwire;
2447 if ((reg & EECD_EE_ABITS) != 0) {
2448 sc->sc_nvm_wordsize = 256;
2449 sc->sc_nvm_addrbits = 8;
2450 } else {
2451 sc->sc_nvm_wordsize = 64;
2452 sc->sc_nvm_addrbits = 6;
2453 }
2454 }
2455 break;
2456 case WM_T_82571:
2457 case WM_T_82572:
2458 /* SPI */
2459 sc->nvm.read = wm_nvm_read_eerd;
2460 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2461 sc->sc_flags |= WM_F_EEPROM_SPI;
2462 wm_nvm_set_addrbits_size_eecd(sc);
2463 sc->phy.acquire = wm_get_swsm_semaphore;
2464 sc->phy.release = wm_put_swsm_semaphore;
2465 sc->nvm.acquire = wm_get_nvm_82571;
2466 sc->nvm.release = wm_put_nvm_82571;
2467 break;
2468 case WM_T_82573:
2469 case WM_T_82574:
2470 case WM_T_82583:
2471 sc->nvm.read = wm_nvm_read_eerd;
2472 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2473 if (sc->sc_type == WM_T_82573) {
2474 sc->phy.acquire = wm_get_swsm_semaphore;
2475 sc->phy.release = wm_put_swsm_semaphore;
2476 sc->nvm.acquire = wm_get_nvm_82571;
2477 sc->nvm.release = wm_put_nvm_82571;
2478 } else {
2479 /* Both PHY and NVM use the same semaphore. */
2480 sc->phy.acquire = sc->nvm.acquire
2481 = wm_get_swfwhw_semaphore;
2482 sc->phy.release = sc->nvm.release
2483 = wm_put_swfwhw_semaphore;
2484 }
2485 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2486 sc->sc_flags |= WM_F_EEPROM_FLASH;
2487 sc->sc_nvm_wordsize = 2048;
2488 } else {
2489 /* SPI */
2490 sc->sc_flags |= WM_F_EEPROM_SPI;
2491 wm_nvm_set_addrbits_size_eecd(sc);
2492 }
2493 break;
2494 case WM_T_82575:
2495 case WM_T_82576:
2496 case WM_T_82580:
2497 case WM_T_I350:
2498 case WM_T_I354:
2499 case WM_T_80003:
2500 /* SPI */
2501 sc->sc_flags |= WM_F_EEPROM_SPI;
2502 wm_nvm_set_addrbits_size_eecd(sc);
2503 if ((sc->sc_type == WM_T_80003)
2504 || (sc->sc_nvm_wordsize < (1 << 15))) {
2505 sc->nvm.read = wm_nvm_read_eerd;
2506 /* Don't use WM_F_LOCK_EECD because we use EERD */
2507 } else {
2508 sc->nvm.read = wm_nvm_read_spi;
2509 sc->sc_flags |= WM_F_LOCK_EECD;
2510 }
2511 sc->phy.acquire = wm_get_phy_82575;
2512 sc->phy.release = wm_put_phy_82575;
2513 sc->nvm.acquire = wm_get_nvm_80003;
2514 sc->nvm.release = wm_put_nvm_80003;
2515 break;
2516 case WM_T_ICH8:
2517 case WM_T_ICH9:
2518 case WM_T_ICH10:
2519 case WM_T_PCH:
2520 case WM_T_PCH2:
2521 case WM_T_PCH_LPT:
2522 sc->nvm.read = wm_nvm_read_ich8;
2523 /* FLASH */
2524 sc->sc_flags |= WM_F_EEPROM_FLASH;
2525 sc->sc_nvm_wordsize = 2048;
2526 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
2527 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2528 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2529 aprint_error_dev(sc->sc_dev,
2530 "can't map FLASH registers\n");
2531 goto out;
2532 }
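		/*
		 * GFPREG gives the flash region's first and last sectors.
		 * Derive the NVM base offset from the first sector, and the
		 * per-bank size in 16-bit words from the sector count (the
		 * region holds two banks).
		 */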
2533 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2534 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2535 ICH_FLASH_SECTOR_SIZE;
2536 sc->sc_ich8_flash_bank_size =
2537 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2538 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2539 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2540 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2541 sc->sc_flashreg_offset = 0;
2542 sc->phy.acquire = wm_get_swflag_ich8lan;
2543 sc->phy.release = wm_put_swflag_ich8lan;
2544 sc->nvm.acquire = wm_get_nvm_ich8lan;
2545 sc->nvm.release = wm_put_nvm_ich8lan;
2546 break;
2547 case WM_T_PCH_SPT:
2548 case WM_T_PCH_CNP:
2549 sc->nvm.read = wm_nvm_read_spt;
2550 /* SPT has no GFPREG; flash registers mapped through BAR0 */
2551 sc->sc_flags |= WM_F_EEPROM_FLASH;
2552 sc->sc_flasht = sc->sc_st;
2553 sc->sc_flashh = sc->sc_sh;
2554 sc->sc_ich8_flash_base = 0;
2555 sc->sc_nvm_wordsize =
2556 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2557 * NVM_SIZE_MULTIPLIER;
2558 		/* It is the size in bytes; we want words */
2559 sc->sc_nvm_wordsize /= 2;
2560 /* Assume 2 banks */
2561 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2562 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2563 sc->phy.acquire = wm_get_swflag_ich8lan;
2564 sc->phy.release = wm_put_swflag_ich8lan;
2565 sc->nvm.acquire = wm_get_nvm_ich8lan;
2566 sc->nvm.release = wm_put_nvm_ich8lan;
2567 break;
2568 case WM_T_I210:
2569 case WM_T_I211:
2570 		/* Allow a single clear of the SW semaphore on I210 and newer */
2571 sc->sc_flags |= WM_F_WA_I210_CLSEM;
2572 if (wm_nvm_flash_presence_i210(sc)) {
2573 sc->nvm.read = wm_nvm_read_eerd;
2574 /* Don't use WM_F_LOCK_EECD because we use EERD */
2575 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2576 wm_nvm_set_addrbits_size_eecd(sc);
2577 } else {
2578 sc->nvm.read = wm_nvm_read_invm;
2579 sc->sc_flags |= WM_F_EEPROM_INVM;
2580 sc->sc_nvm_wordsize = INVM_SIZE;
2581 }
2582 sc->phy.acquire = wm_get_phy_82575;
2583 sc->phy.release = wm_put_phy_82575;
2584 sc->nvm.acquire = wm_get_nvm_80003;
2585 sc->nvm.release = wm_put_nvm_80003;
2586 break;
2587 default:
2588 break;
2589 }
2590
2591 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2592 switch (sc->sc_type) {
2593 case WM_T_82571:
2594 case WM_T_82572:
2595 reg = CSR_READ(sc, WMREG_SWSM2);
2596 if ((reg & SWSM2_LOCK) == 0) {
2597 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2598 force_clear_smbi = true;
2599 } else
2600 force_clear_smbi = false;
2601 break;
2602 case WM_T_82573:
2603 case WM_T_82574:
2604 case WM_T_82583:
2605 force_clear_smbi = true;
2606 break;
2607 default:
2608 force_clear_smbi = false;
2609 break;
2610 }
2611 if (force_clear_smbi) {
2612 reg = CSR_READ(sc, WMREG_SWSM);
2613 if ((reg & SWSM_SMBI) != 0)
2614 aprint_error_dev(sc->sc_dev,
2615 "Please update the Bootagent\n");
2616 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2617 }
2618
2619 /*
2620 	 * Defer printing the EEPROM type until after verifying the checksum.
2621 * This allows the EEPROM type to be printed correctly in the case
2622 * that no EEPROM is attached.
2623 */
2624 /*
2625 * Validate the EEPROM checksum. If the checksum fails, flag
2626 * this for later, so we can fail future reads from the EEPROM.
2627 */
2628 if (wm_nvm_validate_checksum(sc)) {
2629 /*
2630 		 * Read the checksum again, because some PCI-e parts fail the
2631 		 * first check due to the link being in a sleep state.
2632 */
2633 if (wm_nvm_validate_checksum(sc))
2634 sc->sc_flags |= WM_F_EEPROM_INVALID;
2635 }
2636
2637 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2638 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2639 else {
2640 aprint_verbose_dev(sc->sc_dev, "%u words ",
2641 sc->sc_nvm_wordsize);
2642 if (sc->sc_flags & WM_F_EEPROM_INVM)
2643 aprint_verbose("iNVM");
2644 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2645 aprint_verbose("FLASH(HW)");
2646 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2647 aprint_verbose("FLASH");
2648 else {
2649 if (sc->sc_flags & WM_F_EEPROM_SPI)
2650 eetype = "SPI";
2651 else
2652 eetype = "MicroWire";
2653 aprint_verbose("(%d address bits) %s EEPROM",
2654 sc->sc_nvm_addrbits, eetype);
2655 }
2656 }
2657 wm_nvm_version(sc);
2658 aprint_verbose("\n");
2659
2660 /*
2661 * XXX The first call of wm_gmii_setup_phytype. The result might be
2662 * incorrect.
2663 */
2664 wm_gmii_setup_phytype(sc, 0, 0);
2665
2666 /* Check for WM_F_WOL on some chips before wm_reset() */
2667 switch (sc->sc_type) {
2668 case WM_T_ICH8:
2669 case WM_T_ICH9:
2670 case WM_T_ICH10:
2671 case WM_T_PCH:
2672 case WM_T_PCH2:
2673 case WM_T_PCH_LPT:
2674 case WM_T_PCH_SPT:
2675 case WM_T_PCH_CNP:
2676 apme_mask = WUC_APME;
2677 eeprom_data = CSR_READ(sc, WMREG_WUC);
2678 if ((eeprom_data & apme_mask) != 0)
2679 sc->sc_flags |= WM_F_WOL;
2680 break;
2681 default:
2682 break;
2683 }
2684
2685 /* Reset the chip to a known state. */
2686 wm_reset(sc);
2687
2688 /*
2689 * Check for I21[01] PLL workaround.
2690 *
2691 * Three cases:
2692 * a) Chip is I211.
2693 * b) Chip is I210 and it uses INVM (not FLASH).
2694 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
2695 */
2696 if (sc->sc_type == WM_T_I211)
2697 sc->sc_flags |= WM_F_PLL_WA_I210;
2698 if (sc->sc_type == WM_T_I210) {
2699 if (!wm_nvm_flash_presence_i210(sc))
2700 sc->sc_flags |= WM_F_PLL_WA_I210;
2701 else if ((sc->sc_nvm_ver_major < 3)
2702 || ((sc->sc_nvm_ver_major == 3)
2703 && (sc->sc_nvm_ver_minor < 25))) {
2704 aprint_verbose_dev(sc->sc_dev,
2705 "ROM image version %d.%d is older than 3.25\n",
2706 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2707 sc->sc_flags |= WM_F_PLL_WA_I210;
2708 }
2709 }
2710 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2711 wm_pll_workaround_i210(sc);
2712
2713 wm_get_wakeup(sc);
2714
2715 /* Non-AMT based hardware can now take control from firmware */
2716 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2717 wm_get_hw_control(sc);
2718
2719 /*
2720 * Read the Ethernet address from the EEPROM, if not first found
2721 * in device properties.
2722 */
2723 ea = prop_dictionary_get(dict, "mac-address");
2724 if (ea != NULL) {
2725 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2726 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2727 memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
2728 } else {
2729 if (wm_read_mac_addr(sc, enaddr) != 0) {
2730 aprint_error_dev(sc->sc_dev,
2731 "unable to read Ethernet address\n");
2732 goto out;
2733 }
2734 }
2735
2736 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2737 ether_sprintf(enaddr));
2738
2739 /*
2740 * Read the config info from the EEPROM, and set up various
2741 * bits in the control registers based on their contents.
2742 */
2743 pn = prop_dictionary_get(dict, "i82543-cfg1");
2744 if (pn != NULL) {
2745 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2746 cfg1 = (uint16_t) prop_number_signed_value(pn);
2747 } else {
2748 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2749 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2750 goto out;
2751 }
2752 }
2753
2754 pn = prop_dictionary_get(dict, "i82543-cfg2");
2755 if (pn != NULL) {
2756 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2757 cfg2 = (uint16_t) prop_number_signed_value(pn);
2758 } else {
2759 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2760 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2761 goto out;
2762 }
2763 }
2764
2765 /* check for WM_F_WOL */
2766 switch (sc->sc_type) {
2767 case WM_T_82542_2_0:
2768 case WM_T_82542_2_1:
2769 case WM_T_82543:
2770 /* dummy? */
2771 eeprom_data = 0;
2772 apme_mask = NVM_CFG3_APME;
2773 break;
2774 case WM_T_82544:
2775 apme_mask = NVM_CFG2_82544_APM_EN;
2776 eeprom_data = cfg2;
2777 break;
2778 case WM_T_82546:
2779 case WM_T_82546_3:
2780 case WM_T_82571:
2781 case WM_T_82572:
2782 case WM_T_82573:
2783 case WM_T_82574:
2784 case WM_T_82583:
2785 case WM_T_80003:
2786 case WM_T_82575:
2787 case WM_T_82576:
2788 apme_mask = NVM_CFG3_APME;
2789 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2790 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2791 break;
2792 case WM_T_82580:
2793 case WM_T_I350:
2794 case WM_T_I354:
2795 case WM_T_I210:
2796 case WM_T_I211:
2797 apme_mask = NVM_CFG3_APME;
2798 wm_nvm_read(sc,
2799 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2800 1, &eeprom_data);
2801 break;
2802 case WM_T_ICH8:
2803 case WM_T_ICH9:
2804 case WM_T_ICH10:
2805 case WM_T_PCH:
2806 case WM_T_PCH2:
2807 case WM_T_PCH_LPT:
2808 case WM_T_PCH_SPT:
2809 case WM_T_PCH_CNP:
2810 		/* Already checked before wm_reset() */
2811 apme_mask = eeprom_data = 0;
2812 break;
2813 default: /* XXX 82540 */
2814 apme_mask = NVM_CFG3_APME;
2815 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2816 break;
2817 }
2818 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2819 if ((eeprom_data & apme_mask) != 0)
2820 sc->sc_flags |= WM_F_WOL;
2821
2822 /*
2823 	 * We have the EEPROM settings; now apply the special cases
2824 	 * where the EEPROM may be wrong or the board won't support
2825 	 * wake on LAN on a particular port.
2826 */
2827 switch (sc->sc_pcidevid) {
2828 case PCI_PRODUCT_INTEL_82546GB_PCIE:
2829 sc->sc_flags &= ~WM_F_WOL;
2830 break;
2831 case PCI_PRODUCT_INTEL_82546EB_FIBER:
2832 case PCI_PRODUCT_INTEL_82546GB_FIBER:
2833 		/* Wake events are only supported on port A for dual-fiber
2834 		 * adapters, regardless of the EEPROM setting. */
2835 if (sc->sc_funcid == 1)
2836 sc->sc_flags &= ~WM_F_WOL;
2837 break;
2838 case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
2839 /* If quad port adapter, disable WoL on all but port A */
2840 if (sc->sc_funcid != 0)
2841 sc->sc_flags &= ~WM_F_WOL;
2842 break;
2843 case PCI_PRODUCT_INTEL_82571EB_FIBER:
2844 		/* Wake events are only supported on port A for dual-fiber
2845 		 * adapters, regardless of the EEPROM setting. */
2846 if (sc->sc_funcid == 1)
2847 sc->sc_flags &= ~WM_F_WOL;
2848 break;
2849 case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
2850 case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
2851 case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
2852 /* If quad port adapter, disable WoL on all but port A */
2853 if (sc->sc_funcid != 0)
2854 sc->sc_flags &= ~WM_F_WOL;
2855 break;
2856 }
2857
2858 if (sc->sc_type >= WM_T_82575) {
2859 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2860 aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
2861 nvmword);
2862 if ((sc->sc_type == WM_T_82575) ||
2863 (sc->sc_type == WM_T_82576)) {
2864 /* Check NVM for autonegotiation */
2865 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
2866 != 0)
2867 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2868 }
2869 if ((sc->sc_type == WM_T_82575) ||
2870 (sc->sc_type == WM_T_I350)) {
2871 if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
2872 sc->sc_flags |= WM_F_MAS;
2873 }
2874 }
2875 }
2876
2877 /*
2878 	 * XXX We need special handling for some multiple-port cards
2879 	 * to disable a particular port.
2880 */
2881
2882 if (sc->sc_type >= WM_T_82544) {
2883 pn = prop_dictionary_get(dict, "i82543-swdpin");
2884 if (pn != NULL) {
2885 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2886 swdpin = (uint16_t) prop_number_signed_value(pn);
2887 } else {
2888 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2889 aprint_error_dev(sc->sc_dev,
2890 "unable to read SWDPIN\n");
2891 goto out;
2892 }
2893 }
2894 }
2895
2896 if (cfg1 & NVM_CFG1_ILOS)
2897 sc->sc_ctrl |= CTRL_ILOS;
2898
2899 /*
2900 * XXX
2901 	 * This code isn't correct because pins 2 and 3 are located
2902 	 * at different positions on newer chips. Check all the datasheets.
2903 	 *
2904 	 * Until this problem is resolved, only do this on chips <= 82580.
2905 */
2906 if (sc->sc_type <= WM_T_82580) {
2907 if (sc->sc_type >= WM_T_82544) {
2908 sc->sc_ctrl |=
2909 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2910 CTRL_SWDPIO_SHIFT;
2911 sc->sc_ctrl |=
2912 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2913 CTRL_SWDPINS_SHIFT;
2914 } else {
2915 sc->sc_ctrl |=
2916 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2917 CTRL_SWDPIO_SHIFT;
2918 }
2919 }
2920
2921 if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
2922 wm_nvm_read(sc,
2923 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2924 1, &nvmword);
2925 if (nvmword & NVM_CFG3_ILOS)
2926 sc->sc_ctrl |= CTRL_ILOS;
2927 }
2928
2929 #if 0
2930 if (sc->sc_type >= WM_T_82544) {
2931 if (cfg1 & NVM_CFG1_IPS0)
2932 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2933 if (cfg1 & NVM_CFG1_IPS1)
2934 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2935 sc->sc_ctrl_ext |=
2936 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2937 CTRL_EXT_SWDPIO_SHIFT;
2938 sc->sc_ctrl_ext |=
2939 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2940 CTRL_EXT_SWDPINS_SHIFT;
2941 } else {
2942 sc->sc_ctrl_ext |=
2943 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2944 CTRL_EXT_SWDPIO_SHIFT;
2945 }
2946 #endif
2947
2948 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2949 #if 0
2950 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2951 #endif
2952
2953 if (sc->sc_type == WM_T_PCH) {
2954 uint16_t val;
2955
2956 /* Save the NVM K1 bit setting */
2957 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2958
2959 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2960 sc->sc_nvm_k1_enabled = 1;
2961 else
2962 sc->sc_nvm_k1_enabled = 0;
2963 }
2964
2965 /* Determine if we're GMII, TBI, SERDES or SGMII mode */
2966 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2967 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2968 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2969 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
2970 || sc->sc_type == WM_T_82573
2971 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2972 /* Copper only */
2973 } else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2974 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
2975 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
2976 	    || (sc->sc_type == WM_T_I211)) {
2977 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2978 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
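		/*
		 * The link mode field selects among 1000BASE-KX, SGMII,
		 * SERDES/SFP and internal GMII (copper); SGMII may be wired
		 * through either MDIO or the I2C interface.
		 */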
2979 switch (link_mode) {
2980 case CTRL_EXT_LINK_MODE_1000KX:
2981 aprint_normal_dev(sc->sc_dev, "1000KX\n");
2982 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2983 break;
2984 case CTRL_EXT_LINK_MODE_SGMII:
2985 if (wm_sgmii_uses_mdio(sc)) {
2986 aprint_normal_dev(sc->sc_dev,
2987 "SGMII(MDIO)\n");
2988 sc->sc_flags |= WM_F_SGMII;
2989 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2990 break;
2991 }
2992 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2993 /*FALLTHROUGH*/
2994 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2995 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2996 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2997 if (link_mode
2998 == CTRL_EXT_LINK_MODE_SGMII) {
2999 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3000 sc->sc_flags |= WM_F_SGMII;
3001 aprint_verbose_dev(sc->sc_dev,
3002 "SGMII\n");
3003 } else {
3004 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
3005 aprint_verbose_dev(sc->sc_dev,
3006 "SERDES\n");
3007 }
3008 break;
3009 }
3010 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
3011 aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
3012 else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
3013 aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
3014 sc->sc_flags |= WM_F_SGMII;
3015 }
3016 /* Do not change link mode for 100BaseFX */
3017 if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
3018 break;
3019
3020 /* Change current link mode setting */
3021 reg &= ~CTRL_EXT_LINK_MODE_MASK;
3022 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
3023 reg |= CTRL_EXT_LINK_MODE_SGMII;
3024 else
3025 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
3026 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3027 break;
3028 case CTRL_EXT_LINK_MODE_GMII:
3029 default:
3030 aprint_normal_dev(sc->sc_dev, "Copper\n");
3031 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3032 break;
3033 }
3034
3036 if ((sc->sc_flags & WM_F_SGMII) != 0)
3037 reg |= CTRL_EXT_I2C_ENA;
3038 else
3039 reg &= ~CTRL_EXT_I2C_ENA;
3040 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3041 if ((sc->sc_flags & WM_F_SGMII) != 0) {
3042 if (!wm_sgmii_uses_mdio(sc))
3043 wm_gmii_setup_phytype(sc, 0, 0);
3044 wm_reset_mdicnfg_82580(sc);
3045 }
3046 } else if (sc->sc_type < WM_T_82543 ||
3047 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
3048 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
3049 aprint_error_dev(sc->sc_dev,
3050 "WARNING: TBIMODE set on 1000BASE-T product!\n");
3051 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
3052 }
3053 } else {
3054 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
3055 aprint_error_dev(sc->sc_dev,
3056 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
3057 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3058 }
3059 }
3060
3061 if (sc->sc_type >= WM_T_PCH2)
3062 sc->sc_flags |= WM_F_EEE;
3063 else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
3064 && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
3065 		/* XXX: The I354 needs special handling (not yet implemented). */
3066 if (sc->sc_type != WM_T_I354)
3067 sc->sc_flags |= WM_F_EEE;
3068 }
3069
3070 /*
3071 * The I350 has a bug where it always strips the CRC whether
3072 	 * asked to or not. So ask for the stripped CRC here and cope with it in rxeof.
3073 */
3074 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3075 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3076 sc->sc_flags |= WM_F_CRC_STRIP;
3077
3078 /* Set device properties (macflags) */
3079 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
3080
3081 if (sc->sc_flags != 0) {
3082 snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
3083 aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
3084 }
3085
3086 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
3087
3088 /* Initialize the media structures accordingly. */
3089 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
3090 wm_gmii_mediainit(sc, wmp->wmp_product);
3091 else
3092 wm_tbi_mediainit(sc); /* All others */
3093
3094 ifp = &sc->sc_ethercom.ec_if;
3095 xname = device_xname(sc->sc_dev);
3096 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
3097 ifp->if_softc = sc;
3098 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3099 ifp->if_extflags = IFEF_MPSAFE;
3100 ifp->if_ioctl = wm_ioctl;
3101 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3102 ifp->if_start = wm_nq_start;
3103 		/*
3104 		 * When there is only one CPU and the controller can use
3105 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
3106 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
3107 		 * the other for link status changes.
3108 		 * In this situation, wm_nq_transmit() is disadvantageous
3109 		 * because of wm_select_txqueue() and pcq(9) overhead.
3110 		 */
3111 if (wm_is_using_multiqueue(sc))
3112 ifp->if_transmit = wm_nq_transmit;
3113 } else {
3114 ifp->if_start = wm_start;
3115 /*
3116 * wm_transmit() has the same disadvantages as wm_nq_transmit()
3117 * described above.
3118 */
3119 if (wm_is_using_multiqueue(sc))
3120 ifp->if_transmit = wm_transmit;
3121 }
3122 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
3123 ifp->if_init = wm_init;
3124 ifp->if_stop = wm_stop;
3125 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
3126 IFQ_SET_READY(&ifp->if_snd);
3127
3128 	/* Check for jumbo frame support */
3129 switch (sc->sc_type) {
3130 case WM_T_82573:
3131 /* XXX limited to 9234 if ASPM is disabled */
3132 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
3133 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
3134 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3135 break;
3136 case WM_T_82571:
3137 case WM_T_82572:
3138 case WM_T_82574:
3139 case WM_T_82583:
3140 case WM_T_82575:
3141 case WM_T_82576:
3142 case WM_T_82580:
3143 case WM_T_I350:
3144 case WM_T_I354:
3145 case WM_T_I210:
3146 case WM_T_I211:
3147 case WM_T_80003:
3148 case WM_T_ICH9:
3149 case WM_T_ICH10:
3150 case WM_T_PCH2: /* PCH2 supports 9K frame size */
3151 case WM_T_PCH_LPT:
3152 case WM_T_PCH_SPT:
3153 case WM_T_PCH_CNP:
3154 /* XXX limited to 9234 */
3155 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3156 break;
3157 case WM_T_PCH:
3158 /* XXX limited to 4096 */
3159 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3160 break;
3161 case WM_T_82542_2_0:
3162 case WM_T_82542_2_1:
3163 case WM_T_ICH8:
3164 		/* No support for jumbo frames */
3165 break;
3166 default:
3167 /* ETHER_MAX_LEN_JUMBO */
3168 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3169 break;
3170 }
3171
3172 	/* If we're an i82543 or greater, we can support VLANs. */
3173 if (sc->sc_type >= WM_T_82543) {
3174 sc->sc_ethercom.ec_capabilities |=
3175 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
3176 sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
3177 }
3178
3179 if ((sc->sc_flags & WM_F_EEE) != 0)
3180 sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
3181
3182 /*
3183 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
3184 * on i82543 and later.
3185 */
3186 if (sc->sc_type >= WM_T_82543) {
3187 ifp->if_capabilities |=
3188 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
3189 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
3190 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
3191 IFCAP_CSUM_TCPv6_Tx |
3192 IFCAP_CSUM_UDPv6_Tx;
3193 }
3194
3195 /*
3196 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
3197 *
3198 * 82541GI (8086:1076) ... no
3199 * 82572EI (8086:10b9) ... yes
3200 */
3201 if (sc->sc_type >= WM_T_82571) {
3202 ifp->if_capabilities |=
3203 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3204 }
3205
3206 /*
3207 	 * If we're an i82544 or greater (except i82547), we can do
3208 * TCP segmentation offload.
3209 */
3210 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
3211 ifp->if_capabilities |= IFCAP_TSOv4;
3212
3213 if (sc->sc_type >= WM_T_82571)
3214 ifp->if_capabilities |= IFCAP_TSOv6;
3215
3216 sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
3217 sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
3218 sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
3219 sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
3220
3221 /* Attach the interface. */
3222 if_initialize(ifp);
3223 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
3224 ether_ifattach(ifp, enaddr);
3225 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
3226 if_register(ifp);
3227 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
3228 RND_FLAG_DEFAULT);
3229
3230 #ifdef WM_EVENT_COUNTERS
3231 /* Attach event counters. */
3232 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
3233 NULL, xname, "linkintr");
3234
3235 evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
3236 NULL, xname, "CRC Error");
3237 evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
3238 NULL, xname, "Symbol Error");
3239 evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
3240 NULL, xname, "Missed Packets");
3241 evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
3242 NULL, xname, "Collision");
3243 evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
3244 NULL, xname, "Sequence Error");
3245 evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
3246 NULL, xname, "Receive Length Error");
3247
3248 if (sc->sc_type >= WM_T_82543) {
3249 evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
3250 NULL, xname, "Alignment Error");
3251 evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
3252 NULL, xname, "Receive Error");
3253 /* XXX Does 82575 have HTDPMC? */
3254 if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3255 evcnt_attach_dynamic(&sc->sc_ev_cexterr,
3256 EVCNT_TYPE_MISC, NULL, xname,
3257 "Carrier Extension Error");
3258 else
3259 evcnt_attach_dynamic(&sc->sc_ev_htdpmc,
3260 EVCNT_TYPE_MISC, NULL, xname,
3261 "Host Transmit Discarded Packets by MAC");
3262
3263 evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
3264 NULL, xname, "Tx with No CRS");
3265 evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
3266 NULL, xname, "TCP Segmentation Context Tx");
3267 if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3268 evcnt_attach_dynamic(&sc->sc_ev_tsctfc,
3269 EVCNT_TYPE_MISC, NULL, xname,
3270 "TCP Segmentation Context Tx Fail");
3271 else {
3272 /* XXX Is the circuit breaker only for 82576? */
3273 evcnt_attach_dynamic(&sc->sc_ev_cbrdpc,
3274 EVCNT_TYPE_MISC, NULL, xname,
3275 "Circuit Breaker Rx Dropped Packet");
3276 evcnt_attach_dynamic(&sc->sc_ev_cbrmpc,
3277 EVCNT_TYPE_MISC, NULL, xname,
3278 "Circuit Breaker Rx Manageability Packet");
3279 }
3280 }
3281
3282 if (sc->sc_type >= WM_T_82542_2_1) {
3283 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
3284 NULL, xname, "tx_xoff");
3285 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
3286 NULL, xname, "tx_xon");
3287 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
3288 NULL, xname, "rx_xoff");
3289 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
3290 NULL, xname, "rx_xon");
3291 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
3292 NULL, xname, "rx_macctl");
3293 }
3294
3295 evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
3296 NULL, xname, "Single Collision");
3297 evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
3298 NULL, xname, "Excessive Collisions");
3299 evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
3300 NULL, xname, "Multiple Collision");
3301 evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
3302 NULL, xname, "Late Collisions");
3303
3304 if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
3305 evcnt_attach_dynamic(&sc->sc_ev_cbtmpc, EVCNT_TYPE_MISC,
3306 NULL, xname, "Circuit Breaker Tx Manageability Packet");
3307
3308 evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
3309 NULL, xname, "Defer");
3310 evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
3311 NULL, xname, "Packets Rx (64 bytes)");
3312 evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
3313 NULL, xname, "Packets Rx (65-127 bytes)");
3314 evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
3315 NULL, xname, "Packets Rx (128-255 bytes)");
3316 evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
3317 NULL, xname, "Packets Rx (255-511 bytes)");
3318 evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
3319 NULL, xname, "Packets Rx (512-1023 bytes)");
3320 evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
3321 NULL, xname, "Packets Rx (1024-1522 bytes)");
3322 evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
3323 NULL, xname, "Good Packets Rx");
3324 evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
3325 NULL, xname, "Broadcast Packets Rx");
3326 evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
3327 NULL, xname, "Multicast Packets Rx");
3328 evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
3329 NULL, xname, "Good Packets Tx");
3330 evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
3331 NULL, xname, "Good Octets Rx");
3332 evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
3333 NULL, xname, "Good Octets Tx");
3334 evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
3335 NULL, xname, "Rx No Buffers");
3336 evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
3337 NULL, xname, "Rx Undersize");
3338 evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
3339 NULL, xname, "Rx Fragment");
3340 evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
3341 NULL, xname, "Rx Oversize");
3342 evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
3343 NULL, xname, "Rx Jabber");
3344 if (sc->sc_type >= WM_T_82540) {
3345 evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
3346 NULL, xname, "Management Packets RX");
3347 evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
3348 NULL, xname, "Management Packets Dropped");
3349 evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
3350 NULL, xname, "Management Packets TX");
3351 }
3352 evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
3353 NULL, xname, "Total Octets Rx");
3354 evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
3355 NULL, xname, "Total Octets Tx");
3356 evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
3357 NULL, xname, "Total Packets Rx");
3358 evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
3359 NULL, xname, "Total Packets Tx");
3360 evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
3361 NULL, xname, "Packets Tx (64 bytes)");
3362 evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
3363 NULL, xname, "Packets Tx (65-127 bytes)");
3364 evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
3365 NULL, xname, "Packets Tx (128-255 bytes)");
3366 evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
3367 NULL, xname, "Packets Tx (256-511 bytes)");
3368 evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
3369 NULL, xname, "Packets Tx (512-1023 bytes)");
3370 evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
3371 NULL, xname, "Packets Tx (1024-1522 Bytes)");
3372 evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
3373 NULL, xname, "Multicast Packets Tx");
3374 evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
3375 NULL, xname, "Broadcast Packets Tx");
3376 evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
3377 NULL, xname, "Interrupt Assertion");
3378 if (sc->sc_type < WM_T_82575) {
3379 evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
3380 NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
3381 evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
3382 NULL, xname, "Intr. Cause Rx Abs Timer Expire");
3383 evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
3384 NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
3385 evcnt_attach_dynamic(&sc->sc_ev_ictxact, EVCNT_TYPE_MISC,
3386 NULL, xname, "Intr. Cause Tx Abs Timer Expire");
3387 evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
3388 NULL, xname, "Intr. Cause Tx Queue Empty");
3389 evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
3390 NULL, xname, "Intr. Cause Tx Queue Min Thresh");
3391 evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
3392 NULL, xname, "Intr. Cause Rx Desc Min Thresh");
3393
3394 /* XXX 82575 document says it has ICRXOC. Is that right? */
3395 evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
3396 NULL, xname, "Interrupt Cause Receiver Overrun");
3397 } else if (!WM_IS_ICHPCH(sc)) {
3398 /*
3399 * For 82575 and newer.
3400 *
3401 * On 80003, ICHs and PCHs, it seems all of the following
3402 * registers are zero.
3403 */
3404 evcnt_attach_dynamic(&sc->sc_ev_rpthc, EVCNT_TYPE_MISC,
3405 NULL, xname, "Rx Packets To Host");
3406 evcnt_attach_dynamic(&sc->sc_ev_debug1, EVCNT_TYPE_MISC,
3407 NULL, xname, "Debug Counter 1");
3408 evcnt_attach_dynamic(&sc->sc_ev_debug2, EVCNT_TYPE_MISC,
3409 NULL, xname, "Debug Counter 2");
3410 evcnt_attach_dynamic(&sc->sc_ev_debug3, EVCNT_TYPE_MISC,
3411 NULL, xname, "Debug Counter 3");
3412
3413 		/*
3414 		 * The 82575 datasheet says 0x4118 is TXQEC (Tx Queue Empty),
3415 		 * but that appears to be wrong: the count actually observed
3416 		 * matches GPTC (Good Packets Tx) and TPT (Total Packets Tx).
3417 		 * It's HGPTC (Host Good Packets Tx), which is described in
3418 		 * the 82576 datasheet.
3419 		 */
3420 evcnt_attach_dynamic(&sc->sc_ev_hgptc, EVCNT_TYPE_MISC,
3421 NULL, xname, "Host Good Packets TX");
3422
3423 evcnt_attach_dynamic(&sc->sc_ev_debug4, EVCNT_TYPE_MISC,
3424 NULL, xname, "Debug Counter 4");
3425 evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
3426 NULL, xname, "Rx Desc Min Thresh");
3427 /* XXX Is the circuit breaker only for 82576? */
3428 evcnt_attach_dynamic(&sc->sc_ev_htcbdpc, EVCNT_TYPE_MISC,
3429 NULL, xname, "Host Tx Circuit Breaker Dropped Packets");
3430
3431 evcnt_attach_dynamic(&sc->sc_ev_hgorc, EVCNT_TYPE_MISC,
3432 NULL, xname, "Host Good Octets Rx");
3433 evcnt_attach_dynamic(&sc->sc_ev_hgotc, EVCNT_TYPE_MISC,
3434 NULL, xname, "Host Good Octets Tx");
3435 evcnt_attach_dynamic(&sc->sc_ev_lenerrs, EVCNT_TYPE_MISC,
3436 NULL, xname, "Length Errors");
3437 }
3438 if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
3439 evcnt_attach_dynamic(&sc->sc_ev_tlpic, EVCNT_TYPE_MISC,
3440 NULL, xname, "EEE Tx LPI");
3441 evcnt_attach_dynamic(&sc->sc_ev_rlpic, EVCNT_TYPE_MISC,
3442 NULL, xname, "EEE Rx LPI");
3443 evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
3444 NULL, xname, "BMC2OS Packets received by host");
3445 evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
3446 NULL, xname, "OS2BMC Packets transmitted by host");
3447 evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
3448 NULL, xname, "BMC2OS Packets sent by BMC");
3449 evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
3450 NULL, xname, "OS2BMC Packets received by BMC");
3451 evcnt_attach_dynamic(&sc->sc_ev_scvpc, EVCNT_TYPE_MISC,
3452 NULL, xname, "SerDes/SGMII Code Violation Packet");
3453 evcnt_attach_dynamic(&sc->sc_ev_hrmpc, EVCNT_TYPE_MISC,
3454 NULL, xname, "Header Redirection Missed Packet");
3455 }
3456 #endif /* WM_EVENT_COUNTERS */
3457
3458 sc->sc_txrx_use_workqueue = false;
3459
3460 if (wm_phy_need_linkdown_discard(sc)) {
3461 DPRINTF(sc, WM_DEBUG_LINK,
3462 ("%s: %s: Set linkdown discard flag\n",
3463 device_xname(sc->sc_dev), __func__));
3464 wm_set_linkdown_discard(sc);
3465 }
3466
3467 wm_init_sysctls(sc);
3468
3469 if (pmf_device_register(self, wm_suspend, wm_resume))
3470 pmf_class_network_register(self, ifp);
3471 else
3472 aprint_error_dev(self, "couldn't establish power handler\n");
3473
3474 sc->sc_flags |= WM_F_ATTACHED;
3475 out:
3476 return;
3477 }
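
/*
 * Illustrative sketch, not compiled: the verbose flag printout in the
 * attach code above is driven by snprintb(9), which formats a bitmask
 * against a description string such as WM_FLAGS and produces output of
 * the form "0x3<BITNAME0,BITNAME1>".  wm_example_print_flags() is a
 * hypothetical helper, not a real driver function.
 */
#if 0
static void
wm_example_print_flags(struct wm_softc *sc)
{
	char buf[256];

	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
}
#endif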
3478
3479 /* The detach function (ca_detach) */
3480 static int
3481 wm_detach(device_t self, int flags __unused)
3482 {
3483 struct wm_softc *sc = device_private(self);
3484 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3485 int i;
3486
3487 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
3488 return 0;
3489
3490 	/* Stop the interface. Callouts are stopped inside wm_stop(). */
3491 IFNET_LOCK(ifp);
3492 sc->sc_dying = true;
3493 wm_stop(ifp, 1);
3494 IFNET_UNLOCK(ifp);
3495
3496 pmf_device_deregister(self);
3497
3498 sysctl_teardown(&sc->sc_sysctllog);
3499
3500 #ifdef WM_EVENT_COUNTERS
3501 evcnt_detach(&sc->sc_ev_linkintr);
3502
3503 evcnt_detach(&sc->sc_ev_crcerrs);
3504 evcnt_detach(&sc->sc_ev_symerrc);
3505 evcnt_detach(&sc->sc_ev_mpc);
3506 evcnt_detach(&sc->sc_ev_colc);
3507 evcnt_detach(&sc->sc_ev_sec);
3508 evcnt_detach(&sc->sc_ev_rlec);
3509
3510 if (sc->sc_type >= WM_T_82543) {
3511 evcnt_detach(&sc->sc_ev_algnerrc);
3512 evcnt_detach(&sc->sc_ev_rxerrc);
3513 if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3514 evcnt_detach(&sc->sc_ev_cexterr);
3515 else
3516 evcnt_detach(&sc->sc_ev_htdpmc);
3517
3518 evcnt_detach(&sc->sc_ev_tncrs);
3519 evcnt_detach(&sc->sc_ev_tsctc);
3520 if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3521 evcnt_detach(&sc->sc_ev_tsctfc);
3522 else {
3523 evcnt_detach(&sc->sc_ev_cbrdpc);
3524 evcnt_detach(&sc->sc_ev_cbrmpc);
3525 }
3526 }
3527
3528 if (sc->sc_type >= WM_T_82542_2_1) {
3529 evcnt_detach(&sc->sc_ev_tx_xoff);
3530 evcnt_detach(&sc->sc_ev_tx_xon);
3531 evcnt_detach(&sc->sc_ev_rx_xoff);
3532 evcnt_detach(&sc->sc_ev_rx_xon);
3533 evcnt_detach(&sc->sc_ev_rx_macctl);
3534 }
3535
3536 evcnt_detach(&sc->sc_ev_scc);
3537 evcnt_detach(&sc->sc_ev_ecol);
3538 evcnt_detach(&sc->sc_ev_mcc);
3539 evcnt_detach(&sc->sc_ev_latecol);
3540
3541 if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
3542 evcnt_detach(&sc->sc_ev_cbtmpc);
3543
3544 evcnt_detach(&sc->sc_ev_dc);
3545 evcnt_detach(&sc->sc_ev_prc64);
3546 evcnt_detach(&sc->sc_ev_prc127);
3547 evcnt_detach(&sc->sc_ev_prc255);
3548 evcnt_detach(&sc->sc_ev_prc511);
3549 evcnt_detach(&sc->sc_ev_prc1023);
3550 evcnt_detach(&sc->sc_ev_prc1522);
3551 evcnt_detach(&sc->sc_ev_gprc);
3552 evcnt_detach(&sc->sc_ev_bprc);
3553 evcnt_detach(&sc->sc_ev_mprc);
3554 evcnt_detach(&sc->sc_ev_gptc);
3555 evcnt_detach(&sc->sc_ev_gorc);
3556 evcnt_detach(&sc->sc_ev_gotc);
3557 evcnt_detach(&sc->sc_ev_rnbc);
3558 evcnt_detach(&sc->sc_ev_ruc);
3559 evcnt_detach(&sc->sc_ev_rfc);
3560 evcnt_detach(&sc->sc_ev_roc);
3561 evcnt_detach(&sc->sc_ev_rjc);
3562 if (sc->sc_type >= WM_T_82540) {
3563 evcnt_detach(&sc->sc_ev_mgtprc);
3564 evcnt_detach(&sc->sc_ev_mgtpdc);
3565 evcnt_detach(&sc->sc_ev_mgtptc);
3566 }
3567 evcnt_detach(&sc->sc_ev_tor);
3568 evcnt_detach(&sc->sc_ev_tot);
3569 evcnt_detach(&sc->sc_ev_tpr);
3570 evcnt_detach(&sc->sc_ev_tpt);
3571 evcnt_detach(&sc->sc_ev_ptc64);
3572 evcnt_detach(&sc->sc_ev_ptc127);
3573 evcnt_detach(&sc->sc_ev_ptc255);
3574 evcnt_detach(&sc->sc_ev_ptc511);
3575 evcnt_detach(&sc->sc_ev_ptc1023);
3576 evcnt_detach(&sc->sc_ev_ptc1522);
3577 evcnt_detach(&sc->sc_ev_mptc);
3578 evcnt_detach(&sc->sc_ev_bptc);
3579 evcnt_detach(&sc->sc_ev_iac);
3580 if (sc->sc_type < WM_T_82575) {
3581 evcnt_detach(&sc->sc_ev_icrxptc);
3582 evcnt_detach(&sc->sc_ev_icrxatc);
3583 evcnt_detach(&sc->sc_ev_ictxptc);
3584 evcnt_detach(&sc->sc_ev_ictxact);
3585 evcnt_detach(&sc->sc_ev_ictxqec);
3586 evcnt_detach(&sc->sc_ev_ictxqmtc);
3587 evcnt_detach(&sc->sc_ev_rxdmtc);
3588 evcnt_detach(&sc->sc_ev_icrxoc);
3589 } else if (!WM_IS_ICHPCH(sc)) {
3590 evcnt_detach(&sc->sc_ev_rpthc);
3591 evcnt_detach(&sc->sc_ev_debug1);
3592 evcnt_detach(&sc->sc_ev_debug2);
3593 evcnt_detach(&sc->sc_ev_debug3);
3594 evcnt_detach(&sc->sc_ev_hgptc);
3595 evcnt_detach(&sc->sc_ev_debug4);
3596 evcnt_detach(&sc->sc_ev_rxdmtc);
3597 evcnt_detach(&sc->sc_ev_htcbdpc);
3598
3599 evcnt_detach(&sc->sc_ev_hgorc);
3600 evcnt_detach(&sc->sc_ev_hgotc);
3601 evcnt_detach(&sc->sc_ev_lenerrs);
3602 }
3603 if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
3604 evcnt_detach(&sc->sc_ev_tlpic);
3605 evcnt_detach(&sc->sc_ev_rlpic);
3606 evcnt_detach(&sc->sc_ev_b2ogprc);
3607 evcnt_detach(&sc->sc_ev_o2bspc);
3608 evcnt_detach(&sc->sc_ev_b2ospc);
3609 evcnt_detach(&sc->sc_ev_o2bgptc);
3610 evcnt_detach(&sc->sc_ev_scvpc);
3611 evcnt_detach(&sc->sc_ev_hrmpc);
3612 }
3613 #endif /* WM_EVENT_COUNTERS */
3614
3615 rnd_detach_source(&sc->rnd_source);
3616
3617 /* Tell the firmware about the release */
3618 mutex_enter(sc->sc_core_lock);
3619 wm_release_manageability(sc);
3620 wm_release_hw_control(sc);
3621 wm_enable_wakeup(sc);
3622 mutex_exit(sc->sc_core_lock);
3623
3624 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3625
3626 ether_ifdetach(ifp);
3627 if_detach(ifp);
3628 if_percpuq_destroy(sc->sc_ipq);
3629
3630 /* Delete all remaining media. */
3631 ifmedia_fini(&sc->sc_mii.mii_media);
3632
3633 /* Unload RX dmamaps and free mbufs */
3634 for (i = 0; i < sc->sc_nqueues; i++) {
3635 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
3636 mutex_enter(rxq->rxq_lock);
3637 wm_rxdrain(rxq);
3638 mutex_exit(rxq->rxq_lock);
3639 }
3640 /* Must unlock here */
3641
3642 /* Disestablish the interrupt handler */
3643 for (i = 0; i < sc->sc_nintrs; i++) {
3644 if (sc->sc_ihs[i] != NULL) {
3645 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
3646 sc->sc_ihs[i] = NULL;
3647 }
3648 }
3649 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
3650
3651 /* wm_stop() ensured that the workqueues are stopped. */
3652 workqueue_destroy(sc->sc_queue_wq);
3653 workqueue_destroy(sc->sc_reset_wq);
3654
3655 for (i = 0; i < sc->sc_nqueues; i++)
3656 softint_disestablish(sc->sc_queue[i].wmq_si);
3657
3658 wm_free_txrx_queues(sc);
3659
3660 /* Unmap the registers */
3661 if (sc->sc_ss) {
3662 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
3663 sc->sc_ss = 0;
3664 }
3665 if (sc->sc_ios) {
3666 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
3667 sc->sc_ios = 0;
3668 }
3669 if (sc->sc_flashs) {
3670 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
3671 sc->sc_flashs = 0;
3672 }
3673
3674 if (sc->sc_core_lock)
3675 mutex_obj_free(sc->sc_core_lock);
3676 if (sc->sc_ich_phymtx)
3677 mutex_obj_free(sc->sc_ich_phymtx);
3678 if (sc->sc_ich_nvmmtx)
3679 mutex_obj_free(sc->sc_ich_nvmmtx);
3680
3681 return 0;
3682 }
3683
3684 static bool
3685 wm_suspend(device_t self, const pmf_qual_t *qual)
3686 {
3687 struct wm_softc *sc = device_private(self);
3688
3689 wm_release_manageability(sc);
3690 wm_release_hw_control(sc);
3691 wm_enable_wakeup(sc);
3692
3693 return true;
3694 }
3695
3696 static bool
3697 wm_resume(device_t self, const pmf_qual_t *qual)
3698 {
3699 struct wm_softc *sc = device_private(self);
3700 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3701 pcireg_t reg;
3702 char buf[256];
3703
3704 reg = CSR_READ(sc, WMREG_WUS);
3705 if (reg != 0) {
3706 snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
3707 device_printf(sc->sc_dev, "wakeup status %s\n", buf);
3708 CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
3709 }
3710
3711 if (sc->sc_type >= WM_T_PCH2)
3712 wm_resume_workarounds_pchlan(sc);
3713 IFNET_LOCK(ifp);
3714 if ((ifp->if_flags & IFF_UP) == 0) {
3715 /* >= PCH_SPT hardware workaround before reset. */
3716 if (sc->sc_type >= WM_T_PCH_SPT)
3717 wm_flush_desc_rings(sc);
3718
3719 wm_reset(sc);
3720 /* Non-AMT based hardware can now take control from firmware */
3721 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
3722 wm_get_hw_control(sc);
3723 wm_init_manageability(sc);
3724 } else {
3725 		/*
3726 		 * We called pmf_class_network_register(), so if_init() is
3727 		 * called automatically when the interface is IFF_UP.
3728 		 * wm_reset(), wm_get_hw_control() and wm_init_manageability()
3729 		 * are called via wm_init().
3730 		 */
3731 }
3732 IFNET_UNLOCK(ifp);
3733
3734 return true;
3735 }
3736
3737 /*
3738 * wm_watchdog:
3739 *
3740 * Watchdog checker.
3741 */
3742 static bool
3743 wm_watchdog(struct ifnet *ifp)
3744 {
3745 int qid;
3746 struct wm_softc *sc = ifp->if_softc;
3747 	uint16_t hang_queue = 0; /* One bit per queue; wm(4)'s max is the 82576's 16. */
3748
3749 for (qid = 0; qid < sc->sc_nqueues; qid++) {
3750 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
3751
3752 wm_watchdog_txq(ifp, txq, &hang_queue);
3753 }
3754
3755 #ifdef WM_DEBUG
3756 if (sc->sc_trigger_reset) {
3757 /* debug operation, no need for atomicity or reliability */
3758 sc->sc_trigger_reset = 0;
3759 hang_queue++;
3760 }
3761 #endif
3762
3763 if (hang_queue == 0)
3764 return true;
3765
3766 if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
3767 workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
3768
3769 return false;
3770 }
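
/*
 * Illustrative sketch, not compiled: wm_watchdog() accumulates one bit
 * per hung Tx queue (wm_watchdog_txq_locked() ORs in __BIT(wmq_id)), and
 * a single nonzero mask schedules exactly one reset via the atomic swap
 * above.  The standalone logic, with a hypothetical helper name:
 */
#if 0
static bool
wm_example_any_txq_hung(const bool *hung, int nqueues)
{
	uint16_t mask = 0;	/* 16 bits: wm(4)'s maximum queue count */
	int qid;

	for (qid = 0; qid < nqueues; qid++)
		if (hung[qid])
			mask |= __BIT(qid);

	return mask != 0;	/* true -> request one reset */
}
#endif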
3771
3772 /*
3773 * Perform an interface watchdog reset.
3774 */
3775 static void
3776 wm_handle_reset_work(struct work *work, void *arg)
3777 {
3778 struct wm_softc * const sc = arg;
3779 struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
3780
3781 /* Don't want ioctl operations to happen */
3782 IFNET_LOCK(ifp);
3783
3784 	/* Reset the interface. */
3785 wm_init(ifp);
3786
3787 IFNET_UNLOCK(ifp);
3788
3789 	/*
3790 	 * Some upper layer processing still calls ifp->if_start()
3791 	 * directly, e.g. ALTQ or single-CPU systems.
3792 	 */
3793 /* Try to get more packets going. */
3794 ifp->if_start(ifp);
3795
3796 atomic_store_relaxed(&sc->sc_reset_pending, 0);
3797 }
3798
3799
3800 static void
3801 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3802 {
3803
3804 mutex_enter(txq->txq_lock);
3805 if (txq->txq_sending &&
3806 time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
3807 wm_watchdog_txq_locked(ifp, txq, hang);
3808
3809 mutex_exit(txq->txq_lock);
3810 }
3811
3812 static void
3813 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
3814 uint16_t *hang)
3815 {
3816 struct wm_softc *sc = ifp->if_softc;
3817 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
3818
3819 KASSERT(mutex_owned(txq->txq_lock));
3820
3821 /*
3822 * Since we're using delayed interrupts, sweep up
3823 * before we report an error.
3824 */
3825 wm_txeof(txq, UINT_MAX);
3826
3827 if (txq->txq_sending)
3828 *hang |= __BIT(wmq->wmq_id);
3829
3830 if (txq->txq_free == WM_NTXDESC(txq)) {
3831 log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
3832 device_xname(sc->sc_dev));
3833 } else {
3834 #ifdef WM_DEBUG
3835 int i, j;
3836 struct wm_txsoft *txs;
3837 #endif
3838 log(LOG_ERR,
3839 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3840 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
3841 txq->txq_next);
3842 if_statinc(ifp, if_oerrors);
3843 #ifdef WM_DEBUG
3844 for (i = txq->txq_sdirty; i != txq->txq_snext;
3845 i = WM_NEXTTXS(txq, i)) {
3846 txs = &txq->txq_soft[i];
3847 printf("txs %d tx %d -> %d\n",
3848 i, txs->txs_firstdesc, txs->txs_lastdesc);
3849 for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
3850 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3851 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3852 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
3853 printf("\t %#08x%08x\n",
3854 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
3855 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
3856 } else {
3857 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3858 (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
3859 txq->txq_descs[j].wtx_addr.wa_low);
3860 printf("\t %#04x%02x%02x%08x\n",
3861 txq->txq_descs[j].wtx_fields.wtxu_vlan,
3862 txq->txq_descs[j].wtx_fields.wtxu_options,
3863 txq->txq_descs[j].wtx_fields.wtxu_status,
3864 txq->txq_descs[j].wtx_cmdlen);
3865 }
3866 if (j == txs->txs_lastdesc)
3867 break;
3868 }
3869 }
3870 #endif
3871 }
3872 }
3873
3874 /*
3875 * wm_tick:
3876 *
3877 * One second timer, used to check link status, sweep up
3878 * completed transmit jobs, etc.
3879 */
3880 static void
3881 wm_tick(void *arg)
3882 {
3883 struct wm_softc *sc = arg;
3884 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3885
3886 mutex_enter(sc->sc_core_lock);
3887
3888 if (sc->sc_core_stopping) {
3889 mutex_exit(sc->sc_core_lock);
3890 return;
3891 }
3892
3893 wm_update_stats(sc);
3894
3895 if (sc->sc_flags & WM_F_HAS_MII)
3896 mii_tick(&sc->sc_mii);
3897 else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
3898 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3899 wm_serdes_tick(sc);
3900 else
3901 wm_tbi_tick(sc);
3902
3903 mutex_exit(sc->sc_core_lock);
3904
3905 if (wm_watchdog(ifp))
3906 callout_schedule(&sc->sc_tick_ch, hz);
3907 }
3908
3909 static int
3910 wm_ifflags_cb(struct ethercom *ec)
3911 {
3912 struct ifnet *ifp = &ec->ec_if;
3913 struct wm_softc *sc = ifp->if_softc;
3914 u_short iffchange;
3915 int ecchange;
3916 bool needreset = false;
3917 int rc = 0;
3918
3919 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3920 device_xname(sc->sc_dev), __func__));
3921
3922 KASSERT(IFNET_LOCKED(ifp));
3923
3924 mutex_enter(sc->sc_core_lock);
3925
3926 /*
3927 * Check for if_flags.
3928 	 * The main use is to prevent link-down when opening bpf.
3929 */
3930 iffchange = ifp->if_flags ^ sc->sc_if_flags;
3931 sc->sc_if_flags = ifp->if_flags;
3932 if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3933 needreset = true;
3934 goto ec;
3935 }
3936
3937 /* iff related updates */
3938 if ((iffchange & IFF_PROMISC) != 0)
3939 wm_set_filter(sc);
3940
3941 wm_set_vlan(sc);
3942
3943 ec:
3944 /* Check for ec_capenable. */
3945 ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
3946 sc->sc_ec_capenable = ec->ec_capenable;
3947 if ((ecchange & ~ETHERCAP_EEE) != 0) {
3948 needreset = true;
3949 goto out;
3950 }
3951
3952 /* ec related updates */
3953 wm_set_eee(sc);
3954
3955 out:
3956 if (needreset)
3957 rc = ENETRESET;
3958 mutex_exit(sc->sc_core_lock);
3959
3960 return rc;
3961 }
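
/*
 * Illustrative sketch, not compiled: the XOR in wm_ifflags_cb() yields
 * exactly the set of bits that differ between the old and new if_flags,
 * so one masked test decides whether a full reset (ENETRESET) is needed.
 * wm_example_flags_need_reset() is a hypothetical helper:
 */
#if 0
static bool
wm_example_flags_need_reset(u_short oldflags, u_short newflags)
{
	u_short changed = oldflags ^ newflags;	/* 1 = bit changed */

	return (changed & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0;
}
#endif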
3962
3963 static bool
3964 wm_phy_need_linkdown_discard(struct wm_softc *sc)
3965 {
3966
3967 switch (sc->sc_phytype) {
3968 case WMPHY_82577: /* ihphy */
3969 case WMPHY_82578: /* atphy */
3970 case WMPHY_82579: /* ihphy */
3971 case WMPHY_I217: /* ihphy */
3972 case WMPHY_82580: /* ihphy */
3973 case WMPHY_I350: /* ihphy */
3974 return true;
3975 default:
3976 return false;
3977 }
3978 }
3979
3980 static void
3981 wm_set_linkdown_discard(struct wm_softc *sc)
3982 {
3983
3984 for (int i = 0; i < sc->sc_nqueues; i++) {
3985 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3986
3987 mutex_enter(txq->txq_lock);
3988 txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
3989 mutex_exit(txq->txq_lock);
3990 }
3991 }
3992
3993 static void
3994 wm_clear_linkdown_discard(struct wm_softc *sc)
3995 {
3996
3997 for (int i = 0; i < sc->sc_nqueues; i++) {
3998 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3999
4000 mutex_enter(txq->txq_lock);
4001 txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
4002 mutex_exit(txq->txq_lock);
4003 }
4004 }
4005
4006 /*
4007 * wm_ioctl: [ifnet interface function]
4008 *
4009 * Handle control requests from the operator.
4010 */
4011 static int
4012 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
4013 {
4014 struct wm_softc *sc = ifp->if_softc;
4015 struct ifreq *ifr = (struct ifreq *)data;
4016 struct ifaddr *ifa = (struct ifaddr *)data;
4017 struct sockaddr_dl *sdl;
4018 int error;
4019
4020 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4021 device_xname(sc->sc_dev), __func__));
4022
4023 switch (cmd) {
4024 case SIOCADDMULTI:
4025 case SIOCDELMULTI:
4026 break;
4027 default:
4028 KASSERT(IFNET_LOCKED(ifp));
4029 }
4030
4031 switch (cmd) {
4032 case SIOCSIFMEDIA:
4033 mutex_enter(sc->sc_core_lock);
4034 /* Flow control requires full-duplex mode. */
4035 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
4036 (ifr->ifr_media & IFM_FDX) == 0)
4037 ifr->ifr_media &= ~IFM_ETH_FMASK;
4038 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
4039 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
4040 /* We can do both TXPAUSE and RXPAUSE. */
4041 ifr->ifr_media |=
4042 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
4043 }
4044 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
4045 }
4046 mutex_exit(sc->sc_core_lock);
4047 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
4048 if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
4049 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
4050 DPRINTF(sc, WM_DEBUG_LINK,
4051 ("%s: %s: Set linkdown discard flag\n",
4052 device_xname(sc->sc_dev), __func__));
4053 wm_set_linkdown_discard(sc);
4054 }
4055 }
4056 break;
4057 case SIOCINITIFADDR:
4058 mutex_enter(sc->sc_core_lock);
4059 if (ifa->ifa_addr->sa_family == AF_LINK) {
4060 sdl = satosdl(ifp->if_dl->ifa_addr);
4061 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
4062 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
4063 /* Unicast address is the first multicast entry */
4064 wm_set_filter(sc);
4065 error = 0;
4066 mutex_exit(sc->sc_core_lock);
4067 break;
4068 }
4069 mutex_exit(sc->sc_core_lock);
4070 /*FALLTHROUGH*/
4071 default:
4072 if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
4073 if (((ifp->if_flags & IFF_UP) != 0) &&
4074 ((ifr->ifr_flags & IFF_UP) == 0)) {
4075 DPRINTF(sc, WM_DEBUG_LINK,
4076 ("%s: %s: Set linkdown discard flag\n",
4077 device_xname(sc->sc_dev), __func__));
4078 wm_set_linkdown_discard(sc);
4079 }
4080 }
4081 const int s = splnet();
4082 /* It may call wm_start, so unlock here */
4083 error = ether_ioctl(ifp, cmd, data);
4084 splx(s);
4085 if (error != ENETRESET)
4086 break;
4087
4088 error = 0;
4089
4090 if (cmd == SIOCSIFCAP)
4091 error = if_init(ifp);
4092 else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
4093 mutex_enter(sc->sc_core_lock);
4094 if (sc->sc_if_flags & IFF_RUNNING) {
4095 /*
4096 * Multicast list has changed; set the
4097 * hardware filter accordingly.
4098 */
4099 wm_set_filter(sc);
4100 }
4101 mutex_exit(sc->sc_core_lock);
4102 }
4103 break;
4104 }
4105
4106 return error;
4107 }
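
/*
 * Illustrative sketch, not compiled: ENETRESET from ether_ioctl() is a
 * request rather than an error; it means hardware state must be
 * reprogrammed.  The dispatch above converts it into either a full
 * if_init() (capability change) or a filter update (multicast change).
 * A condensed, hypothetical version of that decision:
 */
#if 0
static int
wm_example_handle_enetreset(struct ifnet *ifp, u_long cmd, int error)
{

	if (error != ENETRESET)
		return error;		/* a real result; pass it through */
	if (cmd == SIOCSIFCAP)
		return if_init(ifp);	/* offload change: full reinit */
	return 0;			/* multicast change: only refilter */
}
#endif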
4108
4109 /* MAC address related */
4110
4111 /*
4112  * Get the offset of the MAC address and return it.
4113  * If an error occurs, offset 0 is used.
4114 */
4115 static uint16_t
4116 wm_check_alt_mac_addr(struct wm_softc *sc)
4117 {
4118 uint16_t myea[ETHER_ADDR_LEN / 2];
4119 uint16_t offset = NVM_OFF_MACADDR;
4120
4121 /* Try to read alternative MAC address pointer */
4122 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
4123 return 0;
4124
4125 	/* Check whether the pointer is valid. */
4126 if ((offset == 0x0000) || (offset == 0xffff))
4127 return 0;
4128
4129 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
4130 	/*
4131 	 * Check whether the alternative MAC address is valid. Some cards
4132 	 * have a non-0xffff pointer but don't actually use an alternative
4133 	 * MAC address.
4134 	 *
4135 	 * A valid unicast address must have bit 0 (multicast) clear.
4136 	 */
4137 if (wm_nvm_read(sc, offset, 1, myea) == 0)
4138 if (((myea[0] & 0xff) & 0x01) == 0)
4139 return offset; /* Found */
4140
4141 /* Not found */
4142 return 0;
4143 }
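
/*
 * Illustrative sketch, not compiled: the validity tests above reduce to
 * two checks.  The NVM pointer must not be an erased/sentinel value
 * (0x0000 or 0xffff), and the first byte of the candidate address must
 * not have the multicast/broadcast bit set.  Hypothetical helper:
 */
#if 0
static bool
wm_example_alt_macaddr_valid(uint16_t ptr, uint16_t first_word)
{

	if ((ptr == 0x0000) || (ptr == 0xffff))
		return false;			/* unprogrammed pointer */
	return ((first_word & 0xff) & 0x01) == 0; /* must be unicast */
}
#endif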
4144
4145 static int
4146 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
4147 {
4148 uint16_t myea[ETHER_ADDR_LEN / 2];
4149 uint16_t offset = NVM_OFF_MACADDR;
4150 int do_invert = 0;
4151
4152 switch (sc->sc_type) {
4153 case WM_T_82580:
4154 case WM_T_I350:
4155 case WM_T_I354:
4156 /* EEPROM Top Level Partitioning */
4157 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
4158 break;
4159 case WM_T_82571:
4160 case WM_T_82575:
4161 case WM_T_82576:
4162 case WM_T_80003:
4163 case WM_T_I210:
4164 case WM_T_I211:
4165 offset = wm_check_alt_mac_addr(sc);
4166 if (offset == 0)
4167 if ((sc->sc_funcid & 0x01) == 1)
4168 do_invert = 1;
4169 break;
4170 default:
4171 if ((sc->sc_funcid & 0x01) == 1)
4172 do_invert = 1;
4173 break;
4174 }
4175
4176 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
4177 goto bad;
4178
4179 enaddr[0] = myea[0] & 0xff;
4180 enaddr[1] = myea[0] >> 8;
4181 enaddr[2] = myea[1] & 0xff;
4182 enaddr[3] = myea[1] >> 8;
4183 enaddr[4] = myea[2] & 0xff;
4184 enaddr[5] = myea[2] >> 8;
4185
4186 /*
4187 * Toggle the LSB of the MAC address on the second port
4188 * of some dual port cards.
4189 */
4190 if (do_invert != 0)
4191 enaddr[5] ^= 1;
4192
4193 return 0;
4194
4195 bad:
4196 return -1;
4197 }
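
/*
 * Illustrative sketch, not compiled: the NVM stores the MAC address as
 * three little-endian 16-bit words.  Unpacking plus the dual-port LSB
 * toggle from wm_read_mac_addr(), in isolation (hypothetical helper):
 */
#if 0
static void
wm_example_unpack_macaddr(const uint16_t myea[3], uint8_t enaddr[6],
    bool second_port)
{
	int i;

	for (i = 0; i < 3; i++) {
		enaddr[i * 2] = myea[i] & 0xff;		/* low byte first */
		enaddr[i * 2 + 1] = myea[i] >> 8;	/* then high byte */
	}
	if (second_port)
		enaddr[5] ^= 1;	/* second port of some dual-port cards */
}
#endif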
4198
4199 /*
4200 * wm_set_ral:
4201 *
4202  *	Set an entry in the receive address list.
4203 */
4204 static void
4205 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4206 {
4207 uint32_t ral_lo, ral_hi, addrl, addrh;
4208 uint32_t wlock_mac;
4209 int rv;
4210
4211 if (enaddr != NULL) {
4212 ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
4213 ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
4214 ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
4215 ral_hi |= RAL_AV;
4216 } else {
4217 ral_lo = 0;
4218 ral_hi = 0;
4219 }
4220
4221 switch (sc->sc_type) {
4222 case WM_T_82542_2_0:
4223 case WM_T_82542_2_1:
4224 case WM_T_82543:
4225 CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
4226 CSR_WRITE_FLUSH(sc);
4227 CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
4228 CSR_WRITE_FLUSH(sc);
4229 break;
4230 case WM_T_PCH2:
4231 case WM_T_PCH_LPT:
4232 case WM_T_PCH_SPT:
4233 case WM_T_PCH_CNP:
4234 if (idx == 0) {
4235 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
4236 CSR_WRITE_FLUSH(sc);
4237 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
4238 CSR_WRITE_FLUSH(sc);
4239 return;
4240 }
4241 if (sc->sc_type != WM_T_PCH2) {
4242 wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
4243 FWSM_WLOCK_MAC);
4244 addrl = WMREG_SHRAL(idx - 1);
4245 addrh = WMREG_SHRAH(idx - 1);
4246 } else {
4247 wlock_mac = 0;
4248 addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
4249 addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
4250 }
4251
4252 if ((wlock_mac == 0) || (idx <= wlock_mac)) {
4253 rv = wm_get_swflag_ich8lan(sc);
4254 if (rv != 0)
4255 return;
4256 CSR_WRITE(sc, addrl, ral_lo);
4257 CSR_WRITE_FLUSH(sc);
4258 CSR_WRITE(sc, addrh, ral_hi);
4259 CSR_WRITE_FLUSH(sc);
4260 wm_put_swflag_ich8lan(sc);
4261 }
4262
4263 break;
4264 default:
4265 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
4266 CSR_WRITE_FLUSH(sc);
4267 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
4268 CSR_WRITE_FLUSH(sc);
4269 break;
4270 }
4271 }
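
/*
 * Illustrative sketch, not compiled: a receive address list entry is the
 * six address bytes packed little-endian into a register pair, with
 * RAL_AV set in the high half to mark the entry valid.  Hypothetical
 * helper:
 */
#if 0
static void
wm_example_pack_ral(const uint8_t ea[6], uint32_t *lo, uint32_t *hi)
{

	*lo = (uint32_t)ea[0] | ((uint32_t)ea[1] << 8) |
	    ((uint32_t)ea[2] << 16) | ((uint32_t)ea[3] << 24);
	*hi = (uint32_t)ea[4] | ((uint32_t)ea[5] << 8) | RAL_AV;
}
#endif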
4272
4273 /*
4274 * wm_mchash:
4275 *
4276 * Compute the hash of the multicast address for the 4096-bit
4277 * multicast filter.
4278 */
4279 static uint32_t
4280 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4281 {
4282 static const int lo_shift[4] = { 4, 3, 2, 0 };
4283 static const int hi_shift[4] = { 4, 5, 6, 8 };
4284 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4285 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4286 uint32_t hash;
4287
4288 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4289 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4290 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4291 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
4292 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4293 (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4294 return (hash & 0x3ff);
4295 }
4296 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4297 (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4298
4299 return (hash & 0xfff);
4300 }
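
/*
 * Illustrative sketch, not compiled: only the last two address bytes
 * feed the hash, and sc_mchash_type selects how they are combined.
 * wm_set_filter() later splits the result into an MTA register index
 * (hash >> 5) and a bit number (hash & 0x1f).  The non-ICH case as a
 * hypothetical standalone function:
 */
#if 0
static uint32_t
wm_example_mchash(const uint8_t *enaddr, int type)
{
	static const int lo_shift[4] = { 4, 3, 2, 0 };
	static const int hi_shift[4] = { 4, 5, 6, 8 };
	uint32_t hash;

	hash = (enaddr[4] >> lo_shift[type]) |
	    (((uint16_t)enaddr[5]) << hi_shift[type]);
	return hash & 0xfff;		/* index into a 4096-bit filter */
}
#endif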
4301
4302 /*
4303  * wm_rar_count:
4304  *	Return the number of receive address list (RAR) entries.
4305  */
4306 static int
4307 wm_rar_count(struct wm_softc *sc)
4308 {
4309 int size;
4310
4311 switch (sc->sc_type) {
4312 case WM_T_ICH8:
4313 		size = WM_RAL_TABSIZE_ICH8 - 1;
4314 break;
4315 case WM_T_ICH9:
4316 case WM_T_ICH10:
4317 case WM_T_PCH:
4318 size = WM_RAL_TABSIZE_ICH8;
4319 break;
4320 case WM_T_PCH2:
4321 size = WM_RAL_TABSIZE_PCH2;
4322 break;
4323 case WM_T_PCH_LPT:
4324 case WM_T_PCH_SPT:
4325 case WM_T_PCH_CNP:
4326 size = WM_RAL_TABSIZE_PCH_LPT;
4327 break;
4328 case WM_T_82575:
4329 case WM_T_I210:
4330 case WM_T_I211:
4331 size = WM_RAL_TABSIZE_82575;
4332 break;
4333 case WM_T_82576:
4334 case WM_T_82580:
4335 size = WM_RAL_TABSIZE_82576;
4336 break;
4337 case WM_T_I350:
4338 case WM_T_I354:
4339 size = WM_RAL_TABSIZE_I350;
4340 break;
4341 default:
4342 size = WM_RAL_TABSIZE;
4343 }
4344
4345 return size;
4346 }
4347
4348 /*
4349 * wm_set_filter:
4350 *
4351 * Set up the receive filter.
4352 */
4353 static void
4354 wm_set_filter(struct wm_softc *sc)
4355 {
4356 struct ethercom *ec = &sc->sc_ethercom;
4357 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4358 struct ether_multi *enm;
4359 struct ether_multistep step;
4360 bus_addr_t mta_reg;
4361 uint32_t hash, reg, bit;
4362 int i, size, ralmax, rv;
4363
4364 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4365 device_xname(sc->sc_dev), __func__));
4366 KASSERT(mutex_owned(sc->sc_core_lock));
4367
4368 if (sc->sc_type >= WM_T_82544)
4369 mta_reg = WMREG_CORDOVA_MTA;
4370 else
4371 mta_reg = WMREG_MTA;
4372
4373 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4374
4375 if (sc->sc_if_flags & IFF_BROADCAST)
4376 sc->sc_rctl |= RCTL_BAM;
4377 if (sc->sc_if_flags & IFF_PROMISC) {
4378 sc->sc_rctl |= RCTL_UPE;
4379 ETHER_LOCK(ec);
4380 ec->ec_flags |= ETHER_F_ALLMULTI;
4381 ETHER_UNLOCK(ec);
4382 goto allmulti;
4383 }
4384
4385 /*
4386 * Set the station address in the first RAL slot, and
4387 * clear the remaining slots.
4388 */
4389 size = wm_rar_count(sc);
4390 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4391
4392 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
4393 || (sc->sc_type == WM_T_PCH_CNP)) {
4394 i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
4395 switch (i) {
4396 case 0:
4397 /* We can use all entries */
4398 ralmax = size;
4399 break;
4400 case 1:
4401 /* Only RAR[0] */
4402 ralmax = 1;
4403 break;
4404 default:
4405 /* Available SHRA + RAR[0] */
4406 ralmax = i + 1;
4407 }
4408 } else
4409 ralmax = size;
4410 for (i = 1; i < size; i++) {
4411 if (i < ralmax)
4412 wm_set_ral(sc, NULL, i);
4413 }
4414
4415 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4416 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4417 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4418 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
4419 size = WM_ICH8_MC_TABSIZE;
4420 else
4421 size = WM_MC_TABSIZE;
4422 /* Clear out the multicast table. */
4423 for (i = 0; i < size; i++) {
4424 CSR_WRITE(sc, mta_reg + (i << 2), 0);
4425 CSR_WRITE_FLUSH(sc);
4426 }
4427
4428 ETHER_LOCK(ec);
4429 ETHER_FIRST_MULTI(step, ec, enm);
4430 while (enm != NULL) {
4431 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4432 ec->ec_flags |= ETHER_F_ALLMULTI;
4433 ETHER_UNLOCK(ec);
4434 /*
4435 * We must listen to a range of multicast addresses.
4436 * For now, just accept all multicasts, rather than
4437 * trying to set only those filter bits needed to match
4438 * the range. (At this time, the only use of address
4439 * ranges is for IP multicast routing, for which the
4440 * range is big enough to require all bits set.)
4441 */
4442 goto allmulti;
4443 }
4444
4445 hash = wm_mchash(sc, enm->enm_addrlo);
4446
4447 reg = (hash >> 5);
4448 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4449 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4450 || (sc->sc_type == WM_T_PCH2)
4451 || (sc->sc_type == WM_T_PCH_LPT)
4452 || (sc->sc_type == WM_T_PCH_SPT)
4453 || (sc->sc_type == WM_T_PCH_CNP))
4454 reg &= 0x1f;
4455 else
4456 reg &= 0x7f;
4457 bit = hash & 0x1f;
4458
4459 hash = CSR_READ(sc, mta_reg + (reg << 2));
4460 hash |= 1U << bit;
4461
4462 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
4463 /*
4464 			 * 82544 Errata 9: Certain registers cannot be written
4465 * with particular alignments in PCI-X bus operation
4466 * (FCAH, MTA and VFTA).
4467 */
4468 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4469 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4470 CSR_WRITE_FLUSH(sc);
4471 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4472 CSR_WRITE_FLUSH(sc);
4473 } else {
4474 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4475 CSR_WRITE_FLUSH(sc);
4476 }
4477
4478 ETHER_NEXT_MULTI(step, enm);
4479 }
4480 ec->ec_flags &= ~ETHER_F_ALLMULTI;
4481 ETHER_UNLOCK(ec);
4482
4483 goto setit;
4484
4485 allmulti:
4486 sc->sc_rctl |= RCTL_MPE;
4487
4488 setit:
4489 if (sc->sc_type >= WM_T_PCH2) {
4490 if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4491 && (ifp->if_mtu > ETHERMTU))
4492 rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
4493 else
4494 rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
4495 if (rv != 0)
4496 device_printf(sc->sc_dev,
4497 "Failed to do workaround for jumbo frame.\n");
4498 }
4499
4500 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4501 }
4502
4503 /* Reset and init related */
4504
4505 static void
4506 wm_set_vlan(struct wm_softc *sc)
4507 {
4508
4509 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4510 device_xname(sc->sc_dev), __func__));
4511
4512 /* Deal with VLAN enables. */
4513 if (VLAN_ATTACHED(&sc->sc_ethercom))
4514 sc->sc_ctrl |= CTRL_VME;
4515 else
4516 sc->sc_ctrl &= ~CTRL_VME;
4517
4518 /* Write the control registers. */
4519 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4520 }
4521
4522 static void
4523 wm_set_pcie_completion_timeout(struct wm_softc *sc)
4524 {
4525 uint32_t gcr;
4526 pcireg_t ctrl2;
4527
4528 gcr = CSR_READ(sc, WMREG_GCR);
4529
4530 /* Only take action if timeout value is defaulted to 0 */
4531 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
4532 goto out;
4533
4534 if ((gcr & GCR_CAP_VER2) == 0) {
4535 gcr |= GCR_CMPL_TMOUT_10MS;
4536 goto out;
4537 }
4538
4539 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
4540 sc->sc_pcixe_capoff + PCIE_DCSR2);
4541 ctrl2 |= WM_PCIE_DCSR2_16MS;
4542 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
4543 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
4544
4545 out:
4546 /* Disable completion timeout resend */
4547 gcr &= ~GCR_CMPL_TMOUT_RESEND;
4548
4549 CSR_WRITE(sc, WMREG_GCR, gcr);
4550 }
4551
4552 void
4553 wm_get_auto_rd_done(struct wm_softc *sc)
4554 {
4555 int i;
4556
4557 /* wait for eeprom to reload */
4558 switch (sc->sc_type) {
4559 case WM_T_82571:
4560 case WM_T_82572:
4561 case WM_T_82573:
4562 case WM_T_82574:
4563 case WM_T_82583:
4564 case WM_T_82575:
4565 case WM_T_82576:
4566 case WM_T_82580:
4567 case WM_T_I350:
4568 case WM_T_I354:
4569 case WM_T_I210:
4570 case WM_T_I211:
4571 case WM_T_80003:
4572 case WM_T_ICH8:
4573 case WM_T_ICH9:
4574 for (i = 0; i < 10; i++) {
4575 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4576 break;
4577 delay(1000);
4578 }
4579 if (i == 10) {
4580 log(LOG_ERR, "%s: auto read from eeprom failed to "
4581 "complete\n", device_xname(sc->sc_dev));
4582 }
4583 break;
4584 default:
4585 break;
4586 }
4587 }
4588
4589 void
4590 wm_lan_init_done(struct wm_softc *sc)
4591 {
4592 uint32_t reg = 0;
4593 int i;
4594
4595 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4596 device_xname(sc->sc_dev), __func__));
4597
4598 /* Wait for eeprom to reload */
4599 switch (sc->sc_type) {
4600 case WM_T_ICH10:
4601 case WM_T_PCH:
4602 case WM_T_PCH2:
4603 case WM_T_PCH_LPT:
4604 case WM_T_PCH_SPT:
4605 case WM_T_PCH_CNP:
4606 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4607 reg = CSR_READ(sc, WMREG_STATUS);
4608 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4609 break;
4610 delay(100);
4611 }
4612 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4613 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4614 "complete\n", device_xname(sc->sc_dev), __func__);
4615 }
4616 break;
4617 default:
4618 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4619 __func__);
4620 break;
4621 }
4622
4623 reg &= ~STATUS_LAN_INIT_DONE;
4624 CSR_WRITE(sc, WMREG_STATUS, reg);
4625 }
4626
4627 void
4628 wm_get_cfg_done(struct wm_softc *sc)
4629 {
4630 int mask;
4631 uint32_t reg;
4632 int i;
4633
4634 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4635 device_xname(sc->sc_dev), __func__));
4636
4637 /* Wait for eeprom to reload */
4638 switch (sc->sc_type) {
4639 case WM_T_82542_2_0:
4640 case WM_T_82542_2_1:
4641 /* null */
4642 break;
4643 case WM_T_82543:
4644 case WM_T_82544:
4645 case WM_T_82540:
4646 case WM_T_82545:
4647 case WM_T_82545_3:
4648 case WM_T_82546:
4649 case WM_T_82546_3:
4650 case WM_T_82541:
4651 case WM_T_82541_2:
4652 case WM_T_82547:
4653 case WM_T_82547_2:
4654 case WM_T_82573:
4655 case WM_T_82574:
4656 case WM_T_82583:
4657 /* generic */
4658 delay(10*1000);
4659 break;
4660 case WM_T_80003:
4661 case WM_T_82571:
4662 case WM_T_82572:
4663 case WM_T_82575:
4664 case WM_T_82576:
4665 case WM_T_82580:
4666 case WM_T_I350:
4667 case WM_T_I354:
4668 case WM_T_I210:
4669 case WM_T_I211:
4670 if (sc->sc_type == WM_T_82571) {
4671 /* Only 82571 shares port 0 */
4672 mask = EEMNGCTL_CFGDONE_0;
4673 } else
4674 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4675 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4676 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4677 break;
4678 delay(1000);
4679 }
4680 if (i >= WM_PHY_CFG_TIMEOUT)
4681 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
4682 device_xname(sc->sc_dev), __func__));
4683 break;
4684 case WM_T_ICH8:
4685 case WM_T_ICH9:
4686 case WM_T_ICH10:
4687 case WM_T_PCH:
4688 case WM_T_PCH2:
4689 case WM_T_PCH_LPT:
4690 case WM_T_PCH_SPT:
4691 case WM_T_PCH_CNP:
4692 delay(10*1000);
4693 if (sc->sc_type >= WM_T_ICH10)
4694 wm_lan_init_done(sc);
4695 else
4696 wm_get_auto_rd_done(sc);
4697
4698 /* Clear PHY Reset Asserted bit */
4699 reg = CSR_READ(sc, WMREG_STATUS);
4700 if ((reg & STATUS_PHYRA) != 0)
4701 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
4702 break;
4703 default:
4704 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4705 __func__);
4706 break;
4707 }
4708 }
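
/*
 * Illustrative sketch, not compiled: the EEPROM/LAN-init waits above all
 * share the same poll-with-timeout shape.  A generic version with the
 * register, mask and delay as parameters (hypothetical helper, not a
 * real driver function):
 */
#if 0
static int
wm_example_poll_reg(struct wm_softc *sc, int reg, uint32_t mask,
    int tries, int usec)
{
	int i;

	for (i = 0; i < tries; i++) {
		if ((CSR_READ(sc, reg) & mask) != 0)
			return 0;	/* bit set: done */
		delay(usec);
	}
	return ETIMEDOUT;		/* caller logs the failure */
}
#endif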
4709
4710 int
4711 wm_phy_post_reset(struct wm_softc *sc)
4712 {
4713 device_t dev = sc->sc_dev;
4714 uint16_t reg;
4715 int rv = 0;
4716
4717 /* This function is only for ICH8 and newer. */
4718 if (sc->sc_type < WM_T_ICH8)
4719 return 0;
4720
4721 if (wm_phy_resetisblocked(sc)) {
4722 /* XXX */
4723 device_printf(dev, "PHY is blocked\n");
4724 return -1;
4725 }
4726
4727 	/* Allow time for the h/w to reach a quiescent state after reset */
4728 delay(10*1000);
4729
4730 /* Perform any necessary post-reset workarounds */
4731 if (sc->sc_type == WM_T_PCH)
4732 rv = wm_hv_phy_workarounds_ich8lan(sc);
4733 else if (sc->sc_type == WM_T_PCH2)
4734 rv = wm_lv_phy_workarounds_ich8lan(sc);
4735 if (rv != 0)
4736 return rv;
4737
4738 /* Clear the host wakeup bit after lcd reset */
4739 if (sc->sc_type >= WM_T_PCH) {
4740 wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, ®);
4741 reg &= ~BM_WUC_HOST_WU_BIT;
4742 wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
4743 }
4744
4745 /* Configure the LCD with the extended configuration region in NVM */
4746 if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
4747 return rv;
4748
4749 /* Configure the LCD with the OEM bits in NVM */
4750 rv = wm_oem_bits_config_ich8lan(sc, true);
4751
4752 if (sc->sc_type == WM_T_PCH2) {
4753 /* Ungate automatic PHY configuration on non-managed 82579 */
4754 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
4755 delay(10 * 1000);
4756 wm_gate_hw_phy_config_ich8lan(sc, false);
4757 }
4758 /* Set EEE LPI Update Timer to 200usec */
4759 rv = sc->phy.acquire(sc);
4760 if (rv)
4761 return rv;
4762 rv = wm_write_emi_reg_locked(dev,
4763 I82579_LPI_UPDATE_TIMER, 0x1387);
4764 sc->phy.release(sc);
4765 }
4766
4767 return rv;
4768 }
4769
4770 /* Only for PCH and newer */
4771 static int
4772 wm_write_smbus_addr(struct wm_softc *sc)
4773 {
4774 uint32_t strap, freq;
4775 uint16_t phy_data;
4776 int rv;
4777
4778 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4779 device_xname(sc->sc_dev), __func__));
4780 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
4781
4782 strap = CSR_READ(sc, WMREG_STRAP);
4783 freq = __SHIFTOUT(strap, STRAP_FREQ);
4784
4785 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
4786 if (rv != 0)
4787 return rv;
4788
4789 phy_data &= ~HV_SMB_ADDR_ADDR;
4790 phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
4791 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
4792
4793 if (sc->sc_phytype == WMPHY_I217) {
4794 /* Restore SMBus frequency */
4795 		if (freq--) {
4796 phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
4797 | HV_SMB_ADDR_FREQ_HIGH);
4798 phy_data |= __SHIFTIN((freq & 0x01) != 0,
4799 HV_SMB_ADDR_FREQ_LOW);
4800 phy_data |= __SHIFTIN((freq & 0x02) != 0,
4801 HV_SMB_ADDR_FREQ_HIGH);
4802 } else
4803 DPRINTF(sc, WM_DEBUG_INIT,
4804 ("%s: %s Unsupported SMB frequency in PHY\n",
4805 device_xname(sc->sc_dev), __func__));
4806 }
4807
4808 return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
4809 phy_data);
4810 }
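
/*
 * Illustrative sketch, not compiled: __SHIFTOUT() extracts a bit field
 * through its mask and __SHIFTIN() places a value back into one; that is
 * how the SMBus address strap moves from STRAP into the PHY register
 * above (where the destination field happens to start at bit 0).
 * Hypothetical helper:
 */
#if 0
static uint16_t
wm_example_strap_to_smb(uint32_t strap)
{
	uint16_t addr;

	addr = __SHIFTOUT(strap, STRAP_SMBUSADDR); /* field -> bit 0 */
	return __SHIFTIN(addr, HV_SMB_ADDR_ADDR);  /* bit 0 -> field */
}
#endif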
4811
4812 static int
4813 wm_init_lcd_from_nvm(struct wm_softc *sc)
4814 {
4815 uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
4816 uint16_t phy_page = 0;
4817 int rv = 0;
4818
4819 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4820 device_xname(sc->sc_dev), __func__));
4821
4822 switch (sc->sc_type) {
4823 case WM_T_ICH8:
4824 if ((sc->sc_phytype == WMPHY_UNKNOWN)
4825 || (sc->sc_phytype != WMPHY_IGP_3))
4826 return 0;
4827
4828 if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
4829 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
4830 sw_cfg_mask = FEXTNVM_SW_CONFIG;
4831 break;
4832 }
4833 /* FALLTHROUGH */
4834 case WM_T_PCH:
4835 case WM_T_PCH2:
4836 case WM_T_PCH_LPT:
4837 case WM_T_PCH_SPT:
4838 case WM_T_PCH_CNP:
4839 sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
4840 break;
4841 default:
4842 return 0;
4843 }
4844
4845 if ((rv = sc->phy.acquire(sc)) != 0)
4846 return rv;
4847
4848 reg = CSR_READ(sc, WMREG_FEXTNVM);
4849 if ((reg & sw_cfg_mask) == 0)
4850 goto release;
4851
4852 /*
4853 * Make sure HW does not configure LCD from PHY extended configuration
4854 * before SW configuration
4855 */
4856 extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
4857 if ((sc->sc_type < WM_T_PCH2)
4858 && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
4859 goto release;
4860
4861 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
4862 device_xname(sc->sc_dev), __func__));
4863 /* word_addr is in DWORD */
4864 word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
4865
4866 reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
4867 cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
4868 if (cnf_size == 0)
4869 goto release;
4870
4871 if (((sc->sc_type == WM_T_PCH)
4872 && ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
4873 || (sc->sc_type > WM_T_PCH)) {
4874 /*
4875 * HW configures the SMBus address and LEDs when the OEM and
4876 * LCD Write Enable bits are set in the NVM. When both NVM bits
4877 * are cleared, SW will configure them instead.
4878 */
4879 DPRINTF(sc, WM_DEBUG_INIT,
4880 ("%s: %s: Configure SMBus and LED\n",
4881 device_xname(sc->sc_dev), __func__));
4882 if ((rv = wm_write_smbus_addr(sc)) != 0)
4883 goto release;
4884
4885 reg = CSR_READ(sc, WMREG_LEDCTL);
4886 rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
4887 (uint16_t)reg);
4888 if (rv != 0)
4889 goto release;
4890 }
4891
4892 /* Configure LCD from extended configuration region. */
4893 for (i = 0; i < cnf_size; i++) {
4894 uint16_t reg_data, reg_addr;
4895
4896 if (wm_nvm_read(sc, (word_addr + i * 2), 1, ®_data) != 0)
4897 goto release;
4898
4899 if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, ®_addr) !=0)
4900 goto release;
4901
4902 if (reg_addr == IGPHY_PAGE_SELECT)
4903 phy_page = reg_data;
4904
4905 reg_addr &= IGPHY_MAXREGADDR;
4906 reg_addr |= phy_page;
4907
4908 KASSERT(sc->phy.writereg_locked != NULL);
4909 rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
4910 reg_data);
4911 }
4912
4913 release:
4914 sc->phy.release(sc);
4915 return rv;
4916 }
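
/*
 * Illustrative sketch, not compiled: each extended configuration entry
 * is a (data, address) word pair, and the PHY register address is built
 * by combining the last seen page-select value with the in-page register
 * number, as the loop above does.  Hypothetical helper:
 */
#if 0
static uint16_t
wm_example_phy_regaddr(uint16_t phy_page, uint16_t reg_addr)
{

	reg_addr &= IGPHY_MAXREGADDR;	/* keep the in-page register bits */
	return phy_page | reg_addr;	/* page bits from the page select */
}
#endif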
4917
4918 /*
4919 * wm_oem_bits_config_ich8lan - SW-based LCD Configuration
4920 * @sc: pointer to the HW structure
4921  * @d0_state: true if entering the D0 power state, false for D3
4922 *
4923 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
4924 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
4925 * in NVM determines whether HW should configure LPLU and Gbe Disable.
4926 */
4927 int
4928 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
4929 {
4930 uint32_t mac_reg;
4931 uint16_t oem_reg;
4932 int rv;
4933
4934 if (sc->sc_type < WM_T_PCH)
4935 return 0;
4936
4937 rv = sc->phy.acquire(sc);
4938 if (rv != 0)
4939 return rv;
4940
4941 if (sc->sc_type == WM_T_PCH) {
4942 mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4943 if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
4944 goto release;
4945 }
4946
4947 mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
4948 if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
4949 goto release;
4950
4951 mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
4952
4953 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
4954 if (rv != 0)
4955 goto release;
4956 oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
4957
4958 if (d0_state) {
4959 if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
4960 oem_reg |= HV_OEM_BITS_A1KDIS;
4961 if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
4962 oem_reg |= HV_OEM_BITS_LPLU;
4963 } else {
4964 if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
4965 != 0)
4966 oem_reg |= HV_OEM_BITS_A1KDIS;
4967 if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
4968 != 0)
4969 oem_reg |= HV_OEM_BITS_LPLU;
4970 }
4971
4972 /* Set Restart auto-neg to activate the bits */
4973 if ((d0_state || (sc->sc_type != WM_T_PCH))
4974 && (wm_phy_resetisblocked(sc) == false))
4975 oem_reg |= HV_OEM_BITS_ANEGNOW;
4976
4977 rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
4978
4979 release:
4980 sc->phy.release(sc);
4981
4982 return rv;
4983 }
4984
4985 /* Init hardware bits */
4986 void
4987 wm_initialize_hardware_bits(struct wm_softc *sc)
4988 {
4989 uint32_t tarc0, tarc1, reg;
4990
4991 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4992 device_xname(sc->sc_dev), __func__));
4993
4994 /* For 82571 variant, 80003 and ICHs */
4995 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
4996 || WM_IS_ICHPCH(sc)) {
4997
4998 /* Transmit Descriptor Control 0 */
4999 reg = CSR_READ(sc, WMREG_TXDCTL(0));
5000 reg |= TXDCTL_COUNT_DESC;
5001 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
5002
5003 /* Transmit Descriptor Control 1 */
5004 reg = CSR_READ(sc, WMREG_TXDCTL(1));
5005 reg |= TXDCTL_COUNT_DESC;
5006 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
5007
5008 /* TARC0 */
5009 tarc0 = CSR_READ(sc, WMREG_TARC0);
5010 switch (sc->sc_type) {
5011 case WM_T_82571:
5012 case WM_T_82572:
5013 case WM_T_82573:
5014 case WM_T_82574:
5015 case WM_T_82583:
5016 case WM_T_80003:
5017 /* Clear bits 30..27 */
5018 tarc0 &= ~__BITS(30, 27);
5019 break;
5020 default:
5021 break;
5022 }
5023
5024 switch (sc->sc_type) {
5025 case WM_T_82571:
5026 case WM_T_82572:
5027 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
5028
5029 tarc1 = CSR_READ(sc, WMREG_TARC1);
5030 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
5031 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
5032 /* 8257[12] Errata No.7 */
5033 			tarc1 |= __BIT(22);	/* TARC1 bit 22 */
5034
5035 /* TARC1 bit 28 */
5036 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5037 tarc1 &= ~__BIT(28);
5038 else
5039 tarc1 |= __BIT(28);
5040 CSR_WRITE(sc, WMREG_TARC1, tarc1);
5041
5042 /*
5043 * 8257[12] Errata No.13
5044 			 * Disable Dynamic Clock Gating.
5045 */
5046 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5047 reg &= ~CTRL_EXT_DMA_DYN_CLK;
5048 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5049 break;
5050 case WM_T_82573:
5051 case WM_T_82574:
5052 case WM_T_82583:
5053 if ((sc->sc_type == WM_T_82574)
5054 || (sc->sc_type == WM_T_82583))
5055 tarc0 |= __BIT(26); /* TARC0 bit 26 */
5056
5057 /* Extended Device Control */
5058 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5059 reg &= ~__BIT(23); /* Clear bit 23 */
5060 reg |= __BIT(22); /* Set bit 22 */
5061 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5062
5063 /* Device Control */
5064 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
5065 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5066
5067 /* PCIe Control Register */
5068 /*
5069 * 82573 Errata (unknown).
5070 *
5071 * 82574 Errata 25 and 82583 Errata 12
5072 * "Dropped Rx Packets":
5073 			 * NVM Image Versions 2.1.4 and newer do not have this bug.
5074 */
5075 reg = CSR_READ(sc, WMREG_GCR);
5076 reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
5077 CSR_WRITE(sc, WMREG_GCR, reg);
5078
5079 if ((sc->sc_type == WM_T_82574)
5080 || (sc->sc_type == WM_T_82583)) {
5081 /*
5082 * Document says this bit must be set for
5083 * proper operation.
5084 */
5085 reg = CSR_READ(sc, WMREG_GCR);
5086 reg |= __BIT(22);
5087 CSR_WRITE(sc, WMREG_GCR, reg);
5088
5089 /*
5090 				 * Apply a workaround for the hardware
5091 				 * erratum documented in the errata sheets.
5092 				 * It fixes an issue where error-prone or
5093 				 * unreliable PCIe completions occur,
5094 				 * particularly with ASPM enabled. Without
5095 				 * the fix, the issue can cause Tx timeouts.
5096 */
5097 reg = CSR_READ(sc, WMREG_GCR2);
5098 reg |= __BIT(0);
5099 CSR_WRITE(sc, WMREG_GCR2, reg);
5100 }
5101 break;
5102 case WM_T_80003:
5103 /* TARC0 */
5104 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
5105 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
5106 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
5107
5108 /* TARC1 bit 28 */
5109 tarc1 = CSR_READ(sc, WMREG_TARC1);
5110 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5111 tarc1 &= ~__BIT(28);
5112 else
5113 tarc1 |= __BIT(28);
5114 CSR_WRITE(sc, WMREG_TARC1, tarc1);
5115 break;
5116 case WM_T_ICH8:
5117 case WM_T_ICH9:
5118 case WM_T_ICH10:
5119 case WM_T_PCH:
5120 case WM_T_PCH2:
5121 case WM_T_PCH_LPT:
5122 case WM_T_PCH_SPT:
5123 case WM_T_PCH_CNP:
5124 /* TARC0 */
5125 if (sc->sc_type == WM_T_ICH8) {
5126 /* Set TARC0 bits 29 and 28 */
5127 tarc0 |= __BITS(29, 28);
5128 } else if (sc->sc_type == WM_T_PCH_SPT) {
5129 tarc0 |= __BIT(29);
5130 /*
5131 * Drop bit 28. From Linux.
5132 * See I218/I219 spec update
5133 * "5. Buffer Overrun While the I219 is
5134 * Processing DMA Transactions"
5135 */
5136 tarc0 &= ~__BIT(28);
5137 }
5138 /* Set TARC0 bits 23,24,26,27 */
5139 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
5140
5141 /* CTRL_EXT */
5142 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5143 reg |= __BIT(22); /* Set bit 22 */
5144 /*
5145 * Enable PHY low-power state when MAC is at D3
5146 * w/o WoL
5147 */
5148 if (sc->sc_type >= WM_T_PCH)
5149 reg |= CTRL_EXT_PHYPDEN;
5150 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5151
5152 /* TARC1 */
5153 tarc1 = CSR_READ(sc, WMREG_TARC1);
5154 /* bit 28 */
5155 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5156 tarc1 &= ~__BIT(28);
5157 else
5158 tarc1 |= __BIT(28);
5159 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
5160 CSR_WRITE(sc, WMREG_TARC1, tarc1);
5161
5162 /* Device Status */
5163 if (sc->sc_type == WM_T_ICH8) {
5164 reg = CSR_READ(sc, WMREG_STATUS);
5165 reg &= ~__BIT(31);
5166 CSR_WRITE(sc, WMREG_STATUS, reg);
5167
5168 }
5169
5170 /* IOSFPC */
5171 if (sc->sc_type == WM_T_PCH_SPT) {
5172 reg = CSR_READ(sc, WMREG_IOSFPC);
5173 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
5174 CSR_WRITE(sc, WMREG_IOSFPC, reg);
5175 }
5176 /*
5177 			 * To work around a descriptor data corruption issue
5178 			 * during NFS v2 UDP traffic, just disable the NFS
5179 			 * filtering capability.
5180 */
5181 reg = CSR_READ(sc, WMREG_RFCTL);
5182 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
5183 CSR_WRITE(sc, WMREG_RFCTL, reg);
5184 break;
5185 default:
5186 break;
5187 }
5188 CSR_WRITE(sc, WMREG_TARC0, tarc0);
5189
5190 switch (sc->sc_type) {
5191 case WM_T_82571:
5192 case WM_T_82572:
5193 case WM_T_82573:
5194 case WM_T_80003:
5195 case WM_T_ICH8:
5196 /*
5197 			 * 8257[12] Errata No.52, 82573 Errata No.43 and some
5198 			 * others to avoid the RSS Hash Value bug.
5199 			 */
5200 			reg = CSR_READ(sc, WMREG_RFCTL);
5201 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
5202 CSR_WRITE(sc, WMREG_RFCTL, reg);
5203 break;
5204 case WM_T_82574:
5205 			/* Use extended Rx descriptors. */
5206 reg = CSR_READ(sc, WMREG_RFCTL);
5207 reg |= WMREG_RFCTL_EXSTEN;
5208 CSR_WRITE(sc, WMREG_RFCTL, reg);
5209 break;
5210 default:
5211 break;
5212 }
5213 } else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
5214 /*
5215 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
5216 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
5217 * "Certain Malformed IPv6 Extension Headers are Not Processed
5218 * Correctly by the Device"
5219 *
5220 * I354(C2000) Errata AVR53:
5221 * "Malformed IPv6 Extension Headers May Result in LAN Device
5222 * Hang"
5223 */
5224 reg = CSR_READ(sc, WMREG_RFCTL);
5225 reg |= WMREG_RFCTL_IPV6EXDIS;
5226 CSR_WRITE(sc, WMREG_RFCTL, reg);
5227 }
5228 }
5229
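/*
 * wm_rxpbs_adjust_82580:
 *
 *	Convert the raw RXPBS register value into the packet buffer size
 *	via the 82580 lookup table. Returns 0 when the value is out of
 *	the table's range.
 */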
5230 static uint32_t
5231 wm_rxpbs_adjust_82580(uint32_t val)
5232 {
5233 uint32_t rv = 0;
5234
5235 if (val < __arraycount(wm_82580_rxpbs_table))
5236 rv = wm_82580_rxpbs_table[val];
5237
5238 return rv;
5239 }
5240
5241 /*
5242 * wm_reset_phy:
5243 *
5244 * generic PHY reset function.
5245 * Same as e1000_phy_hw_reset_generic()
5246 */
5247 static int
5248 wm_reset_phy(struct wm_softc *sc)
5249 {
5250 uint32_t reg;
5251 int rv;
5252
5253 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5254 device_xname(sc->sc_dev), __func__));
5255 if (wm_phy_resetisblocked(sc))
5256 return -1;
5257
5258 rv = sc->phy.acquire(sc);
5259 if (rv) {
5260 device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
5261 __func__, rv);
5262 return rv;
5263 }
5264
5265 reg = CSR_READ(sc, WMREG_CTRL);
5266 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
5267 CSR_WRITE_FLUSH(sc);
5268
5269 delay(sc->phy.reset_delay_us);
5270
5271 CSR_WRITE(sc, WMREG_CTRL, reg);
5272 CSR_WRITE_FLUSH(sc);
5273
5274 delay(150);
5275
5276 sc->phy.release(sc);
5277
5278 wm_get_cfg_done(sc);
5279 wm_phy_post_reset(sc);
5280
5281 return 0;
5282 }
5283
5284 /*
5285 * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
5286 *
5287 * In i219, the descriptor rings must be emptied before resetting the HW
5288 * or before changing the device state to D3 during runtime (runtime PM).
5289 *
5290 * Failure to do this will cause the HW to enter a unit hang state which can
5291 * only be released by PCI reset on the device.
5292 *
5293 * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
5294 */
5295 static void
5296 wm_flush_desc_rings(struct wm_softc *sc)
5297 {
5298 pcireg_t preg;
5299 uint32_t reg;
5300 struct wm_txqueue *txq;
5301 wiseman_txdesc_t *txd;
5302 int nexttx;
5303 uint32_t rctl;
5304
5305 KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
5306
5307 /* First, disable MULR fix in FEXTNVM11 */
5308 reg = CSR_READ(sc, WMREG_FEXTNVM11);
5309 reg |= FEXTNVM11_DIS_MULRFIX;
5310 CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
5311
5312 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
5313 reg = CSR_READ(sc, WMREG_TDLEN(0));
5314 if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
5315 return;
5316
5317 /*
5318 * Remove all descriptors from the tx_ring.
5319 *
5320 * We want to clear all pending descriptors from the TX ring. Zeroing
5321 * happens when the HW reads the regs. We assign the ring itself as
5322 	 * the data of the next descriptor. We don't care about the data since
5323 	 * we are about to reset the HW.
5324 */
5325 #ifdef WM_DEBUG
5326 device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
5327 #endif
5328 reg = CSR_READ(sc, WMREG_TCTL);
5329 CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
5330
5331 txq = &sc->sc_queue[0].wmq_txq;
5332 nexttx = txq->txq_next;
5333 txd = &txq->txq_descs[nexttx];
5334 wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
5335 txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
5336 txd->wtx_fields.wtxu_status = 0;
5337 txd->wtx_fields.wtxu_options = 0;
5338 txd->wtx_fields.wtxu_vlan = 0;
5339
5340 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5341 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5342
5343 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
5344 CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
5345 CSR_WRITE_FLUSH(sc);
5346 delay(250);
5347
5348 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
5349 if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
5350 return;
5351
5352 /*
5353 * Mark all descriptors in the RX ring as consumed and disable the
5354 * rx ring.
5355 */
5356 #ifdef WM_DEBUG
5357 device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
5358 #endif
5359 rctl = CSR_READ(sc, WMREG_RCTL);
5360 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
5361 CSR_WRITE_FLUSH(sc);
5362 delay(150);
5363
5364 reg = CSR_READ(sc, WMREG_RXDCTL(0));
5365 /* Zero the lower 14 bits (prefetch and host thresholds) */
5366 reg &= 0xffffc000;
5367 /*
5368 * Update thresholds: prefetch threshold to 31, host threshold
5369 * to 1 and make sure the granularity is "descriptors" and not
5370 * "cache lines"
5371 */
5372 reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
5373 CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
5374
5375 /* Momentarily enable the RX ring for the changes to take effect */
5376 CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
5377 CSR_WRITE_FLUSH(sc);
5378 delay(150);
5379 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
5380 }
5381
5382 /*
5383 * wm_reset:
5384 *
5385 * Reset the i82542 chip.
5386 */
5387 static void
5388 wm_reset(struct wm_softc *sc)
5389 {
5390 int phy_reset = 0;
5391 int i, error = 0;
5392 uint32_t reg;
5393 uint16_t kmreg;
5394 int rv;
5395
5396 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5397 device_xname(sc->sc_dev), __func__));
5398 KASSERT(sc->sc_type != 0);
5399
5400 /*
5401 * Allocate on-chip memory according to the MTU size.
5402 * The Packet Buffer Allocation register must be written
5403 * before the chip is reset.
5404 */
5405 switch (sc->sc_type) {
5406 case WM_T_82547:
5407 case WM_T_82547_2:
5408 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
5409 PBA_22K : PBA_30K;
5410 for (i = 0; i < sc->sc_nqueues; i++) {
5411 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5412 txq->txq_fifo_head = 0;
5413 txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
5414 txq->txq_fifo_size =
5415 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
5416 txq->txq_fifo_stall = 0;
5417 }
5418 break;
5419 case WM_T_82571:
5420 case WM_T_82572:
5421 case WM_T_82575: /* XXX need special handing for jumbo frames */
5422 case WM_T_80003:
5423 sc->sc_pba = PBA_32K;
5424 break;
5425 case WM_T_82573:
5426 sc->sc_pba = PBA_12K;
5427 break;
5428 case WM_T_82574:
5429 case WM_T_82583:
5430 sc->sc_pba = PBA_20K;
5431 break;
5432 case WM_T_82576:
5433 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
5434 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
5435 break;
5436 case WM_T_82580:
5437 case WM_T_I350:
5438 case WM_T_I354:
5439 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
5440 break;
5441 case WM_T_I210:
5442 case WM_T_I211:
5443 sc->sc_pba = PBA_34K;
5444 break;
5445 case WM_T_ICH8:
5446 /* Workaround for a bit corruption issue in FIFO memory */
5447 sc->sc_pba = PBA_8K;
5448 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
5449 break;
5450 case WM_T_ICH9:
5451 case WM_T_ICH10:
5452 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
5453 PBA_14K : PBA_10K;
5454 break;
5455 case WM_T_PCH:
5456 case WM_T_PCH2: /* XXX 14K? */
5457 case WM_T_PCH_LPT:
5458 case WM_T_PCH_SPT:
5459 case WM_T_PCH_CNP:
5460 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
5461 PBA_12K : PBA_26K;
5462 break;
5463 default:
5464 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
5465 PBA_40K : PBA_48K;
5466 break;
5467 }
5468 /*
5469 * Only old or non-multiqueue devices have the PBA register
5470 * XXX Need special handling for 82575.
5471 */
5472 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5473 || (sc->sc_type == WM_T_82575))
5474 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
5475
5476 /* Prevent the PCI-E bus from sticking */
5477 if (sc->sc_flags & WM_F_PCIE) {
5478 int timeout = 800;
5479
5480 sc->sc_ctrl |= CTRL_GIO_M_DIS;
5481 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5482
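		/* Poll up to 80 ms (800 * 100 us) for master requests to stop. */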
5483 while (timeout--) {
5484 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
5485 == 0)
5486 break;
5487 delay(100);
5488 }
5489 if (timeout == 0)
5490 device_printf(sc->sc_dev,
5491 "failed to disable bus mastering\n");
5492 }
5493
5494 /* Set the completion timeout for interface */
5495 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
5496 || (sc->sc_type == WM_T_82580)
5497 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5498 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
5499 wm_set_pcie_completion_timeout(sc);
5500
5501 /* Clear interrupt */
5502 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5503 if (wm_is_using_msix(sc)) {
5504 if (sc->sc_type != WM_T_82574) {
5505 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5506 CSR_WRITE(sc, WMREG_EIAC, 0);
5507 } else
5508 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5509 }
5510
5511 /* Stop the transmit and receive processes. */
5512 CSR_WRITE(sc, WMREG_RCTL, 0);
5513 sc->sc_rctl &= ~RCTL_EN;
5514 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
5515 CSR_WRITE_FLUSH(sc);
5516
5517 /* XXX set_tbi_sbp_82543() */
5518
5519 delay(10*1000);
5520
5521 /* Must acquire the MDIO ownership before MAC reset */
5522 switch (sc->sc_type) {
5523 case WM_T_82573:
5524 case WM_T_82574:
5525 case WM_T_82583:
5526 error = wm_get_hw_semaphore_82573(sc);
5527 break;
5528 default:
5529 break;
5530 }
5531
5532 /*
5533 * 82541 Errata 29? & 82547 Errata 28?
5534 * See also the description about PHY_RST bit in CTRL register
5535 * in 8254x_GBe_SDM.pdf.
5536 */
5537 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
5538 CSR_WRITE(sc, WMREG_CTRL,
5539 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
5540 CSR_WRITE_FLUSH(sc);
5541 delay(5000);
5542 }
5543
5544 switch (sc->sc_type) {
5545 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
5546 case WM_T_82541:
5547 case WM_T_82541_2:
5548 case WM_T_82547:
5549 case WM_T_82547_2:
5550 /*
5551 * On some chipsets, a reset through a memory-mapped write
5552 * cycle can cause the chip to reset before completing the
5553 		 * write cycle. This causes major headaches that can be avoided
5554 * by issuing the reset via indirect register writes through
5555 * I/O space.
5556 *
5557 * So, if we successfully mapped the I/O BAR at attach time,
5558 * use that. Otherwise, try our luck with a memory-mapped
5559 * reset.
5560 */
5561 if (sc->sc_flags & WM_F_IOH_VALID)
5562 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
5563 else
5564 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
5565 break;
5566 case WM_T_82545_3:
5567 case WM_T_82546_3:
5568 /* Use the shadow control register on these chips. */
5569 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
5570 break;
5571 case WM_T_80003:
5572 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5573 if (sc->phy.acquire(sc) != 0)
5574 break;
5575 CSR_WRITE(sc, WMREG_CTRL, reg);
5576 sc->phy.release(sc);
5577 break;
5578 case WM_T_ICH8:
5579 case WM_T_ICH9:
5580 case WM_T_ICH10:
5581 case WM_T_PCH:
5582 case WM_T_PCH2:
5583 case WM_T_PCH_LPT:
5584 case WM_T_PCH_SPT:
5585 case WM_T_PCH_CNP:
5586 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5587 if (wm_phy_resetisblocked(sc) == false) {
5588 /*
5589 * Gate automatic PHY configuration by hardware on
5590 * non-managed 82579
5591 */
5592 if ((sc->sc_type == WM_T_PCH2)
5593 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
5594 == 0))
5595 wm_gate_hw_phy_config_ich8lan(sc, true);
5596
5597 reg |= CTRL_PHY_RESET;
5598 phy_reset = 1;
5599 } else
5600 device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
5601 if (sc->phy.acquire(sc) != 0)
5602 break;
5603 CSR_WRITE(sc, WMREG_CTRL, reg);
5604 /* Don't insert a completion barrier when reset */
5605 delay(20*1000);
5606 /*
5607 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset,
5608 * so don't use sc->phy.release(sc). Release sc_ich_phymtx
5609 * only. See also wm_get_swflag_ich8lan().
5610 */
5611 mutex_exit(sc->sc_ich_phymtx);
5612 break;
5613 case WM_T_82580:
5614 case WM_T_I350:
5615 case WM_T_I354:
5616 case WM_T_I210:
5617 case WM_T_I211:
5618 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5619 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
5620 CSR_WRITE_FLUSH(sc);
5621 delay(5000);
5622 break;
5623 case WM_T_82542_2_0:
5624 case WM_T_82542_2_1:
5625 case WM_T_82543:
5626 case WM_T_82540:
5627 case WM_T_82545:
5628 case WM_T_82546:
5629 case WM_T_82571:
5630 case WM_T_82572:
5631 case WM_T_82573:
5632 case WM_T_82574:
5633 case WM_T_82575:
5634 case WM_T_82576:
5635 case WM_T_82583:
5636 default:
5637 /* Everything else can safely use the documented method. */
5638 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5639 break;
5640 }
5641
5642 /* Must release the MDIO ownership after MAC reset */
5643 switch (sc->sc_type) {
5644 case WM_T_82573:
5645 case WM_T_82574:
5646 case WM_T_82583:
5647 if (error == 0)
5648 wm_put_hw_semaphore_82573(sc);
5649 break;
5650 default:
5651 break;
5652 }
5653
5654 /* Set Phy Config Counter to 50msec */
5655 if (sc->sc_type == WM_T_PCH2) {
5656 reg = CSR_READ(sc, WMREG_FEXTNVM3);
5657 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
5658 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
5659 CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
5660 }
5661
5662 if (phy_reset != 0)
5663 wm_get_cfg_done(sc);
5664
5665 /* Reload EEPROM */
5666 switch (sc->sc_type) {
5667 case WM_T_82542_2_0:
5668 case WM_T_82542_2_1:
5669 case WM_T_82543:
5670 case WM_T_82544:
5671 delay(10);
5672 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5673 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5674 CSR_WRITE_FLUSH(sc);
5675 delay(2000);
5676 break;
5677 case WM_T_82540:
5678 case WM_T_82545:
5679 case WM_T_82545_3:
5680 case WM_T_82546:
5681 case WM_T_82546_3:
5682 delay(5*1000);
5683 /* XXX Disable HW ARPs on ASF enabled adapters */
5684 break;
5685 case WM_T_82541:
5686 case WM_T_82541_2:
5687 case WM_T_82547:
5688 case WM_T_82547_2:
5689 delay(20000);
5690 /* XXX Disable HW ARPs on ASF enabled adapters */
5691 break;
5692 case WM_T_82571:
5693 case WM_T_82572:
5694 case WM_T_82573:
5695 case WM_T_82574:
5696 case WM_T_82583:
5697 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
5698 delay(10);
5699 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5700 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5701 CSR_WRITE_FLUSH(sc);
5702 }
5703 /* check EECD_EE_AUTORD */
5704 wm_get_auto_rd_done(sc);
5705 /*
5706 		 * PHY configuration from the NVM only starts after EECD_AUTO_RD
5707 		 * is set.
5708 */
5709 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
5710 || (sc->sc_type == WM_T_82583))
5711 delay(25*1000);
5712 break;
5713 case WM_T_82575:
5714 case WM_T_82576:
5715 case WM_T_82580:
5716 case WM_T_I350:
5717 case WM_T_I354:
5718 case WM_T_I210:
5719 case WM_T_I211:
5720 case WM_T_80003:
5721 /* check EECD_EE_AUTORD */
5722 wm_get_auto_rd_done(sc);
5723 break;
5724 case WM_T_ICH8:
5725 case WM_T_ICH9:
5726 case WM_T_ICH10:
5727 case WM_T_PCH:
5728 case WM_T_PCH2:
5729 case WM_T_PCH_LPT:
5730 case WM_T_PCH_SPT:
5731 case WM_T_PCH_CNP:
5732 break;
5733 default:
5734 panic("%s: unknown type\n", __func__);
5735 }
5736
5737 /* Check whether EEPROM is present or not */
5738 switch (sc->sc_type) {
5739 case WM_T_82575:
5740 case WM_T_82576:
5741 case WM_T_82580:
5742 case WM_T_I350:
5743 case WM_T_I354:
5744 case WM_T_ICH8:
5745 case WM_T_ICH9:
5746 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
5747 /* Not found */
5748 sc->sc_flags |= WM_F_EEPROM_INVALID;
5749 if (sc->sc_type == WM_T_82575)
5750 wm_reset_init_script_82575(sc);
5751 }
5752 break;
5753 default:
5754 break;
5755 }
5756
5757 if (phy_reset != 0)
5758 wm_phy_post_reset(sc);
5759
5760 if ((sc->sc_type == WM_T_82580)
5761 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
5762 /* Clear global device reset status bit */
5763 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
5764 }
5765
5766 /* Clear any pending interrupt events. */
5767 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5768 reg = CSR_READ(sc, WMREG_ICR);
5769 if (wm_is_using_msix(sc)) {
5770 if (sc->sc_type != WM_T_82574) {
5771 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5772 CSR_WRITE(sc, WMREG_EIAC, 0);
5773 } else
5774 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5775 }
5776
5777 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5778 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5779 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5780 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
5781 reg = CSR_READ(sc, WMREG_KABGTXD);
5782 reg |= KABGTXD_BGSQLBIAS;
5783 CSR_WRITE(sc, WMREG_KABGTXD, reg);
5784 }
5785
5786 /* Reload sc_ctrl */
5787 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5788
5789 wm_set_eee(sc);
5790
5791 /*
5792 * For PCH, this write will make sure that any noise will be detected
5793 * as a CRC error and be dropped rather than show up as a bad packet
5794 * to the DMA engine
5795 */
5796 if (sc->sc_type == WM_T_PCH)
5797 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
5798
5799 if (sc->sc_type >= WM_T_82544)
5800 CSR_WRITE(sc, WMREG_WUC, 0);
5801
5802 if (sc->sc_type < WM_T_82575)
5803 wm_disable_aspm(sc); /* Workaround for some chips */
5804
5805 wm_reset_mdicnfg_82580(sc);
5806
5807 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
5808 wm_pll_workaround_i210(sc);
5809
5810 if (sc->sc_type == WM_T_80003) {
5811 /* Default to TRUE to enable the MDIC W/A */
5812 sc->sc_flags |= WM_F_80003_MDIC_WA;
5813
5814 rv = wm_kmrn_readreg(sc,
5815 KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
5816 if (rv == 0) {
5817 if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
5818 == KUMCTRLSTA_OPMODE_INBAND_MDIO)
5819 sc->sc_flags &= ~WM_F_80003_MDIC_WA;
5820 else
5821 sc->sc_flags |= WM_F_80003_MDIC_WA;
5822 }
5823 }
5824 }
5825
5826 /*
5827 * wm_add_rxbuf:
5828 *
5829  *	Add a receive buffer to the indicated descriptor.
5830 */
5831 static int
5832 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
5833 {
5834 struct wm_softc *sc = rxq->rxq_sc;
5835 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
5836 struct mbuf *m;
5837 int error;
5838
5839 KASSERT(mutex_owned(rxq->rxq_lock));
5840
5841 MGETHDR(m, M_DONTWAIT, MT_DATA);
5842 if (m == NULL)
5843 return ENOBUFS;
5844
5845 MCLGET(m, M_DONTWAIT);
5846 if ((m->m_flags & M_EXT) == 0) {
5847 m_freem(m);
5848 return ENOBUFS;
5849 }
5850
5851 if (rxs->rxs_mbuf != NULL)
5852 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5853
5854 rxs->rxs_mbuf = m;
5855
5856 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5857 /*
5858 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
5859 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
5860 */
5861 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
5862 m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
5863 if (error) {
5864 /* XXX XXX XXX */
5865 aprint_error_dev(sc->sc_dev,
5866 "unable to load rx DMA map %d, error = %d\n", idx, error);
5867 panic("wm_add_rxbuf");
5868 }
5869
5870 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5871 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5872
5873 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5874 if ((sc->sc_rctl & RCTL_EN) != 0)
5875 wm_init_rxdesc(rxq, idx);
5876 } else
5877 wm_init_rxdesc(rxq, idx);
5878
5879 return 0;
5880 }
5881
5882 /*
5883 * wm_rxdrain:
5884 *
5885 * Drain the receive queue.
5886 */
5887 static void
5888 wm_rxdrain(struct wm_rxqueue *rxq)
5889 {
5890 struct wm_softc *sc = rxq->rxq_sc;
5891 struct wm_rxsoft *rxs;
5892 int i;
5893
5894 KASSERT(mutex_owned(rxq->rxq_lock));
5895
5896 for (i = 0; i < WM_NRXDESC; i++) {
5897 rxs = &rxq->rxq_soft[i];
5898 if (rxs->rxs_mbuf != NULL) {
5899 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5900 m_freem(rxs->rxs_mbuf);
5901 rxs->rxs_mbuf = NULL;
5902 }
5903 }
5904 }
5905
5906 /*
5907 * Setup registers for RSS.
5908 *
5909 * XXX not yet VMDq support
5910 */
5911 static void
5912 wm_init_rss(struct wm_softc *sc)
5913 {
5914 uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
5915 int i;
5916
5917 CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
5918
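	/* Fill the redirection table round-robin across the Rx queues. */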
5919 for (i = 0; i < RETA_NUM_ENTRIES; i++) {
5920 unsigned int qid, reta_ent;
5921
5922 qid = i % sc->sc_nqueues;
5923 switch (sc->sc_type) {
5924 case WM_T_82574:
5925 reta_ent = __SHIFTIN(qid,
5926 RETA_ENT_QINDEX_MASK_82574);
5927 break;
5928 case WM_T_82575:
5929 reta_ent = __SHIFTIN(qid,
5930 RETA_ENT_QINDEX1_MASK_82575);
5931 break;
5932 default:
5933 reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
5934 break;
5935 }
5936
5937 reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
5938 reta_reg &= ~RETA_ENTRY_MASK_Q(i);
5939 reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
5940 CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
5941 }
5942
5943 rss_getkey((uint8_t *)rss_key);
5944 for (i = 0; i < RSSRK_NUM_REGS; i++)
5945 CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
5946
5947 if (sc->sc_type == WM_T_82574)
5948 mrqc = MRQC_ENABLE_RSS_MQ_82574;
5949 else
5950 mrqc = MRQC_ENABLE_RSS_MQ;
5951
5952 /*
5953 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
5954 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
5955 */
5956 mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
5957 mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
5958 #if 0
5959 mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
5960 mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
5961 #endif
5962 mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
5963
5964 CSR_WRITE(sc, WMREG_MRQC, mrqc);
5965 }
5966
5967 /*
5968  * Adjust the TX and RX queue numbers which the system actually uses.
5969  *
5970  * The numbers are affected by the parameters below:
5971  * - The number of hardware queues
5972 * - The number of MSI-X vectors (= "nvectors" argument)
5973 * - ncpu
5974 */
5975 static void
5976 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
5977 {
5978 int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
5979
5980 if (nvectors < 2) {
5981 sc->sc_nqueues = 1;
5982 return;
5983 }
5984
5985 switch (sc->sc_type) {
5986 case WM_T_82572:
5987 hw_ntxqueues = 2;
5988 hw_nrxqueues = 2;
5989 break;
5990 case WM_T_82574:
5991 hw_ntxqueues = 2;
5992 hw_nrxqueues = 2;
5993 break;
5994 case WM_T_82575:
5995 hw_ntxqueues = 4;
5996 hw_nrxqueues = 4;
5997 break;
5998 case WM_T_82576:
5999 hw_ntxqueues = 16;
6000 hw_nrxqueues = 16;
6001 break;
6002 case WM_T_82580:
6003 case WM_T_I350:
6004 case WM_T_I354:
6005 hw_ntxqueues = 8;
6006 hw_nrxqueues = 8;
6007 break;
6008 case WM_T_I210:
6009 hw_ntxqueues = 4;
6010 hw_nrxqueues = 4;
6011 break;
6012 case WM_T_I211:
6013 hw_ntxqueues = 2;
6014 hw_nrxqueues = 2;
6015 break;
6016 /*
6017 * The below Ethernet controllers do not support MSI-X;
6018 * this driver doesn't let them use multiqueue.
6019 * - WM_T_80003
6020 * - WM_T_ICH8
6021 * - WM_T_ICH9
6022 * - WM_T_ICH10
6023 * - WM_T_PCH
6024 * - WM_T_PCH2
6025 * - WM_T_PCH_LPT
6026 */
6027 default:
6028 hw_ntxqueues = 1;
6029 hw_nrxqueues = 1;
6030 break;
6031 }
6032
6033 hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
6034
6035 /*
6036 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
6037 	 * the number of queues actually used.
6038 */
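	/* One MSI-X vector is reserved for the link interrupt. */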
6039 if (nvectors < hw_nqueues + 1)
6040 sc->sc_nqueues = nvectors - 1;
6041 else
6042 sc->sc_nqueues = hw_nqueues;
6043
6044 /*
6045 	 * As more queues than CPUs cannot improve scaling, we limit
6046 	 * the number of queues actually used.
6047 */
6048 if (ncpu < sc->sc_nqueues)
6049 sc->sc_nqueues = ncpu;
6050 }
6051
6052 static inline bool
6053 wm_is_using_msix(struct wm_softc *sc)
6054 {
6055
6056 return (sc->sc_nintrs > 1);
6057 }
6058
6059 static inline bool
6060 wm_is_using_multiqueue(struct wm_softc *sc)
6061 {
6062
6063 return (sc->sc_nqueues > 1);
6064 }
6065
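/*
 * wm_softint_establish_queue:
 *
 *	Establish the softint handler for the given queue. On failure,
 *	the hardware interrupt already established for the queue is
 *	disestablished.
 */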
6066 static int
6067 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
6068 {
6069 struct wm_queue *wmq = &sc->sc_queue[qidx];
6070
6071 wmq->wmq_id = qidx;
6072 wmq->wmq_intr_idx = intr_idx;
6073 wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
6074 wm_handle_queue, wmq);
6075 if (wmq->wmq_si != NULL)
6076 return 0;
6077
6078 aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
6079 wmq->wmq_id);
6080 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
6081 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6082 return ENOMEM;
6083 }
6084
6085 /*
6086  * Both single-interrupt MSI and INTx can use this function.
6087 */
6088 static int
6089 wm_setup_legacy(struct wm_softc *sc)
6090 {
6091 pci_chipset_tag_t pc = sc->sc_pc;
6092 const char *intrstr = NULL;
6093 char intrbuf[PCI_INTRSTR_LEN];
6094 int error;
6095
6096 error = wm_alloc_txrx_queues(sc);
6097 if (error) {
6098 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
6099 error);
6100 return ENOMEM;
6101 }
6102 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
6103 sizeof(intrbuf));
6104 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
6105 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
6106 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
6107 if (sc->sc_ihs[0] == NULL) {
6108 aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
6109 (pci_intr_type(pc, sc->sc_intrs[0])
6110 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6111 return ENOMEM;
6112 }
6113
6114 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
6115 sc->sc_nintrs = 1;
6116
6117 return wm_softint_establish_queue(sc, 0, 0);
6118 }
6119
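/*
 * wm_setup_msix:
 *
 *	Allocate the queues and establish the MSI-X interrupts: one
 *	Tx/Rx interrupt per queue plus one link interrupt.
 */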
6120 static int
6121 wm_setup_msix(struct wm_softc *sc)
6122 {
6123 void *vih;
6124 kcpuset_t *affinity;
6125 int qidx, error, intr_idx, txrx_established;
6126 pci_chipset_tag_t pc = sc->sc_pc;
6127 const char *intrstr = NULL;
6128 char intrbuf[PCI_INTRSTR_LEN];
6129 char intr_xname[INTRDEVNAMEBUF];
6130
6131 if (sc->sc_nqueues < ncpu) {
6132 /*
6133 		 * To avoid colliding with other devices' interrupts, the
6134 		 * affinity of the Tx/Rx interrupts starts from CPU#1.
6135 */
6136 sc->sc_affinity_offset = 1;
6137 } else {
6138 /*
6139 		 * In this case, this device uses all CPUs, so we unify the
6140 		 * affinitized cpu_index with the MSI-X vector number for readability.
6141 */
6142 sc->sc_affinity_offset = 0;
6143 }
6144
6145 error = wm_alloc_txrx_queues(sc);
6146 if (error) {
6147 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
6148 error);
6149 return ENOMEM;
6150 }
6151
6152 kcpuset_create(&affinity, false);
6153 intr_idx = 0;
6154
6155 /*
6156 * TX and RX
6157 */
6158 txrx_established = 0;
6159 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6160 struct wm_queue *wmq = &sc->sc_queue[qidx];
6161 int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
6162
6163 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
6164 sizeof(intrbuf));
6165 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
6166 PCI_INTR_MPSAFE, true);
6167 memset(intr_xname, 0, sizeof(intr_xname));
6168 snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
6169 device_xname(sc->sc_dev), qidx);
6170 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
6171 IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
6172 if (vih == NULL) {
6173 aprint_error_dev(sc->sc_dev,
6174 "unable to establish MSI-X(for TX and RX)%s%s\n",
6175 intrstr ? " at " : "",
6176 intrstr ? intrstr : "");
6177
6178 goto fail;
6179 }
6180 kcpuset_zero(affinity);
6181 /* Round-robin affinity */
6182 kcpuset_set(affinity, affinity_to);
6183 error = interrupt_distribute(vih, affinity, NULL);
6184 if (error == 0) {
6185 aprint_normal_dev(sc->sc_dev,
6186 "for TX and RX interrupting at %s affinity to %u\n",
6187 intrstr, affinity_to);
6188 } else {
6189 aprint_normal_dev(sc->sc_dev,
6190 "for TX and RX interrupting at %s\n", intrstr);
6191 }
6192 sc->sc_ihs[intr_idx] = vih;
6193 if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
6194 goto fail;
6195 txrx_established++;
6196 intr_idx++;
6197 }
6198
6199 /* LINK */
6200 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
6201 sizeof(intrbuf));
6202 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
6203 memset(intr_xname, 0, sizeof(intr_xname));
6204 snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
6205 device_xname(sc->sc_dev));
6206 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
6207 IPL_NET, wm_linkintr_msix, sc, intr_xname);
6208 if (vih == NULL) {
6209 aprint_error_dev(sc->sc_dev,
6210 "unable to establish MSI-X(for LINK)%s%s\n",
6211 intrstr ? " at " : "",
6212 intrstr ? intrstr : "");
6213
6214 goto fail;
6215 }
6216 /* Keep default affinity to LINK interrupt */
6217 aprint_normal_dev(sc->sc_dev,
6218 "for LINK interrupting at %s\n", intrstr);
6219 sc->sc_ihs[intr_idx] = vih;
6220 sc->sc_link_intr_idx = intr_idx;
6221
6222 sc->sc_nintrs = sc->sc_nqueues + 1;
6223 kcpuset_destroy(affinity);
6224 return 0;
6225
6226 fail:
6227 for (qidx = 0; qidx < txrx_established; qidx++) {
6228 struct wm_queue *wmq = &sc->sc_queue[qidx];
6229 pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
6230 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6231 }
6232
6233 kcpuset_destroy(affinity);
6234 return ENOMEM;
6235 }
6236
6237 static void
6238 wm_unset_stopping_flags(struct wm_softc *sc)
6239 {
6240 int i;
6241
6242 KASSERT(mutex_owned(sc->sc_core_lock));
6243
6244 /* Must unset stopping flags in ascending order. */
6245 for (i = 0; i < sc->sc_nqueues; i++) {
6246 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6247 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6248
6249 mutex_enter(txq->txq_lock);
6250 txq->txq_stopping = false;
6251 mutex_exit(txq->txq_lock);
6252
6253 mutex_enter(rxq->rxq_lock);
6254 rxq->rxq_stopping = false;
6255 mutex_exit(rxq->rxq_lock);
6256 }
6257
6258 sc->sc_core_stopping = false;
6259 }
6260
6261 static void
6262 wm_set_stopping_flags(struct wm_softc *sc)
6263 {
6264 int i;
6265
6266 KASSERT(mutex_owned(sc->sc_core_lock));
6267
6268 sc->sc_core_stopping = true;
6269
6270 /* Must set stopping flags in ascending order. */
6271 for (i = 0; i < sc->sc_nqueues; i++) {
6272 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6273 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6274
6275 mutex_enter(rxq->rxq_lock);
6276 rxq->rxq_stopping = true;
6277 mutex_exit(rxq->rxq_lock);
6278
6279 mutex_enter(txq->txq_lock);
6280 txq->txq_stopping = true;
6281 mutex_exit(txq->txq_lock);
6282 }
6283 }
6284
6285 /*
6286 * Write interrupt interval value to ITR or EITR
6287 */
6288 static void
6289 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
6290 {
6291
6292 if (!wmq->wmq_set_itr)
6293 return;
6294
6295 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6296 uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
6297
6298 /*
6299 		 * The 82575 doesn't have the CNT_INGR field,
6300 		 * so software overwrites the counter field instead.
6301 */
6302 if (sc->sc_type == WM_T_82575)
6303 eitr |= __SHIFTIN(wmq->wmq_itr,
6304 EITR_COUNTER_MASK_82575);
6305 else
6306 eitr |= EITR_CNT_INGR;
6307
6308 CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
6309 } else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
6310 /*
6311 		 * The 82574 has both ITR and EITR. Set EITR when we use
6312 		 * the multiqueue function with MSI-X.
6313 */
6314 CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
6315 wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
6316 } else {
6317 KASSERT(wmq->wmq_id == 0);
6318 CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
6319 }
6320
6321 wmq->wmq_set_itr = false;
6322 }
6323
6324 /*
6325 * TODO
6326  * The dynamic ITR calculation below is almost the same as in Linux igb;
6327  * however, it does not fit wm(4), so we keep AIM disabled until we find
6328  * an appropriate way to calculate the ITR.
6329 */
6330 /*
6331  * Calculate the interrupt interval value to be written to the register
6332  * by wm_itrs_writereg(). This function does not write the ITR/EITR register.
6333 */
6334 static void
6335 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
6336 {
6337 #ifdef NOTYET
6338 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6339 struct wm_txqueue *txq = &wmq->wmq_txq;
6340 uint32_t avg_size = 0;
6341 uint32_t new_itr;
6342
6343 if (rxq->rxq_packets)
6344 avg_size = rxq->rxq_bytes / rxq->rxq_packets;
6345 if (txq->txq_packets)
6346 avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
6347
6348 if (avg_size == 0) {
6349 new_itr = 450; /* restore default value */
6350 goto out;
6351 }
6352
6353 /* Add 24 bytes to size to account for CRC, preamble, and gap */
6354 avg_size += 24;
6355
6356 /* Don't starve jumbo frames */
6357 avg_size = uimin(avg_size, 3000);
6358
6359 /* Give a little boost to mid-size frames */
6360 if ((avg_size > 300) && (avg_size < 1200))
6361 new_itr = avg_size / 3;
6362 else
6363 new_itr = avg_size / 2;
6364
6365 out:
6366 /*
6367 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
6368 * controllers. See sc->sc_itr_init setting in wm_init_locked().
6369 */
6370 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
6371 new_itr *= 4;
6372
6373 if (new_itr != wmq->wmq_itr) {
6374 wmq->wmq_itr = new_itr;
6375 wmq->wmq_set_itr = true;
6376 } else
6377 wmq->wmq_set_itr = false;
6378
6379 rxq->rxq_packets = 0;
6380 rxq->rxq_bytes = 0;
6381 txq->txq_packets = 0;
6382 txq->txq_bytes = 0;
6383 #endif
6384 }
6385
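/*
 * wm_init_sysctls:
 *
 *	Create the per-device sysctl tree, including per-queue nodes for
 *	TX/RX descriptor and queue state.
 */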
6386 static void
6387 wm_init_sysctls(struct wm_softc *sc)
6388 {
6389 struct sysctllog **log;
6390 const struct sysctlnode *rnode, *qnode, *cnode;
6391 int i, rv;
6392 const char *dvname;
6393
6394 log = &sc->sc_sysctllog;
6395 dvname = device_xname(sc->sc_dev);
6396
6397 rv = sysctl_createv(log, 0, NULL, &rnode,
6398 0, CTLTYPE_NODE, dvname,
6399 SYSCTL_DESCR("wm information and settings"),
6400 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6401 if (rv != 0)
6402 goto err;
6403
6404 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6405 CTLTYPE_BOOL, "txrx_workqueue",
6406 SYSCTL_DESCR("Use workqueue for packet processing"),
6407 NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
6408 if (rv != 0)
6409 goto teardown;
6410
6411 for (i = 0; i < sc->sc_nqueues; i++) {
6412 struct wm_queue *wmq = &sc->sc_queue[i];
6413 struct wm_txqueue *txq = &wmq->wmq_txq;
6414 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6415
6416 snprintf(sc->sc_queue[i].sysctlname,
6417 sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
6418
6419 if (sysctl_createv(log, 0, &rnode, &qnode,
6420 0, CTLTYPE_NODE,
6421 sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
6422 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
6423 break;
6424
6425 if (sysctl_createv(log, 0, &qnode, &cnode,
6426 CTLFLAG_READONLY, CTLTYPE_INT,
6427 "txq_free", SYSCTL_DESCR("TX queue free"),
6428 NULL, 0, &txq->txq_free,
6429 0, CTL_CREATE, CTL_EOL) != 0)
6430 break;
6431 if (sysctl_createv(log, 0, &qnode, &cnode,
6432 CTLFLAG_READONLY, CTLTYPE_INT,
6433 "txd_head", SYSCTL_DESCR("TX descriptor head"),
6434 wm_sysctl_tdh_handler, 0, (void *)txq,
6435 0, CTL_CREATE, CTL_EOL) != 0)
6436 break;
6437 if (sysctl_createv(log, 0, &qnode, &cnode,
6438 CTLFLAG_READONLY, CTLTYPE_INT,
6439 "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
6440 wm_sysctl_tdt_handler, 0, (void *)txq,
6441 0, CTL_CREATE, CTL_EOL) != 0)
6442 break;
6443 if (sysctl_createv(log, 0, &qnode, &cnode,
6444 CTLFLAG_READONLY, CTLTYPE_INT,
6445 "txq_next", SYSCTL_DESCR("TX queue next"),
6446 NULL, 0, &txq->txq_next,
6447 0, CTL_CREATE, CTL_EOL) != 0)
6448 break;
6449 if (sysctl_createv(log, 0, &qnode, &cnode,
6450 CTLFLAG_READONLY, CTLTYPE_INT,
6451 "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
6452 NULL, 0, &txq->txq_sfree,
6453 0, CTL_CREATE, CTL_EOL) != 0)
6454 break;
6455 if (sysctl_createv(log, 0, &qnode, &cnode,
6456 CTLFLAG_READONLY, CTLTYPE_INT,
6457 "txq_snext", SYSCTL_DESCR("TX queue snext"),
6458 NULL, 0, &txq->txq_snext,
6459 0, CTL_CREATE, CTL_EOL) != 0)
6460 break;
6461 if (sysctl_createv(log, 0, &qnode, &cnode,
6462 CTLFLAG_READONLY, CTLTYPE_INT,
6463 "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
6464 NULL, 0, &txq->txq_sdirty,
6465 0, CTL_CREATE, CTL_EOL) != 0)
6466 break;
6467 if (sysctl_createv(log, 0, &qnode, &cnode,
6468 CTLFLAG_READONLY, CTLTYPE_INT,
6469 "txq_flags", SYSCTL_DESCR("TX queue flags"),
6470 NULL, 0, &txq->txq_flags,
6471 0, CTL_CREATE, CTL_EOL) != 0)
6472 break;
6473 if (sysctl_createv(log, 0, &qnode, &cnode,
6474 CTLFLAG_READONLY, CTLTYPE_BOOL,
6475 "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
6476 NULL, 0, &txq->txq_stopping,
6477 0, CTL_CREATE, CTL_EOL) != 0)
6478 break;
6479 if (sysctl_createv(log, 0, &qnode, &cnode,
6480 CTLFLAG_READONLY, CTLTYPE_BOOL,
6481 "txq_sending", SYSCTL_DESCR("TX queue sending"),
6482 NULL, 0, &txq->txq_sending,
6483 0, CTL_CREATE, CTL_EOL) != 0)
6484 break;
6485
6486 if (sysctl_createv(log, 0, &qnode, &cnode,
6487 CTLFLAG_READONLY, CTLTYPE_INT,
6488 "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
6489 NULL, 0, &rxq->rxq_ptr,
6490 0, CTL_CREATE, CTL_EOL) != 0)
6491 break;
6492 }
6493
6494 #ifdef WM_DEBUG
6495 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6496 CTLTYPE_INT, "debug_flags",
6497 SYSCTL_DESCR(
6498 "Debug flags:\n" \
6499 "\t0x01 LINK\n" \
6500 "\t0x02 TX\n" \
6501 "\t0x04 RX\n" \
6502 "\t0x08 GMII\n" \
6503 "\t0x10 MANAGE\n" \
6504 "\t0x20 NVM\n" \
6505 "\t0x40 INIT\n" \
6506 "\t0x80 LOCK"),
6507 wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
6508 if (rv != 0)
6509 goto teardown;
6510 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6511 CTLTYPE_BOOL, "trigger_reset",
6512 SYSCTL_DESCR("Trigger an interface reset"),
6513 NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
6514 if (rv != 0)
6515 goto teardown;
6516 #endif
6517
6518 return;
6519
6520 teardown:
6521 sysctl_teardown(log);
6522 err:
6523 sc->sc_sysctllog = NULL;
6524 device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
6525 __func__, rv);
6526 }
6527
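/*
 * wm_update_stats:
 *
 *	Read the MAC statistics registers and add their values to the
 *	event counters and interface statistics. The set of registers
 *	read depends on the chip generation.
 */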
6528 static void
6529 wm_update_stats(struct wm_softc *sc)
6530 {
6531 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6532 uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
6533 cexterr;
6534
6535 crcerrs = CSR_READ(sc, WMREG_CRCERRS);
6536 symerrc = CSR_READ(sc, WMREG_SYMERRC);
6537 mpc = CSR_READ(sc, WMREG_MPC);
6538 colc = CSR_READ(sc, WMREG_COLC);
6539 sec = CSR_READ(sc, WMREG_SEC);
6540 rlec = CSR_READ(sc, WMREG_RLEC);
6541
6542 WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
6543 WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
6544 WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
6545 WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
6546 WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
6547 WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
6548
6549 if (sc->sc_type >= WM_T_82543) {
6550 algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
6551 rxerrc = CSR_READ(sc, WMREG_RXERRC);
6552 WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
6553 WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
6554 if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc)) {
6555 cexterr = CSR_READ(sc, WMREG_CEXTERR);
6556 WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
6557 } else {
6558 cexterr = 0;
6559 /* Excessive collision + Link down */
6560 WM_EVCNT_ADD(&sc->sc_ev_htdpmc,
6561 CSR_READ(sc, WMREG_HTDPMC));
6562 }
6563
6564 WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
6565 WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
6566 if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
6567 WM_EVCNT_ADD(&sc->sc_ev_tsctfc,
6568 CSR_READ(sc, WMREG_TSCTFC));
6569 else {
6570 WM_EVCNT_ADD(&sc->sc_ev_cbrmpc,
6571 CSR_READ(sc, WMREG_CBRMPC));
6572 }
6573 } else
6574 algnerrc = rxerrc = cexterr = 0;
6575
6576 if (sc->sc_type >= WM_T_82542_2_1) {
6577 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
6578 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
6579 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
6580 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
6581 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
6582 }
6583
6584 WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
6585 WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
6586 WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
6587 WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
6588
6589 if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
6590 WM_EVCNT_ADD(&sc->sc_ev_cbtmpc, CSR_READ(sc, WMREG_CBTMPC));
6591 }
6592
6593 WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
6594 WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
6595 WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
6596 WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
6597 WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
6598 WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
6599 WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
6600 WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
6601 WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
6602 WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
6603 WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
6604
6605 WM_EVCNT_ADD(&sc->sc_ev_gorc,
6606 CSR_READ(sc, WMREG_GORCL) +
6607 ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
6608 WM_EVCNT_ADD(&sc->sc_ev_gotc,
6609 CSR_READ(sc, WMREG_GOTCL) +
6610 ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
6611
6612 WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
6613 WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
6614 WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
6615 WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
6616 WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
6617
6618 if (sc->sc_type >= WM_T_82540) {
6619 WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
6620 WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
6621 WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
6622 }
6623
6624 /*
6625 * The TOR(L) register includes:
6626 * - Error
6627 * - Flow control
6628  * - Broadcast rejected (This note appears in the 82574 and newer
6629  *   datasheets. What does "broadcast rejected" mean?)
6630 */
6631 WM_EVCNT_ADD(&sc->sc_ev_tor,
6632 CSR_READ(sc, WMREG_TORL) +
6633 ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
6634 WM_EVCNT_ADD(&sc->sc_ev_tot,
6635 CSR_READ(sc, WMREG_TOTL) +
6636 ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
6637
6638 WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
6639 WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
6640 WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
6641 WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
6642 WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
6643 WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
6644 WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
6645 WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
6646 WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
6647 WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
6648 WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
6649 if (sc->sc_type < WM_T_82575) {
6650 WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
6651 WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
6652 WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
6653 WM_EVCNT_ADD(&sc->sc_ev_ictxact, CSR_READ(sc, WMREG_ICTXATC));
6654 WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
6655 WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc,
6656 CSR_READ(sc, WMREG_ICTXQMTC));
6657 WM_EVCNT_ADD(&sc->sc_ev_rxdmtc,
6658 CSR_READ(sc, WMREG_ICRXDMTC));
6659 WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
6660 } else if (!WM_IS_ICHPCH(sc)) {
6661 WM_EVCNT_ADD(&sc->sc_ev_rpthc, CSR_READ(sc, WMREG_RPTHC));
6662 WM_EVCNT_ADD(&sc->sc_ev_debug1, CSR_READ(sc, WMREG_DEBUG1));
6663 WM_EVCNT_ADD(&sc->sc_ev_debug2, CSR_READ(sc, WMREG_DEBUG2));
6664 WM_EVCNT_ADD(&sc->sc_ev_debug3, CSR_READ(sc, WMREG_DEBUG3));
6665 WM_EVCNT_ADD(&sc->sc_ev_hgptc, CSR_READ(sc, WMREG_HGPTC));
6666 WM_EVCNT_ADD(&sc->sc_ev_debug4, CSR_READ(sc, WMREG_DEBUG4));
6667 WM_EVCNT_ADD(&sc->sc_ev_rxdmtc, CSR_READ(sc, WMREG_RXDMTC));
6668 WM_EVCNT_ADD(&sc->sc_ev_htcbdpc, CSR_READ(sc, WMREG_HTCBDPC));
6669
6670 WM_EVCNT_ADD(&sc->sc_ev_hgorc,
6671 CSR_READ(sc, WMREG_HGORCL) +
6672 ((uint64_t)CSR_READ(sc, WMREG_HGORCH) << 32));
6673 WM_EVCNT_ADD(&sc->sc_ev_hgotc,
6674 CSR_READ(sc, WMREG_HGOTCL) +
6675 ((uint64_t)CSR_READ(sc, WMREG_HGOTCH) << 32));
6676 WM_EVCNT_ADD(&sc->sc_ev_lenerrs, CSR_READ(sc, WMREG_LENERRS));
6677 }
6678 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
6679 WM_EVCNT_ADD(&sc->sc_ev_tlpic, CSR_READ(sc, WMREG_TLPIC));
6680 WM_EVCNT_ADD(&sc->sc_ev_rlpic, CSR_READ(sc, WMREG_RLPIC));
6681 if ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0) {
6682 WM_EVCNT_ADD(&sc->sc_ev_b2ogprc,
6683 CSR_READ(sc, WMREG_B2OGPRC));
6684 WM_EVCNT_ADD(&sc->sc_ev_o2bspc,
6685 CSR_READ(sc, WMREG_O2BSPC));
6686 WM_EVCNT_ADD(&sc->sc_ev_b2ospc,
6687 CSR_READ(sc, WMREG_B2OSPC));
6688 WM_EVCNT_ADD(&sc->sc_ev_o2bgptc,
6689 CSR_READ(sc, WMREG_O2BGPTC));
6690 }
6691 WM_EVCNT_ADD(&sc->sc_ev_scvpc, CSR_READ(sc, WMREG_SCVPC));
6692 WM_EVCNT_ADD(&sc->sc_ev_hrmpc, CSR_READ(sc, WMREG_HRMPC));
6693 }
6694 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
6695 if_statadd_ref(nsr, if_collisions, colc);
6696 if_statadd_ref(nsr, if_ierrors,
6697 crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
6698 /*
6699 * WMREG_RNBC is incremented when there are no available buffers in
6700 	 * host memory. It does not count dropped packets, because an Ethernet
6701 	 * controller can receive packets in such a case if there is space in
6702 	 * the PHY's FIFO.
6703 	 *
6704 	 * If you want to know the WMREG_RNBC count, you should use something
6705 	 * such as your own EVCNT instead of if_iqdrops.
6706 */
6707 if_statadd_ref(nsr, if_iqdrops, mpc);
6708 IF_STAT_PUTREF(ifp);
6709 }
6710
6711 /*
6712 * wm_init: [ifnet interface function]
6713 *
6714 * Initialize the interface.
6715 */
6716 static int
6717 wm_init(struct ifnet *ifp)
6718 {
6719 struct wm_softc *sc = ifp->if_softc;
6720 int ret;
6721
6722 KASSERT(IFNET_LOCKED(ifp));
6723
6724 if (sc->sc_dying)
6725 return ENXIO;
6726
6727 mutex_enter(sc->sc_core_lock);
6728 ret = wm_init_locked(ifp);
6729 mutex_exit(sc->sc_core_lock);
6730
6731 return ret;
6732 }
6733
6734 static int
6735 wm_init_locked(struct ifnet *ifp)
6736 {
6737 struct wm_softc *sc = ifp->if_softc;
6738 struct ethercom *ec = &sc->sc_ethercom;
6739 int i, j, trynum, error = 0;
6740 uint32_t reg, sfp_mask = 0;
6741
6742 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
6743 device_xname(sc->sc_dev), __func__));
6744 KASSERT(IFNET_LOCKED(ifp));
6745 KASSERT(mutex_owned(sc->sc_core_lock));
6746
6747 /*
6748 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
6749 	 * There is a small but measurable benefit to avoiding the adjustment
6750 * of the descriptor so that the headers are aligned, for normal mtu,
6751 * on such platforms. One possibility is that the DMA itself is
6752 * slightly more efficient if the front of the entire packet (instead
6753 * of the front of the headers) is aligned.
6754 *
6755 * Note we must always set align_tweak to 0 if we are using
6756 * jumbo frames.
6757 */
6758 #ifdef __NO_STRICT_ALIGNMENT
6759 sc->sc_align_tweak = 0;
6760 #else
6761 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
6762 sc->sc_align_tweak = 0;
6763 else
6764 sc->sc_align_tweak = 2;
6765 #endif /* __NO_STRICT_ALIGNMENT */
6766
6767 /* Cancel any pending I/O. */
6768 wm_stop_locked(ifp, false, false);
6769
6770 /* Update statistics before reset */
6771 if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
6772 if_ierrors, CSR_READ(sc, WMREG_RXERRC));
6773
6774 /* >= PCH_SPT hardware workaround before reset. */
6775 if (sc->sc_type >= WM_T_PCH_SPT)
6776 wm_flush_desc_rings(sc);
6777
6778 /* Reset the chip to a known state. */
6779 wm_reset(sc);
6780
6781 /*
6782 * AMT based hardware can now take control from firmware
6783 * Do this after reset.
6784 */
6785 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
6786 wm_get_hw_control(sc);
6787
6788 if ((sc->sc_type >= WM_T_PCH_SPT) &&
6789 pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
6790 wm_legacy_irq_quirk_spt(sc);
6791
6792 /* Init hardware bits */
6793 wm_initialize_hardware_bits(sc);
6794
6795 /* Reset the PHY. */
6796 if (sc->sc_flags & WM_F_HAS_MII)
6797 wm_gmii_reset(sc);
6798
6799 if (sc->sc_type >= WM_T_ICH8) {
6800 reg = CSR_READ(sc, WMREG_GCR);
6801 /*
6802 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
6803 * default after reset.
6804 */
6805 if (sc->sc_type == WM_T_ICH8)
6806 reg |= GCR_NO_SNOOP_ALL;
6807 else
6808 reg &= ~GCR_NO_SNOOP_ALL;
6809 CSR_WRITE(sc, WMREG_GCR, reg);
6810 }
6811
6812 if ((sc->sc_type >= WM_T_ICH8)
6813 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
6814 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
6815
6816 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6817 reg |= CTRL_EXT_RO_DIS;
6818 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6819 }
6820
6821 /* Calculate (E)ITR value */
6822 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
6823 /*
6824 * For NEWQUEUE's EITR (except for 82575).
6825 * 82575's EITR should be set same throttling value as other
6826 * old controllers' ITR because the interrupt/sec calculation
6827 * is the same, that is, 1,000,000,000 / (N * 256).
6828 *
6829 * 82574's EITR should be set same throttling value as ITR.
6830 *
6831 * For N interrupts/sec, set this value to:
6832 * 1,000,000 / N in contrast to ITR throttling value.
6833 */
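		/* 1,000,000 / 450 is approximately 2222 interrupts/sec. */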
6834 sc->sc_itr_init = 450;
6835 } else if (sc->sc_type >= WM_T_82543) {
6836 /*
6837 * Set up the interrupt throttling register (units of 256ns)
6838 * Note that a footnote in Intel's documentation says this
6839 * ticker runs at 1/4 the rate when the chip is in 100Mbit
6840 		 * or 10Mbit mode. Empirically, it appears that the same
6841 		 * is true for the 1024ns units of the other
6842 * interrupt-related timer registers -- so, really, we ought
6843 * to divide this value by 4 when the link speed is low.
6844 *
6845 * XXX implement this division at link speed change!
6846 */
6847
6848 /*
6849 * For N interrupts/sec, set this value to:
6850 * 1,000,000,000 / (N * 256). Note that we set the
6851 * absolute and packet timer values to this value
6852 * divided by 4 to get "simple timer" behavior.
6853 */
6854 sc->sc_itr_init = 1500; /* 2604 ints/sec */
6855 }
6856
6857 error = wm_init_txrx_queues(sc);
6858 if (error)
6859 goto out;
6860
6861 if (((sc->sc_flags & WM_F_SGMII) == 0) &&
6862 (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
6863 (sc->sc_type >= WM_T_82575))
6864 wm_serdes_power_up_link_82575(sc);
6865
6866 /* Clear out the VLAN table -- we don't use it (yet). */
6867 CSR_WRITE(sc, WMREG_VET, 0);
6868 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
6869 trynum = 10; /* Due to hw errata */
6870 else
6871 trynum = 1;
6872 for (i = 0; i < WM_VLAN_TABSIZE; i++)
6873 for (j = 0; j < trynum; j++)
6874 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
6875
6876 /*
6877 * Set up flow-control parameters.
6878 *
6879 * XXX Values could probably stand some tuning.
6880 */
6881 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
6882 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
6883 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
6884 && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
6885 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
6886 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
6887 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
6888 }
6889
6890 sc->sc_fcrtl = FCRTL_DFLT;
6891 if (sc->sc_type < WM_T_82543) {
6892 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
6893 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
6894 } else {
6895 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
6896 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
6897 }
6898
6899 if (sc->sc_type == WM_T_80003)
6900 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
6901 else
6902 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
6903
6904 /* Write the control register. */
6905 wm_set_vlan(sc);
6906
6907 if (sc->sc_flags & WM_F_HAS_MII) {
6908 uint16_t kmreg;
6909
6910 switch (sc->sc_type) {
6911 case WM_T_80003:
6912 case WM_T_ICH8:
6913 case WM_T_ICH9:
6914 case WM_T_ICH10:
6915 case WM_T_PCH:
6916 case WM_T_PCH2:
6917 case WM_T_PCH_LPT:
6918 case WM_T_PCH_SPT:
6919 case WM_T_PCH_CNP:
6920 /*
6921 * Set the MAC to wait the maximum time between each
6922 * iteration and to increase the max iterations when
6923 * polling the PHY; this fixes erroneous timeouts at
6924 * 10Mbps.
6925 */
6926 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
6927 0xFFFF);
6928 wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6929 &kmreg);
6930 kmreg |= 0x3F;
6931 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6932 kmreg);
6933 break;
6934 default:
6935 break;
6936 }
6937
6938 if (sc->sc_type == WM_T_80003) {
6939 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6940 reg &= ~CTRL_EXT_LINK_MODE_MASK;
6941 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6942
6943 /* Bypass RX and TX FIFOs */
6944 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
6945 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
6946 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
6947 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
6948 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
6949 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
6950 }
6951 }
6952 #if 0
6953 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
6954 #endif
6955
6956 /* Set up checksum offload parameters. */
6957 reg = CSR_READ(sc, WMREG_RXCSUM);
6958 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
6959 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
6960 reg |= RXCSUM_IPOFL;
6961 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
6962 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
6963 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
6964 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
6965 CSR_WRITE(sc, WMREG_RXCSUM, reg);
6966
6967 /* Set up the MSI-X related registers */
6968 if (wm_is_using_msix(sc)) {
6969 uint32_t ivar, qintr_idx;
6970 struct wm_queue *wmq;
6971 unsigned int qid;
6972
6973 if (sc->sc_type == WM_T_82575) {
6974 /* Interrupt control */
6975 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6976 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
6977 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6978
6979 /* TX and RX */
6980 for (i = 0; i < sc->sc_nqueues; i++) {
6981 wmq = &sc->sc_queue[i];
6982 CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
6983 EITR_TX_QUEUE(wmq->wmq_id)
6984 | EITR_RX_QUEUE(wmq->wmq_id));
6985 }
6986 /* Link status */
6987 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
6988 EITR_OTHER);
6989 } else if (sc->sc_type == WM_T_82574) {
6990 /* Interrupt control */
6991 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6992 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
6993 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6994
6995 /*
6996 * Work around an issue with spurious interrupts in
6997 * MSI-X mode.
6998 * At wm_initialize_hardware_bits(), sc_nintrs has not been
6999 * initialized yet, so re-initialize WMREG_RFCTL here.
7000 */
7001 reg = CSR_READ(sc, WMREG_RFCTL);
7002 reg |= WMREG_RFCTL_ACKDIS;
7003 CSR_WRITE(sc, WMREG_RFCTL, reg);
7004
7005 ivar = 0;
7006 /* TX and RX */
7007 for (i = 0; i < sc->sc_nqueues; i++) {
7008 wmq = &sc->sc_queue[i];
7009 qid = wmq->wmq_id;
7010 qintr_idx = wmq->wmq_intr_idx;
7011
7012 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
7013 IVAR_TX_MASK_Q_82574(qid));
7014 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
7015 IVAR_RX_MASK_Q_82574(qid));
7016 }
7017 /* Link status */
7018 ivar |= __SHIFTIN((IVAR_VALID_82574
7019 | sc->sc_link_intr_idx), IVAR_OTHER_MASK);
7020 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
7021 } else {
7022 /* Interrupt control */
7023 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
7024 | GPIE_EIAME | GPIE_PBA);
7025
7026 switch (sc->sc_type) {
7027 case WM_T_82580:
7028 case WM_T_I350:
7029 case WM_T_I354:
7030 case WM_T_I210:
7031 case WM_T_I211:
7032 /* TX and RX */
7033 for (i = 0; i < sc->sc_nqueues; i++) {
7034 wmq = &sc->sc_queue[i];
7035 qid = wmq->wmq_id;
7036 qintr_idx = wmq->wmq_intr_idx;
7037
7038 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
7039 ivar &= ~IVAR_TX_MASK_Q(qid);
7040 ivar |= __SHIFTIN((qintr_idx
7041 | IVAR_VALID),
7042 IVAR_TX_MASK_Q(qid));
7043 ivar &= ~IVAR_RX_MASK_Q(qid);
7044 ivar |= __SHIFTIN((qintr_idx
7045 | IVAR_VALID),
7046 IVAR_RX_MASK_Q(qid));
7047 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
7048 }
7049 break;
7050 case WM_T_82576:
7051 /* TX and RX */
7052 for (i = 0; i < sc->sc_nqueues; i++) {
7053 wmq = &sc->sc_queue[i];
7054 qid = wmq->wmq_id;
7055 qintr_idx = wmq->wmq_intr_idx;
7056
7057 ivar = CSR_READ(sc,
7058 WMREG_IVAR_Q_82576(qid));
7059 ivar &= ~IVAR_TX_MASK_Q_82576(qid);
7060 ivar |= __SHIFTIN((qintr_idx
7061 | IVAR_VALID),
7062 IVAR_TX_MASK_Q_82576(qid));
7063 ivar &= ~IVAR_RX_MASK_Q_82576(qid);
7064 ivar |= __SHIFTIN((qintr_idx
7065 | IVAR_VALID),
7066 IVAR_RX_MASK_Q_82576(qid));
7067 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
7068 ivar);
7069 }
7070 break;
7071 default:
7072 break;
7073 }
7074
7075 /* Link status */
7076 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
7077 IVAR_MISC_OTHER);
7078 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
7079 }
7080
7081 if (wm_is_using_multiqueue(sc)) {
7082 wm_init_rss(sc);
7083
7084 /*
7085 * NOTE: Receive full-packet checksum offload
7086 * is mutually exclusive with multiqueue; however,
7087 * this is not the same as the TCP/IP checksum
7088 * offloads, which still work.
7089 */
7090 reg = CSR_READ(sc, WMREG_RXCSUM);
7091 reg |= RXCSUM_PCSD;
7092 CSR_WRITE(sc, WMREG_RXCSUM, reg);
7093 }
7094 }
7095
7096 /* Set up the interrupt registers. */
7097 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
7098
7099 /* Enable SFP module insertion interrupt if it's required */
7100 if ((sc->sc_flags & WM_F_SFP) != 0) {
7101 sc->sc_ctrl |= CTRL_EXTLINK_EN;
7102 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7103 sfp_mask = ICR_GPI(0);
7104 }
7105
7106 if (wm_is_using_msix(sc)) {
7107 uint32_t mask;
7108 struct wm_queue *wmq;
7109
7110 switch (sc->sc_type) {
7111 case WM_T_82574:
7112 mask = 0;
7113 for (i = 0; i < sc->sc_nqueues; i++) {
7114 wmq = &sc->sc_queue[i];
7115 mask |= ICR_TXQ(wmq->wmq_id);
7116 mask |= ICR_RXQ(wmq->wmq_id);
7117 }
7118 mask |= ICR_OTHER;
7119 CSR_WRITE(sc, WMREG_EIAC_82574, mask);
7120 CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
7121 break;
7122 default:
7123 if (sc->sc_type == WM_T_82575) {
7124 mask = 0;
7125 for (i = 0; i < sc->sc_nqueues; i++) {
7126 wmq = &sc->sc_queue[i];
7127 mask |= EITR_TX_QUEUE(wmq->wmq_id);
7128 mask |= EITR_RX_QUEUE(wmq->wmq_id);
7129 }
7130 mask |= EITR_OTHER;
7131 } else {
7132 mask = 0;
7133 for (i = 0; i < sc->sc_nqueues; i++) {
7134 wmq = &sc->sc_queue[i];
7135 mask |= 1 << wmq->wmq_intr_idx;
7136 }
7137 mask |= 1 << sc->sc_link_intr_idx;
7138 }
7139 CSR_WRITE(sc, WMREG_EIAC, mask);
7140 CSR_WRITE(sc, WMREG_EIAM, mask);
7141 CSR_WRITE(sc, WMREG_EIMS, mask);
7142
7143 /* For other interrupts */
7144 CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
7145 break;
7146 }
7147 } else {
7148 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
7149 ICR_RXO | ICR_RXT0 | sfp_mask;
7150 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
7151 }
7152
7153 /* Set up the inter-packet gap. */
7154 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7155
7156 if (sc->sc_type >= WM_T_82543) {
7157 for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7158 struct wm_queue *wmq = &sc->sc_queue[qidx];
7159 wm_itrs_writereg(sc, wmq);
7160 }
7161 /*
7162 * Link interrupts occur much less frequently than TX and
7163 * RX interrupts, so we don't tune the
7164 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
7165 * if_igb does.
7166 */
7167 }
7168
7169 /* Set the VLAN EtherType. */
7170 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
7171
7172 /*
7173 * Set up the transmit control register; we start out with
7174 * a collision distance suitable for FDX, but update it when
7175 * we resolve the media type.
7176 */
7177 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
7178 | TCTL_CT(TX_COLLISION_THRESHOLD)
7179 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7180 if (sc->sc_type >= WM_T_82571)
7181 sc->sc_tctl |= TCTL_MULR;
7182 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7183
7184 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7185 /* Write TDT after TCTL.EN is set. See the documentation. */
7186 CSR_WRITE(sc, WMREG_TDT(0), 0);
7187 }
7188
7189 if (sc->sc_type == WM_T_80003) {
7190 reg = CSR_READ(sc, WMREG_TCTL_EXT);
7191 reg &= ~TCTL_EXT_GCEX_MASK;
7192 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
7193 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
7194 }
7195
7196 /* Set the media. */
7197 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
7198 goto out;
7199
7200 /* Configure for OS presence */
7201 wm_init_manageability(sc);
7202
7203 /*
7204 * Set up the receive control register; we actually program the
7205 * register when we set the receive filter. Use multicast address
7206 * offset type 0.
7207 *
7208 * Only the i82544 has the ability to strip the incoming CRC, so we
7209 * don't enable that feature.
7210 */
7211 sc->sc_mchash_type = 0;
7212 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
7213 | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
7214
7215 /* The 82574 uses the one-buffer extended Rx descriptor. */
7216 if (sc->sc_type == WM_T_82574)
7217 sc->sc_rctl |= RCTL_DTYP_ONEBUF;
7218
7219 if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
7220 sc->sc_rctl |= RCTL_SECRC;
7221
7222 if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
7223 && (ifp->if_mtu > ETHERMTU)) {
7224 sc->sc_rctl |= RCTL_LPE;
7225 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7226 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
7227 }
7228
7229 if (MCLBYTES == 2048)
7230 sc->sc_rctl |= RCTL_2k;
7231 else {
7232 if (sc->sc_type >= WM_T_82543) {
7233 switch (MCLBYTES) {
7234 case 4096:
7235 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
7236 break;
7237 case 8192:
7238 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
7239 break;
7240 case 16384:
7241 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
7242 break;
7243 default:
7244 panic("wm_init: MCLBYTES %d unsupported",
7245 MCLBYTES);
7246 break;
7247 }
7248 } else
7249 panic("wm_init: i82542 requires MCLBYTES = 2048");
7250 }
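/*
 * For reference, the mapping implied by the switch above (a summary,
 * not new behavior): MCLBYTES 2048 -> RCTL_2k (the usual case), and
 * 4096/8192/16384 -> RCTL_BSEX with RCTL_BSEX_4k/_8k/_16k on the
 * 82543 and later; anything else panics, and pre-82543 chips only
 * support 2048-byte buffers.
 */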
7251
7252 /* Enable ECC */
7253 switch (sc->sc_type) {
7254 case WM_T_82571:
7255 reg = CSR_READ(sc, WMREG_PBA_ECC);
7256 reg |= PBA_ECC_CORR_EN;
7257 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
7258 break;
7259 case WM_T_PCH_LPT:
7260 case WM_T_PCH_SPT:
7261 case WM_T_PCH_CNP:
7262 reg = CSR_READ(sc, WMREG_PBECCSTS);
7263 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
7264 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
7265
7266 sc->sc_ctrl |= CTRL_MEHE;
7267 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7268 break;
7269 default:
7270 break;
7271 }
7272
7273 /*
7274 * Set the receive filter.
7275 *
7276 * For 82575 and 82576, the RX descriptors must be initialized after
7277 * the setting of RCTL.EN in wm_set_filter()
7278 */
7279 wm_set_filter(sc);
7280
7281 /* On 82575 and later, set RDT only if RX is enabled. */
7282 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7283 int qidx;
7284 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7285 struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
7286 for (i = 0; i < WM_NRXDESC; i++) {
7287 mutex_enter(rxq->rxq_lock);
7288 wm_init_rxdesc(rxq, i);
7289 mutex_exit(rxq->rxq_lock);
7290 }
7292 }
7293 }
7294
7295 wm_unset_stopping_flags(sc);
7296
7297 /* Start the one second link check clock. */
7298 callout_schedule(&sc->sc_tick_ch, hz);
7299
7300 /*
7301 * ...all done! (IFNET_LOCKED asserted above.)
7302 */
7303 ifp->if_flags |= IFF_RUNNING;
7304
7305 out:
7306 /* Save last flags for the callback */
7307 sc->sc_if_flags = ifp->if_flags;
7308 sc->sc_ec_capenable = ec->ec_capenable;
7309 if (error)
7310 log(LOG_ERR, "%s: interface not running\n",
7311 device_xname(sc->sc_dev));
7312 return error;
7313 }
7314
7315 /*
7316 * wm_stop: [ifnet interface function]
7317 *
7318 * Stop transmission on the interface.
7319 */
7320 static void
7321 wm_stop(struct ifnet *ifp, int disable)
7322 {
7323 struct wm_softc *sc = ifp->if_softc;
7324
7325 ASSERT_SLEEPABLE();
7326 KASSERT(IFNET_LOCKED(ifp));
7327
7328 mutex_enter(sc->sc_core_lock);
7329 wm_stop_locked(ifp, disable ? true : false, true);
7330 mutex_exit(sc->sc_core_lock);
7331
7332 /*
7333 * After wm_set_stopping_flags(), it is guaranteed that
7334 * wm_handle_queue_work() does not call workqueue_enqueue().
7335 * However, workqueue_wait() cannot be called in
7336 * wm_stop_locked() because it can sleep, so call
7337 * workqueue_wait() here.
7338 */
7339 for (int i = 0; i < sc->sc_nqueues; i++)
7340 workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
7341 workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
7342 }
7343
7344 static void
7345 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
7346 {
7347 struct wm_softc *sc = ifp->if_softc;
7348 struct wm_txsoft *txs;
7349 int i, qidx;
7350
7351 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7352 device_xname(sc->sc_dev), __func__));
7353 KASSERT(IFNET_LOCKED(ifp));
7354 KASSERT(mutex_owned(sc->sc_core_lock));
7355
7356 wm_set_stopping_flags(sc);
7357
7358 if (sc->sc_flags & WM_F_HAS_MII) {
7359 /* Down the MII. */
7360 mii_down(&sc->sc_mii);
7361 } else {
7362 #if 0
7363 /* Should we clear PHY's status properly? */
7364 wm_reset(sc);
7365 #endif
7366 }
7367
7368 /* Stop the transmit and receive processes. */
7369 CSR_WRITE(sc, WMREG_TCTL, 0);
7370 CSR_WRITE(sc, WMREG_RCTL, 0);
7371 sc->sc_rctl &= ~RCTL_EN;
7372
7373 /*
7374 * Clear the interrupt mask to ensure the device cannot assert its
7375 * interrupt line.
7376 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
7377 * service any currently pending or shared interrupt.
7378 */
7379 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
7380 sc->sc_icr = 0;
7381 if (wm_is_using_msix(sc)) {
7382 if (sc->sc_type != WM_T_82574) {
7383 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
7384 CSR_WRITE(sc, WMREG_EIAC, 0);
7385 } else
7386 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
7387 }
7388
7389 /*
7390 * Stop callouts after interrupts are disabled; if we have
7391 * to wait for them, we will be releasing the CORE_LOCK
7392 * briefly, which will unblock interrupts on the current CPU.
7393 */
7394
7395 /* Stop the one second clock. */
7396 if (wait)
7397 callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
7398 else
7399 callout_stop(&sc->sc_tick_ch);
7400
7401 /* Stop the 82547 Tx FIFO stall check timer. */
7402 if (sc->sc_type == WM_T_82547) {
7403 if (wait)
7404 callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
7405 else
7406 callout_stop(&sc->sc_txfifo_ch);
7407 }
7408
7409 /* Release any queued transmit buffers. */
7410 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7411 struct wm_queue *wmq = &sc->sc_queue[qidx];
7412 struct wm_txqueue *txq = &wmq->wmq_txq;
7413 struct mbuf *m;
7414
7415 mutex_enter(txq->txq_lock);
7416 txq->txq_sending = false; /* Ensure watchdog disabled */
7417 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7418 txs = &txq->txq_soft[i];
7419 if (txs->txs_mbuf != NULL) {
7420 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
7421 m_freem(txs->txs_mbuf);
7422 txs->txs_mbuf = NULL;
7423 }
7424 }
7425 /* Drain txq_interq */
7426 while ((m = pcq_get(txq->txq_interq)) != NULL)
7427 m_freem(m);
7428 mutex_exit(txq->txq_lock);
7429 }
7430
7431 /* Mark the interface as down and cancel the watchdog timer. */
7432 ifp->if_flags &= ~IFF_RUNNING;
7433 sc->sc_if_flags = ifp->if_flags;
7434
7435 if (disable) {
7436 for (i = 0; i < sc->sc_nqueues; i++) {
7437 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7438 mutex_enter(rxq->rxq_lock);
7439 wm_rxdrain(rxq);
7440 mutex_exit(rxq->rxq_lock);
7441 }
7442 }
7443
7444 #if 0 /* notyet */
7445 if (sc->sc_type >= WM_T_82544)
7446 CSR_WRITE(sc, WMREG_WUC, 0);
7447 #endif
7448 }
7449
7450 static void
7451 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
7452 {
7453 struct mbuf *m;
7454 int i;
7455
7456 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
7457 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
7458 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
7459 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
7460 m->m_data, m->m_len, m->m_flags);
7461 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
7462 i, i == 1 ? "" : "s");
7463 }
7464
7465 /*
7466 * wm_82547_txfifo_stall:
7467 *
7468 * Callout used to wait for the 82547 Tx FIFO to drain,
7469 * reset the FIFO pointers, and restart packet transmission.
7470 */
7471 static void
7472 wm_82547_txfifo_stall(void *arg)
7473 {
7474 struct wm_softc *sc = arg;
7475 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7476
7477 mutex_enter(txq->txq_lock);
7478
7479 if (txq->txq_stopping)
7480 goto out;
7481
7482 if (txq->txq_fifo_stall) {
7483 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
7484 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
7485 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
7486 /*
7487 * Packets have drained. Stop transmitter, reset
7488 * FIFO pointers, restart transmitter, and kick
7489 * the packet queue.
7490 */
7491 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
7492 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
7493 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
7494 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
7495 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
7496 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
7497 CSR_WRITE(sc, WMREG_TCTL, tctl);
7498 CSR_WRITE_FLUSH(sc);
7499
7500 txq->txq_fifo_head = 0;
7501 txq->txq_fifo_stall = 0;
7502 wm_start_locked(&sc->sc_ethercom.ec_if);
7503 } else {
7504 /*
7505 * Still waiting for packets to drain; try again in
7506 * another tick.
7507 */
7508 callout_schedule(&sc->sc_txfifo_ch, 1);
7509 }
7510 }
7511
7512 out:
7513 mutex_exit(txq->txq_lock);
7514 }
7515
7516 /*
7517 * wm_82547_txfifo_bugchk:
7518 *
7519 * Check for bug condition in the 82547 Tx FIFO. We need to
7520 * prevent enqueueing a packet that would wrap around the end
7521 * of the Tx FIFO ring buffer, otherwise the chip will croak.
7522 *
7523 * We do this by checking the amount of space before the end
7524 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
7525 * the Tx FIFO, wait for all remaining packets to drain, reset
7526 * the internal FIFO pointers to the beginning, and restart
7527 * transmission on the interface.
7528 */
7529 #define WM_FIFO_HDR 0x10
7530 #define WM_82547_PAD_LEN 0x3e0
7531 static int
7532 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
7533 {
7534 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7535 int space = txq->txq_fifo_size - txq->txq_fifo_head;
7536 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
7537
7538 /* Just return if already stalled. */
7539 if (txq->txq_fifo_stall)
7540 return 1;
7541
7542 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7543 /* Stall only occurs in half-duplex mode. */
7544 goto send_packet;
7545 }
7546
7547 if (len >= WM_82547_PAD_LEN + space) {
7548 txq->txq_fifo_stall = 1;
7549 callout_schedule(&sc->sc_txfifo_ch, 1);
7550 return 1;
7551 }
7552
7553 send_packet:
7554 txq->txq_fifo_head += len;
7555 if (txq->txq_fifo_head >= txq->txq_fifo_size)
7556 txq->txq_fifo_head -= txq->txq_fifo_size;
7557
7558 return 0;
7559 }
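/*
 * Worked example of the accounting above (a sketch): with WM_FIFO_HDR
 * 0x10, a 1514-byte frame consumes roundup(1514 + 0x10, 0x10) = 1536
 * bytes of FIFO space.  In half-duplex mode the queue is stalled when
 * len >= WM_82547_PAD_LEN (0x3e0) + space, where space is what remains
 * before the end of the FIFO ring.
 */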
7560
7561 static int
7562 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7563 {
7564 int error;
7565
7566 /*
7567 * Allocate the control data structures, and create and load the
7568 * DMA map for it.
7569 *
7570 * NOTE: All Tx descriptors must be in the same 4G segment of
7571 * memory. So must Rx descriptors. We simplify by allocating
7572 * both sets within the same 4G segment.
7573 */
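/*
 * The (bus_size_t)0x100000000ULL passed to bus_dmamem_alloc() below is
 * its "boundary" argument: it forbids the allocation from crossing a
 * 4GB boundary, which is how the constraint in the NOTE above is
 * enforced.
 */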
7574 if (sc->sc_type < WM_T_82544)
7575 WM_NTXDESC(txq) = WM_NTXDESC_82542;
7576 else
7577 WM_NTXDESC(txq) = WM_NTXDESC_82544;
7578 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7579 txq->txq_descsize = sizeof(nq_txdesc_t);
7580 else
7581 txq->txq_descsize = sizeof(wiseman_txdesc_t);
7582
7583 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
7584 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
7585 1, &txq->txq_desc_rseg, 0)) != 0) {
7586 aprint_error_dev(sc->sc_dev,
7587 "unable to allocate TX control data, error = %d\n",
7588 error);
7589 goto fail_0;
7590 }
7591
7592 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
7593 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
7594 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
7595 aprint_error_dev(sc->sc_dev,
7596 "unable to map TX control data, error = %d\n", error);
7597 goto fail_1;
7598 }
7599
7600 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
7601 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
7602 aprint_error_dev(sc->sc_dev,
7603 "unable to create TX control data DMA map, error = %d\n",
7604 error);
7605 goto fail_2;
7606 }
7607
7608 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
7609 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
7610 aprint_error_dev(sc->sc_dev,
7611 "unable to load TX control data DMA map, error = %d\n",
7612 error);
7613 goto fail_3;
7614 }
7615
7616 return 0;
7617
7618 fail_3:
7619 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7620 fail_2:
7621 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7622 WM_TXDESCS_SIZE(txq));
7623 fail_1:
7624 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7625 fail_0:
7626 return error;
7627 }
7628
7629 static void
7630 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7631 {
7632
7633 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
7634 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7635 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7636 WM_TXDESCS_SIZE(txq));
7637 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7638 }
7639
7640 static int
7641 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7642 {
7643 int error;
7644 size_t rxq_descs_size;
7645
7646 /*
7647 * Allocate the control data structures, and create and load the
7648 * DMA map for it.
7649 *
7650 * NOTE: All Tx descriptors must be in the same 4G segment of
7651 * memory. So must Rx descriptors. We simplify by allocating
7652 * both sets within the same 4G segment.
7653 */
7654 rxq->rxq_ndesc = WM_NRXDESC;
7655 if (sc->sc_type == WM_T_82574)
7656 rxq->rxq_descsize = sizeof(ext_rxdesc_t);
7657 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7658 rxq->rxq_descsize = sizeof(nq_rxdesc_t);
7659 else
7660 rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
7661 rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
7662
7663 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
7664 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
7665 1, &rxq->rxq_desc_rseg, 0)) != 0) {
7666 aprint_error_dev(sc->sc_dev,
7667 "unable to allocate RX control data, error = %d\n",
7668 error);
7669 goto fail_0;
7670 }
7671
7672 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
7673 rxq->rxq_desc_rseg, rxq_descs_size,
7674 (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
7675 aprint_error_dev(sc->sc_dev,
7676 "unable to map RX control data, error = %d\n", error);
7677 goto fail_1;
7678 }
7679
7680 if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
7681 rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
7682 aprint_error_dev(sc->sc_dev,
7683 "unable to create RX control data DMA map, error = %d\n",
7684 error);
7685 goto fail_2;
7686 }
7687
7688 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
7689 rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
7690 aprint_error_dev(sc->sc_dev,
7691 "unable to load RX control data DMA map, error = %d\n",
7692 error);
7693 goto fail_3;
7694 }
7695
7696 return 0;
7697
7698 fail_3:
7699 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
7700 fail_2:
7701 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
7702 rxq_descs_size);
7703 fail_1:
7704 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
7705 fail_0:
7706 return error;
7707 }
7708
7709 static void
7710 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7711 {
7712
7713 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
7714 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
7715 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
7716 rxq->rxq_descsize * rxq->rxq_ndesc);
7717 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
7718 }
7719
7720
7721 static int
7722 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7723 {
7724 int i, error;
7725
7726 /* Create the transmit buffer DMA maps. */
7727 WM_TXQUEUELEN(txq) =
7728 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
7729 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
7730 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7731 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
7732 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
7733 &txq->txq_soft[i].txs_dmamap)) != 0) {
7734 aprint_error_dev(sc->sc_dev,
7735 "unable to create Tx DMA map %d, error = %d\n",
7736 i, error);
7737 goto fail;
7738 }
7739 }
7740
7741 return 0;
7742
7743 fail:
7744 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7745 if (txq->txq_soft[i].txs_dmamap != NULL)
7746 bus_dmamap_destroy(sc->sc_dmat,
7747 txq->txq_soft[i].txs_dmamap);
7748 }
7749 return error;
7750 }
7751
7752 static void
7753 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7754 {
7755 int i;
7756
7757 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7758 if (txq->txq_soft[i].txs_dmamap != NULL)
7759 bus_dmamap_destroy(sc->sc_dmat,
7760 txq->txq_soft[i].txs_dmamap);
7761 }
7762 }
7763
7764 static int
7765 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7766 {
7767 int i, error;
7768
7769 /* Create the receive buffer DMA maps. */
7770 for (i = 0; i < rxq->rxq_ndesc; i++) {
7771 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
7772 MCLBYTES, 0, 0,
7773 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
7774 aprint_error_dev(sc->sc_dev,
7775 "unable to create Rx DMA map %d, error = %d\n",
7776 i, error);
7777 goto fail;
7778 }
7779 rxq->rxq_soft[i].rxs_mbuf = NULL;
7780 }
7781
7782 return 0;
7783
7784 fail:
7785 for (i = 0; i < rxq->rxq_ndesc; i++) {
7786 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7787 bus_dmamap_destroy(sc->sc_dmat,
7788 rxq->rxq_soft[i].rxs_dmamap);
7789 }
7790 return error;
7791 }
7792
7793 static void
7794 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7795 {
7796 int i;
7797
7798 for (i = 0; i < rxq->rxq_ndesc; i++) {
7799 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7800 bus_dmamap_destroy(sc->sc_dmat,
7801 rxq->rxq_soft[i].rxs_dmamap);
7802 }
7803 }
7804
7805 /*
7806 * wm_alloc_txrx_queues:
7807 * Allocate {tx,rx} descriptors and {tx,rx} buffers.
7808 */
7809 static int
7810 wm_alloc_txrx_queues(struct wm_softc *sc)
7811 {
7812 int i, error, tx_done, rx_done;
7813
7814 sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
7815 KM_SLEEP);
7816 if (sc->sc_queue == NULL) {
7817 aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
7818 error = ENOMEM;
7819 goto fail_0;
7820 }
7821
7822 /* For transmission */
7823 error = 0;
7824 tx_done = 0;
7825 for (i = 0; i < sc->sc_nqueues; i++) {
7826 #ifdef WM_EVENT_COUNTERS
7827 int j;
7828 const char *xname;
7829 #endif
7830 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7831 txq->txq_sc = sc;
7832 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7833
7834 error = wm_alloc_tx_descs(sc, txq);
7835 if (error)
7836 break;
7837 error = wm_alloc_tx_buffer(sc, txq);
7838 if (error) {
7839 wm_free_tx_descs(sc, txq);
7840 break;
7841 }
7842 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
7843 if (txq->txq_interq == NULL) {
7844 wm_free_tx_descs(sc, txq);
7845 wm_free_tx_buffer(sc, txq);
7846 error = ENOMEM;
7847 break;
7848 }
7849
7850 #ifdef WM_EVENT_COUNTERS
7851 xname = device_xname(sc->sc_dev);
7852
7853 WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
7854 WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
7855 WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
7856 WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
7857 WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
7858 WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
7859 WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
7860 WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
7861 WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
7862 WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
7863 WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
7864
7865 for (j = 0; j < WM_NTXSEGS; j++) {
7866 snprintf(txq->txq_txseg_evcnt_names[j],
7867 sizeof(txq->txq_txseg_evcnt_names[j]),
7868 "txq%02dtxseg%d", i, j);
7869 evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
7870 EVCNT_TYPE_MISC,
7871 NULL, xname, txq->txq_txseg_evcnt_names[j]);
7872 }
7873
7874 WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
7875 WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
7876 WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
7877 WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
7878 /* Only for 82544 (and earlier?) */
7879 if (sc->sc_type <= WM_T_82544)
7880 WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
7881 WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
7882 #endif /* WM_EVENT_COUNTERS */
7883
7884 tx_done++;
7885 }
7886 if (error)
7887 goto fail_1;
7888
7889 /* For receive */
7890 error = 0;
7891 rx_done = 0;
7892 for (i = 0; i < sc->sc_nqueues; i++) {
7893 #ifdef WM_EVENT_COUNTERS
7894 const char *xname;
7895 #endif
7896 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7897 rxq->rxq_sc = sc;
7898 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7899
7900 error = wm_alloc_rx_descs(sc, rxq);
7901 if (error)
7902 break;
7903
7904 error = wm_alloc_rx_buffer(sc, rxq);
7905 if (error) {
7906 wm_free_rx_descs(sc, rxq);
7907 break;
7908 }
7909
7910 #ifdef WM_EVENT_COUNTERS
7911 xname = device_xname(sc->sc_dev);
7912
7913 WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
7914 WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
7915
7916 WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
7917 WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
7918 #endif /* WM_EVENT_COUNTERS */
7919
7920 rx_done++;
7921 }
7922 if (error)
7923 goto fail_2;
7924
7925 return 0;
7926
7927 fail_2:
7928 for (i = 0; i < rx_done; i++) {
7929 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7930 wm_free_rx_buffer(sc, rxq);
7931 wm_free_rx_descs(sc, rxq);
7932 if (rxq->rxq_lock)
7933 mutex_obj_free(rxq->rxq_lock);
7934 }
7935 fail_1:
7936 for (i = 0; i < tx_done; i++) {
7937 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7938 pcq_destroy(txq->txq_interq);
7939 wm_free_tx_buffer(sc, txq);
7940 wm_free_tx_descs(sc, txq);
7941 if (txq->txq_lock)
7942 mutex_obj_free(txq->txq_lock);
7943 }
7944
7945 kmem_free(sc->sc_queue,
7946 sizeof(struct wm_queue) * sc->sc_nqueues);
7947 fail_0:
7948 return error;
7949 }
7950
7951 /*
7952 * wm_free_txrx_queues:
7953 * Free {tx,rx} descriptors and {tx,rx} buffers.
7954 */
7955 static void
7956 wm_free_txrx_queues(struct wm_softc *sc)
7957 {
7958 int i;
7959
7960 for (i = 0; i < sc->sc_nqueues; i++) {
7961 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7962
7963 #ifdef WM_EVENT_COUNTERS
7964 WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
7965 WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
7966 WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
7967 WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
7968 #endif /* WM_EVENT_COUNTERS */
7969
7970 wm_free_rx_buffer(sc, rxq);
7971 wm_free_rx_descs(sc, rxq);
7972 if (rxq->rxq_lock)
7973 mutex_obj_free(rxq->rxq_lock);
7974 }
7975
7976 for (i = 0; i < sc->sc_nqueues; i++) {
7977 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7978 struct mbuf *m;
7979 #ifdef WM_EVENT_COUNTERS
7980 int j;
7981
7982 WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
7983 WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
7984 WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
7985 WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
7986 WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
7987 WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
7988 WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
7989 WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
7990 WM_Q_EVCNT_DETACH(txq, tso, txq, i);
7991 WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
7992 WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
7993
7994 for (j = 0; j < WM_NTXSEGS; j++)
7995 evcnt_detach(&txq->txq_ev_txseg[j]);
7996
7997 WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
7998 WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
7999 WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
8000 WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
8001 if (sc->sc_type <= WM_T_82544)
8002 WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
8003 WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
8004 #endif /* WM_EVENT_COUNTERS */
8005
8006 /* Drain txq_interq */
8007 while ((m = pcq_get(txq->txq_interq)) != NULL)
8008 m_freem(m);
8009 pcq_destroy(txq->txq_interq);
8010
8011 wm_free_tx_buffer(sc, txq);
8012 wm_free_tx_descs(sc, txq);
8013 if (txq->txq_lock)
8014 mutex_obj_free(txq->txq_lock);
8015 }
8016
8017 kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
8018 }
8019
8020 static void
8021 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
8022 {
8023
8024 KASSERT(mutex_owned(txq->txq_lock));
8025
8026 /* Initialize the transmit descriptor ring. */
8027 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
8028 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
8029 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8030 txq->txq_free = WM_NTXDESC(txq);
8031 txq->txq_next = 0;
8032 }
8033
8034 static void
8035 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
8036 struct wm_txqueue *txq)
8037 {
8038
8039 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
8040 device_xname(sc->sc_dev), __func__));
8041 KASSERT(mutex_owned(txq->txq_lock));
8042
8043 if (sc->sc_type < WM_T_82543) {
8044 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
8045 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
8046 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
8047 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
8048 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
8049 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
8050 } else {
8051 int qid = wmq->wmq_id;
8052
8053 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
8054 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
8055 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
8056 CSR_WRITE(sc, WMREG_TDH(qid), 0);
8057
8058 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8059 /*
8060 * Don't write TDT before TCTL.EN is set.
8061 * See the documentation.
8062 */
8063 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
8064 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
8065 | TXDCTL_WTHRESH(0));
8066 else {
8067 /* XXX should update with AIM? */
8068 CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
8069 if (sc->sc_type >= WM_T_82540) {
8070 /* TADV should be the same as TIDV. */
8071 CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
8072 }
8073
8074 CSR_WRITE(sc, WMREG_TDT(qid), 0);
8075 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
8076 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
8077 }
8078 }
8079 }
8080
8081 static void
8082 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
8083 {
8084 int i;
8085
8086 KASSERT(mutex_owned(txq->txq_lock));
8087
8088 /* Initialize the transmit job descriptors. */
8089 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
8090 txq->txq_soft[i].txs_mbuf = NULL;
8091 txq->txq_sfree = WM_TXQUEUELEN(txq);
8092 txq->txq_snext = 0;
8093 txq->txq_sdirty = 0;
8094 }
8095
8096 static void
8097 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
8098 struct wm_txqueue *txq)
8099 {
8100
8101 KASSERT(mutex_owned(txq->txq_lock));
8102
8103 /*
8104 * Set up some register offsets that are different between
8105 * the i82542 and the i82543 and later chips.
8106 */
8107 if (sc->sc_type < WM_T_82543)
8108 txq->txq_tdt_reg = WMREG_OLD_TDT;
8109 else
8110 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
8111
8112 wm_init_tx_descs(sc, txq);
8113 wm_init_tx_regs(sc, wmq, txq);
8114 wm_init_tx_buffer(sc, txq);
8115
8116 /* Clear all flags other than WM_TXQ_LINKDOWN_DISCARD. */
8117 txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
8118
8119 txq->txq_sending = false;
8120 }
8121
8122 static void
8123 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
8124 struct wm_rxqueue *rxq)
8125 {
8126
8127 KASSERT(mutex_owned(rxq->rxq_lock));
8128
8129 /*
8130 * Initialize the receive descriptor and receive job
8131 * descriptor rings.
8132 */
8133 if (sc->sc_type < WM_T_82543) {
8134 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
8135 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
8136 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
8137 rxq->rxq_descsize * rxq->rxq_ndesc);
8138 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
8139 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
8140 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
8141
8142 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
8143 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
8144 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
8145 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
8146 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
8147 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
8148 } else {
8149 int qid = wmq->wmq_id;
8150
8151 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
8152 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
8153 CSR_WRITE(sc, WMREG_RDLEN(qid),
8154 rxq->rxq_descsize * rxq->rxq_ndesc);
8155
8156 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
8157 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
8158 panic("%s: MCLBYTES %d unsupported for 82575 "
8159 "or higher\n", __func__, MCLBYTES);
8160
8161 /*
8162 * Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is
8163 * supported.
8164 */
8165 CSR_WRITE(sc, WMREG_SRRCTL(qid),
8166 SRRCTL_DESCTYPE_ADV_ONEBUF
8167 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
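/*
 * Worked example (assuming SRRCTL_BSIZEPKT_SHIFT is 10, i.e. 1KB
 * units): with the usual MCLBYTES of 2048, the size field written
 * above is 2048 >> 10 == 2, and the panic above rejects any MCLBYTES
 * that is not a multiple of 1KB.
 */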
8168 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
8169 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
8170 | RXDCTL_WTHRESH(1));
8171 CSR_WRITE(sc, WMREG_RDH(qid), 0);
8172 CSR_WRITE(sc, WMREG_RDT(qid), 0);
8173 } else {
8174 CSR_WRITE(sc, WMREG_RDH(qid), 0);
8175 CSR_WRITE(sc, WMREG_RDT(qid), 0);
8176 /* XXX should update with AIM? */
8177 CSR_WRITE(sc, WMREG_RDTR,
8178 (wmq->wmq_itr / 4) | RDTR_FPD);
8179 /* RADV MUST be the same as RDTR. */
8180 CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
8181 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
8182 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
8183 }
8184 }
8185 }
8186
8187 static int
8188 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
8189 {
8190 struct wm_rxsoft *rxs;
8191 int error, i;
8192
8193 KASSERT(mutex_owned(rxq->rxq_lock));
8194
8195 for (i = 0; i < rxq->rxq_ndesc; i++) {
8196 rxs = &rxq->rxq_soft[i];
8197 if (rxs->rxs_mbuf == NULL) {
8198 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
8199 log(LOG_ERR, "%s: unable to allocate or map "
8200 "rx buffer %d, error = %d\n",
8201 device_xname(sc->sc_dev), i, error);
8202 /*
8203 * XXX Should attempt to run with fewer receive
8204 * XXX buffers instead of just failing.
8205 */
8206 wm_rxdrain(rxq);
8207 return ENOMEM;
8208 }
8209 } else {
8210 /*
8211 * For 82575 and 82576, the RX descriptors must be
8212 * initialized after the setting of RCTL.EN in
8213 * wm_set_filter()
8214 */
8215 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
8216 wm_init_rxdesc(rxq, i);
8217 }
8218 }
8219 rxq->rxq_ptr = 0;
8220 rxq->rxq_discard = 0;
8221 WM_RXCHAIN_RESET(rxq);
8222
8223 return 0;
8224 }
8225
8226 static int
8227 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
8228 struct wm_rxqueue *rxq)
8229 {
8230
8231 KASSERT(mutex_owned(rxq->rxq_lock));
8232
8233 /*
8234 * Set up some register offsets that are different between
8235 * the i82542 and the i82543 and later chips.
8236 */
8237 if (sc->sc_type < WM_T_82543)
8238 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
8239 else
8240 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
8241
8242 wm_init_rx_regs(sc, wmq, rxq);
8243 return wm_init_rx_buffer(sc, rxq);
8244 }
8245
8246 /*
8247 * wm_init_txrx_queues:
8248 * Initialize {tx,rx} descriptors and {tx,rx} buffers.
8249 */
8250 static int
8251 wm_init_txrx_queues(struct wm_softc *sc)
8252 {
8253 int i, error = 0;
8254
8255 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
8256 device_xname(sc->sc_dev), __func__));
8257
8258 for (i = 0; i < sc->sc_nqueues; i++) {
8259 struct wm_queue *wmq = &sc->sc_queue[i];
8260 struct wm_txqueue *txq = &wmq->wmq_txq;
8261 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8262
8263 /*
8264 * TODO:
8265 * Currently, a constant value is used instead of AIM.
8266 * Furthermore, the interrupt interval of multiqueue (which
8267 * uses polling mode) is less than the default value.
8268 * More tuning and AIM support are required.
8269 */
8270 if (wm_is_using_multiqueue(sc))
8271 wmq->wmq_itr = 50;
8272 else
8273 wmq->wmq_itr = sc->sc_itr_init;
8274 wmq->wmq_set_itr = true;
8275
8276 mutex_enter(txq->txq_lock);
8277 wm_init_tx_queue(sc, wmq, txq);
8278 mutex_exit(txq->txq_lock);
8279
8280 mutex_enter(rxq->rxq_lock);
8281 error = wm_init_rx_queue(sc, wmq, rxq);
8282 mutex_exit(rxq->rxq_lock);
8283 if (error)
8284 break;
8285 }
8286
8287 return error;
8288 }
8289
8290 /*
8291 * wm_tx_offload:
8292 *
8293 * Set up TCP/IP checksumming parameters for the
8294 * specified packet.
8295 */
8296 static void
8297 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8298 struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
8299 {
8300 struct mbuf *m0 = txs->txs_mbuf;
8301 struct livengood_tcpip_ctxdesc *t;
8302 uint32_t ipcs, tucs, cmd, cmdlen, seg;
8303 uint32_t ipcse;
8304 struct ether_header *eh;
8305 int offset, iphl;
8306 uint8_t fields;
8307
8308 /*
8309 * XXX It would be nice if the mbuf pkthdr had offset
8310 * fields for the protocol headers.
8311 */
8312
8313 eh = mtod(m0, struct ether_header *);
8314 switch (htons(eh->ether_type)) {
8315 case ETHERTYPE_IP:
8316 case ETHERTYPE_IPV6:
8317 offset = ETHER_HDR_LEN;
8318 break;
8319
8320 case ETHERTYPE_VLAN:
8321 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8322 break;
8323
8324 default:
8325 /* Don't support this protocol or encapsulation. */
8326 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
8327 txq->txq_last_hw_ipcs = 0;
8328 txq->txq_last_hw_tucs = 0;
8329 *fieldsp = 0;
8330 *cmdp = 0;
8331 return;
8332 }
8333
8334 if ((m0->m_pkthdr.csum_flags &
8335 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8336 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8337 } else
8338 iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
8339
8340 ipcse = offset + iphl - 1;
8341
8342 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
8343 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
8344 seg = 0;
8345 fields = 0;
8346
8347 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8348 int hlen = offset + iphl;
8349 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
8350
8351 if (__predict_false(m0->m_len <
8352 (hlen + sizeof(struct tcphdr)))) {
8353 /*
8354 * TCP/IP headers are not in the first mbuf; we need
8355 * to do this the slow and painful way. Let's just
8356 * hope this doesn't happen very often.
8357 */
8358 struct tcphdr th;
8359
8360 WM_Q_EVCNT_INCR(txq, tsopain);
8361
8362 m_copydata(m0, hlen, sizeof(th), &th);
8363 if (v4) {
8364 struct ip ip;
8365
8366 m_copydata(m0, offset, sizeof(ip), &ip);
8367 ip.ip_len = 0;
8368 m_copyback(m0,
8369 offset + offsetof(struct ip, ip_len),
8370 sizeof(ip.ip_len), &ip.ip_len);
8371 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8372 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8373 } else {
8374 struct ip6_hdr ip6;
8375
8376 m_copydata(m0, offset, sizeof(ip6), &ip6);
8377 ip6.ip6_plen = 0;
8378 m_copyback(m0,
8379 offset + offsetof(struct ip6_hdr, ip6_plen),
8380 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8381 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8382 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8383 }
8384 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8385 sizeof(th.th_sum), &th.th_sum);
8386
8387 hlen += th.th_off << 2;
8388 } else {
8389 /*
8390 * TCP/IP headers are in the first mbuf; we can do
8391 * this the easy way.
8392 */
8393 struct tcphdr *th;
8394
8395 if (v4) {
8396 struct ip *ip =
8397 (void *)(mtod(m0, char *) + offset);
8398 th = (void *)(mtod(m0, char *) + hlen);
8399
8400 ip->ip_len = 0;
8401 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8402 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8403 } else {
8404 struct ip6_hdr *ip6 =
8405 (void *)(mtod(m0, char *) + offset);
8406 th = (void *)(mtod(m0, char *) + hlen);
8407
8408 ip6->ip6_plen = 0;
8409 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8410 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8411 }
8412 hlen += th->th_off << 2;
8413 }
8414
8415 if (v4) {
8416 WM_Q_EVCNT_INCR(txq, tso);
8417 cmdlen |= WTX_TCPIP_CMD_IP;
8418 } else {
8419 WM_Q_EVCNT_INCR(txq, tso6);
8420 ipcse = 0;
8421 }
8422 cmd |= WTX_TCPIP_CMD_TSE;
8423 cmdlen |= WTX_TCPIP_CMD_TSE |
8424 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
8425 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
8426 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
8427 }
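/*
 * Worked example (a sketch): for a TSOv4 frame with 14-byte Ethernet,
 * 20-byte IP and 20-byte TCP headers, hlen is 14 + 20 + 20 = 54, so
 * cmdlen carries the TSO payload length m0->m_pkthdr.len - 54 and seg
 * carries WTX_TCPIP_SEG_HDRLEN(54) plus the MSS from
 * m0->m_pkthdr.segsz.
 */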
8428
8429 /*
8430 * NOTE: Even if we're not using the IP or TCP/UDP checksum
8431 * offload feature, if we load the context descriptor, we
8432 * MUST provide valid values for IPCSS and TUCSS fields.
8433 */
8434
8435 ipcs = WTX_TCPIP_IPCSS(offset) |
8436 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
8437 WTX_TCPIP_IPCSE(ipcse);
8438 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
8439 WM_Q_EVCNT_INCR(txq, ipsum);
8440 fields |= WTX_IXSM;
8441 }
8442
8443 offset += iphl;
8444
8445 if (m0->m_pkthdr.csum_flags &
8446 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
8447 WM_Q_EVCNT_INCR(txq, tusum);
8448 fields |= WTX_TXSM;
8449 tucs = WTX_TCPIP_TUCSS(offset) |
8450 WTX_TCPIP_TUCSO(offset +
8451 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
8452 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8453 } else if ((m0->m_pkthdr.csum_flags &
8454 (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
8455 WM_Q_EVCNT_INCR(txq, tusum6);
8456 fields |= WTX_TXSM;
8457 tucs = WTX_TCPIP_TUCSS(offset) |
8458 WTX_TCPIP_TUCSO(offset +
8459 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
8460 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8461 } else {
8462 /* Just initialize it to a valid TCP context. */
8463 tucs = WTX_TCPIP_TUCSS(offset) |
8464 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
8465 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8466 }
8467
8468 *cmdp = cmd;
8469 *fieldsp = fields;
8470
8471 /*
8472 * We don't have to write a context descriptor for every packet,
8473 * except on the 82574. For the 82574, we must write a context
8474 * descriptor for every packet when we use two descriptor queues.
8475 *
8476 * The 82574L can only remember the *last* context used,
8477 * regardless of which queue it was used for. We cannot reuse
8478 * contexts on this hardware platform and must generate a new
8479 * context every time. 82574L hardware spec, section 7.2.6,
8480 * second note.
8481 */
8482 if (sc->sc_nqueues < 2) {
8483 /*
8484 * Setting up a new checksum offload context for every
8485 * frame takes a lot of processing time for the hardware.
8486 * This also reduces performance a lot for small-sized
8487 * frames, so avoid it if the driver can reuse a previously
8488 * configured checksum offload context.
8489 * For TSO, in theory we could reuse the same TSO context only
8490 * if the frame is the same type (IP/TCP) and has the same MSS.
8491 * However, checking whether a frame has the same IP/TCP
8492 * structure is hard, so just ignore that and always establish
8493 * a new TSO context.
8494 */
8495 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
8496 == 0) {
8497 if (txq->txq_last_hw_cmd == cmd &&
8498 txq->txq_last_hw_fields == fields &&
8499 txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
8500 txq->txq_last_hw_tucs == (tucs & 0xffff)) {
8501 WM_Q_EVCNT_INCR(txq, skipcontext);
8502 return;
8503 }
8504 }
8505
8506 txq->txq_last_hw_cmd = cmd;
8507 txq->txq_last_hw_fields = fields;
8508 txq->txq_last_hw_ipcs = (ipcs & 0xffff);
8509 txq->txq_last_hw_tucs = (tucs & 0xffff);
8510 }
8511
8512 /* Fill in the context descriptor. */
8513 t = (struct livengood_tcpip_ctxdesc *)
8514 &txq->txq_descs[txq->txq_next];
8515 t->tcpip_ipcs = htole32(ipcs);
8516 t->tcpip_tucs = htole32(tucs);
8517 t->tcpip_cmdlen = htole32(cmdlen);
8518 t->tcpip_seg = htole32(seg);
8519 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8520
8521 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8522 txs->txs_ndesc++;
8523 }
8524
8525 static inline int
8526 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
8527 {
8528 struct wm_softc *sc = ifp->if_softc;
8529 u_int cpuid = cpu_index(curcpu());
8530
8531 /*
8532 * Currently, a simple distribution strategy.
8533 * TODO:
8534 * Distribute by flowid (RSS hash value).
8535 */
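/*
 * Worked example: with ncpu == 8, sc_affinity_offset == 0 and
 * sc_nqueues == 4, CPU 5 maps to ((5 + 8 - 0) % 8) % 4 == 1, so
 * consecutive CPUs are spread round-robin across the queues.
 */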
8536 return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
8537 }
8538
8539 static inline bool
8540 wm_linkdown_discard(struct wm_txqueue *txq)
8541 {
8542
8543 if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
8544 return true;
8545
8546 return false;
8547 }
8548
8549 /*
8550 * wm_start: [ifnet interface function]
8551 *
8552 * Start packet transmission on the interface.
8553 */
8554 static void
8555 wm_start(struct ifnet *ifp)
8556 {
8557 struct wm_softc *sc = ifp->if_softc;
8558 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8559
8560 KASSERT(if_is_mpsafe(ifp));
8561 /*
8562 * if_obytes and if_omcasts are added in if_transmit()@if.c.
8563 */
8564
8565 mutex_enter(txq->txq_lock);
8566 if (!txq->txq_stopping)
8567 wm_start_locked(ifp);
8568 mutex_exit(txq->txq_lock);
8569 }
8570
8571 static void
8572 wm_start_locked(struct ifnet *ifp)
8573 {
8574 struct wm_softc *sc = ifp->if_softc;
8575 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8576
8577 wm_send_common_locked(ifp, txq, false);
8578 }
8579
8580 static int
8581 wm_transmit(struct ifnet *ifp, struct mbuf *m)
8582 {
8583 int qid;
8584 struct wm_softc *sc = ifp->if_softc;
8585 struct wm_txqueue *txq;
8586
8587 qid = wm_select_txqueue(ifp, m);
8588 txq = &sc->sc_queue[qid].wmq_txq;
8589
8590 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8591 m_freem(m);
8592 WM_Q_EVCNT_INCR(txq, pcqdrop);
8593 return ENOBUFS;
8594 }
8595
8596 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
8597 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
8598 if (m->m_flags & M_MCAST)
8599 if_statinc_ref(nsr, if_omcasts);
8600 IF_STAT_PUTREF(ifp);
8601
8602 if (mutex_tryenter(txq->txq_lock)) {
8603 if (!txq->txq_stopping)
8604 wm_transmit_locked(ifp, txq);
8605 mutex_exit(txq->txq_lock);
8606 }
8607
8608 return 0;
8609 }
8610
8611 static void
8612 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
8613 {
8614
8615 wm_send_common_locked(ifp, txq, true);
8616 }
8617
8618 static void
8619 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
8620 bool is_transmit)
8621 {
8622 struct wm_softc *sc = ifp->if_softc;
8623 struct mbuf *m0;
8624 struct wm_txsoft *txs;
8625 bus_dmamap_t dmamap;
8626 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
8627 bus_addr_t curaddr;
8628 bus_size_t seglen, curlen;
8629 uint32_t cksumcmd;
8630 uint8_t cksumfields;
8631 bool remap = true;
8632
8633 KASSERT(mutex_owned(txq->txq_lock));
8634 KASSERT(!txq->txq_stopping);
8635
8636 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
8637 return;
8638
8639 if (__predict_false(wm_linkdown_discard(txq))) {
8640 do {
8641 if (is_transmit)
8642 m0 = pcq_get(txq->txq_interq);
8643 else
8644 IFQ_DEQUEUE(&ifp->if_snd, m0);
8645 /*
8646 * Increment the successful packet counter as in the case
8647 * where the packet is discarded by a link-down PHY.
8648 */
8649 if (m0 != NULL) {
8650 if_statinc(ifp, if_opackets);
8651 m_freem(m0);
8652 }
8653 } while (m0 != NULL);
8654 return;
8655 }
8656
8657 /* Remember the previous number of free descriptors. */
8658 ofree = txq->txq_free;
8659
8660 /*
8661 * Loop through the send queue, setting up transmit descriptors
8662 * until we drain the queue, or use up all available transmit
8663 * descriptors.
8664 */
8665 for (;;) {
8666 m0 = NULL;
8667
8668 /* Get a work queue entry. */
8669 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
8670 wm_txeof(txq, UINT_MAX);
8671 if (txq->txq_sfree == 0) {
8672 DPRINTF(sc, WM_DEBUG_TX,
8673 ("%s: TX: no free job descriptors\n",
8674 device_xname(sc->sc_dev)));
8675 WM_Q_EVCNT_INCR(txq, txsstall);
8676 break;
8677 }
8678 }
8679
8680 /* Grab a packet off the queue. */
8681 if (is_transmit)
8682 m0 = pcq_get(txq->txq_interq);
8683 else
8684 IFQ_DEQUEUE(&ifp->if_snd, m0);
8685 if (m0 == NULL)
8686 break;
8687
8688 DPRINTF(sc, WM_DEBUG_TX,
8689 ("%s: TX: have packet to transmit: %p\n",
8690 device_xname(sc->sc_dev), m0));
8691
8692 txs = &txq->txq_soft[txq->txq_snext];
8693 dmamap = txs->txs_dmamap;
8694
8695 use_tso = (m0->m_pkthdr.csum_flags &
8696 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
8697
8698 /*
8699 * So says the Linux driver:
8700 * The controller does a simple calculation to make sure
8701 * there is enough room in the FIFO before initiating the
8702 * DMA for each buffer. The calc is:
8703 * 4 = ceil(buffer len / MSS)
8704 * To make sure we don't overrun the FIFO, adjust the max
8705 * buffer len if the MSS drops.
8706 */
8707 dmamap->dm_maxsegsz =
8708 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
8709 ? m0->m_pkthdr.segsz << 2
8710 : WTX_MAX_LEN;
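/*
 * Worked example: with a TSO MSS (m0->m_pkthdr.segsz) of 1448,
 * dm_maxsegsz is clamped to 1448 << 2 == 5792 bytes, keeping each DMA
 * segment within the four MSS-sized chunks that the Linux-derived
 * comment above describes.
 */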
8711
8712 /*
8713 * Load the DMA map. If this fails, the packet either
8714 * didn't fit in the allotted number of segments, or we
8715 * were short on resources. For the too-many-segments
8716 * case, we simply report an error and drop the packet,
8717 * since we can't sanely copy a jumbo packet to a single
8718 * buffer.
8719 */
8720 retry:
8721 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
8722 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
8723 if (__predict_false(error)) {
8724 if (error == EFBIG) {
8725 if (remap == true) {
8726 struct mbuf *m;
8727
8728 remap = false;
8729 m = m_defrag(m0, M_NOWAIT);
8730 if (m != NULL) {
8731 WM_Q_EVCNT_INCR(txq, defrag);
8732 m0 = m;
8733 goto retry;
8734 }
8735 }
8736 WM_Q_EVCNT_INCR(txq, toomanyseg);
8737 log(LOG_ERR, "%s: Tx packet consumes too many "
8738 "DMA segments, dropping...\n",
8739 device_xname(sc->sc_dev));
8740 wm_dump_mbuf_chain(sc, m0);
8741 m_freem(m0);
8742 continue;
8743 }
8744 /* Short on resources, just stop for now. */
8745 DPRINTF(sc, WM_DEBUG_TX,
8746 ("%s: TX: dmamap load failed: %d\n",
8747 device_xname(sc->sc_dev), error));
8748 break;
8749 }
8750
8751 segs_needed = dmamap->dm_nsegs;
8752 if (use_tso) {
8753 /* For sentinel descriptor; see below. */
8754 segs_needed++;
8755 }
8756
8757 /*
8758 * Ensure we have enough descriptors free to describe
8759 * the packet. Note, we always reserve one descriptor
8760 * at the end of the ring due to the semantics of the
8761 * TDT register, plus one more in the event we need
8762 * to load offload context.
8763 */
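/*
 * In other words, a packet needing segs_needed descriptors proceeds
 * only if segs_needed <= txq_free - 2: one descriptor always stays
 * unused because of the TDT register semantics noted above, and one
 * is kept spare for a possible context descriptor.
 */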
8764 if (segs_needed > txq->txq_free - 2) {
8765 /*
8766 * Not enough free descriptors to transmit this
8767 * packet. We haven't committed anything yet,
8768 * so just unload the DMA map, put the packet
8769 * back on the queue, and punt. Notify the upper
8770 * layer that there are no more slots left.
8771 */
8772 DPRINTF(sc, WM_DEBUG_TX,
8773 ("%s: TX: need %d (%d) descriptors, have %d\n",
8774 device_xname(sc->sc_dev), dmamap->dm_nsegs,
8775 segs_needed, txq->txq_free - 1));
8776 txq->txq_flags |= WM_TXQ_NO_SPACE;
8777 bus_dmamap_unload(sc->sc_dmat, dmamap);
8778 WM_Q_EVCNT_INCR(txq, txdstall);
8779 break;
8780 }
8781
8782 /*
8783 * Check for 82547 Tx FIFO bug. We need to do this
8784 * once we know we can transmit the packet, since we
8785 * do some internal FIFO space accounting here.
8786 */
8787 if (sc->sc_type == WM_T_82547 &&
8788 wm_82547_txfifo_bugchk(sc, m0)) {
8789 DPRINTF(sc, WM_DEBUG_TX,
8790 ("%s: TX: 82547 Tx FIFO bug detected\n",
8791 device_xname(sc->sc_dev)));
8792 txq->txq_flags |= WM_TXQ_NO_SPACE;
8793 bus_dmamap_unload(sc->sc_dmat, dmamap);
8794 WM_Q_EVCNT_INCR(txq, fifo_stall);
8795 break;
8796 }
8797
8798 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
8799
8800 DPRINTF(sc, WM_DEBUG_TX,
8801 ("%s: TX: packet has %d (%d) DMA segments\n",
8802 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
8803
8804 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
8805
8806 /*
8807 * Store a pointer to the packet so that we can free it
8808 * later.
8809 *
8810 * Initially, we consider the number of descriptors the
8811 * packet uses to be the number of DMA segments. This may be
8812 * incremented by 1 if we do checksum offload (a descriptor
8813 * is used to set the checksum context).
8814 */
8815 txs->txs_mbuf = m0;
8816 txs->txs_firstdesc = txq->txq_next;
8817 txs->txs_ndesc = segs_needed;
8818
8819 /* Set up offload parameters for this packet. */
8820 if (m0->m_pkthdr.csum_flags &
8821 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
8822 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8823 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
8824 wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
8825 } else {
8826 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
8827 txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
8828 cksumcmd = 0;
8829 cksumfields = 0;
8830 }
8831
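		/*
		 * Every descriptor of this packet requests delayed
		 * interrupt write-back (WTX_CMD_IDE) and FCS insertion
		 * (WTX_CMD_IFCS).
		 */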
8832 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
8833
8834 /* Sync the DMA map. */
8835 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
8836 BUS_DMASYNC_PREWRITE);
8837
8838 /* Initialize the transmit descriptor. */
8839 for (nexttx = txq->txq_next, seg = 0;
8840 seg < dmamap->dm_nsegs; seg++) {
8841 for (seglen = dmamap->dm_segs[seg].ds_len,
8842 curaddr = dmamap->dm_segs[seg].ds_addr;
8843 seglen != 0;
8844 curaddr += curlen, seglen -= curlen,
8845 nexttx = WM_NEXTTX(txq, nexttx)) {
8846 curlen = seglen;
8847
8848 /*
8849 * So says the Linux driver:
8850 * Work around for premature descriptor
8851 * write-backs in TSO mode. Append a
8852 * 4-byte sentinel descriptor.
8853 */
8854 if (use_tso && seg == dmamap->dm_nsegs - 1 &&
8855 curlen > 8)
8856 curlen -= 4;
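				/*
				 * The 4 bytes trimmed here come back as
				 * a separate 4-byte descriptor on the
				 * next pass of this inner loop; that is
				 * the extra descriptor reserved in
				 * segs_needed for the TSO case above.
				 */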
8857
8858 wm_set_dma_addr(
8859 &txq->txq_descs[nexttx].wtx_addr, curaddr);
8860 txq->txq_descs[nexttx].wtx_cmdlen
8861 = htole32(cksumcmd | curlen);
8862 txq->txq_descs[nexttx].wtx_fields.wtxu_status
8863 = 0;
8864 txq->txq_descs[nexttx].wtx_fields.wtxu_options
8865 = cksumfields;
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
8867 lasttx = nexttx;
8868
8869 DPRINTF(sc, WM_DEBUG_TX,
8870 ("%s: TX: desc %d: low %#" PRIx64 ", "
8871 "len %#04zx\n",
8872 device_xname(sc->sc_dev), nexttx,
8873 (uint64_t)curaddr, curlen));
8874 }
8875 }
8876
8877 KASSERT(lasttx != -1);
8878
8879 /*
8880 * Set up the command byte on the last descriptor of
8881 * the packet. If we're in the interrupt delay window,
8882 * delay the interrupt.
8883 */
8884 txq->txq_descs[lasttx].wtx_cmdlen |=
8885 htole32(WTX_CMD_EOP | WTX_CMD_RS);
8886
8887 /*
8888 * If VLANs are enabled and the packet has a VLAN tag, set
8889 * up the descriptor to encapsulate the packet for us.
8890 *
8891 * This is only valid on the last descriptor of the packet.
8892 */
8893 if (vlan_has_tag(m0)) {
8894 txq->txq_descs[lasttx].wtx_cmdlen |=
8895 htole32(WTX_CMD_VLE);
8896 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
8897 = htole16(vlan_get_tag(m0));
8898 }
8899
8900 txs->txs_lastdesc = lasttx;
8901
8902 DPRINTF(sc, WM_DEBUG_TX,
8903 ("%s: TX: desc %d: cmdlen 0x%08x\n",
8904 device_xname(sc->sc_dev),
8905 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
8906
8907 /* Sync the descriptors we're using. */
8908 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
8909 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8910
8911 /* Give the packet to the chip. */
8912 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
8913
8914 DPRINTF(sc, WM_DEBUG_TX,
8915 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
8916
8917 DPRINTF(sc, WM_DEBUG_TX,
8918 ("%s: TX: finished transmitting packet, job %d\n",
8919 device_xname(sc->sc_dev), txq->txq_snext));
8920
8921 /* Advance the tx pointer. */
8922 txq->txq_free -= txs->txs_ndesc;
8923 txq->txq_next = nexttx;
8924
8925 txq->txq_sfree--;
8926 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
8927
8928 /* Pass the packet to any BPF listeners. */
8929 bpf_mtap(ifp, m0, BPF_D_OUT);
8930 }
8931
8932 if (m0 != NULL) {
8933 txq->txq_flags |= WM_TXQ_NO_SPACE;
8934 WM_Q_EVCNT_INCR(txq, descdrop);
8935 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
8936 __func__));
8937 m_freem(m0);
8938 }
8939
8940 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
8941 /* No more slots; notify upper layer. */
8942 txq->txq_flags |= WM_TXQ_NO_SPACE;
8943 }
8944
8945 if (txq->txq_free != ofree) {
8946 /* Set a watchdog timer in case the chip flakes out. */
8947 txq->txq_lastsent = time_uptime;
8948 txq->txq_sending = true;
8949 }
8950 }
8951
8952 /*
8953 * wm_nq_tx_offload:
8954 *
8955 * Set up TCP/IP checksumming parameters for the
 *	specified packet, for NEWQUEUE devices.
8957 */
8958 static void
8959 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8960 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
8961 {
8962 struct mbuf *m0 = txs->txs_mbuf;
8963 uint32_t vl_len, mssidx, cmdc;
8964 struct ether_header *eh;
8965 int offset, iphl;
8966
8967 /*
8968 * XXX It would be nice if the mbuf pkthdr had offset
8969 * fields for the protocol headers.
8970 */
8971 *cmdlenp = 0;
8972 *fieldsp = 0;
8973
8974 eh = mtod(m0, struct ether_header *);
8975 switch (htons(eh->ether_type)) {
8976 case ETHERTYPE_IP:
8977 case ETHERTYPE_IPV6:
8978 offset = ETHER_HDR_LEN;
8979 break;
8980
8981 case ETHERTYPE_VLAN:
8982 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8983 break;
8984
8985 default:
8986 /* Don't support this protocol or encapsulation. */
8987 *do_csum = false;
8988 return;
8989 }
8990 *do_csum = true;
8991 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
8992 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
8993
8994 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
8995 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
8996
8997 if ((m0->m_pkthdr.csum_flags &
8998 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8999 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
9000 } else {
9001 iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
9002 }
9003 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
9004 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
9005
9006 if (vlan_has_tag(m0)) {
9007 vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
9008 << NQTXC_VLLEN_VLAN_SHIFT);
9009 *cmdlenp |= NQTX_CMD_VLE;
9010 }
9011
9012 mssidx = 0;
9013
9014 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
9015 int hlen = offset + iphl;
9016 int tcp_hlen;
9017 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
9018
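		/*
		 * For TSO the controller regenerates the IP length and
		 * the TCP checksum for every segment it emits, so zero
		 * the IP total-length field and seed th_sum with a
		 * pseudo-header checksum that excludes the TCP length.
		 */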
9019 if (__predict_false(m0->m_len <
9020 (hlen + sizeof(struct tcphdr)))) {
9021 /*
9022 * TCP/IP headers are not in the first mbuf; we need
9023 * to do this the slow and painful way. Let's just
9024 * hope this doesn't happen very often.
9025 */
9026 struct tcphdr th;
9027
9028 WM_Q_EVCNT_INCR(txq, tsopain);
9029
9030 m_copydata(m0, hlen, sizeof(th), &th);
9031 if (v4) {
9032 struct ip ip;
9033
9034 m_copydata(m0, offset, sizeof(ip), &ip);
9035 ip.ip_len = 0;
9036 m_copyback(m0,
9037 offset + offsetof(struct ip, ip_len),
9038 sizeof(ip.ip_len), &ip.ip_len);
9039 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
9040 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
9041 } else {
9042 struct ip6_hdr ip6;
9043
9044 m_copydata(m0, offset, sizeof(ip6), &ip6);
9045 ip6.ip6_plen = 0;
9046 m_copyback(m0,
9047 offset + offsetof(struct ip6_hdr, ip6_plen),
9048 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
9049 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
9050 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
9051 }
9052 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
9053 sizeof(th.th_sum), &th.th_sum);
9054
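			/* th_off is in units of 32-bit words. */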
9055 tcp_hlen = th.th_off << 2;
9056 } else {
9057 /*
9058 * TCP/IP headers are in the first mbuf; we can do
9059 * this the easy way.
9060 */
9061 struct tcphdr *th;
9062
9063 if (v4) {
9064 struct ip *ip =
9065 (void *)(mtod(m0, char *) + offset);
9066 th = (void *)(mtod(m0, char *) + hlen);
9067
9068 ip->ip_len = 0;
9069 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
9070 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
9071 } else {
9072 struct ip6_hdr *ip6 =
9073 (void *)(mtod(m0, char *) + offset);
9074 th = (void *)(mtod(m0, char *) + hlen);
9075
9076 ip6->ip6_plen = 0;
9077 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
9078 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
9079 }
9080 tcp_hlen = th->th_off << 2;
9081 }
9082 hlen += tcp_hlen;
9083 *cmdlenp |= NQTX_CMD_TSE;
9084
9085 if (v4) {
9086 WM_Q_EVCNT_INCR(txq, tso);
9087 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
9088 } else {
9089 WM_Q_EVCNT_INCR(txq, tso6);
9090 *fieldsp |= NQTXD_FIELDS_TUXSM;
9091 }
		*fieldsp |= ((m0->m_pkthdr.len - hlen)
		    << NQTXD_FIELDS_PAYLEN_SHIFT);
		KASSERT(((m0->m_pkthdr.len - hlen)
		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
9094 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
9095 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
9096 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
9097 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
9098 } else {
9099 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
9100 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
9101 }
9102
9103 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
9104 *fieldsp |= NQTXD_FIELDS_IXSM;
9105 cmdc |= NQTXC_CMD_IP4;
9106 }
9107
9108 if (m0->m_pkthdr.csum_flags &
9109 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
9110 WM_Q_EVCNT_INCR(txq, tusum);
9111 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
9112 cmdc |= NQTXC_CMD_TCP;
9113 else
9114 cmdc |= NQTXC_CMD_UDP;
9115
9116 cmdc |= NQTXC_CMD_IP4;
9117 *fieldsp |= NQTXD_FIELDS_TUXSM;
9118 }
9119 if (m0->m_pkthdr.csum_flags &
9120 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
9121 WM_Q_EVCNT_INCR(txq, tusum6);
9122 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
9123 cmdc |= NQTXC_CMD_TCP;
9124 else
9125 cmdc |= NQTXC_CMD_UDP;
9126
9127 cmdc |= NQTXC_CMD_IP6;
9128 *fieldsp |= NQTXD_FIELDS_TUXSM;
9129 }
9130
9131 /*
	 * On NEWQUEUE controllers (82575, 82576, 82580, I350, I354, I210
	 * and I211) we don't have to write a context descriptor for every
	 * packet; writing one per Tx queue would be enough. Writing one
	 * for every packet adds overhead, but it causes no problems.
9138 */
9139 /* Fill in the context descriptor. */
9140 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
9141 htole32(vl_len);
9142 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
9143 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
9144 htole32(cmdc);
9145 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
9146 htole32(mssidx);
9147 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
9148 DPRINTF(sc, WM_DEBUG_TX,
9149 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
9150 txq->txq_next, 0, vl_len));
9151 DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
9152 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
9153 txs->txs_ndesc++;
9154 }
9155
9156 /*
9157 * wm_nq_start: [ifnet interface function]
9158 *
 *	Start packet transmission on the interface for NEWQUEUE devices.
9160 */
9161 static void
9162 wm_nq_start(struct ifnet *ifp)
9163 {
9164 struct wm_softc *sc = ifp->if_softc;
9165 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
9166
9167 KASSERT(if_is_mpsafe(ifp));
9168 /*
9169 * if_obytes and if_omcasts are added in if_transmit()@if.c.
9170 */
9171
9172 mutex_enter(txq->txq_lock);
9173 if (!txq->txq_stopping)
9174 wm_nq_start_locked(ifp);
9175 mutex_exit(txq->txq_lock);
9176 }
9177
9178 static void
9179 wm_nq_start_locked(struct ifnet *ifp)
9180 {
9181 struct wm_softc *sc = ifp->if_softc;
9182 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
9183
9184 wm_nq_send_common_locked(ifp, txq, false);
9185 }
9186
9187 static int
9188 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
9189 {
9190 int qid;
9191 struct wm_softc *sc = ifp->if_softc;
9192 struct wm_txqueue *txq;
9193
9194 qid = wm_select_txqueue(ifp, m);
9195 txq = &sc->sc_queue[qid].wmq_txq;
9196
9197 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
9198 m_freem(m);
9199 WM_Q_EVCNT_INCR(txq, pcqdrop);
9200 return ENOBUFS;
9201 }
9202
9203 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
9204 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
9205 if (m->m_flags & M_MCAST)
9206 if_statinc_ref(nsr, if_omcasts);
9207 IF_STAT_PUTREF(ifp);
9208
9209 /*
	 * There are two situations in which this mutex_tryenter() can
	 * fail at run time:
	 * (1) contention with the interrupt handler (wm_txrxintr_msix())
	 * (2) contention with the deferred if_start softint
	 *     (wm_handle_queue())
	 * In case (1), the last packet enqueued to txq->txq_interq is
	 * dequeued by wm_deferred_start_locked(), so it does not get
	 * stuck. The same holds in case (2), so it does not get stuck
	 * either.
9219 */
9220 if (mutex_tryenter(txq->txq_lock)) {
9221 if (!txq->txq_stopping)
9222 wm_nq_transmit_locked(ifp, txq);
9223 mutex_exit(txq->txq_lock);
9224 }
9225
9226 return 0;
9227 }
9228
9229 static void
9230 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
9231 {
9232
9233 wm_nq_send_common_locked(ifp, txq, true);
9234 }
9235
9236 static void
9237 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
9238 bool is_transmit)
9239 {
9240 struct wm_softc *sc = ifp->if_softc;
9241 struct mbuf *m0;
9242 struct wm_txsoft *txs;
9243 bus_dmamap_t dmamap;
9244 int error, nexttx, lasttx = -1, seg, segs_needed;
9245 bool do_csum, sent;
9246 bool remap = true;
9247
9248 KASSERT(mutex_owned(txq->txq_lock));
9249 KASSERT(!txq->txq_stopping);
9250
9251 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
9252 return;
9253
9254 if (__predict_false(wm_linkdown_discard(txq))) {
9255 do {
9256 if (is_transmit)
9257 m0 = pcq_get(txq->txq_interq);
9258 else
9259 IFQ_DEQUEUE(&ifp->if_snd, m0);
9260 /*
			 * Increment the successfully-sent packet counter,
			 * as in the case where the packet is discarded by
			 * a link-down PHY.
9263 */
9264 if (m0 != NULL) {
9265 if_statinc(ifp, if_opackets);
9266 m_freem(m0);
9267 }
9268 } while (m0 != NULL);
9269 return;
9270 }
9271
9272 sent = false;
9273
9274 /*
9275 * Loop through the send queue, setting up transmit descriptors
9276 * until we drain the queue, or use up all available transmit
9277 * descriptors.
9278 */
9279 for (;;) {
9280 m0 = NULL;
9281
9282 /* Get a work queue entry. */
9283 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
9284 wm_txeof(txq, UINT_MAX);
9285 if (txq->txq_sfree == 0) {
9286 DPRINTF(sc, WM_DEBUG_TX,
9287 ("%s: TX: no free job descriptors\n",
9288 device_xname(sc->sc_dev)));
9289 WM_Q_EVCNT_INCR(txq, txsstall);
9290 break;
9291 }
9292 }
9293
9294 /* Grab a packet off the queue. */
9295 if (is_transmit)
9296 m0 = pcq_get(txq->txq_interq);
9297 else
9298 IFQ_DEQUEUE(&ifp->if_snd, m0);
9299 if (m0 == NULL)
9300 break;
9301
9302 DPRINTF(sc, WM_DEBUG_TX,
9303 ("%s: TX: have packet to transmit: %p\n",
9304 device_xname(sc->sc_dev), m0));
9305
9306 txs = &txq->txq_soft[txq->txq_snext];
9307 dmamap = txs->txs_dmamap;
9308
9309 /*
9310 * Load the DMA map. If this fails, the packet either
9311 * didn't fit in the allotted number of segments, or we
9312 * were short on resources. For the too-many-segments
9313 * case, we simply report an error and drop the packet,
9314 * since we can't sanely copy a jumbo packet to a single
9315 * buffer.
9316 */
9317 retry:
9318 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
9319 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
9320 if (__predict_false(error)) {
9321 if (error == EFBIG) {
9322 if (remap == true) {
9323 struct mbuf *m;
9324
9325 remap = false;
9326 m = m_defrag(m0, M_NOWAIT);
9327 if (m != NULL) {
9328 WM_Q_EVCNT_INCR(txq, defrag);
9329 m0 = m;
9330 goto retry;
9331 }
9332 }
9333 WM_Q_EVCNT_INCR(txq, toomanyseg);
9334 log(LOG_ERR, "%s: Tx packet consumes too many "
9335 "DMA segments, dropping...\n",
9336 device_xname(sc->sc_dev));
9337 wm_dump_mbuf_chain(sc, m0);
9338 m_freem(m0);
9339 continue;
9340 }
9341 /* Short on resources, just stop for now. */
9342 DPRINTF(sc, WM_DEBUG_TX,
9343 ("%s: TX: dmamap load failed: %d\n",
9344 device_xname(sc->sc_dev), error));
9345 break;
9346 }
9347
9348 segs_needed = dmamap->dm_nsegs;
9349
9350 /*
9351 * Ensure we have enough descriptors free to describe
9352 * the packet. Note, we always reserve one descriptor
9353 * at the end of the ring due to the semantics of the
9354 * TDT register, plus one more in the event we need
9355 * to load offload context.
9356 */
9357 if (segs_needed > txq->txq_free - 2) {
9358 /*
9359 * Not enough free descriptors to transmit this
9360 * packet. We haven't committed anything yet,
9361 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
9363 * layer that there are no more slots left.
9364 */
9365 DPRINTF(sc, WM_DEBUG_TX,
9366 ("%s: TX: need %d (%d) descriptors, have %d\n",
9367 device_xname(sc->sc_dev), dmamap->dm_nsegs,
9368 segs_needed, txq->txq_free - 1));
9369 txq->txq_flags |= WM_TXQ_NO_SPACE;
9370 bus_dmamap_unload(sc->sc_dmat, dmamap);
9371 WM_Q_EVCNT_INCR(txq, txdstall);
9372 break;
9373 }
9374
9375 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
9376
9377 DPRINTF(sc, WM_DEBUG_TX,
9378 ("%s: TX: packet has %d (%d) DMA segments\n",
9379 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
9380
9381 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
9382
9383 /*
9384 * Store a pointer to the packet so that we can free it
9385 * later.
9386 *
		 * Initially, we assume the number of descriptors the
		 * packet uses equals the number of DMA segments. This may be
9389 * incremented by 1 if we do checksum offload (a descriptor
9390 * is used to set the checksum context).
9391 */
9392 txs->txs_mbuf = m0;
9393 txs->txs_firstdesc = txq->txq_next;
9394 txs->txs_ndesc = segs_needed;
9395
9396 /* Set up offload parameters for this packet. */
9397 uint32_t cmdlen, fields, dcmdlen;
9398 if (m0->m_pkthdr.csum_flags &
9399 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
9400 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9401 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
9402 wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
9403 &do_csum);
9404 } else {
9405 do_csum = false;
9406 cmdlen = 0;
9407 fields = 0;
9408 }
9409
9410 /* Sync the DMA map. */
9411 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
9412 BUS_DMASYNC_PREWRITE);
9413
9414 /* Initialize the first transmit descriptor. */
9415 nexttx = txq->txq_next;
9416 if (!do_csum) {
9417 /* Set up a legacy descriptor */
9418 wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
9419 dmamap->dm_segs[0].ds_addr);
9420 txq->txq_descs[nexttx].wtx_cmdlen =
9421 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
9422 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
9423 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
9424 if (vlan_has_tag(m0)) {
9425 txq->txq_descs[nexttx].wtx_cmdlen |=
9426 htole32(WTX_CMD_VLE);
9427 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
9428 htole16(vlan_get_tag(m0));
9429 } else
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
9431
9432 dcmdlen = 0;
9433 } else {
9434 /* Set up an advanced data descriptor */
9435 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
9436 htole64(dmamap->dm_segs[0].ds_addr);
9437 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
9438 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
9439 htole32(dmamap->dm_segs[0].ds_len | cmdlen);
9440 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
9441 htole32(fields);
9442 DPRINTF(sc, WM_DEBUG_TX,
9443 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
9444 device_xname(sc->sc_dev), nexttx,
9445 (uint64_t)dmamap->dm_segs[0].ds_addr));
9446 DPRINTF(sc, WM_DEBUG_TX,
9447 ("\t 0x%08x%08x\n", fields,
9448 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
9449 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
9450 }
9451
9452 lasttx = nexttx;
9453 nexttx = WM_NEXTTX(txq, nexttx);
9454 /*
9455 * Fill in the next descriptors. Legacy or advanced format
9456 * is the same here.
9457 */
9458 for (seg = 1; seg < dmamap->dm_nsegs;
9459 seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
9460 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
9461 htole64(dmamap->dm_segs[seg].ds_addr);
9462 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
9463 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
9464 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
9465 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
9466 lasttx = nexttx;
9467
9468 DPRINTF(sc, WM_DEBUG_TX,
9469 ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
9470 device_xname(sc->sc_dev), nexttx,
9471 (uint64_t)dmamap->dm_segs[seg].ds_addr,
9472 dmamap->dm_segs[seg].ds_len));
9473 }
9474
9475 KASSERT(lasttx != -1);
9476
9477 /*
9478 * Set up the command byte on the last descriptor of
9479 * the packet. If we're in the interrupt delay window,
9480 * delay the interrupt.
9481 */
9482 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
9483 (NQTX_CMD_EOP | NQTX_CMD_RS));
9484 txq->txq_descs[lasttx].wtx_cmdlen |=
9485 htole32(WTX_CMD_EOP | WTX_CMD_RS);
9486
9487 txs->txs_lastdesc = lasttx;
9488
9489 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
9490 device_xname(sc->sc_dev),
9491 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
9492
9493 /* Sync the descriptors we're using. */
9494 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
9495 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
9496
9497 /* Give the packet to the chip. */
9498 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
9499 sent = true;
9500
9501 DPRINTF(sc, WM_DEBUG_TX,
9502 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
9503
9504 DPRINTF(sc, WM_DEBUG_TX,
9505 ("%s: TX: finished transmitting packet, job %d\n",
9506 device_xname(sc->sc_dev), txq->txq_snext));
9507
9508 /* Advance the tx pointer. */
9509 txq->txq_free -= txs->txs_ndesc;
9510 txq->txq_next = nexttx;
9511
9512 txq->txq_sfree--;
9513 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
9514
9515 /* Pass the packet to any BPF listeners. */
9516 bpf_mtap(ifp, m0, BPF_D_OUT);
9517 }
9518
9519 if (m0 != NULL) {
9520 txq->txq_flags |= WM_TXQ_NO_SPACE;
9521 WM_Q_EVCNT_INCR(txq, descdrop);
9522 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
9523 __func__));
9524 m_freem(m0);
9525 }
9526
9527 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
9528 /* No more slots; notify upper layer. */
9529 txq->txq_flags |= WM_TXQ_NO_SPACE;
9530 }
9531
9532 if (sent) {
9533 /* Set a watchdog timer in case the chip flakes out. */
9534 txq->txq_lastsent = time_uptime;
9535 txq->txq_sending = true;
9536 }
9537 }
9538
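/*
 * wm_deferred_start_locked:
 *
 *	Restart transmission from softint/workqueue context. For queue 0
 *	this also services the ifnet send queue (the ALTQ / single-CPU
 *	if_start path noted below).
 */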
9539 static void
9540 wm_deferred_start_locked(struct wm_txqueue *txq)
9541 {
9542 struct wm_softc *sc = txq->txq_sc;
9543 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9544 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
9545 int qid = wmq->wmq_id;
9546
9547 KASSERT(mutex_owned(txq->txq_lock));
9548 KASSERT(!txq->txq_stopping);
9549
9550 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* XXX needed for ALTQ or single-CPU systems */
9552 if (qid == 0)
9553 wm_nq_start_locked(ifp);
9554 wm_nq_transmit_locked(ifp, txq);
9555 } else {
		/* XXX needed for ALTQ or single-CPU systems */
9557 if (qid == 0)
9558 wm_start_locked(ifp);
9559 wm_transmit_locked(ifp, txq);
9560 }
9561 }
9562
9563 /* Interrupt */
9564
9565 /*
9566 * wm_txeof:
9567 *
9568 * Helper; handle transmit interrupts.
9569 */
9570 static bool
9571 wm_txeof(struct wm_txqueue *txq, u_int limit)
9572 {
9573 struct wm_softc *sc = txq->txq_sc;
9574 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9575 struct wm_txsoft *txs;
9576 int count = 0;
9577 int i;
9578 uint8_t status;
9579 bool more = false;
9580
9581 KASSERT(mutex_owned(txq->txq_lock));
9582
9583 if (txq->txq_stopping)
9584 return false;
9585
9586 txq->txq_flags &= ~WM_TXQ_NO_SPACE;
9587
9588 /*
9589 * Go through the Tx list and free mbufs for those
9590 * frames which have been transmitted.
9591 */
9592 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
9593 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
9594 txs = &txq->txq_soft[i];
9595
9596 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
9597 device_xname(sc->sc_dev), i));
9598
9599 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
9600 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9601
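		/*
		 * The hardware sets the DD (descriptor done) bit in the
		 * write-back status once it has finished with a
		 * descriptor; if it is still clear here, we have caught
		 * up with the chip.
		 */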
9602 status =
9603 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
9604 if ((status & WTX_ST_DD) == 0) {
9605 wm_cdtxsync(txq, txs->txs_lastdesc, 1,
9606 BUS_DMASYNC_PREREAD);
9607 break;
9608 }
9609
9610 if (limit-- == 0) {
9611 more = true;
9612 DPRINTF(sc, WM_DEBUG_TX,
9613 ("%s: TX: loop limited, job %d is not processed\n",
9614 device_xname(sc->sc_dev), i));
9615 break;
9616 }
9617
9618 count++;
9619 DPRINTF(sc, WM_DEBUG_TX,
9620 ("%s: TX: job %d done: descs %d..%d\n",
9621 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
9622 txs->txs_lastdesc));
9623
9624 #ifdef WM_EVENT_COUNTERS
9625 if ((status & WTX_ST_TU) && (sc->sc_type <= WM_T_82544))
9626 WM_Q_EVCNT_INCR(txq, underrun);
9627 #endif /* WM_EVENT_COUNTERS */
9628
9629 /*
		 * Documentation for the 82574 and newer says the status
		 * field has neither an EC (Excessive Collision) bit nor an
		 * LC (Late Collision) bit; both are reserved. See the
		 * "PCIe GbE Controller Open Source Software Developer's
		 * Manual" and the 82574 and newer datasheets.
		 *
		 * XXX The LC bit has been seen set on I218 even though the
		 * media was full duplex, so the bit might have some other
		 * meaning there (no documentation available).
9638 */
9639
9640 if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
9641 && ((sc->sc_type < WM_T_82574)
9642 || (sc->sc_type == WM_T_80003))) {
9643 if_statinc(ifp, if_oerrors);
9644 if (status & WTX_ST_LC)
9645 log(LOG_WARNING, "%s: late collision\n",
9646 device_xname(sc->sc_dev));
9647 else if (status & WTX_ST_EC) {
9648 if_statadd(ifp, if_collisions,
9649 TX_COLLISION_THRESHOLD + 1);
9650 log(LOG_WARNING, "%s: excessive collisions\n",
9651 device_xname(sc->sc_dev));
9652 }
9653 } else
9654 if_statinc(ifp, if_opackets);
9655
9656 txq->txq_packets++;
9657 txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
9658
9659 txq->txq_free += txs->txs_ndesc;
9660 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
9661 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
9662 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
9663 m_freem(txs->txs_mbuf);
9664 txs->txs_mbuf = NULL;
9665 }
9666
9667 /* Update the dirty transmit buffer pointer. */
9668 txq->txq_sdirty = i;
9669 DPRINTF(sc, WM_DEBUG_TX,
9670 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
9671
9672 if (count != 0)
9673 rnd_add_uint32(&sc->rnd_source, count);
9674
9675 /*
9676 * If there are no more pending transmissions, cancel the watchdog
9677 * timer.
9678 */
9679 if (txq->txq_sfree == WM_TXQUEUELEN(txq))
9680 txq->txq_sending = false;
9681
9682 return more;
9683 }
9684
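/*
 * The wm_rxdesc_* helpers below hide the three Rx descriptor layouts
 * in use: the 82574 uses extended descriptors, NEWQUEUE (82575 and
 * newer) controllers use advanced descriptors, and everything else
 * uses the legacy layout.
 */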
9685 static inline uint32_t
9686 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
9687 {
9688 struct wm_softc *sc = rxq->rxq_sc;
9689
9690 if (sc->sc_type == WM_T_82574)
9691 return EXTRXC_STATUS(
9692 le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
9693 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9694 return NQRXC_STATUS(
9695 le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
9696 else
9697 return rxq->rxq_descs[idx].wrx_status;
9698 }
9699
9700 static inline uint32_t
9701 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
9702 {
9703 struct wm_softc *sc = rxq->rxq_sc;
9704
9705 if (sc->sc_type == WM_T_82574)
9706 return EXTRXC_ERROR(
9707 le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
9708 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9709 return NQRXC_ERROR(
9710 le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
9711 else
9712 return rxq->rxq_descs[idx].wrx_errors;
9713 }
9714
9715 static inline uint16_t
9716 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
9717 {
9718 struct wm_softc *sc = rxq->rxq_sc;
9719
9720 if (sc->sc_type == WM_T_82574)
9721 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
9722 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9723 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
9724 else
9725 return rxq->rxq_descs[idx].wrx_special;
9726 }
9727
9728 static inline int
9729 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
9730 {
9731 struct wm_softc *sc = rxq->rxq_sc;
9732
9733 if (sc->sc_type == WM_T_82574)
9734 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
9735 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9736 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
9737 else
9738 return rxq->rxq_descs[idx].wrx_len;
9739 }
9740
9741 #ifdef WM_DEBUG
9742 static inline uint32_t
9743 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
9744 {
9745 struct wm_softc *sc = rxq->rxq_sc;
9746
9747 if (sc->sc_type == WM_T_82574)
9748 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
9749 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9750 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
9751 else
9752 return 0;
9753 }
9754
9755 static inline uint8_t
9756 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
9757 {
9758 struct wm_softc *sc = rxq->rxq_sc;
9759
9760 if (sc->sc_type == WM_T_82574)
9761 return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
9762 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9763 return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
9764 else
9765 return 0;
9766 }
9767 #endif /* WM_DEBUG */
9768
9769 static inline bool
9770 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
9771 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9772 {
9773
9774 if (sc->sc_type == WM_T_82574)
9775 return (status & ext_bit) != 0;
9776 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9777 return (status & nq_bit) != 0;
9778 else
9779 return (status & legacy_bit) != 0;
9780 }
9781
9782 static inline bool
9783 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
9784 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9785 {
9786
9787 if (sc->sc_type == WM_T_82574)
9788 return (error & ext_bit) != 0;
9789 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9790 return (error & nq_bit) != 0;
9791 else
9792 return (error & legacy_bit) != 0;
9793 }
9794
9795 static inline bool
9796 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
9797 {
9798
9799 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
9800 WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
9801 return true;
9802 else
9803 return false;
9804 }
9805
9806 static inline bool
9807 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
9808 {
9809 struct wm_softc *sc = rxq->rxq_sc;
9810
9811 /* XXX missing error bit for newqueue? */
9812 if (wm_rxdesc_is_set_error(sc, errors,
9813 WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
9814 EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
9815 | EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
9816 NQRXC_ERROR_RXE)) {
9817 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
9818 EXTRXC_ERROR_SE, 0))
9819 log(LOG_WARNING, "%s: symbol error\n",
9820 device_xname(sc->sc_dev));
9821 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
9822 EXTRXC_ERROR_SEQ, 0))
9823 log(LOG_WARNING, "%s: receive sequence error\n",
9824 device_xname(sc->sc_dev));
9825 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
9826 EXTRXC_ERROR_CE, 0))
9827 log(LOG_WARNING, "%s: CRC error\n",
9828 device_xname(sc->sc_dev));
9829 return true;
9830 }
9831
9832 return false;
9833 }
9834
9835 static inline bool
9836 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
9837 {
9838 struct wm_softc *sc = rxq->rxq_sc;
9839
9840 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
9841 NQRXC_STATUS_DD)) {
9842 /* We have processed all of the receive descriptors. */
9843 wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
9844 return false;
9845 }
9846
9847 return true;
9848 }
9849
9850 static inline bool
9851 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
9852 uint16_t vlantag, struct mbuf *m)
9853 {
9854
9855 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
9856 WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
9857 vlan_set_tag(m, le16toh(vlantag));
9858 }
9859
9860 return true;
9861 }
9862
9863 static inline void
9864 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
9865 uint32_t errors, struct mbuf *m)
9866 {
9867 struct wm_softc *sc = rxq->rxq_sc;
9868
9869 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
9870 if (wm_rxdesc_is_set_status(sc, status,
9871 WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
9872 WM_Q_EVCNT_INCR(rxq, ipsum);
9873 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
9874 if (wm_rxdesc_is_set_error(sc, errors,
9875 WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
9876 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
9877 }
9878 if (wm_rxdesc_is_set_status(sc, status,
9879 WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
9880 /*
9881 * Note: we don't know if this was TCP or UDP,
9882 * so we just set both bits, and expect the
9883 * upper layers to deal.
9884 */
9885 WM_Q_EVCNT_INCR(rxq, tusum);
9886 m->m_pkthdr.csum_flags |=
9887 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9888 M_CSUM_TCPv6 | M_CSUM_UDPv6;
9889 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
9890 EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
9891 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
9892 }
9893 }
9894 }
9895
9896 /*
9897 * wm_rxeof:
9898 *
9899 * Helper; handle receive interrupts.
9900 */
9901 static bool
9902 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
9903 {
9904 struct wm_softc *sc = rxq->rxq_sc;
9905 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9906 struct wm_rxsoft *rxs;
9907 struct mbuf *m;
9908 int i, len;
9909 int count = 0;
9910 uint32_t status, errors;
9911 uint16_t vlantag;
9912 bool more = false;
9913
9914 KASSERT(mutex_owned(rxq->rxq_lock));
9915
9916 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
9917 rxs = &rxq->rxq_soft[i];
9918
9919 DPRINTF(sc, WM_DEBUG_RX,
9920 ("%s: RX: checking descriptor %d\n",
9921 device_xname(sc->sc_dev), i));
9922 wm_cdrxsync(rxq, i,
9923 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9924
9925 status = wm_rxdesc_get_status(rxq, i);
9926 errors = wm_rxdesc_get_errors(rxq, i);
9927 len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
9928 vlantag = wm_rxdesc_get_vlantag(rxq, i);
9929 #ifdef WM_DEBUG
9930 uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
9931 uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
9932 #endif
9933
9934 if (!wm_rxdesc_dd(rxq, i, status))
9935 break;
9936
9937 if (limit-- == 0) {
9938 more = true;
9939 DPRINTF(sc, WM_DEBUG_RX,
9940 ("%s: RX: loop limited, descriptor %d is not processed\n",
9941 device_xname(sc->sc_dev), i));
9942 break;
9943 }
9944
9945 count++;
9946 if (__predict_false(rxq->rxq_discard)) {
9947 DPRINTF(sc, WM_DEBUG_RX,
9948 ("%s: RX: discarding contents of descriptor %d\n",
9949 device_xname(sc->sc_dev), i));
9950 wm_init_rxdesc(rxq, i);
9951 if (wm_rxdesc_is_eop(rxq, status)) {
9952 /* Reset our state. */
9953 DPRINTF(sc, WM_DEBUG_RX,
9954 ("%s: RX: resetting rxdiscard -> 0\n",
9955 device_xname(sc->sc_dev)));
9956 rxq->rxq_discard = 0;
9957 }
9958 continue;
9959 }
9960
9961 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9962 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
9963
9964 m = rxs->rxs_mbuf;
9965
9966 /*
9967 * Add a new receive buffer to the ring, unless of
9968 * course the length is zero. Treat the latter as a
9969 * failed mapping.
9970 */
9971 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
9972 /*
9973 * Failed, throw away what we've done so
9974 * far, and discard the rest of the packet.
9975 */
9976 if_statinc(ifp, if_ierrors);
9977 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9978 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
9979 wm_init_rxdesc(rxq, i);
9980 if (!wm_rxdesc_is_eop(rxq, status))
9981 rxq->rxq_discard = 1;
9982 if (rxq->rxq_head != NULL)
9983 m_freem(rxq->rxq_head);
9984 WM_RXCHAIN_RESET(rxq);
9985 DPRINTF(sc, WM_DEBUG_RX,
9986 ("%s: RX: Rx buffer allocation failed, "
9987 "dropping packet%s\n", device_xname(sc->sc_dev),
9988 rxq->rxq_discard ? " (discard)" : ""));
9989 continue;
9990 }
9991
9992 m->m_len = len;
9993 rxq->rxq_len += len;
9994 DPRINTF(sc, WM_DEBUG_RX,
9995 ("%s: RX: buffer at %p len %d\n",
9996 device_xname(sc->sc_dev), m->m_data, len));
9997
9998 /* If this is not the end of the packet, keep looking. */
9999 if (!wm_rxdesc_is_eop(rxq, status)) {
10000 WM_RXCHAIN_LINK(rxq, m);
10001 DPRINTF(sc, WM_DEBUG_RX,
10002 ("%s: RX: not yet EOP, rxlen -> %d\n",
10003 device_xname(sc->sc_dev), rxq->rxq_len));
10004 continue;
10005 }
10006
10007 /*
		 * Okay, we have the entire packet now. The chip is
		 * configured to include the FCS (not all chips can be
		 * configured to strip it), so normally we have to trim it.
		 * The exceptions are the I35[04] and I21[01]: they have an
		 * erratum whereby the RCTL_SECRC bit in the RCTL register
		 * is always set, so on those chips we don't trim.
		 * PCH2 and newer chips also exclude the FCS when jumbo
		 * frames are in use, to work around an erratum.
		 * We may need to adjust the length of the previous mbuf in
		 * the chain if the current mbuf is too short.
10017 */
10018 if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
10019 if (m->m_len < ETHER_CRC_LEN) {
10020 rxq->rxq_tail->m_len
10021 -= (ETHER_CRC_LEN - m->m_len);
10022 m->m_len = 0;
10023 } else
10024 m->m_len -= ETHER_CRC_LEN;
10025 len = rxq->rxq_len - ETHER_CRC_LEN;
10026 } else
10027 len = rxq->rxq_len;
10028
10029 WM_RXCHAIN_LINK(rxq, m);
10030
10031 *rxq->rxq_tailp = NULL;
10032 m = rxq->rxq_head;
10033
10034 WM_RXCHAIN_RESET(rxq);
10035
10036 DPRINTF(sc, WM_DEBUG_RX,
10037 ("%s: RX: have entire packet, len -> %d\n",
10038 device_xname(sc->sc_dev), len));
10039
10040 /* If an error occurred, update stats and drop the packet. */
10041 if (wm_rxdesc_has_errors(rxq, errors)) {
10042 m_freem(m);
10043 continue;
10044 }
10045
10046 /* No errors. Receive the packet. */
10047 m_set_rcvif(m, ifp);
10048 m->m_pkthdr.len = len;
10049 /*
10050 * TODO
		 * The rsshash and rsstype should be saved in this mbuf.
10052 */
10053 DPRINTF(sc, WM_DEBUG_RX,
10054 ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
10055 device_xname(sc->sc_dev), rsstype, rsshash));
10056
10057 /*
10058 * If VLANs are enabled, VLAN packets have been unwrapped
10059 * for us. Associate the tag with the packet.
10060 */
10061 if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
10062 continue;
10063
10064 /* Set up checksum info for this packet. */
10065 wm_rxdesc_ensure_checksum(rxq, status, errors, m);
10066
10067 rxq->rxq_packets++;
10068 rxq->rxq_bytes += len;
10069 /* Pass it on. */
10070 if_percpuq_enqueue(sc->sc_ipq, m);
10071
10072 if (rxq->rxq_stopping)
10073 break;
10074 }
10075 rxq->rxq_ptr = i;
10076
10077 if (count != 0)
10078 rnd_add_uint32(&sc->rnd_source, count);
10079
10080 DPRINTF(sc, WM_DEBUG_RX,
10081 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
10082
10083 return more;
10084 }
10085
10086 /*
10087 * wm_linkintr_gmii:
10088 *
10089 * Helper; handle link interrupts for GMII.
10090 */
10091 static void
10092 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
10093 {
10094 device_t dev = sc->sc_dev;
10095 uint32_t status, reg;
10096 bool link;
10097 int rv;
10098
10099 KASSERT(mutex_owned(sc->sc_core_lock));
10100
10101 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
10102 __func__));
10103
10104 if ((icr & ICR_LSC) == 0) {
10105 if (icr & ICR_RXSEQ)
10106 DPRINTF(sc, WM_DEBUG_LINK,
10107 ("%s: LINK Receive sequence error\n",
10108 device_xname(dev)));
10109 return;
10110 }
10111
10112 /* Link status changed */
10113 status = CSR_READ(sc, WMREG_STATUS);
10114 link = status & STATUS_LU;
10115 if (link) {
10116 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
10117 device_xname(dev),
10118 (status & STATUS_FD) ? "FDX" : "HDX"));
10119 if (wm_phy_need_linkdown_discard(sc)) {
10120 DPRINTF(sc, WM_DEBUG_LINK,
10121 ("%s: linkintr: Clear linkdown discard flag\n",
10122 device_xname(dev)));
10123 wm_clear_linkdown_discard(sc);
10124 }
10125 } else {
10126 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10127 device_xname(dev)));
10128 if (wm_phy_need_linkdown_discard(sc)) {
10129 DPRINTF(sc, WM_DEBUG_LINK,
10130 ("%s: linkintr: Set linkdown discard flag\n",
10131 device_xname(dev)));
10132 wm_set_linkdown_discard(sc);
10133 }
10134 }
10135 if ((sc->sc_type == WM_T_ICH8) && (link == false))
10136 wm_gig_downshift_workaround_ich8lan(sc);
10137
10138 if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
10139 wm_kmrn_lock_loss_workaround_ich8lan(sc);
10140
10141 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
10142 device_xname(dev)));
10143 mii_pollstat(&sc->sc_mii);
10144 if (sc->sc_type == WM_T_82543) {
10145 int miistatus, active;
10146
10147 /*
10148 * With 82543, we need to force speed and
10149 * duplex on the MAC equal to what the PHY
10150 * speed and duplex configuration is.
10151 */
10152 miistatus = sc->sc_mii.mii_media_status;
10153
10154 if (miistatus & IFM_ACTIVE) {
10155 active = sc->sc_mii.mii_media_active;
10156 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
10157 switch (IFM_SUBTYPE(active)) {
10158 case IFM_10_T:
10159 sc->sc_ctrl |= CTRL_SPEED_10;
10160 break;
10161 case IFM_100_TX:
10162 sc->sc_ctrl |= CTRL_SPEED_100;
10163 break;
10164 case IFM_1000_T:
10165 sc->sc_ctrl |= CTRL_SPEED_1000;
10166 break;
10167 default:
10168 /*
10169 * Fiber?
				 * Should not get here.
10171 */
10172 device_printf(dev, "unknown media (%x)\n",
10173 active);
10174 break;
10175 }
10176 if (active & IFM_FDX)
10177 sc->sc_ctrl |= CTRL_FD;
10178 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10179 }
10180 } else if (sc->sc_type == WM_T_PCH) {
10181 wm_k1_gig_workaround_hv(sc,
10182 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
10183 }
10184
10185 /*
10186 * When connected at 10Mbps half-duplex, some parts are excessively
	 * aggressive, resulting in many collisions. To avoid this, increase
10188 * the IPG and reduce Rx latency in the PHY.
10189 */
10190 if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
10191 && link) {
10192 uint32_t tipg_reg;
10193 uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
10194 bool fdx;
10195 uint16_t emi_addr, emi_val;
10196
10197 tipg_reg = CSR_READ(sc, WMREG_TIPG);
10198 tipg_reg &= ~TIPG_IPGT_MASK;
10199 fdx = status & STATUS_FD;
10200
10201 if (!fdx && (speed == STATUS_SPEED_10)) {
10202 tipg_reg |= 0xff;
10203 /* Reduce Rx latency in analog PHY */
10204 emi_val = 0;
10205 } else if ((sc->sc_type >= WM_T_PCH_SPT) &&
10206 fdx && speed != STATUS_SPEED_1000) {
10207 tipg_reg |= 0xc;
10208 emi_val = 1;
10209 } else {
			/* Roll back to the default values */
10211 tipg_reg |= 0x08;
10212 emi_val = 1;
10213 }
10214
10215 CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
10216
10217 rv = sc->phy.acquire(sc);
10218 if (rv)
10219 return;
10220
10221 if (sc->sc_type == WM_T_PCH2)
10222 emi_addr = I82579_RX_CONFIG;
10223 else
10224 emi_addr = I217_RX_CONFIG;
10225 rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
10226
10227 if (sc->sc_type >= WM_T_PCH_LPT) {
10228 uint16_t phy_reg;
10229
10230 sc->phy.readreg_locked(dev, 2,
10231 I217_PLL_CLOCK_GATE_REG, &phy_reg);
10232 phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
10233 if (speed == STATUS_SPEED_100
10234 || speed == STATUS_SPEED_10)
10235 phy_reg |= 0x3e8;
10236 else
10237 phy_reg |= 0xfa;
10238 sc->phy.writereg_locked(dev, 2,
10239 I217_PLL_CLOCK_GATE_REG, phy_reg);
10240
10241 if (speed == STATUS_SPEED_1000) {
10242 sc->phy.readreg_locked(dev, 2,
10243 HV_PM_CTRL, &phy_reg);
10244
10245 phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
10246
10247 sc->phy.writereg_locked(dev, 2,
10248 HV_PM_CTRL, phy_reg);
10249 }
10250 }
10251 sc->phy.release(sc);
10252
10253 if (rv)
10254 return;
10255
10256 if (sc->sc_type >= WM_T_PCH_SPT) {
10257 uint16_t data, ptr_gap;
10258
10259 if (speed == STATUS_SPEED_1000) {
10260 rv = sc->phy.acquire(sc);
10261 if (rv)
10262 return;
10263
10264 rv = sc->phy.readreg_locked(dev, 2,
10265 I82579_UNKNOWN1, &data);
10266 if (rv) {
10267 sc->phy.release(sc);
10268 return;
10269 }
10270
10271 ptr_gap = (data & (0x3ff << 2)) >> 2;
10272 if (ptr_gap < 0x18) {
10273 data &= ~(0x3ff << 2);
10274 data |= (0x18 << 2);
10275 rv = sc->phy.writereg_locked(dev,
10276 2, I82579_UNKNOWN1, data);
10277 }
10278 sc->phy.release(sc);
10279 if (rv)
10280 return;
10281 } else {
10282 rv = sc->phy.acquire(sc);
10283 if (rv)
10284 return;
10285
10286 rv = sc->phy.writereg_locked(dev, 2,
10287 I82579_UNKNOWN1, 0xc023);
10288 sc->phy.release(sc);
10289 if (rv)
10290 return;
10291
10292 }
10293 }
10294 }
10295
10296 /*
10297 * I217 Packet Loss issue:
10298 * ensure that FEXTNVM4 Beacon Duration is set correctly
10299 * on power up.
10300 * Set the Beacon Duration for I217 to 8 usec
10301 */
10302 if (sc->sc_type >= WM_T_PCH_LPT) {
10303 reg = CSR_READ(sc, WMREG_FEXTNVM4);
10304 reg &= ~FEXTNVM4_BEACON_DURATION;
10305 reg |= FEXTNVM4_BEACON_DURATION_8US;
10306 CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
10307 }
10308
10309 /* Work-around I218 hang issue */
10310 if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
10311 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
10312 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
10313 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
10314 wm_k1_workaround_lpt_lp(sc, link);
10315
10316 if (sc->sc_type >= WM_T_PCH_LPT) {
10317 /*
10318 * Set platform power management values for Latency
10319 * Tolerance Reporting (LTR)
10320 */
10321 wm_platform_pm_pch_lpt(sc,
10322 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
10323 }
10324
10325 /* Clear link partner's EEE ability */
10326 sc->eee_lp_ability = 0;
10327
10328 /* FEXTNVM6 K1-off workaround */
10329 if (sc->sc_type == WM_T_PCH_SPT) {
10330 reg = CSR_READ(sc, WMREG_FEXTNVM6);
10331 if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
10332 reg |= FEXTNVM6_K1_OFF_ENABLE;
10333 else
10334 reg &= ~FEXTNVM6_K1_OFF_ENABLE;
10335 CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
10336 }
10337
10338 if (!link)
10339 return;
10340
10341 switch (sc->sc_type) {
10342 case WM_T_PCH2:
10343 wm_k1_workaround_lv(sc);
10344 /* FALLTHROUGH */
10345 case WM_T_PCH:
10346 if (sc->sc_phytype == WMPHY_82578)
10347 wm_link_stall_workaround_hv(sc);
10348 break;
10349 default:
10350 break;
10351 }
10352
10353 /* Enable/Disable EEE after link up */
10354 if (sc->sc_phytype > WMPHY_82579)
10355 wm_set_eee_pchlan(sc);
10356 }
10357
10358 /*
10359 * wm_linkintr_tbi:
10360 *
10361 * Helper; handle link interrupts for TBI mode.
10362 */
10363 static void
10364 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
10365 {
10366 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10367 uint32_t status;
10368
10369 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
10370 __func__));
10371
10372 status = CSR_READ(sc, WMREG_STATUS);
10373 if (icr & ICR_LSC) {
10374 wm_check_for_link(sc);
10375 if (status & STATUS_LU) {
10376 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
10377 device_xname(sc->sc_dev),
10378 (status & STATUS_FD) ? "FDX" : "HDX"));
10379 /*
10380 * NOTE: CTRL will update TFCE and RFCE automatically,
10381 * so we should update sc->sc_ctrl
10382 */
10383
10384 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
10385 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
10386 sc->sc_fcrtl &= ~FCRTL_XONE;
10387 if (status & STATUS_FD)
10388 sc->sc_tctl |=
10389 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
10390 else
10391 sc->sc_tctl |=
10392 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
10393 if (sc->sc_ctrl & CTRL_TFCE)
10394 sc->sc_fcrtl |= FCRTL_XONE;
10395 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
10396 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
10397 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
10398 sc->sc_tbi_linkup = 1;
10399 if_link_state_change(ifp, LINK_STATE_UP);
10400 } else {
10401 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10402 device_xname(sc->sc_dev)));
10403 sc->sc_tbi_linkup = 0;
10404 if_link_state_change(ifp, LINK_STATE_DOWN);
10405 }
10406 /* Update LED */
10407 wm_tbi_serdes_set_linkled(sc);
10408 } else if (icr & ICR_RXSEQ)
10409 DPRINTF(sc, WM_DEBUG_LINK,
10410 ("%s: LINK: Receive sequence error\n",
10411 device_xname(sc->sc_dev)));
10412 }
10413
10414 /*
10415 * wm_linkintr_serdes:
10416 *
 *	Helper; handle link interrupts for SERDES mode.
10418 */
10419 static void
10420 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
10421 {
10422 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10423 struct mii_data *mii = &sc->sc_mii;
10424 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
10425 uint32_t pcs_adv, pcs_lpab, reg;
10426
10427 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
10428 __func__));
10429
10430 if (icr & ICR_LSC) {
10431 /* Check PCS */
10432 reg = CSR_READ(sc, WMREG_PCS_LSTS);
10433 if ((reg & PCS_LSTS_LINKOK) != 0) {
10434 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
10435 device_xname(sc->sc_dev)));
10436 mii->mii_media_status |= IFM_ACTIVE;
10437 sc->sc_tbi_linkup = 1;
10438 if_link_state_change(ifp, LINK_STATE_UP);
10439 } else {
10440 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10441 device_xname(sc->sc_dev)));
10442 mii->mii_media_status |= IFM_NONE;
10443 sc->sc_tbi_linkup = 0;
10444 if_link_state_change(ifp, LINK_STATE_DOWN);
10445 wm_tbi_serdes_set_linkled(sc);
10446 return;
10447 }
10448 mii->mii_media_active |= IFM_1000_SX;
10449 if ((reg & PCS_LSTS_FDX) != 0)
10450 mii->mii_media_active |= IFM_FDX;
10451 else
10452 mii->mii_media_active |= IFM_HDX;
10453 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
10454 /* Check flow */
10455 reg = CSR_READ(sc, WMREG_PCS_LSTS);
10456 if ((reg & PCS_LSTS_AN_COMP) == 0) {
10457 DPRINTF(sc, WM_DEBUG_LINK,
10458 ("XXX LINKOK but not ACOMP\n"));
10459 return;
10460 }
10461 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
10462 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
10463 DPRINTF(sc, WM_DEBUG_LINK,
10464 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
10465 if ((pcs_adv & TXCW_SYM_PAUSE)
10466 && (pcs_lpab & TXCW_SYM_PAUSE)) {
10467 mii->mii_media_active |= IFM_FLOW
10468 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
10469 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
10470 && (pcs_adv & TXCW_ASYM_PAUSE)
10471 && (pcs_lpab & TXCW_SYM_PAUSE)
10472 && (pcs_lpab & TXCW_ASYM_PAUSE))
10473 mii->mii_media_active |= IFM_FLOW
10474 | IFM_ETH_TXPAUSE;
10475 else if ((pcs_adv & TXCW_SYM_PAUSE)
10476 && (pcs_adv & TXCW_ASYM_PAUSE)
10477 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
10478 && (pcs_lpab & TXCW_ASYM_PAUSE))
10479 mii->mii_media_active |= IFM_FLOW
10480 | IFM_ETH_RXPAUSE;
10481 }
10482 /* Update LED */
10483 wm_tbi_serdes_set_linkled(sc);
10484 } else
10485 DPRINTF(sc, WM_DEBUG_LINK,
10486 ("%s: LINK: Receive sequence error\n",
10487 device_xname(sc->sc_dev)));
10488 }
10489
10490 /*
10491 * wm_linkintr:
10492 *
10493 * Helper; handle link interrupts.
10494 */
10495 static void
10496 wm_linkintr(struct wm_softc *sc, uint32_t icr)
10497 {
10498
10499 KASSERT(mutex_owned(sc->sc_core_lock));
10500
10501 if (sc->sc_flags & WM_F_HAS_MII)
10502 wm_linkintr_gmii(sc, icr);
10503 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
10504 && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
10505 wm_linkintr_serdes(sc, icr);
10506 else
10507 wm_linkintr_tbi(sc, icr);
10508 }
10509
10510
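/*
 * wm_sched_handle_queue:
 *
 *	Defer Tx/Rx processing for a queue to either the per-device
 *	workqueue or a softint; wmq_wq_enqueued prevents enqueueing the
 *	same work item twice.
 */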
10511 static inline void
10512 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
10513 {
10514
10515 if (wmq->wmq_txrx_use_workqueue) {
10516 if (!wmq->wmq_wq_enqueued) {
10517 wmq->wmq_wq_enqueued = true;
10518 workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie,
10519 curcpu());
10520 }
10521 } else
10522 softint_schedule(wmq->wmq_si);
10523 }
10524
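/*
 * Writing all ones to IMC masks every interrupt cause; writing the
 * driver's cause set (sc_icr) back to IMS unmasks those causes again.
 */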
10525 static inline void
10526 wm_legacy_intr_disable(struct wm_softc *sc)
10527 {
10528
10529 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
10530 }
10531
10532 static inline void
10533 wm_legacy_intr_enable(struct wm_softc *sc)
10534 {
10535
10536 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
10537 }
10538
10539 /*
10540 * wm_intr_legacy:
10541 *
10542 * Interrupt service routine for INTx and MSI.
10543 */
10544 static int
10545 wm_intr_legacy(void *arg)
10546 {
10547 struct wm_softc *sc = arg;
10548 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10549 struct wm_queue *wmq = &sc->sc_queue[0];
10550 struct wm_txqueue *txq = &wmq->wmq_txq;
10551 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10552 u_int txlimit = sc->sc_tx_intr_process_limit;
10553 u_int rxlimit = sc->sc_rx_intr_process_limit;
10554 uint32_t icr, rndval = 0;
10555 bool more = false;
10556
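	/*
	 * Reading ICR acknowledges the pending interrupt causes. If none
	 * of the causes we enabled (sc_icr) are set, the interrupt was
	 * not ours (e.g. a shared INTx line).
	 */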
10557 icr = CSR_READ(sc, WMREG_ICR);
10558 if ((icr & sc->sc_icr) == 0)
10559 return 0;
10560
	DPRINTF(sc, WM_DEBUG_TX,
	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
	rndval = icr;
10565
10566 mutex_enter(txq->txq_lock);
10567
10568 if (txq->txq_stopping) {
10569 mutex_exit(txq->txq_lock);
10570 return 1;
10571 }
10572
10573 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
10574 if (icr & ICR_TXDW) {
10575 DPRINTF(sc, WM_DEBUG_TX,
10576 ("%s: TX: got TXDW interrupt\n",
10577 device_xname(sc->sc_dev)));
10578 WM_Q_EVCNT_INCR(txq, txdw);
10579 }
10580 #endif
10581 if (txlimit > 0) {
10582 more |= wm_txeof(txq, txlimit);
10583 if (!IF_IS_EMPTY(&ifp->if_snd))
10584 more = true;
10585 } else
10586 more = true;
10587 mutex_exit(txq->txq_lock);
10588
10589 mutex_enter(rxq->rxq_lock);
10590
10591 if (rxq->rxq_stopping) {
10592 mutex_exit(rxq->rxq_lock);
10593 return 1;
10594 }
10595
10596 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
10597 if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
10598 DPRINTF(sc, WM_DEBUG_RX,
10599 ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
10600 device_xname(sc->sc_dev),
10601 icr & (ICR_RXDMT0 | ICR_RXT0)));
10602 WM_Q_EVCNT_INCR(rxq, intr);
10603 }
10604 #endif
10605 if (rxlimit > 0) {
10606 /*
		 * wm_rxeof() does *not* call upper layer functions
		 * directly; if_percpuq_enqueue() just calls
		 * softint_schedule(), so we can call wm_rxeof() in
		 * interrupt context.
10610 */
10611 more = wm_rxeof(rxq, rxlimit);
10612 } else
10613 more = true;
10614
10615 mutex_exit(rxq->rxq_lock);
10616
10617 mutex_enter(sc->sc_core_lock);
10618
10619 if (sc->sc_core_stopping) {
10620 mutex_exit(sc->sc_core_lock);
10621 return 1;
10622 }
10623
10624 if (icr & (ICR_LSC | ICR_RXSEQ)) {
10625 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10626 wm_linkintr(sc, icr);
10627 }
10628 if ((icr & ICR_GPI(0)) != 0)
10629 device_printf(sc->sc_dev, "got module interrupt\n");
10630
10631 mutex_exit(sc->sc_core_lock);
10632
10633 if (icr & ICR_RXO) {
10634 #if defined(WM_DEBUG)
10635 log(LOG_WARNING, "%s: Receive overrun\n",
10636 device_xname(sc->sc_dev));
10637 #endif /* defined(WM_DEBUG) */
10638 }
10639
10640 rnd_add_uint32(&sc->rnd_source, rndval);
10641
10642 if (more) {
10643 /* Try to get more packets going. */
10644 wm_legacy_intr_disable(sc);
10645 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10646 wm_sched_handle_queue(sc, wmq);
10647 }
10648
10649 return 1;
10650 }
10651
10652 static inline void
10653 wm_txrxintr_disable(struct wm_queue *wmq)
10654 {
10655 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10656
10657 if (__predict_false(!wm_is_using_msix(sc))) {
10658 wm_legacy_intr_disable(sc);
10659 return;
10660 }
10661
10662 if (sc->sc_type == WM_T_82574)
10663 CSR_WRITE(sc, WMREG_IMC,
10664 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
10665 else if (sc->sc_type == WM_T_82575)
10666 CSR_WRITE(sc, WMREG_EIMC,
10667 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10668 else
10669 CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
10670 }
10671
10672 static inline void
10673 wm_txrxintr_enable(struct wm_queue *wmq)
10674 {
10675 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10676
10677 wm_itrs_calculate(sc, wmq);
10678
10679 if (__predict_false(!wm_is_using_msix(sc))) {
10680 wm_legacy_intr_enable(sc);
10681 return;
10682 }
10683
10684 /*
	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
	 * here. We need not care whether RXQ(0) or RXQ(1) enables
	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
	 * while its wm_handle_queue(wmq) is running.
10689 */
10690 if (sc->sc_type == WM_T_82574)
10691 CSR_WRITE(sc, WMREG_IMS,
10692 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
10693 else if (sc->sc_type == WM_T_82575)
10694 CSR_WRITE(sc, WMREG_EIMS,
10695 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10696 else
10697 CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
10698 }
10699
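/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for the Tx/Rx MSI-X vector of one
 *	queue pair. The vector stays masked while we process up to the
 *	interrupt limits; if work remains, processing is deferred to
 *	wm_handle_queue() and the vector is re-enabled only there.
 */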
10700 static int
10701 wm_txrxintr_msix(void *arg)
10702 {
10703 struct wm_queue *wmq = arg;
10704 struct wm_txqueue *txq = &wmq->wmq_txq;
10705 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10706 struct wm_softc *sc = txq->txq_sc;
10707 u_int txlimit = sc->sc_tx_intr_process_limit;
10708 u_int rxlimit = sc->sc_rx_intr_process_limit;
10709 bool txmore;
10710 bool rxmore;
10711
10712 KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
10713
10714 DPRINTF(sc, WM_DEBUG_TX,
10715 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
10716
10717 wm_txrxintr_disable(wmq);
10718
10719 mutex_enter(txq->txq_lock);
10720
10721 if (txq->txq_stopping) {
10722 mutex_exit(txq->txq_lock);
10723 return 1;
10724 }
10725
10726 WM_Q_EVCNT_INCR(txq, txdw);
10727 if (txlimit > 0) {
10728 txmore = wm_txeof(txq, txlimit);
10729 /* wm_deferred start() is done in wm_handle_queue(). */
10730 } else
10731 txmore = true;
10732 mutex_exit(txq->txq_lock);
10733
10734 DPRINTF(sc, WM_DEBUG_RX,
10735 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
10736 mutex_enter(rxq->rxq_lock);
10737
10738 if (rxq->rxq_stopping) {
10739 mutex_exit(rxq->rxq_lock);
10740 return 1;
10741 }
10742
10743 WM_Q_EVCNT_INCR(rxq, intr);
10744 if (rxlimit > 0) {
10745 rxmore = wm_rxeof(rxq, rxlimit);
10746 } else
10747 rxmore = true;
10748 mutex_exit(rxq->rxq_lock);
10749
10750 wm_itrs_writereg(sc, wmq);
10751
10752 if (txmore || rxmore) {
10753 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10754 wm_sched_handle_queue(sc, wmq);
10755 } else
10756 wm_txrxintr_enable(wmq);
10757
10758 return 1;
10759 }
10760
10761 static void
10762 wm_handle_queue(void *arg)
10763 {
10764 struct wm_queue *wmq = arg;
10765 struct wm_txqueue *txq = &wmq->wmq_txq;
10766 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10767 struct wm_softc *sc = txq->txq_sc;
10768 u_int txlimit = sc->sc_tx_process_limit;
10769 u_int rxlimit = sc->sc_rx_process_limit;
10770 bool txmore;
10771 bool rxmore;
10772
10773 mutex_enter(txq->txq_lock);
10774 if (txq->txq_stopping) {
10775 mutex_exit(txq->txq_lock);
10776 return;
10777 }
10778 txmore = wm_txeof(txq, txlimit);
10779 wm_deferred_start_locked(txq);
10780 mutex_exit(txq->txq_lock);
10781
10782 mutex_enter(rxq->rxq_lock);
10783 if (rxq->rxq_stopping) {
10784 mutex_exit(rxq->rxq_lock);
10785 return;
10786 }
10787 WM_Q_EVCNT_INCR(rxq, defer);
10788 rxmore = wm_rxeof(rxq, rxlimit);
10789 mutex_exit(rxq->rxq_lock);
10790
10791 if (txmore || rxmore) {
10792 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10793 wm_sched_handle_queue(sc, wmq);
10794 } else
10795 wm_txrxintr_enable(wmq);
10796 }
10797
10798 static void
10799 wm_handle_queue_work(struct work *wk, void *context)
10800 {
10801 struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
10802
10803 /*
	 * Workaround for some qemu environments that do not stop
	 * interrupts immediately.
10806 */
10807 wmq->wmq_wq_enqueued = false;
10808 wm_handle_queue(wmq);
10809 }
10810
10811 /*
10812 * wm_linkintr_msix:
10813 *
10814 * Interrupt service routine for link status change for MSI-X.
10815 */
10816 static int
10817 wm_linkintr_msix(void *arg)
10818 {
10819 struct wm_softc *sc = arg;
10820 uint32_t reg;
	bool has_rxo = false;
10822
10823 reg = CSR_READ(sc, WMREG_ICR);
10824 mutex_enter(sc->sc_core_lock);
10825 DPRINTF(sc, WM_DEBUG_LINK,
10826 ("%s: LINK: got link intr. ICR = %08x\n",
10827 device_xname(sc->sc_dev), reg));
10828
10829 if (sc->sc_core_stopping)
10830 goto out;
10831
10832 if ((reg & ICR_LSC) != 0) {
10833 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10834 wm_linkintr(sc, ICR_LSC);
10835 }
10836 if ((reg & ICR_GPI(0)) != 0)
10837 device_printf(sc->sc_dev, "got module interrupt\n");
10838
10839 /*
10840 * XXX 82574 MSI-X mode workaround
10841 *
	 * In 82574 MSI-X mode, a receive overrun (RXO) raises the ICR_OTHER
	 * MSI-X vector and asserts neither the ICR_RXQ(0) nor the ICR_RXQ(1)
	 * vector. So we generate ICR_RXQ(0) and ICR_RXQ(1) interrupts by
	 * writing WMREG_ICS, so that received packets are processed.
10846 */
10847 if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
10848 #if defined(WM_DEBUG)
10849 log(LOG_WARNING, "%s: Receive overrun\n",
10850 device_xname(sc->sc_dev));
10851 #endif /* defined(WM_DEBUG) */
10852
10853 has_rxo = true;
10854 /*
		 * The RXO interrupt fires at a very high rate when receive
		 * traffic is heavy, so handle ICR_OTHER in polling mode as
		 * is done for the Tx/Rx interrupts. ICR_OTHER is re-enabled
		 * at the end of wm_txrxintr_msix(), which is kicked by both
		 * the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
10860 */
10861 CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
10862
10863 CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
10864 }
10867
10868 out:
10869 mutex_exit(sc->sc_core_lock);
10870
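	/*
	 * Unmask the causes handled by this vector: on 82574, ICR_LSC
	 * always and ICR_OTHER only when no RXO polling is in progress
	 * (see the workaround above); other MACs simply unmask their
	 * link vector.
	 */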
10871 if (sc->sc_type == WM_T_82574) {
10872 if (!has_rxo)
10873 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
10874 else
10875 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
10876 } else if (sc->sc_type == WM_T_82575)
10877 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
10878 else
10879 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
10880
10881 return 1;
10882 }
10883
10884 /*
10885 * Media related.
10886 * GMII, SGMII, TBI (and SERDES)
10887 */
10888
10889 /* Common */
10890
10891 /*
10892 * wm_tbi_serdes_set_linkled:
10893 *
10894 * Update the link LED on TBI and SERDES devices.
10895 */
10896 static void
10897 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
10898 {
10899
10900 if (sc->sc_tbi_linkup)
10901 sc->sc_ctrl |= CTRL_SWDPIN(0);
10902 else
10903 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
10904
10905 /* 82540 or newer devices are active low */
10906 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
10907
10908 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10909 }
10910
10911 /* GMII related */
10912
10913 /*
10914 * wm_gmii_reset:
10915 *
10916 * Reset the PHY.
10917 */
10918 static void
10919 wm_gmii_reset(struct wm_softc *sc)
10920 {
10921 uint32_t reg;
10922 int rv;
10923
10924 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
10925 device_xname(sc->sc_dev), __func__));
10926
10927 rv = sc->phy.acquire(sc);
10928 if (rv != 0) {
10929 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10930 __func__);
10931 return;
10932 }
10933
10934 switch (sc->sc_type) {
10935 case WM_T_82542_2_0:
10936 case WM_T_82542_2_1:
10937 /* null */
10938 break;
10939 case WM_T_82543:
10940 /*
10941 * With 82543, we need to force speed and duplex on the MAC
10942 * equal to what the PHY speed and duplex configuration is.
10943 * In addition, we need to perform a hardware reset on the PHY
10944 * to take it out of reset.
10945 */
10946 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
10947 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10948
10949 /* The PHY reset pin is active-low. */
10950 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10951 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
10952 CTRL_EXT_SWDPIN(4));
10953 reg |= CTRL_EXT_SWDPIO(4);
10954
10955 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10956 CSR_WRITE_FLUSH(sc);
10957 delay(10*1000);
10958
10959 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
10960 CSR_WRITE_FLUSH(sc);
10961 delay(150);
10962 #if 0
10963 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
10964 #endif
10965 delay(20*1000); /* XXX extra delay to get PHY ID? */
10966 break;
10967 case WM_T_82544: /* Reset 10000us */
10968 case WM_T_82540:
10969 case WM_T_82545:
10970 case WM_T_82545_3:
10971 case WM_T_82546:
10972 case WM_T_82546_3:
10973 case WM_T_82541:
10974 case WM_T_82541_2:
10975 case WM_T_82547:
10976 case WM_T_82547_2:
10977 case WM_T_82571: /* Reset 100us */
10978 case WM_T_82572:
10979 case WM_T_82573:
10980 case WM_T_82574:
10981 case WM_T_82575:
10982 case WM_T_82576:
10983 case WM_T_82580:
10984 case WM_T_I350:
10985 case WM_T_I354:
10986 case WM_T_I210:
10987 case WM_T_I211:
10988 case WM_T_82583:
10989 case WM_T_80003:
10990 /* Generic reset */
10991 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10992 CSR_WRITE_FLUSH(sc);
10993 delay(20000);
10994 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10995 CSR_WRITE_FLUSH(sc);
10996 delay(20000);
10997
10998 if ((sc->sc_type == WM_T_82541)
10999 || (sc->sc_type == WM_T_82541_2)
11000 || (sc->sc_type == WM_T_82547)
11001 || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset() */
11003 /* XXX add code to set LED after phy reset */
11004 }
11005 break;
11006 case WM_T_ICH8:
11007 case WM_T_ICH9:
11008 case WM_T_ICH10:
11009 case WM_T_PCH:
11010 case WM_T_PCH2:
11011 case WM_T_PCH_LPT:
11012 case WM_T_PCH_SPT:
11013 case WM_T_PCH_CNP:
11014 /* Generic reset */
11015 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
11016 CSR_WRITE_FLUSH(sc);
11017 delay(100);
11018 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11019 CSR_WRITE_FLUSH(sc);
11020 delay(150);
11021 break;
11022 default:
11023 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
11024 __func__);
11025 break;
11026 }
11027
11028 sc->phy.release(sc);
11029
11030 /* get_cfg_done */
11031 wm_get_cfg_done(sc);
11032
11033 /* Extra setup */
11034 switch (sc->sc_type) {
11035 case WM_T_82542_2_0:
11036 case WM_T_82542_2_1:
11037 case WM_T_82543:
11038 case WM_T_82544:
11039 case WM_T_82540:
11040 case WM_T_82545:
11041 case WM_T_82545_3:
11042 case WM_T_82546:
11043 case WM_T_82546_3:
11044 case WM_T_82541_2:
11045 case WM_T_82547_2:
11046 case WM_T_82571:
11047 case WM_T_82572:
11048 case WM_T_82573:
11049 case WM_T_82574:
11050 case WM_T_82583:
11051 case WM_T_82575:
11052 case WM_T_82576:
11053 case WM_T_82580:
11054 case WM_T_I350:
11055 case WM_T_I354:
11056 case WM_T_I210:
11057 case WM_T_I211:
11058 case WM_T_80003:
11059 /* Null */
11060 break;
11061 case WM_T_82541:
11062 case WM_T_82547:
		/* XXX Configure the LED after PHY reset */
11064 break;
11065 case WM_T_ICH8:
11066 case WM_T_ICH9:
11067 case WM_T_ICH10:
11068 case WM_T_PCH:
11069 case WM_T_PCH2:
11070 case WM_T_PCH_LPT:
11071 case WM_T_PCH_SPT:
11072 case WM_T_PCH_CNP:
11073 wm_phy_post_reset(sc);
11074 break;
11075 default:
11076 panic("%s: unknown type\n", __func__);
11077 break;
11078 }
11079 }
11080
11081 /*
11082 * Set up sc_phytype and mii_{read|write}reg.
11083 *
 * To identify the PHY type, the correct read/write functions must be
 * selected, and selecting them requires knowing the PCI ID or MAC type
 * without accessing PHY registers.
 *
 * On the first call of this function, the PHY ID is not known yet, so
 * the PCI ID or MAC type is checked. The list of PCI IDs may not be
 * complete, so the result might be incorrect.
 *
 * On the second call, the PHY OUI and model are used to identify the
 * PHY type. The comparison table may still lack entries, but the result
 * should be better than that of the first call.
 *
 * If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
11098 */
11099 static void
11100 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
11101 uint16_t phy_model)
11102 {
11103 device_t dev = sc->sc_dev;
11104 struct mii_data *mii = &sc->sc_mii;
11105 uint16_t new_phytype = WMPHY_UNKNOWN;
11106 uint16_t doubt_phytype = WMPHY_UNKNOWN;
11107 mii_readreg_t new_readreg;
11108 mii_writereg_t new_writereg;
11109 bool dodiag = true;
11110
11111 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11112 device_xname(sc->sc_dev), __func__));
11113
11114 /*
	 * A 1000BASE-T SFP uses SGMII, and the PHY type assumed on the
	 * first call is always incorrect, so don't print diagnostic
	 * output on the second call.
11117 */
11118 if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
11119 dodiag = false;
11120
11121 if (mii->mii_readreg == NULL) {
11122 /*
11123 * This is the first call of this function. For ICH and PCH
11124 * variants, it's difficult to determine the PHY access method
11125 * by sc_type, so use the PCI product ID for some devices.
11126 */
11127
11128 switch (sc->sc_pcidevid) {
11129 case PCI_PRODUCT_INTEL_PCH_M_LM:
11130 case PCI_PRODUCT_INTEL_PCH_M_LC:
11131 /* 82577 */
11132 new_phytype = WMPHY_82577;
11133 break;
11134 case PCI_PRODUCT_INTEL_PCH_D_DM:
11135 case PCI_PRODUCT_INTEL_PCH_D_DC:
11136 /* 82578 */
11137 new_phytype = WMPHY_82578;
11138 break;
11139 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
11140 case PCI_PRODUCT_INTEL_PCH2_LV_V:
11141 /* 82579 */
11142 new_phytype = WMPHY_82579;
11143 break;
11144 case PCI_PRODUCT_INTEL_82801H_82567V_3:
11145 case PCI_PRODUCT_INTEL_82801I_BM:
11146 case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
11147 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
11148 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
11149 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
11150 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
11151 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
11152 /* ICH8, 9, 10 with 82567 */
11153 new_phytype = WMPHY_BM;
11154 break;
11155 default:
11156 break;
11157 }
11158 } else {
11159 /* It's not the first call. Use PHY OUI and model */
11160 switch (phy_oui) {
11161 case MII_OUI_ATTANSIC: /* atphy(4) */
11162 switch (phy_model) {
11163 case MII_MODEL_ATTANSIC_AR8021:
11164 new_phytype = WMPHY_82578;
11165 break;
11166 default:
11167 break;
11168 }
11169 break;
11170 case MII_OUI_xxMARVELL:
11171 switch (phy_model) {
11172 case MII_MODEL_xxMARVELL_I210:
11173 new_phytype = WMPHY_I210;
11174 break;
11175 case MII_MODEL_xxMARVELL_E1011:
11176 case MII_MODEL_xxMARVELL_E1000_3:
11177 case MII_MODEL_xxMARVELL_E1000_5:
11178 case MII_MODEL_xxMARVELL_E1112:
11179 new_phytype = WMPHY_M88;
11180 break;
11181 case MII_MODEL_xxMARVELL_E1149:
11182 new_phytype = WMPHY_BM;
11183 break;
11184 case MII_MODEL_xxMARVELL_E1111:
11185 case MII_MODEL_xxMARVELL_I347:
11186 case MII_MODEL_xxMARVELL_E1512:
11187 case MII_MODEL_xxMARVELL_E1340M:
11188 case MII_MODEL_xxMARVELL_E1543:
11189 new_phytype = WMPHY_M88;
11190 break;
11191 case MII_MODEL_xxMARVELL_I82563:
11192 new_phytype = WMPHY_GG82563;
11193 break;
11194 default:
11195 break;
11196 }
11197 break;
11198 case MII_OUI_INTEL:
11199 switch (phy_model) {
11200 case MII_MODEL_INTEL_I82577:
11201 new_phytype = WMPHY_82577;
11202 break;
11203 case MII_MODEL_INTEL_I82579:
11204 new_phytype = WMPHY_82579;
11205 break;
11206 case MII_MODEL_INTEL_I217:
11207 new_phytype = WMPHY_I217;
11208 break;
11209 case MII_MODEL_INTEL_I82580:
11210 new_phytype = WMPHY_82580;
11211 break;
11212 case MII_MODEL_INTEL_I350:
11213 new_phytype = WMPHY_I350;
11214 break;
11215 default:
11216 break;
11217 }
11218 break;
11219 case MII_OUI_yyINTEL:
11220 switch (phy_model) {
11221 case MII_MODEL_yyINTEL_I82562G:
11222 case MII_MODEL_yyINTEL_I82562EM:
11223 case MII_MODEL_yyINTEL_I82562ET:
11224 new_phytype = WMPHY_IFE;
11225 break;
11226 case MII_MODEL_yyINTEL_IGP01E1000:
11227 new_phytype = WMPHY_IGP;
11228 break;
11229 case MII_MODEL_yyINTEL_I82566:
11230 new_phytype = WMPHY_IGP_3;
11231 break;
11232 default:
11233 break;
11234 }
11235 break;
11236 default:
11237 break;
11238 }
11239
11240 if (dodiag) {
11241 if (new_phytype == WMPHY_UNKNOWN)
11242 aprint_verbose_dev(dev,
11243 "%s: Unknown PHY model. OUI=%06x, "
11244 "model=%04x\n", __func__, phy_oui,
11245 phy_model);
11246
11247 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11248 && (sc->sc_phytype != new_phytype)) {
				aprint_error_dev(dev, "Previously assumed PHY "
				    "type(%u) was incorrect. PHY type from "
				    "PHY ID = %u\n", sc->sc_phytype,
				    new_phytype);
11252 }
11253 }
11254 }
11255
11256 /* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
11257 if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
11258 /* SGMII */
11259 new_readreg = wm_sgmii_readreg;
11260 new_writereg = wm_sgmii_writereg;
11261 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
11262 /* BM2 (phyaddr == 1) */
11263 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11264 && (new_phytype != WMPHY_BM)
11265 && (new_phytype != WMPHY_UNKNOWN))
11266 doubt_phytype = new_phytype;
11267 new_phytype = WMPHY_BM;
11268 new_readreg = wm_gmii_bm_readreg;
11269 new_writereg = wm_gmii_bm_writereg;
11270 } else if (sc->sc_type >= WM_T_PCH) {
11271 /* All PCH* use _hv_ */
11272 new_readreg = wm_gmii_hv_readreg;
11273 new_writereg = wm_gmii_hv_writereg;
11274 } else if (sc->sc_type >= WM_T_ICH8) {
11275 /* non-82567 ICH8, 9 and 10 */
11276 new_readreg = wm_gmii_i82544_readreg;
11277 new_writereg = wm_gmii_i82544_writereg;
11278 } else if (sc->sc_type >= WM_T_80003) {
11279 /* 80003 */
11280 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11281 && (new_phytype != WMPHY_GG82563)
11282 && (new_phytype != WMPHY_UNKNOWN))
11283 doubt_phytype = new_phytype;
11284 new_phytype = WMPHY_GG82563;
11285 new_readreg = wm_gmii_i80003_readreg;
11286 new_writereg = wm_gmii_i80003_writereg;
11287 } else if (sc->sc_type >= WM_T_I210) {
11288 /* I210 and I211 */
11289 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11290 && (new_phytype != WMPHY_I210)
11291 && (new_phytype != WMPHY_UNKNOWN))
11292 doubt_phytype = new_phytype;
11293 new_phytype = WMPHY_I210;
11294 new_readreg = wm_gmii_gs40g_readreg;
11295 new_writereg = wm_gmii_gs40g_writereg;
11296 } else if (sc->sc_type >= WM_T_82580) {
11297 /* 82580, I350 and I354 */
11298 new_readreg = wm_gmii_82580_readreg;
11299 new_writereg = wm_gmii_82580_writereg;
11300 } else if (sc->sc_type >= WM_T_82544) {
11301 /* 82544, 0, [56], [17], 8257[1234] and 82583 */
11302 new_readreg = wm_gmii_i82544_readreg;
11303 new_writereg = wm_gmii_i82544_writereg;
11304 } else {
11305 new_readreg = wm_gmii_i82543_readreg;
11306 new_writereg = wm_gmii_i82543_writereg;
11307 }
11308
11309 if (new_phytype == WMPHY_BM) {
11310 /* All BM use _bm_ */
11311 new_readreg = wm_gmii_bm_readreg;
11312 new_writereg = wm_gmii_bm_writereg;
11313 }
11314 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
11315 /* All PCH* use _hv_ */
11316 new_readreg = wm_gmii_hv_readreg;
11317 new_writereg = wm_gmii_hv_writereg;
11318 }
11319
11320 /* Diag output */
11321 if (dodiag) {
11322 if (doubt_phytype != WMPHY_UNKNOWN)
11323 aprint_error_dev(dev, "Assumed new PHY type was "
11324 "incorrect. old = %u, new = %u\n", sc->sc_phytype,
11325 new_phytype);
11326 else if ((sc->sc_phytype != WMPHY_UNKNOWN)
11327 && (sc->sc_phytype != new_phytype))
			aprint_error_dev(dev, "Previously assumed PHY "
			    "type(%u) was incorrect. New PHY type = %u\n",
11330 sc->sc_phytype, new_phytype);
11331
11332 if ((mii->mii_readreg != NULL) &&
11333 (new_phytype == WMPHY_UNKNOWN))
11334 aprint_error_dev(dev, "PHY type is still unknown.\n");
11335
11336 if ((mii->mii_readreg != NULL) &&
11337 (mii->mii_readreg != new_readreg))
11338 aprint_error_dev(dev, "Previously assumed PHY "
11339 "read/write function was incorrect.\n");
11340 }
11341
11342 /* Update now */
11343 sc->sc_phytype = new_phytype;
11344 mii->mii_readreg = new_readreg;
11345 mii->mii_writereg = new_writereg;
11346 if (new_readreg == wm_gmii_hv_readreg) {
11347 sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
11348 sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
11349 } else if (new_readreg == wm_sgmii_readreg) {
11350 sc->phy.readreg_locked = wm_sgmii_readreg_locked;
11351 sc->phy.writereg_locked = wm_sgmii_writereg_locked;
11352 } else if (new_readreg == wm_gmii_i82544_readreg) {
11353 sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
11354 sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
11355 }
11356 }
11357
11358 /*
11359 * wm_get_phy_id_82575:
11360 *
11361 * Return PHY ID. Return -1 if it failed.
11362 */
11363 static int
11364 wm_get_phy_id_82575(struct wm_softc *sc)
11365 {
11366 uint32_t reg;
11367 int phyid = -1;
11368
11369 /* XXX */
11370 if ((sc->sc_flags & WM_F_SGMII) == 0)
11371 return -1;
11372
11373 if (wm_sgmii_uses_mdio(sc)) {
11374 switch (sc->sc_type) {
11375 case WM_T_82575:
11376 case WM_T_82576:
11377 reg = CSR_READ(sc, WMREG_MDIC);
11378 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
11379 break;
11380 case WM_T_82580:
11381 case WM_T_I350:
11382 case WM_T_I354:
11383 case WM_T_I210:
11384 case WM_T_I211:
11385 reg = CSR_READ(sc, WMREG_MDICNFG);
11386 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
11387 break;
11388 default:
11389 return -1;
11390 }
11391 }
11392
11393 return phyid;
11394 }
11395
11396 /*
11397 * wm_gmii_mediainit:
11398 *
11399 * Initialize media for use on 1000BASE-T devices.
11400 */
11401 static void
11402 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
11403 {
11404 device_t dev = sc->sc_dev;
11405 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
11406 struct mii_data *mii = &sc->sc_mii;
11407
11408 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11409 device_xname(sc->sc_dev), __func__));
11410
11411 /* We have GMII. */
11412 sc->sc_flags |= WM_F_HAS_MII;
11413
11414 if (sc->sc_type == WM_T_80003)
11415 sc->sc_tipg = TIPG_1000T_80003_DFLT;
11416 else
11417 sc->sc_tipg = TIPG_1000T_DFLT;
11418
11419 /*
11420 * Let the chip set speed/duplex on its own based on
11421 * signals from the PHY.
11422 * XXXbouyer - I'm not sure this is right for the 80003,
11423 * the em driver only sets CTRL_SLU here - but it seems to work.
11424 */
11425 sc->sc_ctrl |= CTRL_SLU;
11426 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11427
11428 /* Initialize our media structures and probe the GMII. */
11429 mii->mii_ifp = ifp;
11430
11431 mii->mii_statchg = wm_gmii_statchg;
11432
11433 /* get PHY control from SMBus to PCIe */
11434 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
11435 || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
11436 || (sc->sc_type == WM_T_PCH_CNP))
11437 wm_init_phy_workarounds_pchlan(sc);
11438
11439 wm_gmii_reset(sc);
11440
11441 sc->sc_ethercom.ec_mii = &sc->sc_mii;
11442 ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
11443 wm_gmii_mediastatus, sc->sc_core_lock);
11444
11445 /* Setup internal SGMII PHY for SFP */
11446 wm_sgmii_sfp_preconfig(sc);
11447
11448 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
11449 || (sc->sc_type == WM_T_82580)
11450 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
11451 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
11452 if ((sc->sc_flags & WM_F_SGMII) == 0) {
11453 /* Attach only one port */
11454 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
11455 MII_OFFSET_ANY, MIIF_DOPAUSE);
11456 } else {
11457 int i, id;
11458 uint32_t ctrl_ext;
11459
11460 id = wm_get_phy_id_82575(sc);
11461 if (id != -1) {
11462 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
11463 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
11464 }
11465 if ((id == -1)
11466 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
11467 /* Power on sgmii phy if it is disabled */
11468 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11469 CSR_WRITE(sc, WMREG_CTRL_EXT,
11470 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
11471 CSR_WRITE_FLUSH(sc);
11472 delay(300*1000); /* XXX too long */
11473
11474 /*
				 * Scan PHY addresses 1 to 7.
				 *
				 * An I2C access failure sets the ERROR bit
				 * in the I2C register, so suppress the error
				 * message while scanning.
11480 */
11481 sc->phy.no_errprint = true;
11482 for (i = 1; i < 8; i++)
11483 mii_attach(sc->sc_dev, &sc->sc_mii,
11484 0xffffffff, i, MII_OFFSET_ANY,
11485 MIIF_DOPAUSE);
11486 sc->phy.no_errprint = false;
11487
11488 /* Restore previous sfp cage power state */
11489 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
11490 }
11491 }
11492 } else
11493 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11494 MII_OFFSET_ANY, MIIF_DOPAUSE);
11495
11496 /*
	 * If the MAC is PCH2 or newer and it failed to detect the MII PHY,
	 * call wm_set_mdio_slow_mode_hv() as a workaround and retry.
11499 */
11500 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
11501 || (sc->sc_type == WM_T_PCH_SPT)
11502 || (sc->sc_type == WM_T_PCH_CNP))
11503 && (LIST_FIRST(&mii->mii_phys) == NULL)) {
11504 wm_set_mdio_slow_mode_hv(sc);
11505 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11506 MII_OFFSET_ANY, MIIF_DOPAUSE);
11507 }
11508
11509 /*
11510 * (For ICH8 variants)
11511 * If PHY detection failed, use BM's r/w function and retry.
11512 */
11513 if (LIST_FIRST(&mii->mii_phys) == NULL) {
11514 /* if failed, retry with *_bm_* */
11515 aprint_verbose_dev(dev, "Assumed PHY access function "
11516 "(type = %d) might be incorrect. Use BM and retry.\n",
11517 sc->sc_phytype);
11518 sc->sc_phytype = WMPHY_BM;
11519 mii->mii_readreg = wm_gmii_bm_readreg;
11520 mii->mii_writereg = wm_gmii_bm_writereg;
11521
11522 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11523 MII_OFFSET_ANY, MIIF_DOPAUSE);
11524 }
11525
11526 if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
11528 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
11529 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
11530 sc->sc_phytype = WMPHY_NONE;
11531 } else {
11532 struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
11533
11534 /*
		 * PHY found! Check the PHY type again with the second call
		 * of wm_gmii_setup_phytype().
11537 */
11538 wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
11539 child->mii_mpd_model);
11540
11541 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
11542 }
11543 }
11544
11545 /*
11546 * wm_gmii_mediachange: [ifmedia interface function]
11547 *
11548 * Set hardware to newly-selected media on a 1000BASE-T device.
11549 */
11550 static int
11551 wm_gmii_mediachange(struct ifnet *ifp)
11552 {
11553 struct wm_softc *sc = ifp->if_softc;
11554 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
11555 uint32_t reg;
11556 int rc;
11557
11558 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11559 device_xname(sc->sc_dev), __func__));
11560
11561 KASSERT(mutex_owned(sc->sc_core_lock));
11562
11563 if ((sc->sc_if_flags & IFF_UP) == 0)
11564 return 0;
11565
11566 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
11567 if ((sc->sc_type == WM_T_82580)
11568 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
11569 || (sc->sc_type == WM_T_I211)) {
11570 reg = CSR_READ(sc, WMREG_PHPM);
11571 reg &= ~PHPM_GO_LINK_D;
11572 CSR_WRITE(sc, WMREG_PHPM, reg);
11573 }
11574
11575 /* Disable D0 LPLU. */
11576 wm_lplu_d0_disable(sc);
11577
11578 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
11579 sc->sc_ctrl |= CTRL_SLU;
11580 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
11581 || (sc->sc_type > WM_T_82543)) {
11582 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
11583 } else {
11584 sc->sc_ctrl &= ~CTRL_ASDE;
11585 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
11586 if (ife->ifm_media & IFM_FDX)
11587 sc->sc_ctrl |= CTRL_FD;
11588 switch (IFM_SUBTYPE(ife->ifm_media)) {
11589 case IFM_10_T:
11590 sc->sc_ctrl |= CTRL_SPEED_10;
11591 break;
11592 case IFM_100_TX:
11593 sc->sc_ctrl |= CTRL_SPEED_100;
11594 break;
11595 case IFM_1000_T:
11596 sc->sc_ctrl |= CTRL_SPEED_1000;
11597 break;
11598 case IFM_NONE:
11599 /* There is no specific setting for IFM_NONE */
11600 break;
11601 default:
11602 panic("wm_gmii_mediachange: bad media 0x%x",
11603 ife->ifm_media);
11604 }
11605 }
11606 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11607 CSR_WRITE_FLUSH(sc);
11608
11609 if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
11610 wm_serdes_mediachange(ifp);
11611
11612 if (sc->sc_type <= WM_T_82543)
11613 wm_gmii_reset(sc);
11614 else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
11615 && ((sc->sc_flags & WM_F_SGMII) != 0)) {
		/* Allow time for the SFP cage to power up the PHY */
11617 delay(300 * 1000);
11618 wm_gmii_reset(sc);
11619 }
11620
11621 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
11622 return 0;
11623 return rc;
11624 }
11625
11626 /*
11627 * wm_gmii_mediastatus: [ifmedia interface function]
11628 *
11629 * Get the current interface media status on a 1000BASE-T device.
11630 */
11631 static void
11632 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
11633 {
11634 struct wm_softc *sc = ifp->if_softc;
11635
11636 KASSERT(mutex_owned(sc->sc_core_lock));
11637
11638 ether_mediastatus(ifp, ifmr);
11639 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
11640 | sc->sc_flowflags;
11641 }
11642
11643 #define MDI_IO CTRL_SWDPIN(2)
11644 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
11645 #define MDI_CLK CTRL_SWDPIN(3)
11646
11647 static void
11648 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
11649 {
11650 uint32_t i, v;
11651
11652 v = CSR_READ(sc, WMREG_CTRL);
11653 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
11654 v |= MDI_DIR | CTRL_SWDPIO(3);
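	/*
	 * Bit-bang the value out MSB first: place each bit on MDI_IO,
	 * then pulse MDI_CLK high and low, holding each phase for 10us.
	 */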
11655
11656 for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
11657 if (data & i)
11658 v |= MDI_IO;
11659 else
11660 v &= ~MDI_IO;
11661 CSR_WRITE(sc, WMREG_CTRL, v);
11662 CSR_WRITE_FLUSH(sc);
11663 delay(10);
11664 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11665 CSR_WRITE_FLUSH(sc);
11666 delay(10);
11667 CSR_WRITE(sc, WMREG_CTRL, v);
11668 CSR_WRITE_FLUSH(sc);
11669 delay(10);
11670 }
11671 }
11672
11673 static uint16_t
11674 wm_i82543_mii_recvbits(struct wm_softc *sc)
11675 {
11676 uint32_t v, i;
11677 uint16_t data = 0;
11678
11679 v = CSR_READ(sc, WMREG_CTRL);
11680 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
11681 v |= CTRL_SWDPIO(3);
11682
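	/*
	 * MDI_IO is now an input; clock one turnaround cycle before
	 * shifting in the 16 data bits MSB first below.
	 */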
11683 CSR_WRITE(sc, WMREG_CTRL, v);
11684 CSR_WRITE_FLUSH(sc);
11685 delay(10);
11686 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11687 CSR_WRITE_FLUSH(sc);
11688 delay(10);
11689 CSR_WRITE(sc, WMREG_CTRL, v);
11690 CSR_WRITE_FLUSH(sc);
11691 delay(10);
11692
11693 for (i = 0; i < 16; i++) {
11694 data <<= 1;
11695 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11696 CSR_WRITE_FLUSH(sc);
11697 delay(10);
11698 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
11699 data |= 1;
11700 CSR_WRITE(sc, WMREG_CTRL, v);
11701 CSR_WRITE_FLUSH(sc);
11702 delay(10);
11703 }
11704
11705 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11706 CSR_WRITE_FLUSH(sc);
11707 delay(10);
11708 CSR_WRITE(sc, WMREG_CTRL, v);
11709 CSR_WRITE_FLUSH(sc);
11710 delay(10);
11711
11712 return data;
11713 }
11714
11715 #undef MDI_IO
11716 #undef MDI_DIR
11717 #undef MDI_CLK
11718
11719 /*
11720 * wm_gmii_i82543_readreg: [mii interface function]
11721 *
11722 * Read a PHY register on the GMII (i82543 version).
11723 */
11724 static int
11725 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
11726 {
11727 struct wm_softc *sc = device_private(dev);
11728
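	/*
	 * Send a 32-bit preamble of ones, then the 14-bit read command
	 * (start, opcode, PHY and register address); the PHY answers
	 * with the 16-bit value collected by wm_i82543_mii_recvbits().
	 */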
11729 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
11730 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
11731 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
11732 *val = wm_i82543_mii_recvbits(sc) & 0xffff;
11733
11734 DPRINTF(sc, WM_DEBUG_GMII,
11735 ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
11736 device_xname(dev), phy, reg, *val));
11737
11738 return 0;
11739 }
11740
11741 /*
11742 * wm_gmii_i82543_writereg: [mii interface function]
11743 *
11744 * Write a PHY register on the GMII (i82543 version).
11745 */
11746 static int
11747 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
11748 {
11749 struct wm_softc *sc = device_private(dev);
11750
11751 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
11752 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
11753 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
11754 (MII_COMMAND_START << 30), 32);
11755
11756 return 0;
11757 }
11758
11759 /*
11760 * wm_gmii_mdic_readreg: [mii interface function]
11761 *
11762 * Read a PHY register on the GMII.
11763 */
11764 static int
11765 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
11766 {
11767 struct wm_softc *sc = device_private(dev);
11768 uint32_t mdic = 0;
11769 int i;
11770
11771 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
11772 && (reg > MII_ADDRMASK)) {
11773 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11774 __func__, sc->sc_phytype, reg);
11775 reg &= MII_ADDRMASK;
11776 }
11777
11778 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
11779 MDIC_REGADD(reg));
11780
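	/*
	 * Poll for completion: the hardware sets MDIC_READY when the
	 * MDIO frame has finished, so spin for up to
	 * WM_GEN_POLL_TIMEOUT * 3 iterations of 50us each.
	 */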
11781 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
11782 delay(50);
11783 mdic = CSR_READ(sc, WMREG_MDIC);
11784 if (mdic & MDIC_READY)
11785 break;
11786 }
11787
11788 if ((mdic & MDIC_READY) == 0) {
11789 DPRINTF(sc, WM_DEBUG_GMII,
11790 ("%s: MDIC read timed out: phy %d reg %d\n",
11791 device_xname(dev), phy, reg));
11792 return ETIMEDOUT;
11793 } else if (mdic & MDIC_E) {
11794 /* This is normal if no PHY is present. */
11795 DPRINTF(sc, WM_DEBUG_GMII,
11796 ("%s: MDIC read error: phy %d reg %d\n",
11797 device_xname(sc->sc_dev), phy, reg));
11798 return -1;
11799 } else
11800 *val = MDIC_DATA(mdic);
11801
11802 /*
11803 * Allow some time after each MDIC transaction to avoid
11804 * reading duplicate data in the next MDIC transaction.
11805 */
11806 if (sc->sc_type == WM_T_PCH2)
11807 delay(100);
11808
11809 return 0;
11810 }
11811
11812 /*
11813 * wm_gmii_mdic_writereg: [mii interface function]
11814 *
11815 * Write a PHY register on the GMII.
11816 */
11817 static int
11818 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
11819 {
11820 struct wm_softc *sc = device_private(dev);
11821 uint32_t mdic = 0;
11822 int i;
11823
11824 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
11825 && (reg > MII_ADDRMASK)) {
11826 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11827 __func__, sc->sc_phytype, reg);
11828 reg &= MII_ADDRMASK;
11829 }
11830
11831 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
11832 MDIC_REGADD(reg) | MDIC_DATA(val));
11833
11834 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
11835 delay(50);
11836 mdic = CSR_READ(sc, WMREG_MDIC);
11837 if (mdic & MDIC_READY)
11838 break;
11839 }
11840
11841 if ((mdic & MDIC_READY) == 0) {
11842 DPRINTF(sc, WM_DEBUG_GMII,
11843 ("%s: MDIC write timed out: phy %d reg %d\n",
11844 device_xname(dev), phy, reg));
11845 return ETIMEDOUT;
11846 } else if (mdic & MDIC_E) {
11847 DPRINTF(sc, WM_DEBUG_GMII,
11848 ("%s: MDIC write error: phy %d reg %d\n",
11849 device_xname(dev), phy, reg));
11850 return -1;
11851 }
11852
11853 /*
11854 * Allow some time after each MDIC transaction to avoid
11855 * reading duplicate data in the next MDIC transaction.
11856 */
11857 if (sc->sc_type == WM_T_PCH2)
11858 delay(100);
11859
11860 return 0;
11861 }
11862
11863 /*
11864 * wm_gmii_i82544_readreg: [mii interface function]
11865 *
11866 * Read a PHY register on the GMII.
11867 */
11868 static int
11869 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
11870 {
11871 struct wm_softc *sc = device_private(dev);
11872 int rv;
11873
11874 rv = sc->phy.acquire(sc);
11875 if (rv != 0) {
11876 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11877 return rv;
11878 }
11879
11880 rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
11881
11882 sc->phy.release(sc);
11883
11884 return rv;
11885 }
11886
11887 static int
11888 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
11889 {
11890 struct wm_softc *sc = device_private(dev);
11891 int rv;
11892
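	/*
	 * IGP PHYs place registers above BME1000_MAX_MULTI_PAGE_REG on
	 * pages: the full register number is written to
	 * IGPHY_PAGE_SELECT first and the low bits (reg & MII_ADDRMASK)
	 * then address the register within that page.
	 */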
11893 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11894 switch (sc->sc_phytype) {
11895 case WMPHY_IGP:
11896 case WMPHY_IGP_2:
11897 case WMPHY_IGP_3:
11898 rv = wm_gmii_mdic_writereg(dev, phy,
11899 IGPHY_PAGE_SELECT, reg);
11900 if (rv != 0)
11901 return rv;
11902 break;
11903 default:
11904 #ifdef WM_DEBUG
11905 device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
11906 __func__, sc->sc_phytype, reg);
11907 #endif
11908 break;
11909 }
11910 }
11911
11912 return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11913 }
11914
11915 /*
11916 * wm_gmii_i82544_writereg: [mii interface function]
11917 *
11918 * Write a PHY register on the GMII.
11919 */
11920 static int
11921 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
11922 {
11923 struct wm_softc *sc = device_private(dev);
11924 int rv;
11925
11926 rv = sc->phy.acquire(sc);
11927 if (rv != 0) {
11928 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11929 return rv;
11930 }
11931
11932 rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
11933 sc->phy.release(sc);
11934
11935 return rv;
11936 }
11937
11938 static int
11939 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
11940 {
11941 struct wm_softc *sc = device_private(dev);
11942 int rv;
11943
11944 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11945 switch (sc->sc_phytype) {
11946 case WMPHY_IGP:
11947 case WMPHY_IGP_2:
11948 case WMPHY_IGP_3:
11949 rv = wm_gmii_mdic_writereg(dev, phy,
11950 IGPHY_PAGE_SELECT, reg);
11951 if (rv != 0)
11952 return rv;
11953 break;
11954 default:
11955 #ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
11957 __func__, sc->sc_phytype, reg);
11958 #endif
11959 break;
11960 }
11961 }
11962
11963 return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11964 }
11965
11966 /*
11967 * wm_gmii_i80003_readreg: [mii interface function]
11968 *
 *	Read a PHY register on the Kumeran bus (80003).
11970 * This could be handled by the PHY layer if we didn't have to lock the
11971 * resource ...
11972 */
11973 static int
11974 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
11975 {
11976 struct wm_softc *sc = device_private(dev);
11977 int page_select;
11978 uint16_t temp, temp2;
11979 int rv;
11980
11981 if (phy != 1) /* Only one PHY on kumeran bus */
11982 return -1;
11983
11984 rv = sc->phy.acquire(sc);
11985 if (rv != 0) {
11986 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11987 return rv;
11988 }
11989
11990 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11991 page_select = GG82563_PHY_PAGE_SELECT;
11992 else {
11993 /*
11994 * Use Alternative Page Select register to access registers
11995 * 30 and 31.
11996 */
11997 page_select = GG82563_PHY_PAGE_SELECT_ALT;
11998 }
11999 temp = reg >> GG82563_PAGE_SHIFT;
12000 if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
12001 goto out;
12002
12003 if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
12004 /*
		 * Wait an extra 200us to work around a bug in the ready bit
		 * of the MDIC register.
12007 */
12008 delay(200);
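		/* Read back the page select to verify it latched. */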
12009 rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
12010 if ((rv != 0) || (temp2 != temp)) {
12011 device_printf(dev, "%s failed\n", __func__);
12012 rv = -1;
12013 goto out;
12014 }
12015 delay(200);
12016 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
12017 delay(200);
12018 } else
12019 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
12020
12021 out:
12022 sc->phy.release(sc);
12023 return rv;
12024 }
12025
12026 /*
12027 * wm_gmii_i80003_writereg: [mii interface function]
12028 *
 *	Write a PHY register on the Kumeran bus (80003).
12030 * This could be handled by the PHY layer if we didn't have to lock the
12031 * resource ...
12032 */
12033 static int
12034 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
12035 {
12036 struct wm_softc *sc = device_private(dev);
12037 int page_select, rv;
12038 uint16_t temp, temp2;
12039
12040 if (phy != 1) /* Only one PHY on kumeran bus */
12041 return -1;
12042
12043 rv = sc->phy.acquire(sc);
12044 if (rv != 0) {
12045 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12046 return rv;
12047 }
12048
12049 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
12050 page_select = GG82563_PHY_PAGE_SELECT;
12051 else {
12052 /*
12053 * Use Alternative Page Select register to access registers
12054 * 30 and 31.
12055 */
12056 page_select = GG82563_PHY_PAGE_SELECT_ALT;
12057 }
12058 temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
12059 if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
12060 goto out;
12061
12062 if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
12063 /*
		 * Wait an extra 200us to work around a bug in the ready bit
		 * of the MDIC register.
12066 */
12067 delay(200);
12068 rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
12069 if ((rv != 0) || (temp2 != temp)) {
12070 device_printf(dev, "%s failed\n", __func__);
12071 rv = -1;
12072 goto out;
12073 }
12074 delay(200);
12075 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
12076 delay(200);
12077 } else
12078 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
12079
12080 out:
12081 sc->phy.release(sc);
12082 return rv;
12083 }
12084
12085 /*
12086 * wm_gmii_bm_readreg: [mii interface function]
12087 *
 *	Read a PHY register on the BM PHY.
12089 * This could be handled by the PHY layer if we didn't have to lock the
12090 * resource ...
12091 */
12092 static int
12093 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
12094 {
12095 struct wm_softc *sc = device_private(dev);
12096 uint16_t page = reg >> BME1000_PAGE_SHIFT;
12097 int rv;
12098
12099 rv = sc->phy.acquire(sc);
12100 if (rv != 0) {
12101 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12102 return rv;
12103 }
12104
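	/*
	 * On BM PHYs (other than 82574/82583), registers on pages >=
	 * 768, register 25 on page 0, and register 31 are reachable
	 * only at PHY address 1, so redirect such accesses there.
	 */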
12105 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
12106 phy = ((page >= 768) || ((page == 0) && (reg == 25))
12107 || (reg == 31)) ? 1 : phy;
12108 /* Page 800 works differently than the rest so it has its own func */
12109 if (page == BM_WUC_PAGE) {
12110 rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
12111 goto release;
12112 }
12113
12114 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
12115 if ((phy == 1) && (sc->sc_type != WM_T_82574)
12116 && (sc->sc_type != WM_T_82583))
12117 rv = wm_gmii_mdic_writereg(dev, phy,
12118 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
12119 else
12120 rv = wm_gmii_mdic_writereg(dev, phy,
12121 BME1000_PHY_PAGE_SELECT, page);
12122 if (rv != 0)
12123 goto release;
12124 }
12125
12126 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
12127
12128 release:
12129 sc->phy.release(sc);
12130 return rv;
12131 }
12132
12133 /*
12134 * wm_gmii_bm_writereg: [mii interface function]
12135 *
 *	Write a PHY register on the BM PHY.
12137 * This could be handled by the PHY layer if we didn't have to lock the
12138 * resource ...
12139 */
12140 static int
12141 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
12142 {
12143 struct wm_softc *sc = device_private(dev);
12144 uint16_t page = reg >> BME1000_PAGE_SHIFT;
12145 int rv;
12146
12147 rv = sc->phy.acquire(sc);
12148 if (rv != 0) {
12149 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12150 return rv;
12151 }
12152
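	/*
	 * On BM PHYs (other than 82574/82583), registers on pages >=
	 * 768, register 25 on page 0, and register 31 are reachable
	 * only at PHY address 1, so redirect such accesses there.
	 */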
12153 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
12154 phy = ((page >= 768) || ((page == 0) && (reg == 25))
12155 || (reg == 31)) ? 1 : phy;
12156 /* Page 800 works differently than the rest so it has its own func */
12157 if (page == BM_WUC_PAGE) {
12158 rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
12159 goto release;
12160 }
12161
12162 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
12163 if ((phy == 1) && (sc->sc_type != WM_T_82574)
12164 && (sc->sc_type != WM_T_82583))
12165 rv = wm_gmii_mdic_writereg(dev, phy,
12166 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
12167 else
12168 rv = wm_gmii_mdic_writereg(dev, phy,
12169 BME1000_PHY_PAGE_SELECT, page);
12170 if (rv != 0)
12171 goto release;
12172 }
12173
12174 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
12175
12176 release:
12177 sc->phy.release(sc);
12178 return rv;
12179 }
12180
12181 /*
12182 * wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
12183 * @dev: pointer to the HW structure
 * @phy_regp: pointer to store original contents of BM_WUC_ENABLE_REG
12185 *
12186 * Assumes semaphore already acquired and phy_reg points to a valid memory
12187 * address to store contents of the BM_WUC_ENABLE_REG register.
12188 */
12189 static int
12190 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
12191 {
12192 #ifdef WM_DEBUG
12193 struct wm_softc *sc = device_private(dev);
12194 #endif
12195 uint16_t temp;
12196 int rv;
12197
12198 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
12199 device_xname(dev), __func__));
12200
12201 if (!phy_regp)
12202 return -1;
12203
12204 /* All page select, port ctrl and wakeup registers use phy address 1 */
12205
12206 /* Select Port Control Registers page */
12207 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12208 BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
12209 if (rv != 0)
12210 return rv;
12211
12212 /* Read WUCE and save it */
12213 rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
12214 if (rv != 0)
12215 return rv;
12216
12217 /* Enable both PHY wakeup mode and Wakeup register page writes.
12218 * Prevent a power state change by disabling ME and Host PHY wakeup.
12219 */
12220 temp = *phy_regp;
12221 temp |= BM_WUC_ENABLE_BIT;
12222 temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
12223
12224 if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
12225 return rv;
12226
12227 /* Select Host Wakeup Registers page - caller now able to write
12228 * registers on the Wakeup registers page
12229 */
12230 return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12231 BM_WUC_PAGE << IGP3_PAGE_SHIFT);
12232 }
12233
12234 /*
12235 * wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
12236 * @dev: pointer to the HW structure
 * @phy_regp: pointer to original contents of BM_WUC_ENABLE_REG
12238 *
12239 * Restore BM_WUC_ENABLE_REG to its original value.
12240 *
 * Assumes semaphore already acquired and *phy_regp is the contents of the
 * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by the
 * caller.
12244 */
12245 static int
12246 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
12247 {
12248 #ifdef WM_DEBUG
12249 struct wm_softc *sc = device_private(dev);
12250 #endif
12251
12252 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
12253 device_xname(dev), __func__));
12254
12255 if (!phy_regp)
12256 return -1;
12257
12258 /* Select Port Control Registers page */
12259 wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12260 BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
12261
12262 /* Restore 769.17 to its original value */
12263 wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
12264
12265 return 0;
12266 }
12267
12268 /*
12269 * wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
 * @dev: pointer to the HW structure
12271 * @offset: register offset to be read or written
12272 * @val: pointer to the data to read or write
12273 * @rd: determines if operation is read or write
12274 * @page_set: BM_WUC_PAGE already set and access enabled
12275 *
12276 * Read the PHY register at offset and store the retrieved information in
12277 * data, or write data to PHY register at offset. Note the procedure to
12278 * access the PHY wakeup registers is different than reading the other PHY
12279 * registers. It works as such:
12280 * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1
 *	2) Set page to 800 for host (801 for manageability)
12282 * 3) Write the address using the address opcode (0x11)
12283 * 4) Read or write the data using the data opcode (0x12)
12284 * 5) Restore 769.17.2 to its original value
12285 *
12286 * Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
12287 * step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
12288 *
12289 * Assumes semaphore is already acquired. When page_set==TRUE, assumes
 * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
 * is responsible for calls to wm_{enable,disable}_phy_wakeup_reg_access_bm()).
12292 */
12293 static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
12295 bool page_set)
12296 {
12297 struct wm_softc *sc = device_private(dev);
12298 uint16_t regnum = BM_PHY_REG_NUM(offset);
12299 uint16_t page = BM_PHY_REG_PAGE(offset);
12300 uint16_t wuce;
12301 int rv = 0;
12302
12303 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12304 device_xname(dev), __func__));
12305 /* XXX Gig must be disabled for MDIO accesses to page 800 */
12306 if ((sc->sc_type == WM_T_PCH)
12307 && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
12308 device_printf(dev,
12309 "Attempting to access page %d while gig enabled.\n", page);
12310 }
12311
12312 if (!page_set) {
12313 /* Enable access to PHY wakeup registers */
12314 rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
12315 if (rv != 0) {
12316 device_printf(dev,
12317 "%s: Could not enable PHY wakeup reg access\n",
12318 __func__);
12319 return rv;
12320 }
12321 }
12322 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
12323 device_xname(sc->sc_dev), __func__, page, regnum));
12324
12325 /*
	 * Access the PHY wakeup register (steps 3 and 4 in the function
	 * comment above).
12328 */
12329
12330 /* Write the Wakeup register page offset value using opcode 0x11 */
12331 rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
12332 if (rv != 0)
12333 return rv;
12334
12335 if (rd) {
12336 /* Read the Wakeup register page value using opcode 0x12 */
12337 rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
12338 } else {
12339 /* Write the Wakeup register page value using opcode 0x12 */
12340 rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
12341 }
12342 if (rv != 0)
12343 return rv;
12344
12345 if (!page_set)
12346 rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
12347
12348 return rv;
12349 }
12350
12351 /*
12352 * wm_gmii_hv_readreg: [mii interface function]
12353 *
 *	Read a PHY register on the HV PHY (PCH and newer).
12355 * This could be handled by the PHY layer if we didn't have to lock the
12356 * resource ...
12357 */
12358 static int
12359 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
12360 {
12361 struct wm_softc *sc = device_private(dev);
12362 int rv;
12363
12364 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12365 device_xname(dev), __func__));
12366
12367 rv = sc->phy.acquire(sc);
12368 if (rv != 0) {
12369 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12370 return rv;
12371 }
12372
12373 rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
12374 sc->phy.release(sc);
12375 return rv;
12376 }
12377
12378 static int
12379 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12380 {
12381 uint16_t page = BM_PHY_REG_PAGE(reg);
12382 uint16_t regnum = BM_PHY_REG_NUM(reg);
12383 int rv;
12384
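	/*
	 * reg encodes both the page (BM_PHY_REG_PAGE) and the register
	 * number within it (BM_PHY_REG_NUM); registers on pages at or
	 * above HV_INTC_FC_PAGE_START live at PHY address 1.
	 */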
12385 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
12386
12387 /* Page 800 works differently than the rest so it has its own func */
12388 if (page == BM_WUC_PAGE)
12389 return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
12390
12391 /*
	 * Pages lower than 768 work differently than the rest, so such
	 * accesses would need their own function; fail for now.
12394 */
12395 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
12396 device_printf(dev, "gmii_hv_readreg!!!\n");
12397 return -1;
12398 }
12399
12400 /*
12401 * XXX I21[789] documents say that the SMBus Address register is at
12402 * PHY address 01, Page 0 (not 768), Register 26.
12403 */
12404 if (page == HV_INTC_FC_PAGE_START)
12405 page = 0;
12406
12407 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
12408 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12409 page << BME1000_PAGE_SHIFT);
12410 if (rv != 0)
12411 return rv;
12412 }
12413
12414 return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
12415 }
12416
12417 /*
12418 * wm_gmii_hv_writereg: [mii interface function]
12419 *
 *	Write a PHY register on the HV PHY (PCH and newer).
12421 * This could be handled by the PHY layer if we didn't have to lock the
12422 * resource ...
12423 */
12424 static int
12425 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
12426 {
12427 struct wm_softc *sc = device_private(dev);
12428 int rv;
12429
12430 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12431 device_xname(dev), __func__));
12432
12433 rv = sc->phy.acquire(sc);
12434 if (rv != 0) {
12435 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12436 return rv;
12437 }
12438
12439 rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
12440 sc->phy.release(sc);
12441
12442 return rv;
12443 }
12444
12445 static int
12446 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12447 {
12448 struct wm_softc *sc = device_private(dev);
12449 uint16_t page = BM_PHY_REG_PAGE(reg);
12450 uint16_t regnum = BM_PHY_REG_NUM(reg);
12451 int rv;
12452
12453 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
12454
12455 /* Page 800 works differently than the rest so it has its own func */
12456 if (page == BM_WUC_PAGE)
12457 return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
12458 false);
12459
12460 /*
	 * Pages lower than 768 work differently than the rest, so such
	 * accesses would need their own function; fail for now.
12463 */
12464 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
12465 device_printf(dev, "gmii_hv_writereg!!!\n");
12466 return -1;
12467 }
12468
12469 {
12470 /*
12471 * XXX I21[789] documents say that the SMBus Address register
12472 * is at PHY address 01, Page 0 (not 768), Register 26.
12473 */
12474 if (page == HV_INTC_FC_PAGE_START)
12475 page = 0;
12476
12477 /*
12478 * XXX Workaround MDIO accesses being disabled after entering
12479 * IEEE Power Down (whenever bit 11 of the PHY control
12480 * register is set)
12481 */
12482 if (sc->sc_phytype == WMPHY_82578) {
12483 struct mii_softc *child;
12484
12485 child = LIST_FIRST(&sc->sc_mii.mii_phys);
12486 if ((child != NULL) && (child->mii_mpd_rev >= 1)
12487 && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
12488 && ((val & (1 << 11)) != 0)) {
12489 device_printf(dev, "XXX need workaround\n");
12490 }
12491 }
12492
12493 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
12494 rv = wm_gmii_mdic_writereg(dev, 1,
12495 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
12496 if (rv != 0)
12497 return rv;
12498 }
12499 }
12500
12501 return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
12502 }
12503
12504 /*
12505 * wm_gmii_82580_readreg: [mii interface function]
12506 *
12507 * Read a PHY register on the 82580 and I350.
12508 * This could be handled by the PHY layer if we didn't have to lock the
12509 * resource ...
12510 */
12511 static int
12512 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
12513 {
12514 struct wm_softc *sc = device_private(dev);
12515 int rv;
12516
12517 rv = sc->phy.acquire(sc);
12518 if (rv != 0) {
12519 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12520 return rv;
12521 }
12522
12523 #ifdef DIAGNOSTIC
12524 if (reg > MII_ADDRMASK) {
12525 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12526 __func__, sc->sc_phytype, reg);
12527 reg &= MII_ADDRMASK;
12528 }
12529 #endif
12530 rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
12531
12532 sc->phy.release(sc);
12533 return rv;
12534 }
12535
12536 /*
12537 * wm_gmii_82580_writereg: [mii interface function]
12538 *
12539 * Write a PHY register on the 82580 and I350.
12540 * This could be handled by the PHY layer if we didn't have to lock the
12541 * resource ...
12542 */
12543 static int
12544 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
12545 {
12546 struct wm_softc *sc = device_private(dev);
12547 int rv;
12548
12549 rv = sc->phy.acquire(sc);
12550 if (rv != 0) {
12551 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12552 return rv;
12553 }
12554
12555 #ifdef DIAGNOSTIC
12556 if (reg > MII_ADDRMASK) {
12557 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12558 __func__, sc->sc_phytype, reg);
12559 reg &= MII_ADDRMASK;
12560 }
12561 #endif
12562 rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
12563
12564 sc->phy.release(sc);
12565 return rv;
12566 }
12567
12568 /*
12569 * wm_gmii_gs40g_readreg: [mii interface function]
12570 *
 *	Read a PHY register on the I210 and I211.
12572 * This could be handled by the PHY layer if we didn't have to lock the
12573 * resource ...
12574 */
12575 static int
12576 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
12577 {
12578 struct wm_softc *sc = device_private(dev);
12579 int page, offset;
12580 int rv;
12581
12582 /* Acquire semaphore */
12583 rv = sc->phy.acquire(sc);
12584 if (rv != 0) {
12585 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12586 return rv;
12587 }
12588
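	/*
	 * The GS40G (I210/I211 internal) PHY encodes the page in the
	 * high bits of the register argument and the in-page offset in
	 * the low bits, so split them before the MDIC access.
	 */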
12589 /* Page select */
12590 page = reg >> GS40G_PAGE_SHIFT;
12591 rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
12592 if (rv != 0)
12593 goto release;
12594
12595 /* Read reg */
12596 offset = reg & GS40G_OFFSET_MASK;
12597 rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
12598
12599 release:
12600 sc->phy.release(sc);
12601 return rv;
12602 }
12603
12604 /*
12605 * wm_gmii_gs40g_writereg: [mii interface function]
12606 *
12607 * Write a PHY register on the I210 and I211.
12608 * This could be handled by the PHY layer if we didn't have to lock the
12609 * resource ...
12610 */
12611 static int
12612 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
12613 {
12614 struct wm_softc *sc = device_private(dev);
12615 uint16_t page;
12616 int offset, rv;
12617
12618 /* Acquire semaphore */
12619 rv = sc->phy.acquire(sc);
12620 if (rv != 0) {
12621 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12622 return rv;
12623 }
12624
12625 /* Page select */
12626 page = reg >> GS40G_PAGE_SHIFT;
12627 rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
12628 if (rv != 0)
12629 goto release;
12630
12631 /* Write reg */
12632 offset = reg & GS40G_OFFSET_MASK;
12633 rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
12634
12635 release:
12636 /* Release semaphore */
12637 sc->phy.release(sc);
12638 return rv;
12639 }
12640
12641 /*
12642 * wm_gmii_statchg: [mii interface function]
12643 *
12644 * Callback from MII layer when media changes.
12645 */
12646 static void
12647 wm_gmii_statchg(struct ifnet *ifp)
12648 {
12649 struct wm_softc *sc = ifp->if_softc;
12650 struct mii_data *mii = &sc->sc_mii;
12651
12652 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
12653 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
12654 sc->sc_fcrtl &= ~FCRTL_XONE;
12655
12656 /* Get flow control negotiation result. */
12657 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
12658 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
12659 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
12660 mii->mii_media_active &= ~IFM_ETH_FMASK;
12661 }
12662
12663 if (sc->sc_flowflags & IFM_FLOW) {
12664 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
12665 sc->sc_ctrl |= CTRL_TFCE;
12666 sc->sc_fcrtl |= FCRTL_XONE;
12667 }
12668 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
12669 sc->sc_ctrl |= CTRL_RFCE;
12670 }
12671
12672 if (mii->mii_media_active & IFM_FDX) {
12673 DPRINTF(sc, WM_DEBUG_LINK,
12674 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
12675 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
12676 } else {
12677 DPRINTF(sc, WM_DEBUG_LINK,
12678 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
12679 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
12680 }
12681
12682 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12683 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
12684 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
12685 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
12686 if (sc->sc_type == WM_T_80003) {
12687 switch (IFM_SUBTYPE(mii->mii_media_active)) {
12688 case IFM_1000_T:
12689 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
12690 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
12691 sc->sc_tipg = TIPG_1000T_80003_DFLT;
12692 break;
12693 default:
12694 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
12695 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
12696 sc->sc_tipg = TIPG_10_100_80003_DFLT;
12697 break;
12698 }
12699 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
12700 }
12701 }
12702
12703 /* kumeran related (80003, ICH* and PCH*) */
12704
12705 /*
12706 * wm_kmrn_readreg:
12707 *
12708 * Read a kumeran register
12709 */
12710 static int
12711 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
12712 {
12713 int rv;
12714
12715 if (sc->sc_type == WM_T_80003)
12716 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12717 else
12718 rv = sc->phy.acquire(sc);
12719 if (rv != 0) {
12720 device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
12721 __func__);
12722 return rv;
12723 }
12724
12725 rv = wm_kmrn_readreg_locked(sc, reg, val);
12726
12727 if (sc->sc_type == WM_T_80003)
12728 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12729 else
12730 sc->phy.release(sc);
12731
12732 return rv;
12733 }
12734
12735 static int
12736 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
12737 {
12738
12739 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
12740 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
12741 KUMCTRLSTA_REN);
12742 CSR_WRITE_FLUSH(sc);
12743 delay(2);
12744
12745 *val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
12746
12747 return 0;
12748 }
12749
12750 /*
12751 * wm_kmrn_writereg:
12752 *
12753 * Write a kumeran register
12754 */
12755 static int
12756 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
12757 {
12758 int rv;
12759
12760 if (sc->sc_type == WM_T_80003)
12761 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12762 else
12763 rv = sc->phy.acquire(sc);
12764 if (rv != 0) {
12765 device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
12766 __func__);
12767 return rv;
12768 }
12769
12770 rv = wm_kmrn_writereg_locked(sc, reg, val);
12771
12772 if (sc->sc_type == WM_T_80003)
12773 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12774 else
12775 sc->phy.release(sc);
12776
12777 return rv;
12778 }
12779
12780 static int
12781 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
12782 {
12783
12784 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
12785 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
12786
12787 return 0;
12788 }
12789
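/*
 * Illustrative sketch, not driver code: a read-modify-write of a
 * Kumeran register through the wrappers above.  The offset is a real
 * one used elsewhere in this file; KUM_EXAMPLE_BIT is hypothetical.
 */
#if 0
static void
wm_kmrn_rmw_example(struct wm_softc *sc)
{
	uint16_t kmval;

	if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &kmval) == 0) {
		kmval |= KUM_EXAMPLE_BIT;	/* hypothetical bit */
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, kmval);
	}
}
#endif
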
12790 /*
12791 * EMI register related (82579, WMPHY_I217(PCH2 and newer))
12792 * This access method is different from IEEE MMD.
12793 */
12794 static int
12795 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
12796 {
12797 struct wm_softc *sc = device_private(dev);
12798 int rv;
12799
12800 rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
12801 if (rv != 0)
12802 return rv;
12803
12804 if (rd)
12805 rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
12806 else
12807 rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
12808 return rv;
12809 }
12810
12811 static int
12812 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
12813 {
12814
12815 return wm_access_emi_reg_locked(dev, reg, val, true);
12816 }
12817
12818 static int
12819 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
12820 {
12821
12822 return wm_access_emi_reg_locked(dev, reg, &val, false);
12823 }
12824
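/*
 * Illustrative sketch, not driver code: the EMI helpers above are a
 * two-step indirect access (the EMI register number goes through
 * I82579_EMI_ADDR, the payload through I82579_EMI_DATA), so a locked
 * read reduces to a single call.  I82579_EMI_EXAMPLE is a hypothetical
 * register number.
 */
#if 0
static void
wm_emi_example(device_t dev)
{
	uint16_t emival;

	/* The caller must already hold the PHY semaphore. */
	wm_read_emi_reg_locked(dev, I82579_EMI_EXAMPLE, &emival);
}
#endif
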
12825 /* SGMII related */
12826
12827 /*
12828 * wm_sgmii_uses_mdio
12829 *
12830 * Check whether the transaction is to the internal PHY or the external
12831 * MDIO interface. Return true if it's MDIO.
12832 */
12833 static bool
12834 wm_sgmii_uses_mdio(struct wm_softc *sc)
12835 {
12836 uint32_t reg;
12837 bool ismdio = false;
12838
12839 switch (sc->sc_type) {
12840 case WM_T_82575:
12841 case WM_T_82576:
12842 reg = CSR_READ(sc, WMREG_MDIC);
12843 ismdio = ((reg & MDIC_DEST) != 0);
12844 break;
12845 case WM_T_82580:
12846 case WM_T_I350:
12847 case WM_T_I354:
12848 case WM_T_I210:
12849 case WM_T_I211:
12850 reg = CSR_READ(sc, WMREG_MDICNFG);
12851 ismdio = ((reg & MDICNFG_DEST) != 0);
12852 break;
12853 default:
12854 break;
12855 }
12856
12857 return ismdio;
12858 }
12859
12860 /* Setup internal SGMII PHY for SFP */
12861 static void
12862 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
12863 {
12864 uint16_t id1, id2, phyreg;
12865 int i, rv;
12866
12867 if (((sc->sc_flags & WM_F_SGMII) == 0)
12868 || ((sc->sc_flags & WM_F_SFP) == 0))
12869 return;
12870
12871 for (i = 0; i < MII_NPHY; i++) {
12872 sc->phy.no_errprint = true;
12873 rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
12874 if (rv != 0)
12875 continue;
12876 rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
12877 if (rv != 0)
12878 continue;
12879 if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
12880 continue;
12881 sc->phy.no_errprint = false;
12882
12883 sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
12884 phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
12885 phyreg |= ESSR_SGMII_WOC_COPPER;
12886 sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
12887 break;
12888 }
12890 }
12891
12892 /*
12893 * wm_sgmii_readreg: [mii interface function]
12894 *
12895 * Read a PHY register on the SGMII
12896 * This could be handled by the PHY layer if we didn't have to lock the
12897 * resource ...
12898 */
12899 static int
12900 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
12901 {
12902 struct wm_softc *sc = device_private(dev);
12903 int rv;
12904
12905 rv = sc->phy.acquire(sc);
12906 if (rv != 0) {
12907 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12908 return rv;
12909 }
12910
12911 rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
12912
12913 sc->phy.release(sc);
12914 return rv;
12915 }
12916
12917 static int
12918 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12919 {
12920 struct wm_softc *sc = device_private(dev);
12921 uint32_t i2ccmd;
12922 int i, rv = 0;
12923
12924 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
12925 | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
12926 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12927
12928 /* Poll the ready bit */
12929 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12930 delay(50);
12931 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12932 if (i2ccmd & I2CCMD_READY)
12933 break;
12934 }
12935 if ((i2ccmd & I2CCMD_READY) == 0) {
12936 device_printf(dev, "I2CCMD Read did not complete\n");
12937 rv = ETIMEDOUT;
12938 }
12939 if ((i2ccmd & I2CCMD_ERROR) != 0) {
12940 if (!sc->phy.no_errprint)
12941 device_printf(dev, "I2CCMD Error bit set\n");
12942 rv = EIO;
12943 }
12944
12945 *val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
12946
12947 return rv;
12948 }
12949
12950 /*
12951 * wm_sgmii_writereg: [mii interface function]
12952 *
12953 * Write a PHY register on the SGMII.
12954 * This could be handled by the PHY layer if we didn't have to lock the
12955 * resource ...
12956 */
12957 static int
12958 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
12959 {
12960 struct wm_softc *sc = device_private(dev);
12961 int rv;
12962
12963 rv = sc->phy.acquire(sc);
12964 if (rv != 0) {
12965 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12966 return rv;
12967 }
12968
12969 rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
12970
12971 sc->phy.release(sc);
12972
12973 return rv;
12974 }
12975
12976 static int
12977 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12978 {
12979 struct wm_softc *sc = device_private(dev);
12980 uint32_t i2ccmd;
12981 uint16_t swapdata;
12982 int rv = 0;
12983 int i;
12984
12985 /* Swap the data bytes for the I2C interface */
12986 swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
12987 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
12988 | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
12989 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12990
12991 /* Poll the ready bit */
12992 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12993 delay(50);
12994 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12995 if (i2ccmd & I2CCMD_READY)
12996 break;
12997 }
12998 if ((i2ccmd & I2CCMD_READY) == 0) {
12999 device_printf(dev, "I2CCMD Write did not complete\n");
13000 rv = ETIMEDOUT;
13001 }
13002 if ((i2ccmd & I2CCMD_ERROR) != 0) {
13003 device_printf(dev, "I2CCMD Error bit set\n");
13004 rv = EIO;
13005 }
13006
13007 return rv;
13008 }
13009
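/*
 * Note: the I2CCMD data field is most significant byte first on the
 * wire, so both directions byte-swap the 16-bit value.  Worked example
 * (a sketch, not driver code): writing 0x1234 places 0x3412 in the low
 * 16 bits of I2CCMD, and the read path above undoes the same swap.
 */
#if 0
static void
wm_i2ccmd_swap_example(void)
{
	uint16_t val = 0x1234;
	uint16_t swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);

	KASSERT(swapdata == 0x3412);
}
#endif
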
13010 /* TBI related */
13011
13012 static bool
13013 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
13014 {
13015 bool sig;
13016
13017 sig = ctrl & CTRL_SWDPIN(1);
13018
13019 /*
13020 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
13021 * detect a signal, 1 if they don't.
13022 */
13023 if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
13024 sig = !sig;
13025
13026 return sig;
13027 }
13028
13029 /*
13030 * wm_tbi_mediainit:
13031 *
13032 * Initialize media for use on 1000BASE-X devices.
13033 */
13034 static void
13035 wm_tbi_mediainit(struct wm_softc *sc)
13036 {
13037 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
13038 const char *sep = "";
13039
13040 if (sc->sc_type < WM_T_82543)
13041 sc->sc_tipg = TIPG_WM_DFLT;
13042 else
13043 sc->sc_tipg = TIPG_LG_DFLT;
13044
13045 sc->sc_tbi_serdes_anegticks = 5;
13046
13047 /* Initialize our media structures */
13048 sc->sc_mii.mii_ifp = ifp;
13049 sc->sc_ethercom.ec_mii = &sc->sc_mii;
13050
13051 ifp->if_baudrate = IF_Gbps(1);
13052 if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
13053 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
13054 ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
13055 wm_serdes_mediachange, wm_serdes_mediastatus,
13056 sc->sc_core_lock);
13057 } else {
13058 ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
13059 wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
13060 }
13061
13062 /*
13063 * SWD Pins:
13064 *
13065 * 0 = Link LED (output)
13066 * 1 = Loss Of Signal (input)
13067 */
13068 sc->sc_ctrl |= CTRL_SWDPIO(0);
13069
13070 /* XXX Perhaps this is only for TBI */
13071 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13072 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
13073
13074 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
13075 sc->sc_ctrl &= ~CTRL_LRST;
13076
13077 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13078
13079 #define ADD(ss, mm, dd) \
13080 do { \
13081 aprint_normal("%s%s", sep, ss); \
13082 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
13083 sep = ", "; \
13084 } while (/*CONSTCOND*/0)
13085
13086 aprint_normal_dev(sc->sc_dev, "");
13087
13088 if (sc->sc_type == WM_T_I354) {
13089 uint32_t status;
13090
13091 status = CSR_READ(sc, WMREG_STATUS);
13092 if (((status & STATUS_2P5_SKU) != 0)
13093 && ((status & STATUS_2P5_SKU_OVER) == 0)) {
13094 ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
13095 } else
13096 ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
13097 } else if (sc->sc_type == WM_T_82545) {
13098 /* Only 82545 is LX (XXX except SFP) */
13099 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
13100 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
13101 } else if (sc->sc_sfptype != 0) {
13102 /* XXX wm(4) fiber/serdes don't use ifm_data */
13103 switch (sc->sc_sfptype) {
13104 default:
13105 case SFF_SFP_ETH_FLAGS_1000SX:
13106 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
13107 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
13108 break;
13109 case SFF_SFP_ETH_FLAGS_1000LX:
13110 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
13111 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
13112 break;
13113 case SFF_SFP_ETH_FLAGS_1000CX:
13114 ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
13115 ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
13116 break;
13117 case SFF_SFP_ETH_FLAGS_1000T:
13118 ADD("1000baseT", IFM_1000_T, 0);
13119 ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
13120 break;
13121 case SFF_SFP_ETH_FLAGS_100FX:
13122 ADD("100baseFX", IFM_100_FX, ANAR_TX);
13123 ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
13124 break;
13125 }
13126 } else {
13127 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
13128 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
13129 }
13130 ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
13131 aprint_normal("\n");
13132
13133 #undef ADD
13134
13135 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
13136 }
13137
13138 /*
13139 * wm_tbi_mediachange: [ifmedia interface function]
13140 *
13141 * Set hardware to newly-selected media on a 1000BASE-X device.
13142 */
13143 static int
13144 wm_tbi_mediachange(struct ifnet *ifp)
13145 {
13146 struct wm_softc *sc = ifp->if_softc;
13147 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
13148 uint32_t status, ctrl;
13149 bool signal;
13150 int i;
13151
13152 KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
13153 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
13154 /* XXX need some work for >= 82571 and < 82575 */
13155 if (sc->sc_type < WM_T_82575)
13156 return 0;
13157 }
13158
13159 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
13160 || (sc->sc_type >= WM_T_82575))
13161 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
13162
13163 sc->sc_ctrl &= ~CTRL_LRST;
13164 sc->sc_txcw = TXCW_ANE;
13165 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13166 sc->sc_txcw |= TXCW_FD | TXCW_HD;
13167 else if (ife->ifm_media & IFM_FDX)
13168 sc->sc_txcw |= TXCW_FD;
13169 else
13170 sc->sc_txcw |= TXCW_HD;
13171
13172 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
13173 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
13174
13175 DPRINTF(sc, WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
13176 device_xname(sc->sc_dev), sc->sc_txcw));
13177 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13178 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13179 CSR_WRITE_FLUSH(sc);
13180 delay(1000);
13181
13182 ctrl = CSR_READ(sc, WMREG_CTRL);
13183 signal = wm_tbi_havesignal(sc, ctrl);
13184
13185 DPRINTF(sc, WM_DEBUG_LINK,
13186 ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
13187
13188 if (signal) {
13189 /* Have signal; wait for the link to come up. */
13190 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
13191 delay(10000);
13192 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
13193 break;
13194 }
13195
13196 DPRINTF(sc, WM_DEBUG_LINK,
13197 ("%s: i = %d after waiting for link\n",
13198 device_xname(sc->sc_dev), i));
13199
13200 status = CSR_READ(sc, WMREG_STATUS);
13201 DPRINTF(sc, WM_DEBUG_LINK,
13202 ("%s: status after final read = 0x%x, STATUS_LU = %#"
13203 __PRIxBIT "\n",
13204 device_xname(sc->sc_dev), status, STATUS_LU));
13205 if (status & STATUS_LU) {
13206 /* Link is up. */
13207 DPRINTF(sc, WM_DEBUG_LINK,
13208 ("%s: LINK: set media -> link up %s\n",
13209 device_xname(sc->sc_dev),
13210 (status & STATUS_FD) ? "FDX" : "HDX"));
13211
13212 /*
13213 * NOTE: CTRL will update TFCE and RFCE automatically,
13214 * so we should update sc->sc_ctrl
13215 */
13216 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
13217 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
13218 sc->sc_fcrtl &= ~FCRTL_XONE;
13219 if (status & STATUS_FD)
13220 sc->sc_tctl |=
13221 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
13222 else
13223 sc->sc_tctl |=
13224 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
13225 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
13226 sc->sc_fcrtl |= FCRTL_XONE;
13227 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
13228 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
13229 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
13230 sc->sc_tbi_linkup = 1;
13231 } else {
13232 if (i == WM_LINKUP_TIMEOUT)
13233 wm_check_for_link(sc);
13234 /* Link is down. */
13235 DPRINTF(sc, WM_DEBUG_LINK,
13236 ("%s: LINK: set media -> link down\n",
13237 device_xname(sc->sc_dev)));
13238 sc->sc_tbi_linkup = 0;
13239 }
13240 } else {
13241 DPRINTF(sc, WM_DEBUG_LINK,
13242 ("%s: LINK: set media -> no signal\n",
13243 device_xname(sc->sc_dev)));
13244 sc->sc_tbi_linkup = 0;
13245 }
13246
13247 wm_tbi_serdes_set_linkled(sc);
13248
13249 return 0;
13250 }
13251
13252 /*
13253 * wm_tbi_mediastatus: [ifmedia interface function]
13254 *
13255 * Get the current interface media status on a 1000BASE-X device.
13256 */
13257 static void
13258 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
13259 {
13260 struct wm_softc *sc = ifp->if_softc;
13261 uint32_t ctrl, status;
13262
13263 ifmr->ifm_status = IFM_AVALID;
13264 ifmr->ifm_active = IFM_ETHER;
13265
13266 status = CSR_READ(sc, WMREG_STATUS);
13267 if ((status & STATUS_LU) == 0) {
13268 ifmr->ifm_active |= IFM_NONE;
13269 return;
13270 }
13271
13272 ifmr->ifm_status |= IFM_ACTIVE;
13273 /* Only 82545 is LX */
13274 if (sc->sc_type == WM_T_82545)
13275 ifmr->ifm_active |= IFM_1000_LX;
13276 else
13277 ifmr->ifm_active |= IFM_1000_SX;
13278 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
13279 ifmr->ifm_active |= IFM_FDX;
13280 else
13281 ifmr->ifm_active |= IFM_HDX;
13282 ctrl = CSR_READ(sc, WMREG_CTRL);
13283 if (ctrl & CTRL_RFCE)
13284 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
13285 if (ctrl & CTRL_TFCE)
13286 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
13287 }
13288
13289 /* XXX TBI only */
13290 static int
13291 wm_check_for_link(struct wm_softc *sc)
13292 {
13293 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
13294 uint32_t rxcw;
13295 uint32_t ctrl;
13296 uint32_t status;
13297 bool signal;
13298
13299 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
13300 device_xname(sc->sc_dev), __func__));
13301
13302 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
13303 /* XXX need some work for >= 82571 */
13304 if (sc->sc_type >= WM_T_82571) {
13305 sc->sc_tbi_linkup = 1;
13306 return 0;
13307 }
13308 }
13309
13310 rxcw = CSR_READ(sc, WMREG_RXCW);
13311 ctrl = CSR_READ(sc, WMREG_CTRL);
13312 status = CSR_READ(sc, WMREG_STATUS);
13313 signal = wm_tbi_havesignal(sc, ctrl);
13314
13315 DPRINTF(sc, WM_DEBUG_LINK,
13316 ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
13317 device_xname(sc->sc_dev), __func__, signal,
13318 ((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
13319
13320 /*
13321 * SWDPIN LU RXCW
13322 * 0 0 0
13323 * 0 0 1 (should not happen)
13324 * 0 1 0 (should not happen)
13325 * 0 1 1 (should not happen)
13326 * 1 0 0 Disable autonego and force linkup
13327 * 1 0 1 got /C/ but not linkup yet
13328 * 1 1 0 (linkup)
13329 * 1 1 1 If IFM_AUTO, back to autonego
13330 *
13331 */
13332 if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
13333 DPRINTF(sc, WM_DEBUG_LINK,
13334 ("%s: %s: force linkup and fullduplex\n",
13335 device_xname(sc->sc_dev), __func__));
13336 sc->sc_tbi_linkup = 0;
13337 /* Disable auto-negotiation in the TXCW register */
13338 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
13339
13340 /*
13341 * Force link-up and also force full-duplex.
13342 *
13343 * NOTE: the hardware updates TFCE and RFCE in CTRL automatically,
13344 * so base the cached sc->sc_ctrl on the value just read.
13345 */
13346 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
13347 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13348 } else if (((status & STATUS_LU) != 0)
13349 && ((rxcw & RXCW_C) != 0)
13350 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
13351 sc->sc_tbi_linkup = 1;
13352 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
13353 device_xname(sc->sc_dev), __func__));
13354 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13355 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
13356 } else if (signal && ((rxcw & RXCW_C) != 0)) {
13357 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
13358 device_xname(sc->sc_dev), __func__));
13359 } else {
13360 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
13361 device_xname(sc->sc_dev), __func__, rxcw, ctrl,
13362 status));
13363 }
13364
13365 return 0;
13366 }
13367
13368 /*
13369 * wm_tbi_tick:
13370 *
13371 * Check the link on TBI devices.
13372 * This function acts as mii_tick().
13373 */
13374 static void
13375 wm_tbi_tick(struct wm_softc *sc)
13376 {
13377 struct mii_data *mii = &sc->sc_mii;
13378 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13379 uint32_t status;
13380
13381 KASSERT(mutex_owned(sc->sc_core_lock));
13382
13383 status = CSR_READ(sc, WMREG_STATUS);
13384
13385 /* XXX is this needed? */
13386 (void)CSR_READ(sc, WMREG_RXCW);
13387 (void)CSR_READ(sc, WMREG_CTRL);
13388
13389 /* set link status */
13390 if ((status & STATUS_LU) == 0) {
13391 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
13392 device_xname(sc->sc_dev)));
13393 sc->sc_tbi_linkup = 0;
13394 } else if (sc->sc_tbi_linkup == 0) {
13395 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
13396 device_xname(sc->sc_dev),
13397 (status & STATUS_FD) ? "FDX" : "HDX"));
13398 sc->sc_tbi_linkup = 1;
13399 sc->sc_tbi_serdes_ticks = 0;
13400 }
13401
13402 if ((sc->sc_if_flags & IFF_UP) == 0)
13403 goto setled;
13404
13405 if ((status & STATUS_LU) == 0) {
13406 sc->sc_tbi_linkup = 0;
13407 /* If the timer expired, retry autonegotiation */
13408 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13409 && (++sc->sc_tbi_serdes_ticks
13410 >= sc->sc_tbi_serdes_anegticks)) {
13411 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
13412 device_xname(sc->sc_dev), __func__));
13413 sc->sc_tbi_serdes_ticks = 0;
13414 /*
13415 * Reset the link, and let autonegotiation do
13416 * its thing
13417 */
13418 sc->sc_ctrl |= CTRL_LRST;
13419 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13420 CSR_WRITE_FLUSH(sc);
13421 delay(1000);
13422 sc->sc_ctrl &= ~CTRL_LRST;
13423 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13424 CSR_WRITE_FLUSH(sc);
13425 delay(1000);
13426 CSR_WRITE(sc, WMREG_TXCW,
13427 sc->sc_txcw & ~TXCW_ANE);
13428 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13429 }
13430 }
13431
13432 setled:
13433 wm_tbi_serdes_set_linkled(sc);
13434 }
13435
13436 /* SERDES related */
13437 static void
13438 wm_serdes_power_up_link_82575(struct wm_softc *sc)
13439 {
13440 uint32_t reg;
13441
13442 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13443 && ((sc->sc_flags & WM_F_SGMII) == 0))
13444 return;
13445
13446 /* Enable PCS to turn on link */
13447 reg = CSR_READ(sc, WMREG_PCS_CFG);
13448 reg |= PCS_CFG_PCS_EN;
13449 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
13450
13451 /* Power up the laser */
13452 reg = CSR_READ(sc, WMREG_CTRL_EXT);
13453 reg &= ~CTRL_EXT_SWDPIN(3);
13454 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13455
13456 /* Flush the write to verify completion */
13457 CSR_WRITE_FLUSH(sc);
13458 delay(1000);
13459 }
13460
13461 static int
13462 wm_serdes_mediachange(struct ifnet *ifp)
13463 {
13464 struct wm_softc *sc = ifp->if_softc;
13465 bool pcs_autoneg = true; /* XXX */
13466 uint32_t ctrl_ext, pcs_lctl, reg;
13467
13468 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13469 && ((sc->sc_flags & WM_F_SGMII) == 0))
13470 return 0;
13471
13472 /* XXX Currently, this function is not called on 8257[12] */
13473 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
13474 || (sc->sc_type >= WM_T_82575))
13475 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
13476
13477 /* Power on the sfp cage if present */
13478 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
13479 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
13480 ctrl_ext |= CTRL_EXT_I2C_ENA;
13481 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
13482
13483 sc->sc_ctrl |= CTRL_SLU;
13484
13485 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
13486 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
13487
13488 reg = CSR_READ(sc, WMREG_CONNSW);
13489 reg |= CONNSW_ENRGSRC;
13490 CSR_WRITE(sc, WMREG_CONNSW, reg);
13491 }
13492
13493 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
13494 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
13495 case CTRL_EXT_LINK_MODE_SGMII:
13496 /* SGMII mode lets the phy handle forcing speed/duplex */
13497 pcs_autoneg = true;
13498 /* Autoneg timeout should be disabled for SGMII mode */
13499 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
13500 break;
13501 case CTRL_EXT_LINK_MODE_1000KX:
13502 pcs_autoneg = false;
13503 /* FALLTHROUGH */
13504 default:
13505 if ((sc->sc_type == WM_T_82575)
13506 || (sc->sc_type == WM_T_82576)) {
13507 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
13508 pcs_autoneg = false;
13509 }
13510 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
13511 | CTRL_FRCFDX;
13512
13513 /* Set speed of 1000/Full if speed/duplex is forced */
13514 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
13515 }
13516 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13517
13518 pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
13519 PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
13520
13521 if (pcs_autoneg) {
13522 /* Set PCS register for autoneg */
13523 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
13524
13525 /* Disable force flow control for autoneg */
13526 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
13527
13528 /* Configure flow control advertisement for autoneg */
13529 reg = CSR_READ(sc, WMREG_PCS_ANADV);
13530 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
13531 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
13532 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
13533 } else
13534 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
13535
13536 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
13537
13538 return 0;
13539 }
13540
13541 static void
13542 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
13543 {
13544 struct wm_softc *sc = ifp->if_softc;
13545 struct mii_data *mii = &sc->sc_mii;
13546 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13547 uint32_t pcs_adv, pcs_lpab, reg;
13548
13549 ifmr->ifm_status = IFM_AVALID;
13550 ifmr->ifm_active = IFM_ETHER;
13551
13552 /* Check PCS */
13553 reg = CSR_READ(sc, WMREG_PCS_LSTS);
13554 if ((reg & PCS_LSTS_LINKOK) == 0) {
13555 ifmr->ifm_active |= IFM_NONE;
13556 sc->sc_tbi_linkup = 0;
13557 goto setled;
13558 }
13559
13560 sc->sc_tbi_linkup = 1;
13561 ifmr->ifm_status |= IFM_ACTIVE;
13562 if (sc->sc_type == WM_T_I354) {
13563 uint32_t status;
13564
13565 status = CSR_READ(sc, WMREG_STATUS);
13566 if (((status & STATUS_2P5_SKU) != 0)
13567 && ((status & STATUS_2P5_SKU_OVER) == 0)) {
13568 ifmr->ifm_active |= IFM_2500_KX;
13569 } else
13570 ifmr->ifm_active |= IFM_1000_KX;
13571 } else {
13572 switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
13573 case PCS_LSTS_SPEED_10:
13574 ifmr->ifm_active |= IFM_10_T; /* XXX */
13575 break;
13576 case PCS_LSTS_SPEED_100:
13577 ifmr->ifm_active |= IFM_100_FX; /* XXX */
13578 break;
13579 case PCS_LSTS_SPEED_1000:
13580 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
13581 break;
13582 default:
13583 device_printf(sc->sc_dev, "Unknown speed\n");
13584 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
13585 break;
13586 }
13587 }
13588 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
13589 if ((reg & PCS_LSTS_FDX) != 0)
13590 ifmr->ifm_active |= IFM_FDX;
13591 else
13592 ifmr->ifm_active |= IFM_HDX;
13593 mii->mii_media_active &= ~IFM_ETH_FMASK;
13594 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
13595 /* Check flow */
13596 reg = CSR_READ(sc, WMREG_PCS_LSTS);
13597 if ((reg & PCS_LSTS_AN_COMP) == 0) {
13598 DPRINTF(sc, WM_DEBUG_LINK,
13599 ("XXX LINKOK but not ACOMP\n"));
13600 goto setled;
13601 }
13602 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
13603 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
13604 DPRINTF(sc, WM_DEBUG_LINK,
13605 ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
13606 if ((pcs_adv & TXCW_SYM_PAUSE)
13607 && (pcs_lpab & TXCW_SYM_PAUSE)) {
13608 mii->mii_media_active |= IFM_FLOW
13609 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
13610 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
13611 && (pcs_adv & TXCW_ASYM_PAUSE)
13612 && (pcs_lpab & TXCW_SYM_PAUSE)
13613 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
13614 mii->mii_media_active |= IFM_FLOW
13615 | IFM_ETH_TXPAUSE;
13616 } else if ((pcs_adv & TXCW_SYM_PAUSE)
13617 && (pcs_adv & TXCW_ASYM_PAUSE)
13618 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
13619 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
13620 mii->mii_media_active |= IFM_FLOW
13621 | IFM_ETH_RXPAUSE;
13622 }
13623 }
13624 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
13625 | (mii->mii_media_active & IFM_ETH_FMASK);
13626 setled:
13627 wm_tbi_serdes_set_linkled(sc);
13628 }
13629
13630 /*
13631 * wm_serdes_tick:
13632 *
13633 * Check the link on serdes devices.
13634 */
13635 static void
13636 wm_serdes_tick(struct wm_softc *sc)
13637 {
13638 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
13639 struct mii_data *mii = &sc->sc_mii;
13640 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13641 uint32_t reg;
13642
13643 KASSERT(mutex_owned(sc->sc_core_lock));
13644
13645 mii->mii_media_status = IFM_AVALID;
13646 mii->mii_media_active = IFM_ETHER;
13647
13648 /* Check PCS */
13649 reg = CSR_READ(sc, WMREG_PCS_LSTS);
13650 if ((reg & PCS_LSTS_LINKOK) != 0) {
13651 mii->mii_media_status |= IFM_ACTIVE;
13652 sc->sc_tbi_linkup = 1;
13653 sc->sc_tbi_serdes_ticks = 0;
13654 mii->mii_media_active |= IFM_1000_SX; /* XXX */
13655 if ((reg & PCS_LSTS_FDX) != 0)
13656 mii->mii_media_active |= IFM_FDX;
13657 else
13658 mii->mii_media_active |= IFM_HDX;
13659 } else {
13660 mii->mii_media_active |= IFM_NONE;
13661 sc->sc_tbi_linkup = 0;
13662 /* If the timer expired, retry autonegotiation */
13663 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13664 && (++sc->sc_tbi_serdes_ticks
13665 >= sc->sc_tbi_serdes_anegticks)) {
13666 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
13667 device_xname(sc->sc_dev), __func__));
13668 sc->sc_tbi_serdes_ticks = 0;
13669 /* XXX */
13670 wm_serdes_mediachange(ifp);
13671 }
13672 }
13673
13674 wm_tbi_serdes_set_linkled(sc);
13675 }
13676
13677 /* SFP related */
13678
13679 static int
13680 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
13681 {
13682 uint32_t i2ccmd;
13683 int i;
13684
13685 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
13686 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
13687
13688 /* Poll the ready bit */
13689 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
13690 delay(50);
13691 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
13692 if (i2ccmd & I2CCMD_READY)
13693 break;
13694 }
13695 if ((i2ccmd & I2CCMD_READY) == 0)
13696 return -1;
13697 if ((i2ccmd & I2CCMD_ERROR) != 0)
13698 return -1;
13699
13700 *data = i2ccmd & 0x00ff;
13701
13702 return 0;
13703 }
13704
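/*
 * Illustrative sketch, not driver code: wm_sfp_read_data_byte() above
 * fetches one byte of the SFP module ROM over I2CCMD; for example, the
 * module identifier at SFF_SFP_ID_OFF:
 */
#if 0
static void
wm_sfp_example(struct wm_softc *sc)
{
	uint8_t id;

	if (wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &id) == 0)
		printf("SFP identifier: 0x%02x\n", id);
}
#endif
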
13705 static uint32_t
13706 wm_sfp_get_media_type(struct wm_softc *sc)
13707 {
13708 uint32_t ctrl_ext;
13709 uint8_t val = 0;
13710 int timeout = 3;
13711 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
13712 int rv = -1;
13713
13714 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
13715 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
13716 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
13717 CSR_WRITE_FLUSH(sc);
13718
13719 /* Read SFP module data */
13720 while (timeout) {
13721 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
13722 if (rv == 0)
13723 break;
13724 delay(100*1000); /* XXX too big */
13725 timeout--;
13726 }
13727 if (rv != 0)
13728 goto out;
13729
13730 switch (val) {
13731 case SFF_SFP_ID_SFF:
13732 aprint_normal_dev(sc->sc_dev,
13733 "Module/Connector soldered to board\n");
13734 break;
13735 case SFF_SFP_ID_SFP:
13736 sc->sc_flags |= WM_F_SFP;
13737 break;
13738 case SFF_SFP_ID_UNKNOWN:
13739 goto out;
13740 default:
13741 break;
13742 }
13743
13744 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
13745 if (rv != 0)
13746 goto out;
13747
13748 sc->sc_sfptype = val;
13749 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
13750 mediatype = WM_MEDIATYPE_SERDES;
13751 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
13752 sc->sc_flags |= WM_F_SGMII;
13753 mediatype = WM_MEDIATYPE_COPPER;
13754 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
13755 sc->sc_flags |= WM_F_SGMII;
13756 mediatype = WM_MEDIATYPE_SERDES;
13757 } else {
13758 device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
13759 __func__, sc->sc_sfptype);
13760 sc->sc_sfptype = 0; /* XXX unknown */
13761 }
13762
13763 out:
13764 /* Restore I2C interface setting */
13765 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
13766
13767 return mediatype;
13768 }
13769
13770 /*
13771 * NVM related.
13772 * Microwire, SPI (w/wo EERD) and Flash.
13773 */
13774
13775 /* Both spi and uwire */
13776
13777 /*
13778 * wm_eeprom_sendbits:
13779 *
13780 * Send a series of bits to the EEPROM.
13781 */
13782 static void
13783 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
13784 {
13785 uint32_t reg;
13786 int x;
13787
13788 reg = CSR_READ(sc, WMREG_EECD);
13789
13790 for (x = nbits; x > 0; x--) {
13791 if (bits & (1U << (x - 1)))
13792 reg |= EECD_DI;
13793 else
13794 reg &= ~EECD_DI;
13795 CSR_WRITE(sc, WMREG_EECD, reg);
13796 CSR_WRITE_FLUSH(sc);
13797 delay(2);
13798 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
13799 CSR_WRITE_FLUSH(sc);
13800 delay(2);
13801 CSR_WRITE(sc, WMREG_EECD, reg);
13802 CSR_WRITE_FLUSH(sc);
13803 delay(2);
13804 }
13805 }
13806
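/*
 * Note: each bit is presented on EECD_DI most significant bit first
 * and latched with an EECD_SK pulse; with the three 2us delays above,
 * every bit costs about 6us.  For example, shifting out the 3-bit
 * Microwire READ opcode (110 binary by Microwire convention) drives
 * DI = 1, 1, 0:
 */
#if 0
	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);	/* sketch only */
#endif
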
13807 /*
13808 * wm_eeprom_recvbits:
13809 *
13810 * Receive a series of bits from the EEPROM.
13811 */
13812 static void
13813 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
13814 {
13815 uint32_t reg, val;
13816 int x;
13817
13818 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
13819
13820 val = 0;
13821 for (x = nbits; x > 0; x--) {
13822 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
13823 CSR_WRITE_FLUSH(sc);
13824 delay(2);
13825 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
13826 val |= (1U << (x - 1));
13827 CSR_WRITE(sc, WMREG_EECD, reg);
13828 CSR_WRITE_FLUSH(sc);
13829 delay(2);
13830 }
13831 *valp = val;
13832 }
13833
13834 /* Microwire */
13835
13836 /*
13837 * wm_nvm_read_uwire:
13838 *
13839 * Read a word from the EEPROM using the MicroWire protocol.
13840 */
13841 static int
13842 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13843 {
13844 uint32_t reg, val;
13845 int i, rv;
13846
13847 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13848 device_xname(sc->sc_dev), __func__));
13849
13850 rv = sc->nvm.acquire(sc);
13851 if (rv != 0)
13852 return rv;
13853
13854 for (i = 0; i < wordcnt; i++) {
13855 /* Clear SK and DI. */
13856 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
13857 CSR_WRITE(sc, WMREG_EECD, reg);
13858
13859 /*
13860 * XXX: workaround for a bug in qemu-0.12.x and prior
13861 * and Xen.
13862 *
13863 * We use this workaround only for the 82540 because qemu's
13864 * e1000 acts as an 82540.
13865 */
13866 if (sc->sc_type == WM_T_82540) {
13867 reg |= EECD_SK;
13868 CSR_WRITE(sc, WMREG_EECD, reg);
13869 reg &= ~EECD_SK;
13870 CSR_WRITE(sc, WMREG_EECD, reg);
13871 CSR_WRITE_FLUSH(sc);
13872 delay(2);
13873 }
13874 /* XXX: end of workaround */
13875
13876 /* Set CHIP SELECT. */
13877 reg |= EECD_CS;
13878 CSR_WRITE(sc, WMREG_EECD, reg);
13879 CSR_WRITE_FLUSH(sc);
13880 delay(2);
13881
13882 /* Shift in the READ command. */
13883 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
13884
13885 /* Shift in address. */
13886 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
13887
13888 /* Shift out the data. */
13889 wm_eeprom_recvbits(sc, &val, 16);
13890 data[i] = val & 0xffff;
13891
13892 /* Clear CHIP SELECT. */
13893 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
13894 CSR_WRITE(sc, WMREG_EECD, reg);
13895 CSR_WRITE_FLUSH(sc);
13896 delay(2);
13897 }
13898
13899 sc->nvm.release(sc);
13900 return 0;
13901 }
13902
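/*
 * Illustrative sketch, not driver code: reading the three NVM words
 * that hold the station address through the Microwire path.  In the
 * driver proper NVM reads go through wm_nvm_read(), which dispatches
 * to the access method the chip actually provides.
 */
#if 0
static void
wm_uwire_example(struct wm_softc *sc)
{
	uint16_t macwords[3];

	wm_nvm_read_uwire(sc, NVM_OFF_MACADDR, 3, macwords);
}
#endif
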
13903 /* SPI */
13904
13905 /*
13906 * Set SPI and FLASH related information from the EECD register.
13907 * For 82541 and 82547, the word size is taken from EEPROM.
13908 */
13909 static int
13910 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
13911 {
13912 int size;
13913 uint32_t reg;
13914 uint16_t data;
13915
13916 reg = CSR_READ(sc, WMREG_EECD);
13917 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
13918
13919 /* Read the size of NVM from EECD by default */
13920 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
13921 switch (sc->sc_type) {
13922 case WM_T_82541:
13923 case WM_T_82541_2:
13924 case WM_T_82547:
13925 case WM_T_82547_2:
13926 /* Set dummy value to access EEPROM */
13927 sc->sc_nvm_wordsize = 64;
13928 if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
13929 aprint_error_dev(sc->sc_dev,
13930 "%s: failed to read EEPROM size\n", __func__);
13931 }
13932 reg = data;
13933 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
13934 if (size == 0)
13935 size = 6; /* 64 word size */
13936 else
13937 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
13938 break;
13939 case WM_T_80003:
13940 case WM_T_82571:
13941 case WM_T_82572:
13942 case WM_T_82573: /* SPI case */
13943 case WM_T_82574: /* SPI case */
13944 case WM_T_82583: /* SPI case */
13945 size += NVM_WORD_SIZE_BASE_SHIFT;
13946 if (size > 14)
13947 size = 14;
13948 break;
13949 case WM_T_82575:
13950 case WM_T_82576:
13951 case WM_T_82580:
13952 case WM_T_I350:
13953 case WM_T_I354:
13954 case WM_T_I210:
13955 case WM_T_I211:
13956 size += NVM_WORD_SIZE_BASE_SHIFT;
13957 if (size > 15)
13958 size = 15;
13959 break;
13960 default:
13961 aprint_error_dev(sc->sc_dev,
13962 "%s: unknown device (%d)?\n", __func__, sc->sc_type);
13963 return -1;
13965 }
13966
13967 sc->sc_nvm_wordsize = 1 << size;
13968
13969 return 0;
13970 }
13971
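/*
 * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, its usual
 * e1000 value): an EECD size field of 4 on an 82571 gives
 * size = 4 + 6 = 10, so sc_nvm_wordsize = 1 << 10 = 1024 words.
 */
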
13972 /*
13973 * wm_nvm_ready_spi:
13974 *
13975 * Wait for a SPI EEPROM to be ready for commands.
13976 */
13977 static int
13978 wm_nvm_ready_spi(struct wm_softc *sc)
13979 {
13980 uint32_t val;
13981 int usec;
13982
13983 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13984 device_xname(sc->sc_dev), __func__));
13985
13986 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
13987 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
13988 wm_eeprom_recvbits(sc, &val, 8);
13989 if ((val & SPI_SR_RDY) == 0)
13990 break;
13991 }
13992 if (usec >= SPI_MAX_RETRIES) {
13993 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
13994 return -1;
13995 }
13996 return 0;
13997 }
13998
13999 /*
14000 * wm_nvm_read_spi:
14001 *
14002 * Read a word from the EEPROM using the SPI protocol.
14003 */
14004 static int
14005 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
14006 {
14007 uint32_t reg, val;
14008 int i;
14009 uint8_t opc;
14010 int rv;
14011
14012 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14013 device_xname(sc->sc_dev), __func__));
14014
14015 rv = sc->nvm.acquire(sc);
14016 if (rv != 0)
14017 return rv;
14018
14019 /* Clear SK and CS. */
14020 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
14021 CSR_WRITE(sc, WMREG_EECD, reg);
14022 CSR_WRITE_FLUSH(sc);
14023 delay(2);
14024
14025 if ((rv = wm_nvm_ready_spi(sc)) != 0)
14026 goto out;
14027
14028 /* Toggle CS to flush commands. */
14029 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
14030 CSR_WRITE_FLUSH(sc);
14031 delay(2);
14032 CSR_WRITE(sc, WMREG_EECD, reg);
14033 CSR_WRITE_FLUSH(sc);
14034 delay(2);
14035
14036 opc = SPI_OPC_READ;
14037 if (sc->sc_nvm_addrbits == 8 && word >= 128)
14038 opc |= SPI_OPC_A8;
14039
14040 wm_eeprom_sendbits(sc, opc, 8);
14041 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
14042
14043 for (i = 0; i < wordcnt; i++) {
14044 wm_eeprom_recvbits(sc, &val, 16);
14045 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
14046 }
14047
14048 /* Raise CS and clear SK. */
14049 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
14050 CSR_WRITE(sc, WMREG_EECD, reg);
14051 CSR_WRITE_FLUSH(sc);
14052 delay(2);
14053
14054 out:
14055 sc->nvm.release(sc);
14056 return rv;
14057 }
14058
14059 /* Using with EERD */
14060
14061 static int
14062 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
14063 {
14064 uint32_t attempts = 100000;
14065 uint32_t i, reg = 0;
14066 int32_t done = -1;
14067
14068 for (i = 0; i < attempts; i++) {
14069 reg = CSR_READ(sc, rw);
14070
14071 if (reg & EERD_DONE) {
14072 done = 0;
14073 break;
14074 }
14075 delay(5);
14076 }
14077
14078 return done;
14079 }
14080
14081 static int
14082 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
14083 {
14084 int i, eerd = 0;
14085 int rv;
14086
14087 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14088 device_xname(sc->sc_dev), __func__));
14089
14090 rv = sc->nvm.acquire(sc);
14091 if (rv != 0)
14092 return rv;
14093
14094 for (i = 0; i < wordcnt; i++) {
14095 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
14096 CSR_WRITE(sc, WMREG_EERD, eerd);
14097 rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
14098 if (rv != 0) {
14099 aprint_error_dev(sc->sc_dev, "EERD polling failed: "
14100 "offset=%d, wordcnt=%d\n", offset, wordcnt);
14101 break;
14102 }
14103 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
14104 }
14105
14106 sc->nvm.release(sc);
14107 return rv;
14108 }
14109
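/*
 * Worked example: to fetch NVM word 3 the loop above writes
 * EERD = (3 << EERD_ADDR_SHIFT) | EERD_START, polls until EERD_DONE is
 * set, and then takes the word from the bits above EERD_DATA_SHIFT.
 */
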
14110 /* Flash */
14111
14112 static int
14113 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
14114 {
14115 uint32_t eecd;
14116 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
14117 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
14118 uint32_t nvm_dword = 0;
14119 uint8_t sig_byte = 0;
14120 int rv;
14121
14122 switch (sc->sc_type) {
14123 case WM_T_PCH_SPT:
14124 case WM_T_PCH_CNP:
14125 bank1_offset = sc->sc_ich8_flash_bank_size * 2;
14126 act_offset = ICH_NVM_SIG_WORD * 2;
14127
14128 /* Set bank to 0 in case flash read fails. */
14129 *bank = 0;
14130
14131 /* Check bank 0 */
14132 rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
14133 if (rv != 0)
14134 return rv;
14135 sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
14136 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14137 *bank = 0;
14138 return 0;
14139 }
14140
14141 /* Check bank 1 */
14142 rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
14143 &nvm_dword);
if (rv != 0)
return rv;
14144 sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
14145 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14146 *bank = 1;
14147 return 0;
14148 }
14149 aprint_error_dev(sc->sc_dev,
14150 "%s: no valid NVM bank present (%u)\n", __func__, *bank);
14151 return -1;
14152 case WM_T_ICH8:
14153 case WM_T_ICH9:
14154 eecd = CSR_READ(sc, WMREG_EECD);
14155 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
14156 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
14157 return 0;
14158 }
14159 /* FALLTHROUGH */
14160 default:
14161 /* Default to 0 */
14162 *bank = 0;
14163
14164 /* Check bank 0 */
14165 wm_read_ich8_byte(sc, act_offset, &sig_byte);
14166 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14167 *bank = 0;
14168 return 0;
14169 }
14170
14171 /* Check bank 1 */
14172 wm_read_ich8_byte(sc, act_offset + bank1_offset,
14173 &sig_byte);
14174 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14175 *bank = 1;
14176 return 0;
14177 }
14178 }
14179
14180 DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
14181 device_xname(sc->sc_dev)));
14182 return -1;
14183 }
14184
14185 /******************************************************************************
14186 * This function does initial flash setup so that a new read/write/erase cycle
14187 * can be started.
14188 *
14189 * sc - The pointer to the hw structure
14190 ****************************************************************************/
14191 static int32_t
14192 wm_ich8_cycle_init(struct wm_softc *sc)
14193 {
14194 uint16_t hsfsts;
14195 int32_t error = 1;
14196 int32_t i = 0;
14197
14198 if (sc->sc_type >= WM_T_PCH_SPT)
14199 hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
14200 else
14201 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
14202
14203 /* Check the Flash Descriptor Valid bit in HW status */
14204 if ((hsfsts & HSFSTS_FLDVAL) == 0)
14205 return error;
14206
14207 /* Clear FCERR in Hw status by writing 1 */
14208 /* Clear DAEL in Hw status by writing a 1 */
14209 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
14210
14211 if (sc->sc_type >= WM_T_PCH_SPT)
14212 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
14213 else
14214 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
14215
14216 /*
14217 * Either we should have a hardware SPI cycle-in-progress bit to
14218 * check against before starting a new cycle, or the FDONE bit
14219 * should be set by hardware reset so that it can indicate whether
14220 * a cycle is in progress or has completed.  We should also have a
14221 * software semaphore mechanism guarding FDONE or the
14222 * cycle-in-progress bit so that accesses by two threads are
14223 * serialized and they cannot start a cycle at the same time.
14224 */
14226
14227 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
14228 /*
14229 * There is no cycle running at present, so we can start a
14230 * cycle
14231 */
14232
14233 /* Begin by setting Flash Cycle Done. */
14234 hsfsts |= HSFSTS_DONE;
14235 if (sc->sc_type >= WM_T_PCH_SPT)
14236 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14237 hsfsts & 0xffffUL);
14238 else
14239 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
14240 error = 0;
14241 } else {
14242 /*
14243 * Otherwise poll for sometime so the current cycle has a
14244 * chance to end before giving up.
14245 */
14246 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
14247 if (sc->sc_type >= WM_T_PCH_SPT)
14248 hsfsts = ICH8_FLASH_READ32(sc,
14249 ICH_FLASH_HSFSTS) & 0xffffUL;
14250 else
14251 hsfsts = ICH8_FLASH_READ16(sc,
14252 ICH_FLASH_HSFSTS);
14253 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
14254 error = 0;
14255 break;
14256 }
14257 delay(1);
14258 }
14259 if (error == 0) {
14260 /*
14261 * The previous cycle ended before the poll timed out; now set
14262 * the Flash Cycle Done.
14263 */
14264 hsfsts |= HSFSTS_DONE;
14265 if (sc->sc_type >= WM_T_PCH_SPT)
14266 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14267 hsfsts & 0xffffUL);
14268 else
14269 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
14270 hsfsts);
14271 }
14272 }
14273 return error;
14274 }
14275
14276 /******************************************************************************
14277 * This function starts a flash cycle and waits for its completion
14278 *
14279 * sc - The pointer to the hw structure
14280 ****************************************************************************/
14281 static int32_t
14282 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
14283 {
14284 uint16_t hsflctl;
14285 uint16_t hsfsts;
14286 int32_t error = 1;
14287 uint32_t i = 0;
14288
14289 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
14290 if (sc->sc_type >= WM_T_PCH_SPT)
14291 hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
14292 else
14293 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
14294 hsflctl |= HSFCTL_GO;
14295 if (sc->sc_type >= WM_T_PCH_SPT)
14296 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14297 (uint32_t)hsflctl << 16);
14298 else
14299 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
14300
14301 /* Wait till FDONE bit is set to 1 */
14302 do {
14303 if (sc->sc_type >= WM_T_PCH_SPT)
14304 hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
14305 & 0xffffUL;
14306 else
14307 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
14308 if (hsfsts & HSFSTS_DONE)
14309 break;
14310 delay(1);
14311 i++;
14312 } while (i < timeout);
14313 if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
14314 error = 0;
14315
14316 return error;
14317 }
14318
14319 /******************************************************************************
14320 * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
14321 *
14322 * sc - The pointer to the hw structure
14323 * index - The index of the byte or word to read.
14324 * size - Size of data to read, 1=byte, 2=word, 4=dword
14325 * data - Pointer to the word to store the value read.
14326 *****************************************************************************/
14327 static int32_t
14328 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
14329 uint32_t size, uint32_t *data)
14330 {
14331 uint16_t hsfsts;
14332 uint16_t hsflctl;
14333 uint32_t flash_linear_address;
14334 uint32_t flash_data = 0;
14335 int32_t error = 1;
14336 int32_t count = 0;
14337
14338 if (size < 1 || size > 4 || data == NULL ||
14339 index > ICH_FLASH_LINEAR_ADDR_MASK)
14340 return error;
14341
14342 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
14343 sc->sc_ich8_flash_base;
14344
14345 do {
14346 delay(1);
14347 /* Steps */
14348 error = wm_ich8_cycle_init(sc);
14349 if (error)
14350 break;
14351
14352 if (sc->sc_type >= WM_T_PCH_SPT)
14353 hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
14354 >> 16;
14355 else
14356 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
14357 /* BCOUNT is size - 1: 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes. */
14358 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
14359 & HSFCTL_BCOUNT_MASK;
14360 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
14361 if (sc->sc_type >= WM_T_PCH_SPT) {
14362 /*
14363 * In SPT, This register is in Lan memory space, not
14364 * flash. Therefore, only 32 bit access is supported.
14365 */
14366 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14367 (uint32_t)hsflctl << 16);
14368 } else
14369 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
14370
14371 /*
14372 * Write the last 24 bits of index into Flash Linear address
14373 * field in Flash Address
14374 */
14375 /* TODO: TBD maybe check the index against the size of flash */
14376
14377 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
14378
14379 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
14380
14381 /*
14382 * If FCERR is set, clear it and retry the whole sequence a few
14383 * more times; otherwise read the Flash Data0 register, which
14384 * returns the data least significant byte first.
14385 */
14387 if (error == 0) {
14388 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
14389 if (size == 1)
14390 *data = (uint8_t)(flash_data & 0x000000FF);
14391 else if (size == 2)
14392 *data = (uint16_t)(flash_data & 0x0000FFFF);
14393 else if (size == 4)
14394 *data = (uint32_t)flash_data;
14395 break;
14396 } else {
14397 /*
14398 * If we've gotten here, then things are probably
14399 * completely hosed, but if the error condition is
14400 * detected, it won't hurt to give it another try...
14401 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
14402 */
14403 if (sc->sc_type >= WM_T_PCH_SPT)
14404 hsfsts = ICH8_FLASH_READ32(sc,
14405 ICH_FLASH_HSFSTS) & 0xffffUL;
14406 else
14407 hsfsts = ICH8_FLASH_READ16(sc,
14408 ICH_FLASH_HSFSTS);
14409
14410 if (hsfsts & HSFSTS_ERR) {
14411 /* Repeat for some time before giving up. */
14412 continue;
14413 } else if ((hsfsts & HSFSTS_DONE) == 0)
14414 break;
14415 }
14416 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
14417
14418 return error;
14419 }
14420
14421 /******************************************************************************
14422 * Reads a single byte from the NVM using the ICH8 flash access registers.
14423 *
14424 * sc - pointer to wm_hw structure
14425 * index - The index of the byte to read.
14426 * data - Pointer to a byte to store the value read.
14427 *****************************************************************************/
14428 static int32_t
14429 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
14430 {
14431 int32_t status;
14432 uint32_t word = 0;
14433
14434 status = wm_read_ich8_data(sc, index, 1, &word);
14435 if (status == 0)
14436 *data = (uint8_t)word;
14437 else
14438 *data = 0;
14439
14440 return status;
14441 }
14442
14443 /******************************************************************************
14444 * Reads a word from the NVM using the ICH8 flash access registers.
14445 *
14446 * sc - pointer to wm_hw structure
14447 * index - The starting byte index of the word to read.
14448 * data - Pointer to a word to store the value read.
14449 *****************************************************************************/
14450 static int32_t
14451 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
14452 {
14453 int32_t status;
14454 uint32_t word = 0;
14455
14456 status = wm_read_ich8_data(sc, index, 2, &word);
14457 if (status == 0)
14458 *data = (uint16_t)word;
14459 else
14460 *data = 0;
14461
14462 return status;
14463 }
14464
14465 /******************************************************************************
14466 * Reads a dword from the NVM using the ICH8 flash access registers.
14467 *
14468 * sc - pointer to wm_hw structure
14469 * index - The starting byte index of the word to read.
14470 * data - Pointer to a word to store the value read.
14471 *****************************************************************************/
14472 static int32_t
14473 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
14474 {
14475 int32_t status;
14476
14477 status = wm_read_ich8_data(sc, index, 4, data);
14478 return status;
14479 }
14480
14481 /******************************************************************************
14482 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
14483 * register.
14484 *
14485 * sc - Struct containing variables accessed by shared code
14486 * offset - offset of word in the EEPROM to read
14487 * data - word read from the EEPROM
14488 * words - number of words to read
14489 *****************************************************************************/
14490 static int
14491 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
14492 {
14493 int rv;
14494 uint32_t flash_bank = 0;
14495 uint32_t act_offset = 0;
14496 uint32_t bank_offset = 0;
14497 uint16_t word = 0;
14498 uint16_t i = 0;
14499
14500 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14501 device_xname(sc->sc_dev), __func__));
14502
14503 rv = sc->nvm.acquire(sc);
14504 if (rv != 0)
14505 return rv;
14506
14507 /*
14508 * We need to know which is the valid flash bank. In the event
14509 * that we didn't allocate eeprom_shadow_ram, we may not be
14510 * managing flash_bank. So it cannot be trusted and needs
14511 * to be updated with each read.
14512 */
14513 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
14514 if (rv) {
14515 DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
14516 device_xname(sc->sc_dev)));
14517 flash_bank = 0;
14518 }
14519
14520 /*
14521 * Adjust offset appropriately if we're on bank 1 - adjust for word
14522 * size
14523 */
14524 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
14525
14526 for (i = 0; i < words; i++) {
14527 /* The NVM part needs a byte offset, hence * 2 */
14528 act_offset = bank_offset + ((offset + i) * 2);
14529 rv = wm_read_ich8_word(sc, act_offset, &word);
14530 if (rv) {
14531 aprint_error_dev(sc->sc_dev,
14532 "%s: failed to read NVM\n", __func__);
14533 break;
14534 }
14535 data[i] = word;
14536 }
14537
14538 sc->nvm.release(sc);
14539 return rv;
14540 }
14541
14542 /******************************************************************************
14543 * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
14544 * register.
14545 *
14546 * sc - Struct containing variables accessed by shared code
14547 * offset - offset of word in the EEPROM to read
14548 * data - word read from the EEPROM
14549 * words - number of words to read
14550 *****************************************************************************/
14551 static int
14552 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
14553 {
14554 int rv;
14555 uint32_t flash_bank = 0;
14556 uint32_t act_offset = 0;
14557 uint32_t bank_offset = 0;
14558 uint32_t dword = 0;
14559 uint16_t i = 0;
14560
14561 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14562 device_xname(sc->sc_dev), __func__));
14563
14564 rv = sc->nvm.acquire(sc);
14565 if (rv != 0)
14566 return rv;
14567
14568 /*
14569 * We need to know which is the valid flash bank. In the event
14570 * that we didn't allocate eeprom_shadow_ram, we may not be
14571 * managing flash_bank. So it cannot be trusted and needs
14572 * to be updated with each read.
14573 */
14574 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
14575 if (rv) {
14576 DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
14577 device_xname(sc->sc_dev)));
14578 flash_bank = 0;
14579 }
14580
14581 /*
14582 * Adjust offset appropriately if we're on bank 1 - adjust for word
14583 * size
14584 */
14585 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
14586
14587 for (i = 0; i < words; i++) {
14588 /* The NVM part needs a byte offset, hence * 2 */
14589 act_offset = bank_offset + ((offset + i) * 2);
14590 /* but we must read dword aligned, so mask ... */
14591 rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
14592 if (rv) {
14593 aprint_error_dev(sc->sc_dev,
14594 "%s: failed to read NVM\n", __func__);
14595 break;
14596 }
14597 /* ... and pick out low or high word */
14598 if ((act_offset & 0x2) == 0)
14599 data[i] = (uint16_t)(dword & 0xFFFF);
14600 else
14601 data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
14602 }
14603
14604 sc->nvm.release(sc);
14605 return rv;
14606 }
14607
14608 /* iNVM */
14609
14610 static int
14611 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
14612 {
14613 int32_t rv = 0;
14614 uint32_t invm_dword;
14615 uint16_t i;
14616 uint8_t record_type, word_address;
14617
14618 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14619 device_xname(sc->sc_dev), __func__));
14620
14621 for (i = 0; i < INVM_SIZE; i++) {
14622 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
14623 /* Get record type */
14624 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
14625 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
14626 break;
14627 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
14628 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
14629 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
14630 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
14631 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
14632 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
14633 if (word_address == address) {
14634 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
14635 rv = 0;
14636 break;
14637 }
14638 }
14639 }
14640
14641 return rv;
14642 }
14643
14644 static int
14645 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
14646 {
14647 int i, rv;
14648
14649 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14650 device_xname(sc->sc_dev), __func__));
14651
14652 rv = sc->nvm.acquire(sc);
14653 if (rv != 0)
14654 return rv;
14655
14656 for (i = 0; i < words; i++) {
14657 switch (offset + i) {
14658 case NVM_OFF_MACADDR:
14659 case NVM_OFF_MACADDR1:
14660 case NVM_OFF_MACADDR2:
14661 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
14662 if (rv != 0) {
14663 data[i] = 0xffff;
14664 rv = -1;
14665 }
14666 break;
14667 case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
14668 rv = wm_nvm_read_word_invm(sc, offset, data);
14669 if (rv != 0) {
14670 *data = INVM_DEFAULT_AL;
14671 rv = 0;
14672 }
14673 break;
14674 case NVM_OFF_CFG2:
14675 rv = wm_nvm_read_word_invm(sc, offset, data);
14676 if (rv != 0) {
14677 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
14678 rv = 0;
14679 }
14680 break;
14681 case NVM_OFF_CFG4:
14682 rv = wm_nvm_read_word_invm(sc, offset, data);
14683 if (rv != 0) {
14684 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
14685 rv = 0;
14686 }
14687 break;
14688 case NVM_OFF_LED_1_CFG:
14689 rv = wm_nvm_read_word_invm(sc, offset, data);
14690 if (rv != 0) {
14691 *data = NVM_LED_1_CFG_DEFAULT_I211;
14692 rv = 0;
14693 }
14694 break;
14695 case NVM_OFF_LED_0_2_CFG:
14696 rv = wm_nvm_read_word_invm(sc, offset, data);
14697 if (rv != 0) {
14698 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
14699 rv = 0;
14700 }
14701 break;
14702 case NVM_OFF_ID_LED_SETTINGS:
14703 rv = wm_nvm_read_word_invm(sc, offset, data);
14704 if (rv != 0) {
14705 *data = ID_LED_RESERVED_FFFF;
14706 rv = 0;
14707 }
14708 break;
14709 default:
14710 DPRINTF(sc, WM_DEBUG_NVM,
14711 ("NVM word 0x%02x is not mapped.\n", offset));
14712 *data = NVM_RESERVED_WORD;
14713 break;
14714 }
14715 }
14716
14717 sc->nvm.release(sc);
14718 return rv;
14719 }
14720
/* Locking, NVM type detection, checksum validation, version check and read */
14722
14723 static int
14724 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
14725 {
14726 uint32_t eecd = 0;
14727
14728 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
14729 || sc->sc_type == WM_T_82583) {
14730 eecd = CSR_READ(sc, WMREG_EECD);
14731
14732 /* Isolate bits 15 & 16 */
14733 eecd = ((eecd >> 15) & 0x03);
14734
14735 /* If both bits are set, device is Flash type */
14736 if (eecd == 0x03)
14737 return 0;
14738 }
14739 return 1;
14740 }
14741
14742 static int
14743 wm_nvm_flash_presence_i210(struct wm_softc *sc)
14744 {
14745 uint32_t eec;
14746
14747 eec = CSR_READ(sc, WMREG_EEC);
14748 if ((eec & EEC_FLASH_DETECTED) != 0)
14749 return 1;
14750
14751 return 0;
14752 }
14753
14754 /*
14755 * wm_nvm_validate_checksum
14756 *
14757 * The checksum is defined as the sum of the first 64 (16 bit) words.
14758 */
14759 static int
14760 wm_nvm_validate_checksum(struct wm_softc *sc)
14761 {
14762 uint16_t checksum;
14763 uint16_t eeprom_data;
14764 #ifdef WM_DEBUG
14765 uint16_t csum_wordaddr, valid_checksum;
14766 #endif
14767 int i;
14768
14769 checksum = 0;
14770
14771 /* Don't check for I211 */
14772 if (sc->sc_type == WM_T_I211)
14773 return 0;
14774
14775 #ifdef WM_DEBUG
14776 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
14777 || (sc->sc_type == WM_T_PCH_CNP)) {
14778 csum_wordaddr = NVM_OFF_COMPAT;
14779 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
14780 } else {
14781 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
14782 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
14783 }
14784
14785 /* Dump EEPROM image for debug */
14786 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
14787 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
14788 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
14789 /* XXX PCH_SPT? */
14790 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
14791 if ((eeprom_data & valid_checksum) == 0)
14792 DPRINTF(sc, WM_DEBUG_NVM,
14793 ("%s: NVM need to be updated (%04x != %04x)\n",
14794 device_xname(sc->sc_dev), eeprom_data,
14795 valid_checksum));
14796 }
14797
14798 if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
14799 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
14800 for (i = 0; i < NVM_SIZE; i++) {
14801 if (wm_nvm_read(sc, i, 1, &eeprom_data))
14802 printf("XXXX ");
14803 else
14804 printf("%04hx ", eeprom_data);
14805 if (i % 8 == 7)
14806 printf("\n");
14807 }
14808 }
14809
14810 #endif /* WM_DEBUG */
14811
14812 for (i = 0; i < NVM_SIZE; i++) {
14813 if (wm_nvm_read(sc, i, 1, &eeprom_data))
14814 return -1;
14815 checksum += eeprom_data;
14816 }
14817
14818 if (checksum != (uint16_t) NVM_CHECKSUM) {
14819 #ifdef WM_DEBUG
14820 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
14821 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
14822 #endif
14823 }
14824
14825 return 0;
14826 }
14827
14828 static void
14829 wm_nvm_version_invm(struct wm_softc *sc)
14830 {
14831 uint32_t dword;
14832
14833 /*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just decode word 61 as the document
	 * describes. Perhaps it's not perfect, though...
14837 *
14838 * Example:
14839 *
14840 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
14841 */
14842 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
14843 dword = __SHIFTOUT(dword, INVM_VER_1);
14844 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
14845 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
14846 }
14847
14848 static void
14849 wm_nvm_version(struct wm_softc *sc)
14850 {
14851 uint16_t major, minor, build, patch;
14852 uint16_t uid0, uid1;
14853 uint16_t nvm_data;
14854 uint16_t off;
14855 bool check_version = false;
14856 bool check_optionrom = false;
14857 bool have_build = false;
14858 bool have_uid = true;
14859
14860 /*
14861 * Version format:
14862 *
14863 * XYYZ
14864 * X0YZ
14865 * X0YY
14866 *
14867 * Example:
14868 *
14869 * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10)
14870 * 82571 0x50a6 5.10.6?
14871 * 82572 0x506a 5.6.10?
14872 * 82572EI 0x5069 5.6.9?
14873 * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4)
14874 * 0x2013 2.1.3?
14875 * 82583 0x10a0 1.10.0? (document says it's default value)
14876 * ICH8+82567 0x0040 0.4.0?
14877 * ICH9+82566 0x1040 1.4.0?
14878 *ICH10+82567 0x0043 0.4.3?
14879 * PCH+82577 0x00c1 0.12.1?
14880 * PCH2+82579 0x00d3 0.13.3?
14881 * 0x00d4 0.13.4?
14882 * LPT+I218 0x0023 0.2.3?
14883 * SPT+I219 0x0084 0.8.4?
14884 * CNP+I219 0x0054 0.5.4?
14885 */
14886
14887 /*
14888 * XXX
	 * QEMU's e1000e emulation (82574L) has an SPI with only 64 words.
	 * I've never seen real 82574 hardware with such a small SPI ROM.
14891 */
14892 if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
14893 || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
14894 have_uid = false;
14895
14896 switch (sc->sc_type) {
14897 case WM_T_82571:
14898 case WM_T_82572:
14899 case WM_T_82574:
14900 case WM_T_82583:
14901 check_version = true;
14902 check_optionrom = true;
14903 have_build = true;
14904 break;
14905 case WM_T_ICH8:
14906 case WM_T_ICH9:
14907 case WM_T_ICH10:
14908 case WM_T_PCH:
14909 case WM_T_PCH2:
14910 case WM_T_PCH_LPT:
14911 case WM_T_PCH_SPT:
14912 case WM_T_PCH_CNP:
14913 check_version = true;
14914 have_build = true;
14915 have_uid = false;
14916 break;
14917 case WM_T_82575:
14918 case WM_T_82576:
14919 case WM_T_82580:
14920 if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
14921 check_version = true;
14922 break;
14923 case WM_T_I211:
14924 wm_nvm_version_invm(sc);
14925 have_uid = false;
14926 goto printver;
14927 case WM_T_I210:
14928 if (!wm_nvm_flash_presence_i210(sc)) {
14929 wm_nvm_version_invm(sc);
14930 have_uid = false;
14931 goto printver;
14932 }
14933 /* FALLTHROUGH */
14934 case WM_T_I350:
14935 case WM_T_I354:
14936 check_version = true;
14937 check_optionrom = true;
14938 break;
14939 default:
14940 return;
14941 }
14942 if (check_version
14943 && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
14944 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
14945 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
14946 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
14947 build = nvm_data & NVM_BUILD_MASK;
14948 have_build = true;
14949 } else
14950 minor = nvm_data & 0x00ff;
14951
		/*
		 * The minor number is treated as BCD and converted to
		 * decimal (e.g. 0x10 -> 10): 0x50a2 thus decodes to
		 * version 5.10.2, matching the table above.
		 */
		minor = (minor / 16) * 10 + (minor % 16);
14954 sc->sc_nvm_ver_major = major;
14955 sc->sc_nvm_ver_minor = minor;
14956
14957 printver:
14958 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
14959 sc->sc_nvm_ver_minor);
14960 if (have_build) {
14961 sc->sc_nvm_ver_build = build;
14962 aprint_verbose(".%d", build);
14963 }
14964 }
14965
	/* Assume the Option ROM area is above NVM_SIZE */
14967 if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
14968 && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
14969 /* Option ROM Version */
14970 if ((off != 0x0000) && (off != 0xffff)) {
14971 int rv;
14972
14973 off += NVM_COMBO_VER_OFF;
14974 rv = wm_nvm_read(sc, off + 1, 1, &uid1);
14975 rv |= wm_nvm_read(sc, off, 1, &uid0);
14976 if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
14977 && (uid1 != 0) && (uid1 != 0xffff)) {
14978 /* 16bits */
14979 major = uid0 >> 8;
14980 build = (uid0 << 8) | (uid1 >> 8);
14981 patch = uid1 & 0x00ff;
14982 aprint_verbose(", option ROM Version %d.%d.%d",
14983 major, build, patch);
14984 }
14985 }
14986 }
14987
14988 if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
14989 aprint_verbose(", Image Unique ID %08x",
14990 ((uint32_t)uid1 << 16) | uid0);
14991 }
14992
14993 /*
14994 * wm_nvm_read:
14995 *
14996 * Read data from the serial EEPROM.
14997 */
14998 static int
14999 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
15000 {
15001 int rv;
15002
15003 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
15004 device_xname(sc->sc_dev), __func__));
15005
15006 if (sc->sc_flags & WM_F_EEPROM_INVALID)
15007 return -1;
15008
15009 rv = sc->nvm.read(sc, word, wordcnt, data);
15010
15011 return rv;
15012 }
15013
15014 /*
15015 * Hardware semaphores.
 * Very complex...
15017 */
15018
15019 static int
15020 wm_get_null(struct wm_softc *sc)
15021 {
15022
15023 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15024 device_xname(sc->sc_dev), __func__));
15025 return 0;
15026 }
15027
15028 static void
15029 wm_put_null(struct wm_softc *sc)
15030 {
15031
15032 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15033 device_xname(sc->sc_dev), __func__));
15034 return;
15035 }
15036
15037 static int
15038 wm_get_eecd(struct wm_softc *sc)
15039 {
15040 uint32_t reg;
15041 int x;
15042
15043 DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
15044 device_xname(sc->sc_dev), __func__));
15045
15046 reg = CSR_READ(sc, WMREG_EECD);
15047
15048 /* Request EEPROM access. */
15049 reg |= EECD_EE_REQ;
15050 CSR_WRITE(sc, WMREG_EECD, reg);
15051
	/* ... and wait for it to be granted. */
15053 for (x = 0; x < 1000; x++) {
15054 reg = CSR_READ(sc, WMREG_EECD);
15055 if (reg & EECD_EE_GNT)
15056 break;
15057 delay(5);
15058 }
15059 if ((reg & EECD_EE_GNT) == 0) {
15060 aprint_error_dev(sc->sc_dev,
15061 "could not acquire EEPROM GNT\n");
15062 reg &= ~EECD_EE_REQ;
15063 CSR_WRITE(sc, WMREG_EECD, reg);
15064 return -1;
15065 }
15066
15067 return 0;
15068 }
15069
15070 static void
15071 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
15072 {
15073
15074 *eecd |= EECD_SK;
15075 CSR_WRITE(sc, WMREG_EECD, *eecd);
15076 CSR_WRITE_FLUSH(sc);
15077 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
15078 delay(1);
15079 else
15080 delay(50);
15081 }
15082
15083 static void
15084 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
15085 {
15086
15087 *eecd &= ~EECD_SK;
15088 CSR_WRITE(sc, WMREG_EECD, *eecd);
15089 CSR_WRITE_FLUSH(sc);
15090 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
15091 delay(1);
15092 else
15093 delay(50);
15094 }
15095
15096 static void
15097 wm_put_eecd(struct wm_softc *sc)
15098 {
15099 uint32_t reg;
15100
15101 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15102 device_xname(sc->sc_dev), __func__));
15103
15104 /* Stop nvm */
15105 reg = CSR_READ(sc, WMREG_EECD);
15106 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
15107 /* Pull CS high */
15108 reg |= EECD_CS;
		wm_nvm_eec_clock_lower(sc, &reg);
15110 } else {
15111 /* CS on Microwire is active-high */
15112 reg &= ~(EECD_CS | EECD_DI);
15113 CSR_WRITE(sc, WMREG_EECD, reg);
		wm_nvm_eec_clock_raise(sc, &reg);
		wm_nvm_eec_clock_lower(sc, &reg);
15116 }
15117
15118 reg = CSR_READ(sc, WMREG_EECD);
15119 reg &= ~EECD_EE_REQ;
15120 CSR_WRITE(sc, WMREG_EECD, reg);
15121
15122 return;
15123 }
15124
15125 /*
15126 * Get hardware semaphore.
15127 * Same as e1000_get_hw_semaphore_generic()
15128 */
15129 static int
15130 wm_get_swsm_semaphore(struct wm_softc *sc)
15131 {
15132 int32_t timeout;
15133 uint32_t swsm;
15134
15135 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15136 device_xname(sc->sc_dev), __func__));
15137 KASSERT(sc->sc_nvm_wordsize > 0);
15138
15139 retry:
15140 /* Get the SW semaphore. */
15141 timeout = sc->sc_nvm_wordsize + 1;
15142 while (timeout) {
15143 swsm = CSR_READ(sc, WMREG_SWSM);
15144
15145 if ((swsm & SWSM_SMBI) == 0)
15146 break;
15147
15148 delay(50);
15149 timeout--;
15150 }
15151
15152 if (timeout == 0) {
15153 if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
15154 /*
15155 * In rare circumstances, the SW semaphore may already
15156 * be held unintentionally. Clear the semaphore once
15157 * before giving up.
15158 */
15159 sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
15160 wm_put_swsm_semaphore(sc);
15161 goto retry;
15162 }
15163 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
15164 return -1;
15165 }
15166
15167 /* Get the FW semaphore. */
15168 timeout = sc->sc_nvm_wordsize + 1;
15169 while (timeout) {
15170 swsm = CSR_READ(sc, WMREG_SWSM);
15171 swsm |= SWSM_SWESMBI;
15172 CSR_WRITE(sc, WMREG_SWSM, swsm);
15173 /* If we managed to set the bit we got the semaphore. */
15174 swsm = CSR_READ(sc, WMREG_SWSM);
15175 if (swsm & SWSM_SWESMBI)
15176 break;
15177
15178 delay(50);
15179 timeout--;
15180 }
15181
15182 if (timeout == 0) {
15183 aprint_error_dev(sc->sc_dev,
15184 "could not acquire SWSM SWESMBI\n");
15185 /* Release semaphores */
15186 wm_put_swsm_semaphore(sc);
15187 return -1;
15188 }
15189 return 0;
15190 }
15191
15192 /*
15193 * Put hardware semaphore.
15194 * Same as e1000_put_hw_semaphore_generic()
15195 */
15196 static void
15197 wm_put_swsm_semaphore(struct wm_softc *sc)
15198 {
15199 uint32_t swsm;
15200
15201 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15202 device_xname(sc->sc_dev), __func__));
15203
15204 swsm = CSR_READ(sc, WMREG_SWSM);
15205 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
15206 CSR_WRITE(sc, WMREG_SWSM, swsm);
15207 }
15208
15209 /*
15210 * Get SW/FW semaphore.
15211 * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
15212 */
15213 static int
15214 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
15215 {
15216 uint32_t swfw_sync;
15217 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
15218 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
15219 int timeout;
15220
15221 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15222 device_xname(sc->sc_dev), __func__));
15223
15224 if (sc->sc_type == WM_T_80003)
15225 timeout = 50;
15226 else
15227 timeout = 200;
15228
15229 while (timeout) {
15230 if (wm_get_swsm_semaphore(sc)) {
15231 aprint_error_dev(sc->sc_dev,
15232 "%s: failed to get semaphore\n",
15233 __func__);
15234 return -1;
15235 }
15236 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
15237 if ((swfw_sync & (swmask | fwmask)) == 0) {
15238 swfw_sync |= swmask;
15239 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
15240 wm_put_swsm_semaphore(sc);
15241 return 0;
15242 }
15243 wm_put_swsm_semaphore(sc);
15244 delay(5000);
15245 timeout--;
15246 }
15247 device_printf(sc->sc_dev,
15248 "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
15249 mask, swfw_sync);
15250 return -1;
15251 }
15252
15253 static void
15254 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
15255 {
15256 uint32_t swfw_sync;
15257
15258 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15259 device_xname(sc->sc_dev), __func__));
15260
15261 while (wm_get_swsm_semaphore(sc) != 0)
15262 continue;
15263
15264 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
15265 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
15266 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
15267
15268 wm_put_swsm_semaphore(sc);
15269 }
15270
15271 static int
15272 wm_get_nvm_80003(struct wm_softc *sc)
15273 {
15274 int rv;
15275
15276 DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
15277 device_xname(sc->sc_dev), __func__));
15278
15279 if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
15280 aprint_error_dev(sc->sc_dev,
15281 "%s: failed to get semaphore(SWFW)\n", __func__);
15282 return rv;
15283 }
15284
15285 if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15286 && (rv = wm_get_eecd(sc)) != 0) {
15287 aprint_error_dev(sc->sc_dev,
15288 "%s: failed to get semaphore(EECD)\n", __func__);
15289 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
15290 return rv;
15291 }
15292
15293 return 0;
15294 }
15295
15296 static void
15297 wm_put_nvm_80003(struct wm_softc *sc)
15298 {
15299
15300 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15301 device_xname(sc->sc_dev), __func__));
15302
15303 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15304 wm_put_eecd(sc);
15305 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
15306 }
15307
15308 static int
15309 wm_get_nvm_82571(struct wm_softc *sc)
15310 {
15311 int rv;
15312
15313 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15314 device_xname(sc->sc_dev), __func__));
15315
15316 if ((rv = wm_get_swsm_semaphore(sc)) != 0)
15317 return rv;
15318
15319 switch (sc->sc_type) {
15320 case WM_T_82573:
15321 break;
15322 default:
15323 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15324 rv = wm_get_eecd(sc);
15325 break;
15326 }
15327
15328 if (rv != 0) {
15329 aprint_error_dev(sc->sc_dev,
15330 "%s: failed to get semaphore\n",
15331 __func__);
15332 wm_put_swsm_semaphore(sc);
15333 }
15334
15335 return rv;
15336 }
15337
15338 static void
15339 wm_put_nvm_82571(struct wm_softc *sc)
15340 {
15341
15342 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15343 device_xname(sc->sc_dev), __func__));
15344
15345 switch (sc->sc_type) {
15346 case WM_T_82573:
15347 break;
15348 default:
15349 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15350 wm_put_eecd(sc);
15351 break;
15352 }
15353
15354 wm_put_swsm_semaphore(sc);
15355 }
15356
15357 static int
15358 wm_get_phy_82575(struct wm_softc *sc)
15359 {
15360
15361 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15362 device_xname(sc->sc_dev), __func__));
15363 return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
15364 }
15365
15366 static void
15367 wm_put_phy_82575(struct wm_softc *sc)
15368 {
15369
15370 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15371 device_xname(sc->sc_dev), __func__));
15372 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
15373 }
15374
15375 static int
15376 wm_get_swfwhw_semaphore(struct wm_softc *sc)
15377 {
15378 uint32_t ext_ctrl;
15379 int timeout = 200;
15380
15381 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15382 device_xname(sc->sc_dev), __func__));
15383
15384 mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15385 for (timeout = 0; timeout < 200; timeout++) {
15386 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15387 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
15388 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15389
15390 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15391 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
15392 return 0;
15393 delay(5000);
15394 }
15395 device_printf(sc->sc_dev,
15396 "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
15397 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15398 return -1;
15399 }
15400
15401 static void
15402 wm_put_swfwhw_semaphore(struct wm_softc *sc)
15403 {
15404 uint32_t ext_ctrl;
15405
15406 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15407 device_xname(sc->sc_dev), __func__));
15408
15409 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15410 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15411 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15412
15413 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15414 }
15415
15416 static int
15417 wm_get_swflag_ich8lan(struct wm_softc *sc)
15418 {
15419 uint32_t ext_ctrl;
15420 int timeout;
15421
15422 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15423 device_xname(sc->sc_dev), __func__));
15424 mutex_enter(sc->sc_ich_phymtx);
15425 for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
15426 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15427 if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
15428 break;
15429 delay(1000);
15430 }
15431 if (timeout >= WM_PHY_CFG_TIMEOUT) {
15432 device_printf(sc->sc_dev,
15433 "SW has already locked the resource\n");
15434 goto out;
15435 }
15436
15437 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
15438 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15439 for (timeout = 0; timeout < 1000; timeout++) {
15440 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15441 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
15442 break;
15443 delay(1000);
15444 }
15445 if (timeout >= 1000) {
15446 device_printf(sc->sc_dev, "failed to acquire semaphore\n");
15447 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15448 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15449 goto out;
15450 }
15451 return 0;
15452
15453 out:
15454 mutex_exit(sc->sc_ich_phymtx);
15455 return -1;
15456 }
15457
15458 static void
15459 wm_put_swflag_ich8lan(struct wm_softc *sc)
15460 {
15461 uint32_t ext_ctrl;
15462
15463 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15464 device_xname(sc->sc_dev), __func__));
15465 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15466 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
15467 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15468 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15469 } else
15470 device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
15471
15472 mutex_exit(sc->sc_ich_phymtx);
15473 }
15474
15475 static int
15476 wm_get_nvm_ich8lan(struct wm_softc *sc)
15477 {
15478
15479 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15480 device_xname(sc->sc_dev), __func__));
15481 mutex_enter(sc->sc_ich_nvmmtx);
15482
15483 return 0;
15484 }
15485
15486 static void
15487 wm_put_nvm_ich8lan(struct wm_softc *sc)
15488 {
15489
15490 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15491 device_xname(sc->sc_dev), __func__));
15492 mutex_exit(sc->sc_ich_nvmmtx);
15493 }
15494
15495 static int
15496 wm_get_hw_semaphore_82573(struct wm_softc *sc)
15497 {
15498 int i = 0;
15499 uint32_t reg;
15500
15501 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15502 device_xname(sc->sc_dev), __func__));
15503
15504 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15505 do {
15506 CSR_WRITE(sc, WMREG_EXTCNFCTR,
15507 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
15508 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15509 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
15510 break;
15511 delay(2*1000);
15512 i++;
15513 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
15514
15515 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
15516 wm_put_hw_semaphore_82573(sc);
15517 log(LOG_ERR, "%s: Driver can't access the PHY\n",
15518 device_xname(sc->sc_dev));
15519 return -1;
15520 }
15521
15522 return 0;
15523 }
15524
15525 static void
15526 wm_put_hw_semaphore_82573(struct wm_softc *sc)
15527 {
15528 uint32_t reg;
15529
15530 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15531 device_xname(sc->sc_dev), __func__));
15532
15533 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15534 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15535 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
15536 }
15537
15538 /*
15539 * Management mode and power management related subroutines.
15540 * BMC, AMT, suspend/resume and EEE.
15541 */
15542
15543 #ifdef WM_WOL
15544 static int
15545 wm_check_mng_mode(struct wm_softc *sc)
15546 {
15547 int rv;
15548
15549 switch (sc->sc_type) {
15550 case WM_T_ICH8:
15551 case WM_T_ICH9:
15552 case WM_T_ICH10:
15553 case WM_T_PCH:
15554 case WM_T_PCH2:
15555 case WM_T_PCH_LPT:
15556 case WM_T_PCH_SPT:
15557 case WM_T_PCH_CNP:
15558 rv = wm_check_mng_mode_ich8lan(sc);
15559 break;
15560 case WM_T_82574:
15561 case WM_T_82583:
15562 rv = wm_check_mng_mode_82574(sc);
15563 break;
15564 case WM_T_82571:
15565 case WM_T_82572:
15566 case WM_T_82573:
15567 case WM_T_80003:
15568 rv = wm_check_mng_mode_generic(sc);
15569 break;
15570 default:
		/* Nothing to do */
15572 rv = 0;
15573 break;
15574 }
15575
15576 return rv;
15577 }
15578
15579 static int
15580 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
15581 {
15582 uint32_t fwsm;
15583
15584 fwsm = CSR_READ(sc, WMREG_FWSM);
15585
15586 if (((fwsm & FWSM_FW_VALID) != 0)
15587 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
15588 return 1;
15589
15590 return 0;
15591 }
15592
15593 static int
15594 wm_check_mng_mode_82574(struct wm_softc *sc)
15595 {
15596 uint16_t data;
15597
15598 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
15599
15600 if ((data & NVM_CFG2_MNGM_MASK) != 0)
15601 return 1;
15602
15603 return 0;
15604 }
15605
15606 static int
15607 wm_check_mng_mode_generic(struct wm_softc *sc)
15608 {
15609 uint32_t fwsm;
15610
15611 fwsm = CSR_READ(sc, WMREG_FWSM);
15612
15613 if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
15614 return 1;
15615
15616 return 0;
15617 }
15618 #endif /* WM_WOL */
15619
15620 static int
15621 wm_enable_mng_pass_thru(struct wm_softc *sc)
15622 {
15623 uint32_t manc, fwsm, factps;
15624
15625 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
15626 return 0;
15627
15628 manc = CSR_READ(sc, WMREG_MANC);
15629
15630 DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
15631 device_xname(sc->sc_dev), manc));
15632 if ((manc & MANC_RECV_TCO_EN) == 0)
15633 return 0;
15634
15635 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
15636 fwsm = CSR_READ(sc, WMREG_FWSM);
15637 factps = CSR_READ(sc, WMREG_FACTPS);
15638 if (((factps & FACTPS_MNGCG) == 0)
15639 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
15640 return 1;
15641 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
15642 uint16_t data;
15643
15644 factps = CSR_READ(sc, WMREG_FACTPS);
15645 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
15646 DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
15647 device_xname(sc->sc_dev), factps, data));
15648 if (((factps & FACTPS_MNGCG) == 0)
15649 && ((data & NVM_CFG2_MNGM_MASK)
15650 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
15651 return 1;
15652 } else if (((manc & MANC_SMBUS_EN) != 0)
15653 && ((manc & MANC_ASF_EN) == 0))
15654 return 1;
15655
15656 return 0;
15657 }
15658
15659 static bool
15660 wm_phy_resetisblocked(struct wm_softc *sc)
15661 {
15662 bool blocked = false;
15663 uint32_t reg;
15664 int i = 0;
15665
15666 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15667 device_xname(sc->sc_dev), __func__));
15668
15669 switch (sc->sc_type) {
15670 case WM_T_ICH8:
15671 case WM_T_ICH9:
15672 case WM_T_ICH10:
15673 case WM_T_PCH:
15674 case WM_T_PCH2:
15675 case WM_T_PCH_LPT:
15676 case WM_T_PCH_SPT:
15677 case WM_T_PCH_CNP:
15678 do {
15679 reg = CSR_READ(sc, WMREG_FWSM);
15680 if ((reg & FWSM_RSPCIPHY) == 0) {
15681 blocked = true;
15682 delay(10*1000);
15683 continue;
15684 }
15685 blocked = false;
15686 } while (blocked && (i++ < 30));
		return blocked;
15689 case WM_T_82571:
15690 case WM_T_82572:
15691 case WM_T_82573:
15692 case WM_T_82574:
15693 case WM_T_82583:
15694 case WM_T_80003:
15695 reg = CSR_READ(sc, WMREG_MANC);
15696 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
15697 return true;
15698 else
15699 return false;
15701 default:
15702 /* No problem */
15703 break;
15704 }
15705
15706 return false;
15707 }
15708
15709 static void
15710 wm_get_hw_control(struct wm_softc *sc)
15711 {
15712 uint32_t reg;
15713
15714 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15715 device_xname(sc->sc_dev), __func__));
15716
15717 if (sc->sc_type == WM_T_82573) {
15718 reg = CSR_READ(sc, WMREG_SWSM);
15719 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
15720 } else if (sc->sc_type >= WM_T_82571) {
15721 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15722 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
15723 }
15724 }
15725
15726 static void
15727 wm_release_hw_control(struct wm_softc *sc)
15728 {
15729 uint32_t reg;
15730
15731 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15732 device_xname(sc->sc_dev), __func__));
15733
15734 if (sc->sc_type == WM_T_82573) {
15735 reg = CSR_READ(sc, WMREG_SWSM);
15736 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
15737 } else if (sc->sc_type >= WM_T_82571) {
15738 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15739 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
15740 }
15741 }
15742
15743 static void
15744 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
15745 {
15746 uint32_t reg;
15747
15748 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15749 device_xname(sc->sc_dev), __func__));
15750
15751 if (sc->sc_type < WM_T_PCH2)
15752 return;
15753
15754 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15755
15756 if (gate)
15757 reg |= EXTCNFCTR_GATE_PHY_CFG;
15758 else
15759 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
15760
15761 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
15762 }
15763
15764 static int
15765 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
15766 {
15767 uint32_t fwsm, reg;
15768 int rv;
15769
15770 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15771 device_xname(sc->sc_dev), __func__));
15772
15773 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
15774 wm_gate_hw_phy_config_ich8lan(sc, true);
15775
15776 /* Disable ULP */
15777 wm_ulp_disable(sc);
15778
15779 /* Acquire PHY semaphore */
15780 rv = sc->phy.acquire(sc);
15781 if (rv != 0) {
15782 DPRINTF(sc, WM_DEBUG_INIT,
15783 ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
15784 return rv;
15785 }
15786
15787 /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
15788 * inaccessible and resetting the PHY is not blocked, toggle the
15789 * LANPHYPC Value bit to force the interconnect to PCIe mode.
15790 */
15791 fwsm = CSR_READ(sc, WMREG_FWSM);
15792 switch (sc->sc_type) {
15793 case WM_T_PCH_LPT:
15794 case WM_T_PCH_SPT:
15795 case WM_T_PCH_CNP:
15796 if (wm_phy_is_accessible_pchlan(sc))
15797 break;
15798
15799 /* Before toggling LANPHYPC, see if PHY is accessible by
15800 * forcing MAC to SMBus mode first.
15801 */
15802 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15803 reg |= CTRL_EXT_FORCE_SMBUS;
15804 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15805 #if 0
15806 /* XXX Isn't this required??? */
15807 CSR_WRITE_FLUSH(sc);
15808 #endif
15809 /* Wait 50 milliseconds for MAC to finish any retries
15810 * that it might be trying to perform from previous
15811 * attempts to acknowledge any phy read requests.
15812 */
15813 delay(50 * 1000);
15814 /* FALLTHROUGH */
15815 case WM_T_PCH2:
15816 if (wm_phy_is_accessible_pchlan(sc) == true)
15817 break;
15818 /* FALLTHROUGH */
15819 case WM_T_PCH:
15820 if (sc->sc_type == WM_T_PCH)
15821 if ((fwsm & FWSM_FW_VALID) != 0)
15822 break;
15823
15824 if (wm_phy_resetisblocked(sc) == true) {
15825 device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
15826 break;
15827 }
15828
15829 /* Toggle LANPHYPC Value bit */
15830 wm_toggle_lanphypc_pch_lpt(sc);
15831
15832 if (sc->sc_type >= WM_T_PCH_LPT) {
15833 if (wm_phy_is_accessible_pchlan(sc) == true)
15834 break;
15835
			/* Toggling LANPHYPC brings the PHY out of SMBus mode,
			 * so ensure that the MAC is also out of SMBus mode.
			 */
15839 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15840 reg &= ~CTRL_EXT_FORCE_SMBUS;
15841 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15842
15843 if (wm_phy_is_accessible_pchlan(sc) == true)
15844 break;
15845 rv = -1;
15846 }
15847 break;
15848 default:
15849 break;
15850 }
15851
15852 /* Release semaphore */
15853 sc->phy.release(sc);
15854
15855 if (rv == 0) {
15856 /* Check to see if able to reset PHY. Print error if not */
15857 if (wm_phy_resetisblocked(sc)) {
15858 device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
15859 goto out;
15860 }
15861
15862 /* Reset the PHY before any access to it. Doing so, ensures
15863 * that the PHY is in a known good state before we read/write
15864 * PHY registers. The generic reset is sufficient here,
15865 * because we haven't determined the PHY type yet.
15866 */
15867 if (wm_reset_phy(sc) != 0)
15868 goto out;
15869
		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function. If the PHY does not quiesce, just
		 * report it; the PHY is still blocked.
		 */
15876 if (wm_phy_resetisblocked(sc))
15877 device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
15878 }
15879
15880 out:
15881 /* Ungate automatic PHY configuration on non-managed 82579 */
15882 if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
15883 delay(10*1000);
15884 wm_gate_hw_phy_config_ich8lan(sc, false);
15885 }
15886
	return rv;
15888 }
15889
15890 static void
15891 wm_init_manageability(struct wm_softc *sc)
15892 {
15893
15894 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15895 device_xname(sc->sc_dev), __func__));
15896 KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
15897
15898 if (sc->sc_flags & WM_F_HAS_MANAGE) {
15899 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
15900 uint32_t manc = CSR_READ(sc, WMREG_MANC);
15901
15902 /* Disable hardware interception of ARP */
15903 manc &= ~MANC_ARP_EN;
15904
15905 /* Enable receiving management packets to the host */
15906 if (sc->sc_type >= WM_T_82571) {
15907 manc |= MANC_EN_MNG2HOST;
15908 manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
15909 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
15910 }
15911
15912 CSR_WRITE(sc, WMREG_MANC, manc);
15913 }
15914 }
15915
15916 static void
15917 wm_release_manageability(struct wm_softc *sc)
15918 {
15919
15920 if (sc->sc_flags & WM_F_HAS_MANAGE) {
15921 uint32_t manc = CSR_READ(sc, WMREG_MANC);
15922
15923 manc |= MANC_ARP_EN;
15924 if (sc->sc_type >= WM_T_82571)
15925 manc &= ~MANC_EN_MNG2HOST;
15926
15927 CSR_WRITE(sc, WMREG_MANC, manc);
15928 }
15929 }
15930
15931 static void
15932 wm_get_wakeup(struct wm_softc *sc)
15933 {
15934
15935 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
15936 switch (sc->sc_type) {
15937 case WM_T_82573:
15938 case WM_T_82583:
15939 sc->sc_flags |= WM_F_HAS_AMT;
15940 /* FALLTHROUGH */
15941 case WM_T_80003:
15942 case WM_T_82575:
15943 case WM_T_82576:
15944 case WM_T_82580:
15945 case WM_T_I350:
15946 case WM_T_I354:
15947 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
15948 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
15949 /* FALLTHROUGH */
15950 case WM_T_82541:
15951 case WM_T_82541_2:
15952 case WM_T_82547:
15953 case WM_T_82547_2:
15954 case WM_T_82571:
15955 case WM_T_82572:
15956 case WM_T_82574:
15957 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
15958 break;
15959 case WM_T_ICH8:
15960 case WM_T_ICH9:
15961 case WM_T_ICH10:
15962 case WM_T_PCH:
15963 case WM_T_PCH2:
15964 case WM_T_PCH_LPT:
15965 case WM_T_PCH_SPT:
15966 case WM_T_PCH_CNP:
15967 sc->sc_flags |= WM_F_HAS_AMT;
15968 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
15969 break;
15970 default:
15971 break;
15972 }
15973
15974 /* 1: HAS_MANAGE */
15975 if (wm_enable_mng_pass_thru(sc) != 0)
15976 sc->sc_flags |= WM_F_HAS_MANAGE;
15977
15978 /*
15979 * Note that the WOL flags is set after the resetting of the eeprom
15980 * stuff
15981 */
15982 }
15983
15984 /*
15985 * Unconfigure Ultra Low Power mode.
15986 * Only for I217 and newer (see below).
15987 */
15988 static int
15989 wm_ulp_disable(struct wm_softc *sc)
15990 {
15991 uint32_t reg;
15992 uint16_t phyreg;
15993 int i = 0, rv;
15994
15995 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15996 device_xname(sc->sc_dev), __func__));
15997 /* Exclude old devices */
15998 if ((sc->sc_type < WM_T_PCH_LPT)
15999 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
16000 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
16001 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
16002 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
16003 return 0;
16004
16005 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
16006 /* Request ME un-configure ULP mode in the PHY */
16007 reg = CSR_READ(sc, WMREG_H2ME);
16008 reg &= ~H2ME_ULP;
16009 reg |= H2ME_ENFORCE_SETTINGS;
16010 CSR_WRITE(sc, WMREG_H2ME, reg);
16011
16012 /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
16013 while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
16014 if (i++ == 30) {
16015 device_printf(sc->sc_dev, "%s timed out\n",
16016 __func__);
16017 return -1;
16018 }
16019 delay(10 * 1000);
16020 }
16021 reg = CSR_READ(sc, WMREG_H2ME);
16022 reg &= ~H2ME_ENFORCE_SETTINGS;
16023 CSR_WRITE(sc, WMREG_H2ME, reg);
16024
16025 return 0;
16026 }
16027
16028 /* Acquire semaphore */
16029 rv = sc->phy.acquire(sc);
16030 if (rv != 0) {
16031 DPRINTF(sc, WM_DEBUG_INIT,
16032 ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
16033 return rv;
16034 }
16035
16036 /* Toggle LANPHYPC */
16037 wm_toggle_lanphypc_pch_lpt(sc);
16038
16039 /* Unforce SMBus mode in PHY */
16040 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
16041 if (rv != 0) {
16042 uint32_t reg2;
16043
16044 aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
16045 __func__);
16046 reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
16047 reg2 |= CTRL_EXT_FORCE_SMBUS;
16048 CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
16049 delay(50 * 1000);
16050
16051 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
16052 &phyreg);
16053 if (rv != 0)
16054 goto release;
16055 }
16056 phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
16057 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
16058
16059 /* Unforce SMBus mode in MAC */
16060 reg = CSR_READ(sc, WMREG_CTRL_EXT);
16061 reg &= ~CTRL_EXT_FORCE_SMBUS;
16062 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16063
16064 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
16065 if (rv != 0)
16066 goto release;
16067 phyreg |= HV_PM_CTRL_K1_ENA;
16068 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
16069
16070 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
16071 &phyreg);
16072 if (rv != 0)
16073 goto release;
16074 phyreg &= ~(I218_ULP_CONFIG1_IND
16075 | I218_ULP_CONFIG1_STICKY_ULP
16076 | I218_ULP_CONFIG1_RESET_TO_SMBUS
16077 | I218_ULP_CONFIG1_WOL_HOST
16078 | I218_ULP_CONFIG1_INBAND_EXIT
16079 | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
16080 | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
16081 | I218_ULP_CONFIG1_DIS_SMB_PERST);
16082 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
16083 phyreg |= I218_ULP_CONFIG1_START;
16084 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
16085
16086 reg = CSR_READ(sc, WMREG_FEXTNVM7);
16087 reg &= ~FEXTNVM7_DIS_SMB_PERST;
16088 CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
16089
16090 release:
16091 /* Release semaphore */
16092 sc->phy.release(sc);
16093 wm_gmii_reset(sc);
16094 delay(50 * 1000);
16095
16096 return rv;
16097 }
16098
16099 /* WOL in the newer chipset interfaces (pchlan) */
16100 static int
16101 wm_enable_phy_wakeup(struct wm_softc *sc)
16102 {
16103 device_t dev = sc->sc_dev;
16104 uint32_t mreg, moff;
16105 uint16_t wuce, wuc, wufc, preg;
16106 int i, rv;
16107
16108 KASSERT(sc->sc_type >= WM_T_PCH);
16109
16110 /* Copy MAC RARs to PHY RARs */
16111 wm_copy_rx_addrs_to_phy_ich8lan(sc);
16112
16113 /* Activate PHY wakeup */
16114 rv = sc->phy.acquire(sc);
16115 if (rv != 0) {
16116 device_printf(dev, "%s: failed to acquire semaphore\n",
16117 __func__);
16118 return rv;
16119 }
16120
16121 /*
16122 * Enable access to PHY wakeup registers.
16123 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
16124 */
16125 rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
16126 if (rv != 0) {
16127 device_printf(dev,
16128 "%s: Could not enable PHY wakeup reg access\n", __func__);
16129 goto release;
16130 }
16131
16132 /* Copy MAC MTA to PHY MTA */
16133 for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
16134 uint16_t lo, hi;
16135
16136 mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
16137 lo = (uint16_t)(mreg & 0xffff);
16138 hi = (uint16_t)((mreg >> 16) & 0xffff);
16139 wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
16140 wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
16141 }
16142
16143 /* Configure PHY Rx Control register */
16144 wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
16145 mreg = CSR_READ(sc, WMREG_RCTL);
16146 if (mreg & RCTL_UPE)
16147 preg |= BM_RCTL_UPE;
16148 if (mreg & RCTL_MPE)
16149 preg |= BM_RCTL_MPE;
16150 preg &= ~(BM_RCTL_MO_MASK);
16151 moff = __SHIFTOUT(mreg, RCTL_MO);
16152 if (moff != 0)
16153 preg |= moff << BM_RCTL_MO_SHIFT;
16154 if (mreg & RCTL_BAM)
16155 preg |= BM_RCTL_BAM;
16156 if (mreg & RCTL_PMCF)
16157 preg |= BM_RCTL_PMCF;
16158 mreg = CSR_READ(sc, WMREG_CTRL);
16159 if (mreg & CTRL_RFCE)
16160 preg |= BM_RCTL_RFCE;
16161 wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
16162
16163 wuc = WUC_APME | WUC_PME_EN;
16164 wufc = WUFC_MAG;
16165 /* Enable PHY wakeup in MAC register */
16166 CSR_WRITE(sc, WMREG_WUC,
16167 WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
16168 CSR_WRITE(sc, WMREG_WUFC, wufc);
16169
16170 /* Configure and enable PHY wakeup in PHY registers */
16171 wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
16172 wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
16173
16174 wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
16175 wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
16176
16177 release:
16178 sc->phy.release(sc);
16179
	return rv;
16181 }
16182
16183 /* Power down workaround on D3 */
16184 static void
16185 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
16186 {
16187 uint32_t reg;
16188 uint16_t phyreg;
16189 int i;
16190
16191 for (i = 0; i < 2; i++) {
16192 /* Disable link */
16193 reg = CSR_READ(sc, WMREG_PHY_CTRL);
16194 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
16195 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16196
16197 /*
16198 * Call gig speed drop workaround on Gig disable before
16199 * accessing any PHY registers
16200 */
16201 if (sc->sc_type == WM_T_ICH8)
16202 wm_gig_downshift_workaround_ich8lan(sc);
16203
16204 /* Write VR power-down enable */
16205 sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
16206 phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
16207 phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
16208 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
16209
16210 /* Read it back and test */
16211 sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
16212 phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
16213 if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
16214 break;
16215
16216 /* Issue PHY reset and repeat at most one more time */
16217 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
16218 }
16219 }
16220
16221 /*
16222 * wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
16223 * @sc: pointer to the HW structure
16224 *
16225 * During S0 to Sx transition, it is possible the link remains at gig
16226 * instead of negotiating to a lower speed. Before going to Sx, set
16227 * 'Gig Disable' to force link speed negotiation to a lower speed based on
16228 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
16229 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
16230 * needs to be written.
 * Parts that support (and are linked to a partner which supports) EEE in
16232 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
16233 * than 10Mbps w/o EEE.
16234 */
16235 static void
16236 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
16237 {
16238 device_t dev = sc->sc_dev;
16239 struct ethercom *ec = &sc->sc_ethercom;
16240 uint32_t phy_ctrl;
16241 int rv;
16242
16243 phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
16244 phy_ctrl |= PHY_CTRL_GBE_DIS;
16245
16246 KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
16247
16248 if (sc->sc_phytype == WMPHY_I217) {
16249 uint16_t devid = sc->sc_pcidevid;
16250
16251 if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
16252 (devid == PCI_PRODUCT_INTEL_I218_V) ||
16253 (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
16254 (devid == PCI_PRODUCT_INTEL_I218_V3) ||
16255 (sc->sc_type >= WM_T_PCH_SPT))
16256 CSR_WRITE(sc, WMREG_FEXTNVM6,
16257 CSR_READ(sc, WMREG_FEXTNVM6)
16258 & ~FEXTNVM6_REQ_PLL_CLK);
16259
16260 if (sc->phy.acquire(sc) != 0)
16261 goto out;
16262
16263 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16264 uint16_t eee_advert;
16265
16266 rv = wm_read_emi_reg_locked(dev,
16267 I217_EEE_ADVERTISEMENT, &eee_advert);
16268 if (rv)
16269 goto release;
16270
16271 /*
16272 * Disable LPLU if both link partners support 100BaseT
16273 * EEE and 100Full is advertised on both ends of the
16274 * link, and enable Auto Enable LPI since there will
16275 * be no driver to enable LPI while in Sx.
16276 */
16277 if ((eee_advert & AN_EEEADVERT_100_TX) &&
16278 (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
16279 uint16_t anar, phy_reg;
16280
16281 sc->phy.readreg_locked(dev, 2, MII_ANAR,
16282 &anar);
16283 if (anar & ANAR_TX_FD) {
16284 phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
16285 PHY_CTRL_NOND0A_LPLU);
16286
16287 /* Set Auto Enable LPI after link up */
16288 sc->phy.readreg_locked(dev, 2,
16289 I217_LPI_GPIO_CTRL, &phy_reg);
16290 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
16291 sc->phy.writereg_locked(dev, 2,
16292 I217_LPI_GPIO_CTRL, phy_reg);
16293 }
16294 }
16295 }
16296
16297 /*
16298 * For i217 Intel Rapid Start Technology support,
16299 * when the system is going into Sx and no manageability engine
16300 * is present, the driver must configure proxy to reset only on
16301 * power good. LPI (Low Power Idle) state must also reset only
16302 * on power good, as well as the MTA (Multicast table array).
16303 * The SMBus release must also be disabled on LCD reset.
16304 */
16305
16306 /*
16307 * Enable MTA to reset for Intel Rapid Start Technology
16308 * Support
16309 */
16310
16311 release:
16312 sc->phy.release(sc);
16313 }
16314 out:
16315 CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
16316
16317 if (sc->sc_type == WM_T_ICH8)
16318 wm_gig_downshift_workaround_ich8lan(sc);
16319
16320 if (sc->sc_type >= WM_T_PCH) {
16321 wm_oem_bits_config_ich8lan(sc, false);
16322
16323 /* Reset PHY to activate OEM bits on 82577/8 */
16324 if (sc->sc_type == WM_T_PCH)
16325 wm_reset_phy(sc);
16326
16327 if (sc->phy.acquire(sc) != 0)
16328 return;
16329 wm_write_smbus_addr(sc);
16330 sc->phy.release(sc);
16331 }
16332 }
16333
16334 /*
16335 * wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
16336 * @sc: pointer to the HW structure
16337 *
16338 * During Sx to S0 transitions on non-managed devices or managed devices
16339 * on which PHY resets are not blocked, if the PHY registers cannot be
 * accessed properly by the s/w, toggle the LANPHYPC value to power cycle
16341 * the PHY.
16342 * On i217, setup Intel Rapid Start Technology.
16343 */
16344 static int
16345 wm_resume_workarounds_pchlan(struct wm_softc *sc)
16346 {
16347 device_t dev = sc->sc_dev;
16348 int rv;
16349
16350 if (sc->sc_type < WM_T_PCH2)
16351 return 0;
16352
16353 rv = wm_init_phy_workarounds_pchlan(sc);
16354 if (rv != 0)
16355 return rv;
16356
16357 /* For i217 Intel Rapid Start Technology support when the system
16358 * is transitioning from Sx and no manageability engine is present
16359 * configure SMBus to restore on reset, disable proxy, and enable
16360 * the reset on MTA (Multicast table array).
16361 */
16362 if (sc->sc_phytype == WMPHY_I217) {
16363 uint16_t phy_reg;
16364
16365 rv = sc->phy.acquire(sc);
16366 if (rv != 0)
16367 return rv;
16368
16369 /* Clear Auto Enable LPI after link up */
16370 sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
16371 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
16372 sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
16373
16374 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
16375 /* Restore clear on SMB if no manageability engine
16376 * is present
16377 */
16378 rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
16379 &phy_reg);
16380 if (rv != 0)
16381 goto release;
16382 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
16383 sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
16384
16385 /* Disable Proxy */
16386 sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
16387 }
16388 /* Enable reset on MTA */
		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
16390 if (rv != 0)
16391 goto release;
16392 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
16393 sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
16394
16395 release:
16396 sc->phy.release(sc);
16397 return rv;
16398 }
16399
16400 return 0;
16401 }
16402
16403 static void
16404 wm_enable_wakeup(struct wm_softc *sc)
16405 {
16406 uint32_t reg, pmreg;
16407 pcireg_t pmode;
16408 int rv = 0;
16409
16410 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16411 device_xname(sc->sc_dev), __func__));
16412
16413 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
16414 &pmreg, NULL) == 0)
16415 return;
16416
16417 if ((sc->sc_flags & WM_F_WOL) == 0)
16418 goto pme;
16419
16420 /* Advertise the wakeup capability */
16421 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
16422 | CTRL_SWDPIN(3));
16423
16424 /* Keep the laser running on fiber adapters */
16425 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
16426 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
16427 reg = CSR_READ(sc, WMREG_CTRL_EXT);
16428 reg |= CTRL_EXT_SWDPIN(3);
16429 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16430 }
16431
16432 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
16433 (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
16434 (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
16435 (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
16436 wm_suspend_workarounds_ich8lan(sc);
16437
16438 #if 0 /* For the multicast packet */
16439 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
16440 reg |= WUFC_MC;
16441 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
16442 #endif
16443
16444 if (sc->sc_type >= WM_T_PCH) {
16445 rv = wm_enable_phy_wakeup(sc);
16446 if (rv != 0)
16447 goto pme;
16448 } else {
16449 /* Enable wakeup by the MAC */
16450 CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
16451 CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
16452 }
16453
16454 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
16455 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
16456 || (sc->sc_type == WM_T_PCH2))
16457 && (sc->sc_phytype == WMPHY_IGP_3))
16458 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
16459
16460 pme:
16461 /* Request PME */
16462 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
16463 pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
16464 if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
16465 /* For WOL */
16466 pmode |= PCI_PMCSR_PME_EN;
16467 } else {
16468 /* Disable WOL */
16469 pmode &= ~PCI_PMCSR_PME_EN;
16470 }
16471 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
16472 }
16473
16474 /* Disable ASPM L0s and/or L1 for workaround */
16475 static void
16476 wm_disable_aspm(struct wm_softc *sc)
16477 {
16478 pcireg_t reg, mask = 0;
	const char *str = "";
16480
16481 /*
	 * Only for PCIe devices which have the PCIe capability in the PCI
	 * config space.
16484 */
16485 if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
16486 return;
16487
16488 switch (sc->sc_type) {
16489 case WM_T_82571:
16490 case WM_T_82572:
16491 /*
16492 * 8257[12] Errata 13: Device Does Not Support PCIe Active
16493 * State Power management L1 State (ASPM L1).
16494 */
16495 mask = PCIE_LCSR_ASPM_L1;
16496 str = "L1 is";
16497 break;
16498 case WM_T_82573:
16499 case WM_T_82574:
16500 case WM_T_82583:
16501 /*
16502 * The 82573 disappears when PCIe ASPM L0s is enabled.
16503 *
	 * The 82574 and 82583 do not support PCIe ASPM L0s with
	 * some chipsets. The documents for the 82574 and 82583 say that
	 * disabling L0s with specific chipsets is sufficient,
	 * but we follow what the Intel em driver does.
16508 *
16509 * References:
16510 * Errata 8 of the Specification Update of i82573.
16511 * Errata 20 of the Specification Update of i82574.
16512 * Errata 9 of the Specification Update of i82583.
16513 */
16514 mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
16515 str = "L0s and L1 are";
16516 break;
16517 default:
16518 return;
16519 }
16520
16521 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
16522 sc->sc_pcixe_capoff + PCIE_LCSR);
16523 reg &= ~mask;
16524 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
16525 sc->sc_pcixe_capoff + PCIE_LCSR, reg);
16526
16527 /* Print only in wm_attach() */
16528 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
16529 aprint_verbose_dev(sc->sc_dev,
16530 "ASPM %s disabled to workaround the errata.\n", str);
16531 }
16532
16533 /* LPLU */
16534
16535 static void
16536 wm_lplu_d0_disable(struct wm_softc *sc)
16537 {
16538 struct mii_data *mii = &sc->sc_mii;
16539 uint32_t reg;
16540 uint16_t phyval;
16541
16542 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16543 device_xname(sc->sc_dev), __func__));
16544
16545 if (sc->sc_phytype == WMPHY_IFE)
16546 return;
16547
16548 switch (sc->sc_type) {
16549 case WM_T_82571:
16550 case WM_T_82572:
16551 case WM_T_82573:
16552 case WM_T_82575:
16553 case WM_T_82576:
16554 mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
16555 phyval &= ~PMR_D0_LPLU;
16556 mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
16557 break;
16558 case WM_T_82580:
16559 case WM_T_I350:
16560 case WM_T_I210:
16561 case WM_T_I211:
16562 reg = CSR_READ(sc, WMREG_PHPM);
16563 reg &= ~PHPM_D0A_LPLU;
16564 CSR_WRITE(sc, WMREG_PHPM, reg);
16565 break;
16566 case WM_T_82574:
16567 case WM_T_82583:
16568 case WM_T_ICH8:
16569 case WM_T_ICH9:
16570 case WM_T_ICH10:
16571 reg = CSR_READ(sc, WMREG_PHY_CTRL);
16572 reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
16573 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16574 CSR_WRITE_FLUSH(sc);
16575 break;
16576 case WM_T_PCH:
16577 case WM_T_PCH2:
16578 case WM_T_PCH_LPT:
16579 case WM_T_PCH_SPT:
16580 case WM_T_PCH_CNP:
16581 wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
16582 phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
16583 if (wm_phy_resetisblocked(sc) == false)
16584 phyval |= HV_OEM_BITS_ANEGNOW;
16585 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
16586 break;
16587 default:
16588 break;
16589 }
16590 }
16591
16592 /* EEE */
16593
16594 static int
16595 wm_set_eee_i350(struct wm_softc *sc)
16596 {
16597 struct ethercom *ec = &sc->sc_ethercom;
16598 uint32_t ipcnfg, eeer;
16599 uint32_t ipcnfg_mask
16600 = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
16601 uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
16602
16603 KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
16604
16605 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
16606 eeer = CSR_READ(sc, WMREG_EEER);
16607
16608 /* Enable or disable per user setting */
16609 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16610 ipcnfg |= ipcnfg_mask;
16611 eeer |= eeer_mask;
16612 } else {
16613 ipcnfg &= ~ipcnfg_mask;
16614 eeer &= ~eeer_mask;
16615 }
16616
16617 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
16618 CSR_WRITE(sc, WMREG_EEER, eeer);
16619 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
16620 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
16621
16622 return 0;
16623 }
16624
16625 static int
16626 wm_set_eee_pchlan(struct wm_softc *sc)
16627 {
16628 device_t dev = sc->sc_dev;
16629 struct ethercom *ec = &sc->sc_ethercom;
16630 uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
16631 int rv;
16632
16633 switch (sc->sc_phytype) {
16634 case WMPHY_82579:
16635 lpa = I82579_EEE_LP_ABILITY;
16636 pcs_status = I82579_EEE_PCS_STATUS;
16637 adv_addr = I82579_EEE_ADVERTISEMENT;
16638 break;
16639 case WMPHY_I217:
16640 lpa = I217_EEE_LP_ABILITY;
16641 pcs_status = I217_EEE_PCS_STATUS;
16642 adv_addr = I217_EEE_ADVERTISEMENT;
16643 break;
16644 default:
16645 return 0;
16646 }
16647
16648 rv = sc->phy.acquire(sc);
16649 if (rv != 0) {
16650 device_printf(dev, "%s: failed to get semaphore\n", __func__);
16651 return rv;
16652 }
16653
16654 rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
16655 if (rv != 0)
16656 goto release;
16657
16658 /* Clear bits that enable EEE in various speeds */
16659 lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
16660
16661 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16662 /* Save off link partner's EEE ability */
16663 rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
16664 if (rv != 0)
16665 goto release;
16666
16667 /* Read EEE advertisement */
16668 if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
16669 goto release;
16670
16671 /*
16672 * Enable EEE only for speeds in which the link partner is
16673 * EEE capable and for which we advertise EEE.
16674 */
16675 if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
16676 lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
16677 if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
16678 sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
16679 if ((data & ANLPAR_TX_FD) != 0)
16680 lpi_ctrl |= I82579_LPI_CTRL_EN_100;
16681 else {
16682 /*
16683 				 * EEE is not supported in 100Half, so ignore
16684 				 * the partner's 100Mb/s EEE ability if
16685 				 * full-duplex is not advertised.
16686 */
16687 sc->eee_lp_ability
16688 &= ~AN_EEEADVERT_100_TX;
16689 }
16690 }
16691 }
16692
16693 if (sc->sc_phytype == WMPHY_82579) {
16694 rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
16695 if (rv != 0)
16696 goto release;
16697
16698 data &= ~I82579_LPI_PLL_SHUT_100;
16699 rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
16700 }
16701
16702 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
16703 if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
16704 goto release;
16705
16706 rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
16707 release:
16708 sc->phy.release(sc);
16709
16710 return rv;
16711 }
16712
16713 static int
16714 wm_set_eee(struct wm_softc *sc)
16715 {
16716 struct ethercom *ec = &sc->sc_ethercom;
16717
16718 if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
16719 return 0;
16720
16721 if (sc->sc_type == WM_T_I354) {
16722 /* I354 uses an external PHY */
16723 return 0; /* not yet */
16724 } else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
16725 return wm_set_eee_i350(sc);
16726 else if (sc->sc_type >= WM_T_PCH2)
16727 return wm_set_eee_pchlan(sc);
16728
16729 return 0;
16730 }
16731
16732 /*
16733 * Workarounds (mainly PHY related).
16734 * Basically, PHY's workarounds are in the PHY drivers.
16735 */
16736
16737 /* Workaround for 82566 Kumeran PCS lock loss */
16738 static int
16739 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
16740 {
16741 struct mii_data *mii = &sc->sc_mii;
16742 uint32_t status = CSR_READ(sc, WMREG_STATUS);
16743 int i, reg, rv;
16744 uint16_t phyreg;
16745
16746 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16747 device_xname(sc->sc_dev), __func__));
16748
16749 /* If the link is not up, do nothing */
16750 if ((status & STATUS_LU) == 0)
16751 return 0;
16752
16753 	/* Nothing to do if the link speed is other than 1Gbps */
16754 if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
16755 return 0;
16756
16757 for (i = 0; i < 10; i++) {
16758 /* read twice */
16759 rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
16760 if (rv != 0)
16761 return rv;
16762 rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
16763 if (rv != 0)
16764 return rv;
16765
16766 if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
16767 goto out; /* GOOD! */
16768
16769 /* Reset the PHY */
16770 wm_reset_phy(sc);
16771 delay(5*1000);
16772 }
16773
16774 /* Disable GigE link negotiation */
16775 reg = CSR_READ(sc, WMREG_PHY_CTRL);
16776 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
16777 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16778
16779 /*
16780 * Call gig speed drop workaround on Gig disable before accessing
16781 * any PHY registers.
16782 */
16783 wm_gig_downshift_workaround_ich8lan(sc);
16784
16785 out:
16786 return 0;
16787 }
16788
16789 /*
16790 * wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
16791 * @sc: pointer to the HW structure
16792 *
16793  * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
16794 * LPLU, Gig disable, MDIC PHY reset):
16795 * 1) Set Kumeran Near-end loopback
16796 * 2) Clear Kumeran Near-end loopback
16797 * Should only be called for ICH8[m] devices with any 1G Phy.
16798 */
16799 static void
16800 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
16801 {
16802 uint16_t kmreg;
16803
16804 /* Only for igp3 */
16805 if (sc->sc_phytype == WMPHY_IGP_3) {
16806 if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
16807 return;
16808 kmreg |= KUMCTRLSTA_DIAG_NELPBK;
16809 if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
16810 return;
16811 kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
16812 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
16813 }
16814 }
16815
16816 /*
16817  * Workarounds for PCH PHYs
16818 * XXX should be moved to new PHY driver?
16819 */
16820 static int
16821 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
16822 {
16823 device_t dev = sc->sc_dev;
16824 struct mii_data *mii = &sc->sc_mii;
16825 struct mii_softc *child;
16826 uint16_t phy_data, phyrev = 0;
16827 int phytype = sc->sc_phytype;
16828 int rv;
16829
16830 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16831 device_xname(dev), __func__));
16832 KASSERT(sc->sc_type == WM_T_PCH);
16833
16834 /* Set MDIO slow mode before any other MDIO access */
16835 if (phytype == WMPHY_82577)
16836 if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
16837 return rv;
16838
16839 child = LIST_FIRST(&mii->mii_phys);
16840 if (child != NULL)
16841 phyrev = child->mii_mpd_rev;
16842
16843 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
16844 if ((child != NULL) &&
16845 (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
16846 ((phytype == WMPHY_82578) && (phyrev == 1)))) {
16847 /* Disable generation of early preamble (0x4431) */
16848 rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
16849 &phy_data);
16850 if (rv != 0)
16851 return rv;
16852 phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
16853 BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
16854 rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
16855 phy_data);
16856 if (rv != 0)
16857 return rv;
16858
16859 /* Preamble tuning for SSC */
16860 rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
16861 if (rv != 0)
16862 return rv;
16863 }
16864
16865 /* 82578 */
16866 if (phytype == WMPHY_82578) {
16867 /*
16868 * Return registers to default by doing a soft reset then
16869 * writing 0x3140 to the control register
16870 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
16871 */
16872 if ((child != NULL) && (phyrev < 2)) {
16873 PHY_RESET(child);
16874 rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
16875 if (rv != 0)
16876 return rv;
16877 }
16878 }
16879
16880 /* Select page 0 */
16881 if ((rv = sc->phy.acquire(sc)) != 0)
16882 return rv;
16883 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
16884 sc->phy.release(sc);
16885 if (rv != 0)
16886 return rv;
16887
16888 /*
16889 	 * Configure the K1 Si workaround during PHY reset, assuming there
16890 	 * is link, so that K1 is disabled if the link is at 1Gbps.
16891 */
16892 if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
16893 return rv;
16894
16895 /* Workaround for link disconnects on a busy hub in half duplex */
16896 rv = sc->phy.acquire(sc);
16897 if (rv)
16898 return rv;
16899 rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
16900 if (rv)
16901 goto release;
16902 rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
16903 phy_data & 0x00ff);
16904 if (rv)
16905 goto release;
16906
16907 /* Set MSE higher to enable link to stay up when noise is high */
16908 rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
16909 release:
16910 sc->phy.release(sc);
16911
16912 return rv;
16913 }
16914
16915 /*
16916 * wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
16917 * @sc: pointer to the HW structure
16918 */
16919 static void
16920 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
16921 {
16922
16923 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16924 device_xname(sc->sc_dev), __func__));
16925
16926 if (sc->phy.acquire(sc) != 0)
16927 return;
16928
16929 wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
16930
16931 sc->phy.release(sc);
16932 }
16933
16934 static void
16935 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
16936 {
16937 device_t dev = sc->sc_dev;
16938 uint32_t mac_reg;
16939 uint16_t i, wuce;
16940 int count;
16941
16942 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16943 device_xname(dev), __func__));
16944
16945 if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
16946 return;
16947
16948 /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
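	/*
	 * Note: the BM wakeup-area PHY registers are 16 bits wide, so each
	 * 32-bit MAC register is copied as two halves; RAH's address-valid
	 * bit (RAL_AV, bit 31) is carried over in the high half so the PHY
	 * copy stays consistent with the MAC copy.
	 */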
16949 count = wm_rar_count(sc);
16950 for (i = 0; i < count; i++) {
16951 uint16_t lo, hi;
16952 mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
16953 lo = (uint16_t)(mac_reg & 0xffff);
16954 hi = (uint16_t)((mac_reg >> 16) & 0xffff);
16955 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
16956 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
16957
16958 mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
16959 lo = (uint16_t)(mac_reg & 0xffff);
16960 hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
16961 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
16962 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
16963 }
16964
16965 wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
16966 }
16967
16968 /*
16969 * wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
16970 * with 82579 PHY
16971 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
16972 */
16973 static int
16974 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
16975 {
16976 device_t dev = sc->sc_dev;
16977 int rar_count;
16978 int rv;
16979 uint32_t mac_reg;
16980 uint16_t dft_ctrl, data;
16981 uint16_t i;
16982
16983 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16984 device_xname(dev), __func__));
16985
16986 if (sc->sc_type < WM_T_PCH2)
16987 return 0;
16988
16989 /* Acquire PHY semaphore */
16990 rv = sc->phy.acquire(sc);
16991 if (rv != 0)
16992 return rv;
16993
16994 /* Disable Rx path while enabling/disabling workaround */
16995 rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
16996 if (rv != 0)
16997 goto out;
16998 rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
16999 dft_ctrl | (1 << 14));
17000 if (rv != 0)
17001 goto out;
17002
17003 if (enable) {
17004 /* Write Rx addresses (rar_entry_count for RAL/H, and
17005 * SHRAL/H) and initial CRC values to the MAC
17006 */
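		/*
		 * PCH_RAICC holds a per-address initial CRC value; seeding
		 * it with the complemented CRC-32 of each valid MAC address
		 * presumably keeps the MAC's CRC handling consistent once
		 * CRC stripping (RCTL_SECRC) is enabled further below.
		 */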
17007 rar_count = wm_rar_count(sc);
17008 for (i = 0; i < rar_count; i++) {
17009 uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
17010 uint32_t addr_high, addr_low;
17011
17012 addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
17013 if (!(addr_high & RAL_AV))
17014 continue;
17015 addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
17016 mac_addr[0] = (addr_low & 0xFF);
17017 mac_addr[1] = ((addr_low >> 8) & 0xFF);
17018 mac_addr[2] = ((addr_low >> 16) & 0xFF);
17019 mac_addr[3] = ((addr_low >> 24) & 0xFF);
17020 mac_addr[4] = (addr_high & 0xFF);
17021 mac_addr[5] = ((addr_high >> 8) & 0xFF);
17022
17023 CSR_WRITE(sc, WMREG_PCH_RAICC(i),
17024 ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
17025 }
17026
17027 /* Write Rx addresses to the PHY */
17028 wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
17029 }
17030
17031 /*
17032 * If enable ==
17033 * true: Enable jumbo frame workaround in the MAC.
17034 * false: Write MAC register values back to h/w defaults.
17035 */
17036 mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
17037 if (enable) {
17038 mac_reg &= ~(1 << 14);
17039 mac_reg |= (7 << 15);
17040 } else
17041 mac_reg &= ~(0xf << 14);
17042 CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
17043
17044 mac_reg = CSR_READ(sc, WMREG_RCTL);
17045 if (enable) {
17046 mac_reg |= RCTL_SECRC;
17047 sc->sc_rctl |= RCTL_SECRC;
17048 sc->sc_flags |= WM_F_CRC_STRIP;
17049 } else {
17050 mac_reg &= ~RCTL_SECRC;
17051 sc->sc_rctl &= ~RCTL_SECRC;
17052 sc->sc_flags &= ~WM_F_CRC_STRIP;
17053 }
17054 CSR_WRITE(sc, WMREG_RCTL, mac_reg);
17055
17056 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
17057 if (rv != 0)
17058 goto out;
17059 if (enable)
17060 data |= 1 << 0;
17061 else
17062 data &= ~(1 << 0);
17063 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
17064 if (rv != 0)
17065 goto out;
17066
17067 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
17068 if (rv != 0)
17069 goto out;
17070 /*
17071 	 * XXX FreeBSD and Linux do the same thing: they set the same value
17072 	 * in both the enable case and the disable case. Is that correct?
17073 */
17074 data &= ~(0xf << 8);
17075 data |= (0xb << 8);
17076 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
17077 if (rv != 0)
17078 goto out;
17079
17080 /*
17081 * If enable ==
17082 * true: Enable jumbo frame workaround in the PHY.
17083 * false: Write PHY register values back to h/w defaults.
17084 */
17085 rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
17086 if (rv != 0)
17087 goto out;
17088 data &= ~(0x7F << 5);
17089 if (enable)
17090 data |= (0x37 << 5);
17091 rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
17092 if (rv != 0)
17093 goto out;
17094
17095 rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
17096 if (rv != 0)
17097 goto out;
17098 if (enable)
17099 data &= ~(1 << 13);
17100 else
17101 data |= (1 << 13);
17102 rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
17103 if (rv != 0)
17104 goto out;
17105
17106 rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
17107 if (rv != 0)
17108 goto out;
17109 data &= ~(0x3FF << 2);
17110 if (enable)
17111 data |= (I82579_TX_PTR_GAP << 2);
17112 else
17113 data |= (0x8 << 2);
17114 rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
17115 if (rv != 0)
17116 goto out;
17117
17118 rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
17119 enable ? 0xf100 : 0x7e00);
17120 if (rv != 0)
17121 goto out;
17122
17123 rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
17124 if (rv != 0)
17125 goto out;
17126 if (enable)
17127 data |= 1 << 10;
17128 else
17129 data &= ~(1 << 10);
17130 rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
17131 if (rv != 0)
17132 goto out;
17133
17134 /* Re-enable Rx path after enabling/disabling workaround */
17135 rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
17136 dft_ctrl & ~(1 << 14));
17137
17138 out:
17139 sc->phy.release(sc);
17140
17141 return rv;
17142 }
17143
17144 /*
17145 * wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
17146 * done after every PHY reset.
17147 */
17148 static int
17149 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
17150 {
17151 device_t dev = sc->sc_dev;
17152 int rv;
17153
17154 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17155 device_xname(dev), __func__));
17156 KASSERT(sc->sc_type == WM_T_PCH2);
17157
17158 /* Set MDIO slow mode before any other MDIO access */
17159 rv = wm_set_mdio_slow_mode_hv(sc);
17160 if (rv != 0)
17161 return rv;
17162
17163 rv = sc->phy.acquire(sc);
17164 if (rv != 0)
17165 return rv;
17166 /* Set MSE higher to enable link to stay up when noise is high */
17167 rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
17168 if (rv != 0)
17169 goto release;
17170 /* Drop link after 5 times MSE threshold was reached */
17171 rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
17172 release:
17173 sc->phy.release(sc);
17174
17175 return rv;
17176 }
17177
17178 /**
17179 * wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
17180 * @link: link up bool flag
17181 *
17182  * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion
17183  * indications, preventing further DMA write requests. Work around the
17184  * issue by disabling the de-assertion of the clock request in 1Gbps mode.
17185 * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
17186 * speeds in order to avoid Tx hangs.
17187 **/
17188 static int
17189 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
17190 {
17191 uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
17192 uint32_t status = CSR_READ(sc, WMREG_STATUS);
17193 uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
17194 uint16_t phyreg;
17195
17196 if (link && (speed == STATUS_SPEED_1000)) {
17197 int rv;
17198
17199 rv = sc->phy.acquire(sc);
17200 if (rv != 0)
17201 return rv;
17202 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
17203 &phyreg);
17204 if (rv != 0)
17205 goto release;
17206 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
17207 phyreg & ~KUMCTRLSTA_K1_ENABLE);
17208 if (rv != 0)
17209 goto release;
17210 delay(20);
17211 CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
17212
17213 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
17214 &phyreg);
17215 release:
17216 sc->phy.release(sc);
17217 return rv;
17218 }
17219
17220 fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
17221
17222 struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
17223 if (((child != NULL) && (child->mii_mpd_rev > 5))
17224 || !link
17225 || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
17226 goto update_fextnvm6;
17227
17228 wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
17229
17230 /* Clear link status transmit timeout */
17231 phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
17232 if (speed == STATUS_SPEED_100) {
17233 /* Set inband Tx timeout to 5x10us for 100Half */
17234 phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
17235
17236 /* Do not extend the K1 entry latency for 100Half */
17237 fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
17238 } else {
17239 /* Set inband Tx timeout to 50x10us for 10Full/Half */
17240 phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
17241
17242 /* Extend the K1 entry latency for 10 Mbps */
17243 fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
17244 }
17245
17246 wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
17247
17248 update_fextnvm6:
17249 CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
17250 return 0;
17251 }
17252
17253 /*
17254 * wm_k1_gig_workaround_hv - K1 Si workaround
17255 * @sc: pointer to the HW structure
17256 * @link: link up bool flag
17257 *
17258 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
17259  * from a lower speed. This workaround disables K1 whenever link is at 1Gig.
17260 * If link is down, the function will restore the default K1 setting located
17261 * in the NVM.
17262 */
17263 static int
17264 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
17265 {
17266 int k1_enable = sc->sc_nvm_k1_enabled;
17267 int rv;
17268
17269 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17270 device_xname(sc->sc_dev), __func__));
17271
17272 rv = sc->phy.acquire(sc);
17273 if (rv != 0)
17274 return rv;
17275
17276 if (link) {
17277 k1_enable = 0;
17278
17279 /* Link stall fix for link up */
17280 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
17281 0x0100);
17282 } else {
17283 /* Link stall fix for link down */
17284 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
17285 0x4100);
17286 }
17287
17288 wm_configure_k1_ich8lan(sc, k1_enable);
17289 sc->phy.release(sc);
17290
17291 return 0;
17292 }
17293
17294 /*
17295 * wm_k1_workaround_lv - K1 Si workaround
17296 * @sc: pointer to the HW structure
17297 *
17298  * Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
17299  * Disable K1 for 1000 and 100 speeds.
17300 */
17301 static int
17302 wm_k1_workaround_lv(struct wm_softc *sc)
17303 {
17304 uint32_t reg;
17305 uint16_t phyreg;
17306 int rv;
17307
17308 if (sc->sc_type != WM_T_PCH2)
17309 return 0;
17310
17311 /* Set K1 beacon duration based on 10Mbps speed */
17312 rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
17313 if (rv != 0)
17314 return rv;
17315
17316 if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
17317 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
17318 if (phyreg &
17319 (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
17320 			/* LV 1G/100 packet drop issue workaround */
17321 rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
17322 &phyreg);
17323 if (rv != 0)
17324 return rv;
17325 phyreg &= ~HV_PM_CTRL_K1_ENA;
17326 rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
17327 phyreg);
17328 if (rv != 0)
17329 return rv;
17330 } else {
17331 /* For 10Mbps */
17332 reg = CSR_READ(sc, WMREG_FEXTNVM4);
17333 reg &= ~FEXTNVM4_BEACON_DURATION;
17334 reg |= FEXTNVM4_BEACON_DURATION_16US;
17335 CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
17336 }
17337 }
17338
17339 return 0;
17340 }
17341
17342 /*
17343 * wm_link_stall_workaround_hv - Si workaround
17344 * @sc: pointer to the HW structure
17345 *
17346  * This function works around a Si bug where the link partner can get
17347  * a link up indication before the PHY does. If small packets are sent
17348  * by the link partner, they can be placed in the packet buffer without
17349  * being properly accounted for by the PHY; the PHY will then stall,
17350  * preventing further packets from being received. The workaround is to
17351  * clear the packet buffer after the PHY detects link up.
17352 */
17353 static int
17354 wm_link_stall_workaround_hv(struct wm_softc *sc)
17355 {
17356 uint16_t phyreg;
17357
17358 if (sc->sc_phytype != WMPHY_82578)
17359 return 0;
17360
17361 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
17362 wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
17363 if ((phyreg & BMCR_LOOP) != 0)
17364 return 0;
17365
17366 /* Check if link is up and at 1Gbps */
17367 wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
17368 phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
17369 | BM_CS_STATUS_SPEED_MASK;
17370 if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
17371 | BM_CS_STATUS_SPEED_1000))
17372 return 0;
17373
17374 delay(200 * 1000); /* XXX too big */
17375
17376 /* Flush the packets in the fifo buffer */
17377 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
17378 HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
17379 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
17380 HV_MUX_DATA_CTRL_GEN_TO_MAC);
17381
17382 return 0;
17383 }
17384
17385 static int
17386 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
17387 {
17388 int rv;
17389
17390 rv = sc->phy.acquire(sc);
17391 if (rv != 0) {
17392 device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
17393 __func__);
17394 return rv;
17395 }
17396
17397 rv = wm_set_mdio_slow_mode_hv_locked(sc);
17398
17399 sc->phy.release(sc);
17400
17401 return rv;
17402 }
17403
17404 static int
17405 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
17406 {
17407 int rv;
17408 uint16_t reg;
17409
17410 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
17411 if (rv != 0)
17412 return rv;
17413
17414 return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
17415 reg | HV_KMRN_MDIO_SLOW);
17416 }
17417
17418 /*
17419 * wm_configure_k1_ich8lan - Configure K1 power state
17420 * @sc: pointer to the HW structure
17421 * @enable: K1 state to configure
17422 *
17423 * Configure the K1 power state based on the provided parameter.
17424 * Assumes semaphore already acquired.
17425 */
17426 static void
17427 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
17428 {
17429 uint32_t ctrl, ctrl_ext, tmp;
17430 uint16_t kmreg;
17431 int rv;
17432
17433 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
17434
17435 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
17436 if (rv != 0)
17437 return;
17438
17439 if (k1_enable)
17440 kmreg |= KUMCTRLSTA_K1_ENABLE;
17441 else
17442 kmreg &= ~KUMCTRLSTA_K1_ENABLE;
17443
17444 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
17445 if (rv != 0)
17446 return;
17447
17448 delay(20);
17449
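	/*
	 * Illustrative note: the CTRL/CTRL_EXT dance below briefly forces
	 * the MAC speed (CTRL_FRCSPD plus the speed-bypass bit) and then
	 * restores the original values; this bounce appears to be what makes
	 * the new K1 setting written above take effect.
	 */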
17450 ctrl = CSR_READ(sc, WMREG_CTRL);
17451 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
17452
17453 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
17454 tmp |= CTRL_FRCSPD;
17455
17456 CSR_WRITE(sc, WMREG_CTRL, tmp);
17457 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
17458 CSR_WRITE_FLUSH(sc);
17459 delay(20);
17460
17461 CSR_WRITE(sc, WMREG_CTRL, ctrl);
17462 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
17463 CSR_WRITE_FLUSH(sc);
17464 delay(20);
17465
17466 return;
17467 }
17468
17469 /* Special case for the 82575: manual init is needed ... */
17470 static void
17471 wm_reset_init_script_82575(struct wm_softc *sc)
17472 {
17473 /*
17474 	 * Remark: this is untested code - we have no board without an EEPROM.
17475 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
17476 */
17477
17478 /* SerDes configuration via SERDESCTRL */
17479 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
17480 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
17481 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
17482 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
17483
17484 /* CCM configuration via CCMCTL register */
17485 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
17486 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
17487
17488 /* PCIe lanes configuration */
17489 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
17490 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
17491 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
17492 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
17493
17494 /* PCIe PLL Configuration */
17495 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
17496 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
17497 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
17498 }
17499
17500 static void
17501 wm_reset_mdicnfg_82580(struct wm_softc *sc)
17502 {
17503 uint32_t reg;
17504 uint16_t nvmword;
17505 int rv;
17506
17507 if (sc->sc_type != WM_T_82580)
17508 return;
17509 if ((sc->sc_flags & WM_F_SGMII) == 0)
17510 return;
17511
17512 rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
17513 + NVM_OFF_CFG3_PORTA, 1, &nvmword);
17514 if (rv != 0) {
17515 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
17516 __func__);
17517 return;
17518 }
17519
17520 reg = CSR_READ(sc, WMREG_MDICNFG);
17521 if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
17522 reg |= MDICNFG_DEST;
17523 if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
17524 reg |= MDICNFG_COM_MDIO;
17525 CSR_WRITE(sc, WMREG_MDICNFG, reg);
17526 }
17527
17528 #define MII_INVALIDID(x) (((x) == 0x0000) || ((x) == 0xffff))
17529
17530 static bool
17531 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
17532 {
17533 uint32_t reg;
17534 uint16_t id1, id2;
17535 int i, rv;
17536
17537 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17538 device_xname(sc->sc_dev), __func__));
17539 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
17540
17541 id1 = id2 = 0xffff;
17542 for (i = 0; i < 2; i++) {
17543 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
17544 &id1);
17545 if ((rv != 0) || MII_INVALIDID(id1))
17546 continue;
17547 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
17548 &id2);
17549 if ((rv != 0) || MII_INVALIDID(id2))
17550 continue;
17551 break;
17552 }
17553 if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
17554 goto out;
17555
17556 /*
17557 * In case the PHY needs to be in mdio slow mode,
17558 * set slow mode and try to get the PHY id again.
17559 */
17560 rv = 0;
17561 if (sc->sc_type < WM_T_PCH_LPT) {
17562 wm_set_mdio_slow_mode_hv_locked(sc);
17563 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
17564 &id1);
17565 rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
17566 &id2);
17567 }
17568 if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
17569 device_printf(sc->sc_dev, "XXX return with false\n");
17570 return false;
17571 }
17572 out:
17573 if (sc->sc_type >= WM_T_PCH_LPT) {
17574 /* Only unforce SMBus if ME is not active */
17575 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
17576 uint16_t phyreg;
17577
17578 /* Unforce SMBus mode in PHY */
17579 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
17580 CV_SMB_CTRL, &phyreg);
17581 phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
17582 wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
17583 CV_SMB_CTRL, phyreg);
17584
17585 /* Unforce SMBus mode in MAC */
17586 reg = CSR_READ(sc, WMREG_CTRL_EXT);
17587 reg &= ~CTRL_EXT_FORCE_SMBUS;
17588 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
17589 }
17590 }
17591 return true;
17592 }
17593
17594 static void
17595 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
17596 {
17597 uint32_t reg;
17598 int i;
17599
17600 /* Set PHY Config Counter to 50msec */
17601 reg = CSR_READ(sc, WMREG_FEXTNVM3);
17602 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
17603 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
17604 CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
17605
17606 /* Toggle LANPHYPC */
17607 reg = CSR_READ(sc, WMREG_CTRL);
17608 reg |= CTRL_LANPHYPC_OVERRIDE;
17609 reg &= ~CTRL_LANPHYPC_VALUE;
17610 CSR_WRITE(sc, WMREG_CTRL, reg);
17611 CSR_WRITE_FLUSH(sc);
17612 delay(1000);
17613 reg &= ~CTRL_LANPHYPC_OVERRIDE;
17614 CSR_WRITE(sc, WMREG_CTRL, reg);
17615 CSR_WRITE_FLUSH(sc);
17616
17617 if (sc->sc_type < WM_T_PCH_LPT)
17618 delay(50 * 1000);
17619 else {
17620 i = 20;
17621
17622 do {
17623 delay(5 * 1000);
17624 } while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
17625 && i--);
17626
17627 delay(30 * 1000);
17628 }
17629 }
17630
17631 static int
17632 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
17633 {
17634 uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
17635 | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
17636 uint32_t rxa;
17637 uint16_t scale = 0, lat_enc = 0;
17638 int32_t obff_hwm = 0;
17639 int64_t lat_ns, value;
17640
17641 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17642 device_xname(sc->sc_dev), __func__));
17643
17644 if (link) {
17645 uint16_t max_snoop, max_nosnoop, max_ltr_enc;
17646 uint32_t status;
17647 uint16_t speed;
17648 pcireg_t preg;
17649
17650 status = CSR_READ(sc, WMREG_STATUS);
17651 switch (__SHIFTOUT(status, STATUS_SPEED)) {
17652 case STATUS_SPEED_10:
17653 speed = 10;
17654 break;
17655 case STATUS_SPEED_100:
17656 speed = 100;
17657 break;
17658 case STATUS_SPEED_1000:
17659 speed = 1000;
17660 break;
17661 default:
17662 device_printf(sc->sc_dev, "Unknown speed "
17663 "(status = %08x)\n", status);
17664 return -1;
17665 }
17666
17667 /* Rx Packet Buffer Allocation size (KB) */
17668 rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
17669
17670 /*
17671 * Determine the maximum latency tolerated by the device.
17672 *
17673 * Per the PCIe spec, the tolerated latencies are encoded as
17674 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
17675 * a 10-bit value (0-1023) to provide a range from 1 ns to
17676 * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
17677 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
17678 */
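		/*
		 * Worked example with hypothetical numbers: rxa = 24KB,
		 * MTU = 1500 and speed = 1000 give lat_ns = (24 * 1024 -
		 * 2 * 1514) * 8 * 1000 / 1000 = 172384 ns.  The loop below
		 * then yields value = 169 at scale = 2, i.e. an encoded
		 * latency of 169 * 2^10 ns = 173056 ns, the smallest
		 * encodable value covering the requirement.
		 */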
17679 lat_ns = ((int64_t)rxa * 1024 -
17680 (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
17681 + ETHER_HDR_LEN))) * 8 * 1000;
17682 if (lat_ns < 0)
17683 lat_ns = 0;
17684 else
17685 lat_ns /= speed;
17686 value = lat_ns;
17687
17688 while (value > LTRV_VALUE) {
17689 scale ++;
17690 value = howmany(value, __BIT(5));
17691 }
17692 if (scale > LTRV_SCALE_MAX) {
17693 device_printf(sc->sc_dev,
17694 "Invalid LTR latency scale %d\n", scale);
17695 return -1;
17696 }
17697 lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
17698
17699 /* Determine the maximum latency tolerated by the platform */
17700 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
17701 WM_PCI_LTR_CAP_LPT);
17702 max_snoop = preg & 0xffff;
17703 max_nosnoop = preg >> 16;
17704
17705 max_ltr_enc = MAX(max_snoop, max_nosnoop);
17706
17707 if (lat_enc > max_ltr_enc) {
17708 lat_enc = max_ltr_enc;
17709 lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
17710 * PCI_LTR_SCALETONS(
17711 __SHIFTOUT(lat_enc,
17712 PCI_LTR_MAXSNOOPLAT_SCALE));
17713 }
17714
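		/*
		 * The block below converts the (possibly clamped) latency
		 * back into the amount of data that arrives in that time at
		 * line rate, in KB; the OBFF high water mark is whatever Rx
		 * buffer space remains.
		 */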
17715 if (lat_ns) {
17716 lat_ns *= speed * 1000;
17717 lat_ns /= 8;
17718 lat_ns /= 1000000000;
17719 obff_hwm = (int32_t)(rxa - lat_ns);
17720 }
17721 if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
17722 device_printf(sc->sc_dev, "Invalid high water mark %d"
17723 			    " (rxa = %d, lat_ns = %d)\n",
17724 obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
17725 return -1;
17726 }
17727 }
17728 	/* Snoop and No-Snoop latencies are the same */
17729 reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
17730 CSR_WRITE(sc, WMREG_LTRV, reg);
17731
17732 /* Set OBFF high water mark */
17733 reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
17734 reg |= obff_hwm;
17735 CSR_WRITE(sc, WMREG_SVT, reg);
17736
17737 /* Enable OBFF */
17738 reg = CSR_READ(sc, WMREG_SVCR);
17739 reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
17740 CSR_WRITE(sc, WMREG_SVCR, reg);
17741
17742 return 0;
17743 }
17744
17745 /*
17746 * I210 Errata 25 and I211 Errata 10
17747 * Slow System Clock.
17748 *
17749  * Note that this function is called in both the FLASH and iNVM cases on NetBSD.
17750 */
17751 static int
17752 wm_pll_workaround_i210(struct wm_softc *sc)
17753 {
17754 uint32_t mdicnfg, wuc;
17755 uint32_t reg;
17756 pcireg_t pcireg;
17757 uint32_t pmreg;
17758 uint16_t nvmword, tmp_nvmword;
17759 uint16_t phyval;
17760 bool wa_done = false;
17761 int i, rv = 0;
17762
17763 /* Get Power Management cap offset */
17764 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
17765 &pmreg, NULL) == 0)
17766 return -1;
17767
17768 /* Save WUC and MDICNFG registers */
17769 wuc = CSR_READ(sc, WMREG_WUC);
17770 mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
17771
17772 reg = mdicnfg & ~MDICNFG_DEST;
17773 CSR_WRITE(sc, WMREG_MDICNFG, reg);
17774
17775 if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
17776 /*
17777 * The default value of the Initialization Control Word 1
17778 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
17779 */
17780 nvmword = INVM_DEFAULT_AL;
17781 }
17782 tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
17783
17784 for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
17785 wm_gmii_gs40g_readreg(sc->sc_dev, 1,
17786 GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
17787
17788 if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
17789 rv = 0;
17790 break; /* OK */
17791 } else
17792 rv = -1;
17793
17794 wa_done = true;
17795 /* Directly reset the internal PHY */
17796 reg = CSR_READ(sc, WMREG_CTRL);
17797 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
17798
17799 reg = CSR_READ(sc, WMREG_CTRL_EXT);
17800 reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
17801 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
17802
17803 CSR_WRITE(sc, WMREG_WUC, 0);
17804 reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
17805 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
17806
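		/*
		 * Bounce the device through D3hot and back to D0; the
		 * power-state transition is intended to make the PHY PLL
		 * retry its configuration using the modified autoload word
		 * written to EEARBC above.
		 */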
17807 pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
17808 pmreg + PCI_PMCSR);
17809 pcireg |= PCI_PMCSR_STATE_D3;
17810 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
17811 pmreg + PCI_PMCSR, pcireg);
17812 delay(1000);
17813 pcireg &= ~PCI_PMCSR_STATE_D3;
17814 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
17815 pmreg + PCI_PMCSR, pcireg);
17816
17817 reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
17818 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
17819
17820 /* Restore WUC register */
17821 CSR_WRITE(sc, WMREG_WUC, wuc);
17822 }
17823
17824 /* Restore MDICNFG setting */
17825 CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
17826 if (wa_done)
17827 aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
17828 return rv;
17829 }
17830
17831 static void
17832 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
17833 {
17834 uint32_t reg;
17835
17836 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17837 device_xname(sc->sc_dev), __func__));
17838 KASSERT((sc->sc_type == WM_T_PCH_SPT)
17839 || (sc->sc_type == WM_T_PCH_CNP));
17840
17841 reg = CSR_READ(sc, WMREG_FEXTNVM7);
17842 reg |= FEXTNVM7_SIDE_CLK_UNGATE;
17843 CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
17844
17845 reg = CSR_READ(sc, WMREG_FEXTNVM9);
17846 reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
17847 CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
17848 }
17849
17850 /* Sysctl functions */
17851 static int
17852 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
17853 {
17854 struct sysctlnode node = *rnode;
17855 struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
17856 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
17857 struct wm_softc *sc = txq->txq_sc;
17858 uint32_t reg;
17859
17860 reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
17861 	node.sysctl_data = &reg;
17862 return sysctl_lookup(SYSCTLFN_CALL(&node));
17863 }
17864
17865 static int
17866 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
17867 {
17868 struct sysctlnode node = *rnode;
17869 struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
17870 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
17871 struct wm_softc *sc = txq->txq_sc;
17872 uint32_t reg;
17873
17874 reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
17875 	node.sysctl_data = &reg;
17876 return sysctl_lookup(SYSCTLFN_CALL(&node));
17877 }
17878
17879 #ifdef WM_DEBUG
17880 static int
17881 wm_sysctl_debug(SYSCTLFN_ARGS)
17882 {
17883 struct sysctlnode node = *rnode;
17884 struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
17885 uint32_t dflags;
17886 int error;
17887
17888 dflags = sc->sc_debug;
17889 node.sysctl_data = &dflags;
17890 error = sysctl_lookup(SYSCTLFN_CALL(&node));
17891
17892 if (error || newp == NULL)
17893 return error;
17894
17895 sc->sc_debug = dflags;
17896 device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
17897 device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
17898
17899 return 0;
17900 }
17901 #endif
17902