/*	$NetBSD: if_wm.c,v 1.701 2021/03/01 04:50:17 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.701 2021/03/01 04:50:17 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define	WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | \
	WM_DEBUG_LOCK
#endif

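/*
 * DPRINTF()'s "x" is a WM_DEBUG_* mask and "y" is a parenthesized printf()
 * argument list, e.g. DPRINTF(sc, WM_DEBUG_LINK, ("%s: link up\n", xname)).
 */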
#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define	WM_MPSAFE		1
#define	WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define	WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define	WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define	WM_CALLOUT_FLAGS	0
#define	WM_SOFTINT_FLAGS	0
#define	WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define	WM_WORKQUEUE_PRI	PRI_SOFTNET

/*
 * The maximum number of interrupts this driver uses: one per Tx/Rx queue
 * pair plus one for the link interrupt.
 */
#define	WM_MAX_NQUEUEINTR	16
#define	WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define	WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
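/* Ring indices wrap modulo the (power of two) descriptor and job counts. */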
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC];	/* 82575 and newer */
} rxdescs_t;

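/* Byte offset of descriptor "x" within a queue's descriptor block. */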
#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map.  For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

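/* SW/FW semaphore bits, indexed by the MAC function number (sc_funcid). */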
static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

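/*
 * 82580 Rx packet buffer sizes (in KB, I believe), indexed by the RXPBS
 * register field; see wm_rxpbs_adjust_82580().
 */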
static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define	WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define	WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

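/*
 * Attach a per-queue event counter whose name is the queue kind, a two-digit
 * queue number and the event name, e.g. "txq00txdw".
 */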
#define	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define	WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define	WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define	WM_Q_EVCNT_DETACH(qname, evname, q, qnum)			\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This intermediate queue hands packets over without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags (not ifp->if_flags) to
	 * manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE			0x1
#define	WM_TXQ_LINKDOWN_DISCARD		0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t txq_last_hw_cmd;
	uint8_t txq_last_hw_fields;
	uint16_t txq_last_hw_ipcs;
	uint16_t txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skipped writing cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

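/*
 * Chip-specific PHY access methods: semaphore acquire/release and locked
 * register accessors.
 */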
struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

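/* Chip-specific NVM (EEPROM/flash) access methods. */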
struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
#ifdef WM_DEBUG
	uint32_t sc_debug;
#endif
};

#define	WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define	WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define	WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

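/*
 * rxq_tailp always points at the m_next pointer of the last mbuf in the
 * Rx chain (or at rxq_head when the chain is empty), so linking is O(1).
 */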
#define	WM_RXCHAIN_RESET(rxq)						\
	do {								\
		(rxq)->rxq_tailp = &(rxq)->rxq_head;			\
		*(rxq)->rxq_tailp = NULL;				\
		(rxq)->rxq_len = 0;					\
	} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
	do {								\
		*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);		\
		(rxq)->rxq_tailp = &(m)->m_next;			\
	} while (/*CONSTCOND*/0)

#ifdef WM_EVENT_COUNTERS
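/*
 * Event counters are bumped with relaxed atomic loads/stores, so the 64-bit
 * ev_count updates stay untorn without the cost of a locked read-modify-write.
 */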
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))

#define	WM_Q_EVCNT_INCR(qname, evname)					\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define	WM_Q_EVCNT_ADD(qname, evname, val)				\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)		/* nothing */
#define	WM_EVCNT_ADD(ev, val)		/* nothing */

#define	WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define	WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
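/* Reading STATUS forces any posted register writes out to the chip. */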
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

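/*
 * Low and high 32 bits of a descriptor's bus address, for the 64-bit base
 * address registers (e.g. TDBAL/TDBAH and RDBAL/RDBAH).
 */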
#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
	uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
	uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
	struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
	struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
	struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
	struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
	struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
	bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
	struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
	bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
	unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);   /* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define	WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350, WMP_F_COPPER },
1514
1515 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_1000KX,
1516 "I354 Gigabit Ethernet (KX)",
1517 WM_T_I354, WMP_F_SERDES },
1518
1519 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
1520 "I354 Gigabit Ethernet (SGMII)",
1521 WM_T_I354, WMP_F_COPPER },
1522
1523 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_25GBE,
1524 "I354 Gigabit Ethernet (2.5G)",
1525 WM_T_I354, WMP_F_COPPER },
1526
1527 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
1528 "I210-T1 Ethernet Server Adapter",
1529 WM_T_I210, WMP_F_COPPER },
1530
1531 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1532 "I210 Ethernet (Copper OEM)",
1533 WM_T_I210, WMP_F_COPPER },
1534
1535 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
1536 "I210 Ethernet (Copper IT)",
1537 WM_T_I210, WMP_F_COPPER },
1538
1539 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1540 "I210 Ethernet (Copper, FLASH less)",
1541 WM_T_I210, WMP_F_COPPER },
1542
1543 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
1544 "I210 Gigabit Ethernet (Fiber)",
1545 WM_T_I210, WMP_F_FIBER },
1546
1547 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
1548 "I210 Gigabit Ethernet (SERDES)",
1549 WM_T_I210, WMP_F_SERDES },
1550
1551 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1552 "I210 Gigabit Ethernet (SERDES, FLASH less)",
1553 WM_T_I210, WMP_F_SERDES },
1554
1555 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
1556 "I210 Gigabit Ethernet (SGMII)",
1557 WM_T_I210, WMP_F_COPPER },
1558
1559 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII_WOF,
1560 "I210 Gigabit Ethernet (SGMII, FLASH less)",
1561 WM_T_I210, WMP_F_COPPER },
1562
1563 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
1564 "I211 Ethernet (COPPER)",
1565 WM_T_I211, WMP_F_COPPER },
1566 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
1567 "I217 V Ethernet Connection",
1568 WM_T_PCH_LPT, WMP_F_COPPER },
1569 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
1570 "I217 LM Ethernet Connection",
1571 WM_T_PCH_LPT, WMP_F_COPPER },
1572 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
1573 "I218 V Ethernet Connection",
1574 WM_T_PCH_LPT, WMP_F_COPPER },
1575 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2,
1576 "I218 V Ethernet Connection",
1577 WM_T_PCH_LPT, WMP_F_COPPER },
1578 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3,
1579 "I218 V Ethernet Connection",
1580 WM_T_PCH_LPT, WMP_F_COPPER },
1581 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
1582 "I218 LM Ethernet Connection",
1583 WM_T_PCH_LPT, WMP_F_COPPER },
1584 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2,
1585 "I218 LM Ethernet Connection",
1586 WM_T_PCH_LPT, WMP_F_COPPER },
1587 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3,
1588 "I218 LM Ethernet Connection",
1589 WM_T_PCH_LPT, WMP_F_COPPER },
1590 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM,
1591 "I219 LM Ethernet Connection",
1592 WM_T_PCH_SPT, WMP_F_COPPER },
1593 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM2,
1594 "I219 LM Ethernet Connection",
1595 WM_T_PCH_SPT, WMP_F_COPPER },
1596 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM3,
1597 "I219 LM Ethernet Connection",
1598 WM_T_PCH_SPT, WMP_F_COPPER },
1599 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM4,
1600 "I219 LM Ethernet Connection",
1601 WM_T_PCH_SPT, WMP_F_COPPER },
1602 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM5,
1603 "I219 LM Ethernet Connection",
1604 WM_T_PCH_SPT, WMP_F_COPPER },
1605 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM6,
1606 "I219 LM Ethernet Connection",
1607 WM_T_PCH_CNP, WMP_F_COPPER },
1608 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM7,
1609 "I219 LM Ethernet Connection",
1610 WM_T_PCH_CNP, WMP_F_COPPER },
1611 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM8,
1612 "I219 LM Ethernet Connection",
1613 WM_T_PCH_CNP, WMP_F_COPPER },
1614 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM9,
1615 "I219 LM Ethernet Connection",
1616 WM_T_PCH_CNP, WMP_F_COPPER },
1617 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM10,
1618 "I219 LM Ethernet Connection",
1619 WM_T_PCH_CNP, WMP_F_COPPER },
1620 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM11,
1621 "I219 LM Ethernet Connection",
1622 WM_T_PCH_CNP, WMP_F_COPPER },
1623 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM12,
1624 "I219 LM Ethernet Connection",
1625 WM_T_PCH_SPT, WMP_F_COPPER },
1626 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM13,
1627 "I219 LM Ethernet Connection",
1628 WM_T_PCH_CNP, WMP_F_COPPER },
1629 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM14,
1630 "I219 LM Ethernet Connection",
1631 WM_T_PCH_CNP, WMP_F_COPPER },
1632 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM15,
1633 "I219 LM Ethernet Connection",
1634 WM_T_PCH_CNP, WMP_F_COPPER },
1635 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V,
1636 "I219 V Ethernet Connection",
1637 WM_T_PCH_SPT, WMP_F_COPPER },
1638 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2,
1639 "I219 V Ethernet Connection",
1640 WM_T_PCH_SPT, WMP_F_COPPER },
1641 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V4,
1642 "I219 V Ethernet Connection",
1643 WM_T_PCH_SPT, WMP_F_COPPER },
1644 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V5,
1645 "I219 V Ethernet Connection",
1646 WM_T_PCH_SPT, WMP_F_COPPER },
1647 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V6,
1648 "I219 V Ethernet Connection",
1649 WM_T_PCH_CNP, WMP_F_COPPER },
1650 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V7,
1651 "I219 V Ethernet Connection",
1652 WM_T_PCH_CNP, WMP_F_COPPER },
1653 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V8,
1654 "I219 V Ethernet Connection",
1655 WM_T_PCH_CNP, WMP_F_COPPER },
1656 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V9,
1657 "I219 V Ethernet Connection",
1658 WM_T_PCH_CNP, WMP_F_COPPER },
1659 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V10,
1660 "I219 V Ethernet Connection",
1661 WM_T_PCH_CNP, WMP_F_COPPER },
1662 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V11,
1663 "I219 V Ethernet Connection",
1664 WM_T_PCH_CNP, WMP_F_COPPER },
1665 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V12,
1666 "I219 V Ethernet Connection",
1667 WM_T_PCH_SPT, WMP_F_COPPER },
1668 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V13,
1669 "I219 V Ethernet Connection",
1670 WM_T_PCH_CNP, WMP_F_COPPER },
1671 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V14,
1672 "I219 V Ethernet Connection",
1673 WM_T_PCH_CNP, WMP_F_COPPER },
1674 { 0, 0,
1675 NULL,
1676 0, 0 },
1677 };
1678
1679 /*
1680 * Register read/write functions.
1681 * Other than CSR_{READ|WRITE}().
1682 */
1683
1684 #if 0 /* Not currently used */
1685 static inline uint32_t
1686 wm_io_read(struct wm_softc *sc, int reg)
1687 {
1688
1689 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1690 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1691 }
1692 #endif
1693
1694 static inline void
1695 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1696 {
1697
1698 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1699 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1700 }
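/*
 * Example: the I/O BAR exposes only a two-register window, so every CSR
 * access through I/O space is a pair of 4-byte transactions: write the
 * CSR offset to IOADDR (offset 0 of the I/O BAR), then transfer the data
 * at IODATA (offset 4). Reading the status register that way would look
 * roughly like:
 *
 *	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, WMREG_STATUS);
 *	reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4);
 *
 * which is what the (currently unused) wm_io_read() above does.
 */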
1701
1702 static inline void
1703 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1704 uint32_t data)
1705 {
1706 uint32_t regval;
1707 int i;
1708
1709 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1710
1711 CSR_WRITE(sc, reg, regval);
1712
1713 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1714 delay(5);
1715 if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1716 break;
1717 }
1718 if (i == SCTL_CTL_POLL_TIMEOUT) {
1719 aprint_error("%s: WARNING:"
1720 " i82575 reg 0x%08x setup did not indicate ready\n",
1721 device_xname(sc->sc_dev), reg);
1722 }
1723 }
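/*
 * Example (hypothetical values): writing the 8-bit value 0x0c to
 * sub-register offset 0x02 of an indirect control register builds
 * regval = (0x0c & SCTL_CTL_DATA_MASK) | (0x02 << SCTL_CTL_ADDR_SHIFT),
 * writes it with CSR_WRITE() and then polls up to SCTL_CTL_POLL_TIMEOUT
 * times, 5us apart, for SCTL_CTL_READY before warning and giving up.
 */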
1724
1725 static inline void
1726 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1727 {
1728 wa->wa_low = htole32(v & 0xffffffffU);
1729 if (sizeof(bus_addr_t) == 8)
1730 wa->wa_high = htole32((uint64_t) v >> 32);
1731 else
1732 wa->wa_high = 0;
1733 }
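/*
 * Example: with bus_addr_t v = 0x0000000123456789, wm_set_dma_addr()
 * stores wa_low = htole32(0x23456789) and wa_high = htole32(0x00000001);
 * on a system with a 32-bit bus_addr_t the high word is simply zero.
 */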
1734
1735 /*
1736 * Descriptor sync/init functions.
1737 */
1738 static inline void
1739 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1740 {
1741 struct wm_softc *sc = txq->txq_sc;
1742
1743 /* If it will wrap around, sync to the end of the ring. */
1744 if ((start + num) > WM_NTXDESC(txq)) {
1745 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1746 WM_CDTXOFF(txq, start), txq->txq_descsize *
1747 (WM_NTXDESC(txq) - start), ops);
1748 num -= (WM_NTXDESC(txq) - start);
1749 start = 0;
1750 }
1751
1752 /* Now sync whatever is left. */
1753 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1754 WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1755 }
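/*
 * Example (hypothetical ring size): with WM_NTXDESC(txq) == 256,
 * start == 250 and num == 10, the first bus_dmamap_sync() covers
 * descriptors 250..255 and the second covers descriptors 0..3, so a
 * range that wraps the ring is always synced as two contiguous pieces.
 */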
1756
1757 static inline void
1758 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1759 {
1760 struct wm_softc *sc = rxq->rxq_sc;
1761
1762 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1763 WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1764 }
1765
1766 static inline void
1767 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1768 {
1769 struct wm_softc *sc = rxq->rxq_sc;
1770 struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1771 struct mbuf *m = rxs->rxs_mbuf;
1772
1773 /*
1774 * Note: We scoot the packet forward 2 bytes in the buffer
1775 * so that the payload after the Ethernet header is aligned
1776 * to a 4-byte boundary.
1777 *
1778 * XXX BRAINDAMAGE ALERT!
1779 * The stupid chip uses the same size for every buffer, which
1780 * is set in the Receive Control register. We are using the 2K
1781 * size option, but what we REALLY want is (2K - 2)! For this
1782 * reason, we can't "scoot" packets longer than the standard
1783 * Ethernet MTU. On strict-alignment platforms, if the total
1784 * size exceeds (2K - 2) we set align_tweak to 0 and let
1785 * the upper layer copy the headers.
1786 */
1787 m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1788
1789 if (sc->sc_type == WM_T_82574) {
1790 ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1791 rxd->erx_data.erxd_addr =
1792 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1793 rxd->erx_data.erxd_dd = 0;
1794 } else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1795 nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1796
1797 rxd->nqrx_data.nrxd_paddr =
1798 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1799 /* Currently, split header is not supported. */
1800 rxd->nqrx_data.nrxd_haddr = 0;
1801 } else {
1802 wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1803
1804 wm_set_dma_addr(&rxd->wrx_addr,
1805 rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1806 rxd->wrx_len = 0;
1807 rxd->wrx_cksum = 0;
1808 rxd->wrx_status = 0;
1809 rxd->wrx_errors = 0;
1810 rxd->wrx_special = 0;
1811 }
1812 wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1813
1814 CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1815 }
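/*
 * Example: with sc_align_tweak == 2, the receive DMA address starts
 * 2 bytes into the 2K cluster, so after the 14-byte Ethernet header the
 * IP header begins at offset 16, i.e. on a 4-byte boundary.  With
 * sc_align_tweak == 0 (large frames on strict-alignment machines) the
 * payload is left unaligned and the upper layers copy the headers.
 */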
1816
1817 /*
1818 * Device driver interface functions and commonly used functions.
1819 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1820 */
1821
1822 /* Lookup supported device table */
1823 static const struct wm_product *
1824 wm_lookup(const struct pci_attach_args *pa)
1825 {
1826 const struct wm_product *wmp;
1827
1828 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1829 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1830 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1831 return wmp;
1832 }
1833 return NULL;
1834 }
1835
1836 /* The match function (ca_match) */
1837 static int
1838 wm_match(device_t parent, cfdata_t cf, void *aux)
1839 {
1840 struct pci_attach_args *pa = aux;
1841
1842 if (wm_lookup(pa) != NULL)
1843 return 1;
1844
1845 return 0;
1846 }
1847
1848 /* The attach function (ca_attach) */
1849 static void
1850 wm_attach(device_t parent, device_t self, void *aux)
1851 {
1852 struct wm_softc *sc = device_private(self);
1853 struct pci_attach_args *pa = aux;
1854 prop_dictionary_t dict;
1855 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1856 pci_chipset_tag_t pc = pa->pa_pc;
1857 int counts[PCI_INTR_TYPE_SIZE];
1858 pci_intr_type_t max_type;
1859 const char *eetype, *xname;
1860 bus_space_tag_t memt;
1861 bus_space_handle_t memh;
1862 bus_size_t memsize;
1863 int memh_valid;
1864 int i, error;
1865 const struct wm_product *wmp;
1866 prop_data_t ea;
1867 prop_number_t pn;
1868 uint8_t enaddr[ETHER_ADDR_LEN];
1869 char buf[256];
1870 char wqname[MAXCOMLEN];
1871 uint16_t cfg1, cfg2, swdpin, nvmword;
1872 pcireg_t preg, memtype;
1873 uint16_t eeprom_data, apme_mask;
1874 bool force_clear_smbi;
1875 uint32_t link_mode;
1876 uint32_t reg;
1877
1878 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
1879 sc->sc_debug = WM_DEBUG_DEFAULT;
1880 #endif
1881 sc->sc_dev = self;
1882 callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
1883 callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
1884 sc->sc_core_stopping = false;
1885
1886 wmp = wm_lookup(pa);
1887 #ifdef DIAGNOSTIC
1888 if (wmp == NULL) {
1889 printf("\n");
1890 panic("wm_attach: impossible");
1891 }
1892 #endif
1893 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1894
1895 sc->sc_pc = pa->pa_pc;
1896 sc->sc_pcitag = pa->pa_tag;
1897
1898 if (pci_dma64_available(pa))
1899 sc->sc_dmat = pa->pa_dmat64;
1900 else
1901 sc->sc_dmat = pa->pa_dmat;
1902
1903 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1904 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1905 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1906
1907 sc->sc_type = wmp->wmp_type;
1908
1909 /* Set default function pointers */
1910 sc->phy.acquire = sc->nvm.acquire = wm_get_null;
1911 sc->phy.release = sc->nvm.release = wm_put_null;
1912 sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
1913
1914 if (sc->sc_type < WM_T_82543) {
1915 if (sc->sc_rev < 2) {
1916 aprint_error_dev(sc->sc_dev,
1917 "i82542 must be at least rev. 2\n");
1918 return;
1919 }
1920 if (sc->sc_rev < 3)
1921 sc->sc_type = WM_T_82542_2_0;
1922 }
1923
1924 /*
1925 * Disable MSI for Errata:
1926 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1927 *
1928 * 82544: Errata 25
1929 * 82540: Errata 6 (easy to reproduce device timeout)
1930 * 82545: Errata 4 (easy to reproduce device timeout)
1931 * 82546: Errata 26 (easy to reproduce device timeout)
1932 * 82541: Errata 7 (easy to reproduce device timeout)
1933 *
1934 * "Byte Enables 2 and 3 are not set on MSI writes"
1935 *
1936 * 82571 & 82572: Errata 63
1937 */
1938 if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1939 || (sc->sc_type == WM_T_82572))
1940 pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1941
1942 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1943 || (sc->sc_type == WM_T_82580)
1944 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1945 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1946 sc->sc_flags |= WM_F_NEWQUEUE;
1947
1948 /* Set device properties (mactype) */
1949 dict = device_properties(sc->sc_dev);
1950 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1951
1952 /*
1953 * Map the device. All devices support memory-mapped access,
1954 * and it is really required for normal operation.
1955 */
1956 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1957 switch (memtype) {
1958 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1959 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1960 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1961 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1962 break;
1963 default:
1964 memh_valid = 0;
1965 break;
1966 }
1967
1968 if (memh_valid) {
1969 sc->sc_st = memt;
1970 sc->sc_sh = memh;
1971 sc->sc_ss = memsize;
1972 } else {
1973 aprint_error_dev(sc->sc_dev,
1974 "unable to map device registers\n");
1975 return;
1976 }
1977
1978 /*
1979 * In addition, i82544 and later support I/O mapped indirect
1980 * register access. It is not desirable (nor supported in
1981 * this driver) to use it for normal operation, though it is
1982 * required to work around bugs in some chip versions.
1983 */
1984 if (sc->sc_type >= WM_T_82544) {
1985 /* First we have to find the I/O BAR. */
1986 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1987 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1988 if (memtype == PCI_MAPREG_TYPE_IO)
1989 break;
1990 if (PCI_MAPREG_MEM_TYPE(memtype) ==
1991 PCI_MAPREG_MEM_TYPE_64BIT)
1992 i += 4; /* skip high bits, too */
1993 }
1994 if (i < PCI_MAPREG_END) {
1995 /*
1996 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1997 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1998 * That's no problem because the newer chips don't have
1999 * this bug.
2000 *
2001 * The i8254x apparently doesn't respond when the
2002 * I/O BAR is 0, which looks somewhat like it hasn't
2003 * been configured.
2004 */
2005 preg = pci_conf_read(pc, pa->pa_tag, i);
2006 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
2007 aprint_error_dev(sc->sc_dev,
2008 "WARNING: I/O BAR at zero.\n");
2009 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
2010 0, &sc->sc_iot, &sc->sc_ioh,
2011 NULL, &sc->sc_ios) == 0) {
2012 sc->sc_flags |= WM_F_IOH_VALID;
2013 } else
2014 aprint_error_dev(sc->sc_dev,
2015 "WARNING: unable to map I/O space\n");
2016 }
2017
2018 }
2019
2020 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
2021 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2022 preg |= PCI_COMMAND_MASTER_ENABLE;
2023 if (sc->sc_type < WM_T_82542_2_1)
2024 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
2025 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
2026
2027 /* Power up chip */
2028 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
2029 && error != EOPNOTSUPP) {
2030 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
2031 return;
2032 }
2033
2034 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
2035 /*
2036 * If we can use only one queue, don't use MSI-X; this saves
2037 * interrupt resources.
2038 */
2039 if (sc->sc_nqueues > 1) {
2040 max_type = PCI_INTR_TYPE_MSIX;
2041 /*
2042 * The 82583 has an MSI-X capability in the PCI configuration
2043 * space, but it doesn't actually support it. At least the
2044 * documentation doesn't say anything about MSI-X.
2045 */
2046 counts[PCI_INTR_TYPE_MSIX]
2047 = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
2048 } else {
2049 max_type = PCI_INTR_TYPE_MSI;
2050 counts[PCI_INTR_TYPE_MSIX] = 0;
2051 }
2052
2053 /* Allocation settings */
2054 counts[PCI_INTR_TYPE_MSI] = 1;
2055 counts[PCI_INTR_TYPE_INTX] = 1;
2056 /* overridden by disable flags */
2057 if (wm_disable_msi != 0) {
2058 counts[PCI_INTR_TYPE_MSI] = 0;
2059 if (wm_disable_msix != 0) {
2060 max_type = PCI_INTR_TYPE_INTX;
2061 counts[PCI_INTR_TYPE_MSIX] = 0;
2062 }
2063 } else if (wm_disable_msix != 0) {
2064 max_type = PCI_INTR_TYPE_MSI;
2065 counts[PCI_INTR_TYPE_MSIX] = 0;
2066 }
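	/*
	 * Interrupt allocation falls back in order: MSI-X (sc_nqueues + 1
	 * vectors, the extra one for link/other events) -> MSI (one vector)
	 * -> INTx.  Each retry below shrinks max_type and the counts[]
	 * array so pci_intr_alloc() can only return the remaining types.
	 */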
2067
2068 alloc_retry:
2069 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
2070 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
2071 return;
2072 }
2073
2074 if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
2075 error = wm_setup_msix(sc);
2076 if (error) {
2077 pci_intr_release(pc, sc->sc_intrs,
2078 counts[PCI_INTR_TYPE_MSIX]);
2079
2080 /* Setup for MSI: Disable MSI-X */
2081 max_type = PCI_INTR_TYPE_MSI;
2082 counts[PCI_INTR_TYPE_MSI] = 1;
2083 counts[PCI_INTR_TYPE_INTX] = 1;
2084 goto alloc_retry;
2085 }
2086 } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
2087 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */
2088 error = wm_setup_legacy(sc);
2089 if (error) {
2090 pci_intr_release(sc->sc_pc, sc->sc_intrs,
2091 counts[PCI_INTR_TYPE_MSI]);
2092
2093 /* The next try is for INTx: Disable MSI */
2094 max_type = PCI_INTR_TYPE_INTX;
2095 counts[PCI_INTR_TYPE_INTX] = 1;
2096 goto alloc_retry;
2097 }
2098 } else {
2099 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */
2100 error = wm_setup_legacy(sc);
2101 if (error) {
2102 pci_intr_release(sc->sc_pc, sc->sc_intrs,
2103 counts[PCI_INTR_TYPE_INTX]);
2104 return;
2105 }
2106 }
2107
2108 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
2109 error = workqueue_create(&sc->sc_queue_wq, wqname,
2110 wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
2111 WM_WORKQUEUE_FLAGS);
2112 if (error) {
2113 aprint_error_dev(sc->sc_dev,
2114 "unable to create workqueue\n");
2115 goto out;
2116 }
2117
2118 /*
2119 * Check the function ID (unit number of the chip).
2120 */
2121 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
2122 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
2123 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2124 || (sc->sc_type == WM_T_82580)
2125 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2126 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
2127 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
2128 else
2129 sc->sc_funcid = 0;
2130
2131 /*
2132 * Determine a few things about the bus we're connected to.
2133 */
2134 if (sc->sc_type < WM_T_82543) {
2135 /* We don't really know the bus characteristics here. */
2136 sc->sc_bus_speed = 33;
2137 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
2138 /*
2139 * CSA (Communication Streaming Architecture) is about as fast
2140 * as a 32-bit 66MHz PCI bus.
2141 */
2142 sc->sc_flags |= WM_F_CSA;
2143 sc->sc_bus_speed = 66;
2144 aprint_verbose_dev(sc->sc_dev,
2145 "Communication Streaming Architecture\n");
2146 if (sc->sc_type == WM_T_82547) {
2147 callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
2148 callout_setfunc(&sc->sc_txfifo_ch,
2149 wm_82547_txfifo_stall, sc);
2150 aprint_verbose_dev(sc->sc_dev,
2151 "using 82547 Tx FIFO stall work-around\n");
2152 }
2153 } else if (sc->sc_type >= WM_T_82571) {
2154 sc->sc_flags |= WM_F_PCIE;
2155 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
2156 && (sc->sc_type != WM_T_ICH10)
2157 && (sc->sc_type != WM_T_PCH)
2158 && (sc->sc_type != WM_T_PCH2)
2159 && (sc->sc_type != WM_T_PCH_LPT)
2160 && (sc->sc_type != WM_T_PCH_SPT)
2161 && (sc->sc_type != WM_T_PCH_CNP)) {
2162 /* ICH* and PCH* have no PCIe capability registers */
2163 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2164 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
2165 NULL) == 0)
2166 aprint_error_dev(sc->sc_dev,
2167 "unable to find PCIe capability\n");
2168 }
2169 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
2170 } else {
2171 reg = CSR_READ(sc, WMREG_STATUS);
2172 if (reg & STATUS_BUS64)
2173 sc->sc_flags |= WM_F_BUS64;
2174 if ((reg & STATUS_PCIX_MODE) != 0) {
2175 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
2176
2177 sc->sc_flags |= WM_F_PCIX;
2178 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2179 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
2180 aprint_error_dev(sc->sc_dev,
2181 "unable to find PCIX capability\n");
2182 else if (sc->sc_type != WM_T_82545_3 &&
2183 sc->sc_type != WM_T_82546_3) {
2184 /*
2185 * Work around a problem caused by the BIOS
2186 * setting the max memory read byte count
2187 * incorrectly.
2188 */
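				/*
				 * Example (hypothetical values): the MMRBC
				 * fields encode 512 << n bytes, so
				 * bytecnt == 3 (4096) with maxb == 1 (1024)
				 * is reported as "resetting PCI-X MMRBC:
				 * 4096 -> 1024" and the command register is
				 * rewritten with the smaller value.
				 */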
2189 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
2190 sc->sc_pcixe_capoff + PCIX_CMD);
2191 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2192 sc->sc_pcixe_capoff + PCIX_STATUS);
2193
2194 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2195 PCIX_CMD_BYTECNT_SHIFT;
2196 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2197 PCIX_STATUS_MAXB_SHIFT;
2198 if (bytecnt > maxb) {
2199 aprint_verbose_dev(sc->sc_dev,
2200 "resetting PCI-X MMRBC: %d -> %d\n",
2201 512 << bytecnt, 512 << maxb);
2202 pcix_cmd = (pcix_cmd &
2203 ~PCIX_CMD_BYTECNT_MASK) |
2204 (maxb << PCIX_CMD_BYTECNT_SHIFT);
2205 pci_conf_write(pa->pa_pc, pa->pa_tag,
2206 sc->sc_pcixe_capoff + PCIX_CMD,
2207 pcix_cmd);
2208 }
2209 }
2210 }
2211 /*
2212 * The quad port adapter is special; it has a PCIX-PCIX
2213 * bridge on the board, and can run the secondary bus at
2214 * a higher speed.
2215 */
2216 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2217 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2218 : 66;
2219 } else if (sc->sc_flags & WM_F_PCIX) {
2220 switch (reg & STATUS_PCIXSPD_MASK) {
2221 case STATUS_PCIXSPD_50_66:
2222 sc->sc_bus_speed = 66;
2223 break;
2224 case STATUS_PCIXSPD_66_100:
2225 sc->sc_bus_speed = 100;
2226 break;
2227 case STATUS_PCIXSPD_100_133:
2228 sc->sc_bus_speed = 133;
2229 break;
2230 default:
2231 aprint_error_dev(sc->sc_dev,
2232 "unknown PCIXSPD %d; assuming 66MHz\n",
2233 reg & STATUS_PCIXSPD_MASK);
2234 sc->sc_bus_speed = 66;
2235 break;
2236 }
2237 } else
2238 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2239 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2240 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2241 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2242 }
2243
2244 /* clear interesting stat counters */
2245 CSR_READ(sc, WMREG_COLC);
2246 CSR_READ(sc, WMREG_RXERRC);
2247
2248 if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2249 || (sc->sc_type >= WM_T_ICH8))
2250 sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2251 if (sc->sc_type >= WM_T_ICH8)
2252 sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2253
2254 /* Set PHY, NVM mutex related stuff */
2255 switch (sc->sc_type) {
2256 case WM_T_82542_2_0:
2257 case WM_T_82542_2_1:
2258 case WM_T_82543:
2259 case WM_T_82544:
2260 /* Microwire */
2261 sc->nvm.read = wm_nvm_read_uwire;
2262 sc->sc_nvm_wordsize = 64;
2263 sc->sc_nvm_addrbits = 6;
2264 break;
2265 case WM_T_82540:
2266 case WM_T_82545:
2267 case WM_T_82545_3:
2268 case WM_T_82546:
2269 case WM_T_82546_3:
2270 /* Microwire */
2271 sc->nvm.read = wm_nvm_read_uwire;
2272 reg = CSR_READ(sc, WMREG_EECD);
2273 if (reg & EECD_EE_SIZE) {
2274 sc->sc_nvm_wordsize = 256;
2275 sc->sc_nvm_addrbits = 8;
2276 } else {
2277 sc->sc_nvm_wordsize = 64;
2278 sc->sc_nvm_addrbits = 6;
2279 }
2280 sc->sc_flags |= WM_F_LOCK_EECD;
2281 sc->nvm.acquire = wm_get_eecd;
2282 sc->nvm.release = wm_put_eecd;
2283 break;
2284 case WM_T_82541:
2285 case WM_T_82541_2:
2286 case WM_T_82547:
2287 case WM_T_82547_2:
2288 reg = CSR_READ(sc, WMREG_EECD);
2289 /*
2290 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only
2291 * on the 8254[17], so set the flags and functions before calling it.
2292 */
2293 sc->sc_flags |= WM_F_LOCK_EECD;
2294 sc->nvm.acquire = wm_get_eecd;
2295 sc->nvm.release = wm_put_eecd;
2296 if (reg & EECD_EE_TYPE) {
2297 /* SPI */
2298 sc->nvm.read = wm_nvm_read_spi;
2299 sc->sc_flags |= WM_F_EEPROM_SPI;
2300 wm_nvm_set_addrbits_size_eecd(sc);
2301 } else {
2302 /* Microwire */
2303 sc->nvm.read = wm_nvm_read_uwire;
2304 if ((reg & EECD_EE_ABITS) != 0) {
2305 sc->sc_nvm_wordsize = 256;
2306 sc->sc_nvm_addrbits = 8;
2307 } else {
2308 sc->sc_nvm_wordsize = 64;
2309 sc->sc_nvm_addrbits = 6;
2310 }
2311 }
2312 break;
2313 case WM_T_82571:
2314 case WM_T_82572:
2315 /* SPI */
2316 sc->nvm.read = wm_nvm_read_eerd;
2317 /* Don't use WM_F_LOCK_EECD because we use EERD */
2318 sc->sc_flags |= WM_F_EEPROM_SPI;
2319 wm_nvm_set_addrbits_size_eecd(sc);
2320 sc->phy.acquire = wm_get_swsm_semaphore;
2321 sc->phy.release = wm_put_swsm_semaphore;
2322 sc->nvm.acquire = wm_get_nvm_82571;
2323 sc->nvm.release = wm_put_nvm_82571;
2324 break;
2325 case WM_T_82573:
2326 case WM_T_82574:
2327 case WM_T_82583:
2328 sc->nvm.read = wm_nvm_read_eerd;
2329 /* Don't use WM_F_LOCK_EECD because we use EERD */
2330 if (sc->sc_type == WM_T_82573) {
2331 sc->phy.acquire = wm_get_swsm_semaphore;
2332 sc->phy.release = wm_put_swsm_semaphore;
2333 sc->nvm.acquire = wm_get_nvm_82571;
2334 sc->nvm.release = wm_put_nvm_82571;
2335 } else {
2336 /* Both PHY and NVM use the same semaphore. */
2337 sc->phy.acquire = sc->nvm.acquire
2338 = wm_get_swfwhw_semaphore;
2339 sc->phy.release = sc->nvm.release
2340 = wm_put_swfwhw_semaphore;
2341 }
2342 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2343 sc->sc_flags |= WM_F_EEPROM_FLASH;
2344 sc->sc_nvm_wordsize = 2048;
2345 } else {
2346 /* SPI */
2347 sc->sc_flags |= WM_F_EEPROM_SPI;
2348 wm_nvm_set_addrbits_size_eecd(sc);
2349 }
2350 break;
2351 case WM_T_82575:
2352 case WM_T_82576:
2353 case WM_T_82580:
2354 case WM_T_I350:
2355 case WM_T_I354:
2356 case WM_T_80003:
2357 /* SPI */
2358 sc->sc_flags |= WM_F_EEPROM_SPI;
2359 wm_nvm_set_addrbits_size_eecd(sc);
2360 if ((sc->sc_type == WM_T_80003)
2361 || (sc->sc_nvm_wordsize < (1 << 15))) {
2362 sc->nvm.read = wm_nvm_read_eerd;
2363 /* Don't use WM_F_LOCK_EECD because we use EERD */
2364 } else {
2365 sc->nvm.read = wm_nvm_read_spi;
2366 sc->sc_flags |= WM_F_LOCK_EECD;
2367 }
2368 sc->phy.acquire = wm_get_phy_82575;
2369 sc->phy.release = wm_put_phy_82575;
2370 sc->nvm.acquire = wm_get_nvm_80003;
2371 sc->nvm.release = wm_put_nvm_80003;
2372 break;
2373 case WM_T_ICH8:
2374 case WM_T_ICH9:
2375 case WM_T_ICH10:
2376 case WM_T_PCH:
2377 case WM_T_PCH2:
2378 case WM_T_PCH_LPT:
2379 sc->nvm.read = wm_nvm_read_ich8;
2380 /* FLASH */
2381 sc->sc_flags |= WM_F_EEPROM_FLASH;
2382 sc->sc_nvm_wordsize = 2048;
2383 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2384 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2385 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2386 aprint_error_dev(sc->sc_dev,
2387 "can't map FLASH registers\n");
2388 goto out;
2389 }
2390 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2391 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2392 ICH_FLASH_SECTOR_SIZE;
2393 sc->sc_ich8_flash_bank_size =
2394 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2395 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2396 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2397 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
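		/*
		 * Example (hypothetical GFPREG value, assuming 4 KB flash
		 * sectors): with a base field of 0x001 and a limit field of
		 * 0x01f, sc_ich8_flash_base becomes 1 sector worth of bytes
		 * and the bank size becomes (0x1f + 1 - 0x01) sectors of
		 * bytes, halved for the two banks and halved again to give
		 * the size in 16-bit words.
		 */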
2398 sc->sc_flashreg_offset = 0;
2399 sc->phy.acquire = wm_get_swflag_ich8lan;
2400 sc->phy.release = wm_put_swflag_ich8lan;
2401 sc->nvm.acquire = wm_get_nvm_ich8lan;
2402 sc->nvm.release = wm_put_nvm_ich8lan;
2403 break;
2404 case WM_T_PCH_SPT:
2405 case WM_T_PCH_CNP:
2406 sc->nvm.read = wm_nvm_read_spt;
2407 /* SPT has no GFPREG; flash registers mapped through BAR0 */
2408 sc->sc_flags |= WM_F_EEPROM_FLASH;
2409 sc->sc_flasht = sc->sc_st;
2410 sc->sc_flashh = sc->sc_sh;
2411 sc->sc_ich8_flash_base = 0;
2412 sc->sc_nvm_wordsize =
2413 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2414 * NVM_SIZE_MULTIPLIER;
2415 /* It is size in bytes, we want words */
2416 sc->sc_nvm_wordsize /= 2;
2417 /* Assume 2 banks */
2418 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
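		/*
		 * Example (hypothetical strap value): if bits 5:1 of
		 * WMREG_STRAP read back as 7, the NVM region is
		 * (7 + 1) * NVM_SIZE_MULTIPLIER bytes; halving that gives
		 * sc_nvm_wordsize in 16-bit words, and halving again gives
		 * the assumed per-bank size.
		 */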
2419 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2420 sc->phy.acquire = wm_get_swflag_ich8lan;
2421 sc->phy.release = wm_put_swflag_ich8lan;
2422 sc->nvm.acquire = wm_get_nvm_ich8lan;
2423 sc->nvm.release = wm_put_nvm_ich8lan;
2424 break;
2425 case WM_T_I210:
2426 case WM_T_I211:
2427 /* Allow a single clear of the SW semaphore on I210 and newer */
2428 sc->sc_flags |= WM_F_WA_I210_CLSEM;
2429 if (wm_nvm_flash_presence_i210(sc)) {
2430 sc->nvm.read = wm_nvm_read_eerd;
2431 /* Don't use WM_F_LOCK_EECD because we use EERD */
2432 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2433 wm_nvm_set_addrbits_size_eecd(sc);
2434 } else {
2435 sc->nvm.read = wm_nvm_read_invm;
2436 sc->sc_flags |= WM_F_EEPROM_INVM;
2437 sc->sc_nvm_wordsize = INVM_SIZE;
2438 }
2439 sc->phy.acquire = wm_get_phy_82575;
2440 sc->phy.release = wm_put_phy_82575;
2441 sc->nvm.acquire = wm_get_nvm_80003;
2442 sc->nvm.release = wm_put_nvm_80003;
2443 break;
2444 default:
2445 break;
2446 }
2447
2448 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2449 switch (sc->sc_type) {
2450 case WM_T_82571:
2451 case WM_T_82572:
2452 reg = CSR_READ(sc, WMREG_SWSM2);
2453 if ((reg & SWSM2_LOCK) == 0) {
2454 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2455 force_clear_smbi = true;
2456 } else
2457 force_clear_smbi = false;
2458 break;
2459 case WM_T_82573:
2460 case WM_T_82574:
2461 case WM_T_82583:
2462 force_clear_smbi = true;
2463 break;
2464 default:
2465 force_clear_smbi = false;
2466 break;
2467 }
2468 if (force_clear_smbi) {
2469 reg = CSR_READ(sc, WMREG_SWSM);
2470 if ((reg & SWSM_SMBI) != 0)
2471 aprint_error_dev(sc->sc_dev,
2472 "Please update the Bootagent\n");
2473 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2474 }
2475
2476 /*
2477 * Defer printing the EEPROM type until after verifying the checksum.
2478 * This allows the EEPROM type to be printed correctly in the case
2479 * that no EEPROM is attached.
2480 */
2481 /*
2482 * Validate the EEPROM checksum. If the checksum fails, flag
2483 * this for later, so we can fail future reads from the EEPROM.
2484 */
2485 if (wm_nvm_validate_checksum(sc)) {
2486 /*
2487 * Read twice again because some PCI-e parts fail the
2488 * first check due to the link being in sleep state.
2489 */
2490 if (wm_nvm_validate_checksum(sc))
2491 sc->sc_flags |= WM_F_EEPROM_INVALID;
2492 }
2493
2494 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2495 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2496 else {
2497 aprint_verbose_dev(sc->sc_dev, "%u words ",
2498 sc->sc_nvm_wordsize);
2499 if (sc->sc_flags & WM_F_EEPROM_INVM)
2500 aprint_verbose("iNVM");
2501 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2502 aprint_verbose("FLASH(HW)");
2503 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2504 aprint_verbose("FLASH");
2505 else {
2506 if (sc->sc_flags & WM_F_EEPROM_SPI)
2507 eetype = "SPI";
2508 else
2509 eetype = "MicroWire";
2510 aprint_verbose("(%d address bits) %s EEPROM",
2511 sc->sc_nvm_addrbits, eetype);
2512 }
2513 }
2514 wm_nvm_version(sc);
2515 aprint_verbose("\n");
2516
2517 /*
2518 * XXX The first call of wm_gmii_setup_phytype. The result might be
2519 * incorrect.
2520 */
2521 wm_gmii_setup_phytype(sc, 0, 0);
2522
2523 /* Check for WM_F_WOL on some chips before wm_reset() */
2524 switch (sc->sc_type) {
2525 case WM_T_ICH8:
2526 case WM_T_ICH9:
2527 case WM_T_ICH10:
2528 case WM_T_PCH:
2529 case WM_T_PCH2:
2530 case WM_T_PCH_LPT:
2531 case WM_T_PCH_SPT:
2532 case WM_T_PCH_CNP:
2533 apme_mask = WUC_APME;
2534 eeprom_data = CSR_READ(sc, WMREG_WUC);
2535 if ((eeprom_data & apme_mask) != 0)
2536 sc->sc_flags |= WM_F_WOL;
2537 break;
2538 default:
2539 break;
2540 }
2541
2542 /* Reset the chip to a known state. */
2543 wm_reset(sc);
2544
2545 /*
2546 * Check for I21[01] PLL workaround.
2547 *
2548 * Three cases:
2549 * a) Chip is I211.
2550 * b) Chip is I210 and it uses INVM (not FLASH).
2551 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
2552 */
2553 if (sc->sc_type == WM_T_I211)
2554 sc->sc_flags |= WM_F_PLL_WA_I210;
2555 if (sc->sc_type == WM_T_I210) {
2556 if (!wm_nvm_flash_presence_i210(sc))
2557 sc->sc_flags |= WM_F_PLL_WA_I210;
2558 else if ((sc->sc_nvm_ver_major < 3)
2559 || ((sc->sc_nvm_ver_major == 3)
2560 && (sc->sc_nvm_ver_minor < 25))) {
2561 aprint_verbose_dev(sc->sc_dev,
2562 "ROM image version %d.%d is older than 3.25\n",
2563 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2564 sc->sc_flags |= WM_F_PLL_WA_I210;
2565 }
2566 }
2567 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2568 wm_pll_workaround_i210(sc);
2569
2570 wm_get_wakeup(sc);
2571
2572 /* Non-AMT based hardware can now take control from firmware */
2573 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2574 wm_get_hw_control(sc);
2575
2576 /*
2577 * Read the Ethernet address from the EEPROM, if not first found
2578 * in device properties.
2579 */
2580 ea = prop_dictionary_get(dict, "mac-address");
2581 if (ea != NULL) {
2582 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2583 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2584 memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
2585 } else {
2586 if (wm_read_mac_addr(sc, enaddr) != 0) {
2587 aprint_error_dev(sc->sc_dev,
2588 "unable to read Ethernet address\n");
2589 goto out;
2590 }
2591 }
2592
2593 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2594 ether_sprintf(enaddr));
2595
2596 /*
2597 * Read the config info from the EEPROM, and set up various
2598 * bits in the control registers based on their contents.
2599 */
2600 pn = prop_dictionary_get(dict, "i82543-cfg1");
2601 if (pn != NULL) {
2602 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2603 cfg1 = (uint16_t) prop_number_signed_value(pn);
2604 } else {
2605 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2606 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2607 goto out;
2608 }
2609 }
2610
2611 pn = prop_dictionary_get(dict, "i82543-cfg2");
2612 if (pn != NULL) {
2613 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2614 cfg2 = (uint16_t) prop_number_signed_value(pn);
2615 } else {
2616 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2617 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2618 goto out;
2619 }
2620 }
2621
2622 /* check for WM_F_WOL */
2623 switch (sc->sc_type) {
2624 case WM_T_82542_2_0:
2625 case WM_T_82542_2_1:
2626 case WM_T_82543:
2627 /* dummy? */
2628 eeprom_data = 0;
2629 apme_mask = NVM_CFG3_APME;
2630 break;
2631 case WM_T_82544:
2632 apme_mask = NVM_CFG2_82544_APM_EN;
2633 eeprom_data = cfg2;
2634 break;
2635 case WM_T_82546:
2636 case WM_T_82546_3:
2637 case WM_T_82571:
2638 case WM_T_82572:
2639 case WM_T_82573:
2640 case WM_T_82574:
2641 case WM_T_82583:
2642 case WM_T_80003:
2643 case WM_T_82575:
2644 case WM_T_82576:
2645 apme_mask = NVM_CFG3_APME;
2646 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2647 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2648 break;
2649 case WM_T_82580:
2650 case WM_T_I350:
2651 case WM_T_I354:
2652 case WM_T_I210:
2653 case WM_T_I211:
2654 apme_mask = NVM_CFG3_APME;
2655 wm_nvm_read(sc,
2656 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2657 1, &eeprom_data);
2658 break;
2659 case WM_T_ICH8:
2660 case WM_T_ICH9:
2661 case WM_T_ICH10:
2662 case WM_T_PCH:
2663 case WM_T_PCH2:
2664 case WM_T_PCH_LPT:
2665 case WM_T_PCH_SPT:
2666 case WM_T_PCH_CNP:
2667 /* Already checked before wm_reset() */
2668 apme_mask = eeprom_data = 0;
2669 break;
2670 default: /* XXX 82540 */
2671 apme_mask = NVM_CFG3_APME;
2672 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2673 break;
2674 }
2675 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2676 if ((eeprom_data & apme_mask) != 0)
2677 sc->sc_flags |= WM_F_WOL;
2678
2679 /*
2680 * We have the eeprom settings, now apply the special cases
2681 * where the eeprom may be wrong or the board won't support
2682 * wake on lan on a particular port
2683 */
2684 switch (sc->sc_pcidevid) {
2685 case PCI_PRODUCT_INTEL_82546GB_PCIE:
2686 sc->sc_flags &= ~WM_F_WOL;
2687 break;
2688 case PCI_PRODUCT_INTEL_82546EB_FIBER:
2689 case PCI_PRODUCT_INTEL_82546GB_FIBER:
2690 /* Wake events only supported on port A for dual fiber
2691 * regardless of eeprom setting */
2692 if (sc->sc_funcid == 1)
2693 sc->sc_flags &= ~WM_F_WOL;
2694 break;
2695 case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
2696 /* If quad port adapter, disable WoL on all but port A */
2697 if (sc->sc_funcid != 0)
2698 sc->sc_flags &= ~WM_F_WOL;
2699 break;
2700 case PCI_PRODUCT_INTEL_82571EB_FIBER:
2701 /* Wake events only supported on port A for dual fiber
2702 * regardless of eeprom setting */
2703 if (sc->sc_funcid == 1)
2704 sc->sc_flags &= ~WM_F_WOL;
2705 break;
2706 case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
2707 case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
2708 case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
2709 /* If quad port adapter, disable WoL on all but port A */
2710 if (sc->sc_funcid != 0)
2711 sc->sc_flags &= ~WM_F_WOL;
2712 break;
2713 }
2714
2715 if (sc->sc_type >= WM_T_82575) {
2716 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2717 aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
2718 nvmword);
2719 if ((sc->sc_type == WM_T_82575) ||
2720 (sc->sc_type == WM_T_82576)) {
2721 /* Check NVM for autonegotiation */
2722 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
2723 != 0)
2724 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2725 }
2726 if ((sc->sc_type == WM_T_82575) ||
2727 (sc->sc_type == WM_T_I350)) {
2728 if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
2729 sc->sc_flags |= WM_F_MAS;
2730 }
2731 }
2732 }
2733
2734 /*
2735 * XXX need special handling for some multiple port cards
2736 * to disable a particular port.
2737 */
2738
2739 if (sc->sc_type >= WM_T_82544) {
2740 pn = prop_dictionary_get(dict, "i82543-swdpin");
2741 if (pn != NULL) {
2742 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2743 swdpin = (uint16_t) prop_number_signed_value(pn);
2744 } else {
2745 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2746 aprint_error_dev(sc->sc_dev,
2747 "unable to read SWDPIN\n");
2748 goto out;
2749 }
2750 }
2751 }
2752
2753 if (cfg1 & NVM_CFG1_ILOS)
2754 sc->sc_ctrl |= CTRL_ILOS;
2755
2756 /*
2757 * XXX
2758 * This code isn't correct because pins 2 and 3 are located
2759 * in different positions on newer chips. Check all the datasheets.
2760 *
2761 * Until this is resolved, only do this on chips <= 82580.
2762 */
2763 if (sc->sc_type <= WM_T_82580) {
2764 if (sc->sc_type >= WM_T_82544) {
2765 sc->sc_ctrl |=
2766 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2767 CTRL_SWDPIO_SHIFT;
2768 sc->sc_ctrl |=
2769 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2770 CTRL_SWDPINS_SHIFT;
2771 } else {
2772 sc->sc_ctrl |=
2773 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2774 CTRL_SWDPIO_SHIFT;
2775 }
2776 }
2777
2778 if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
2779 wm_nvm_read(sc,
2780 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2781 1, &nvmword);
2782 if (nvmword & NVM_CFG3_ILOS)
2783 sc->sc_ctrl |= CTRL_ILOS;
2784 }
2785
2786 #if 0
2787 if (sc->sc_type >= WM_T_82544) {
2788 if (cfg1 & NVM_CFG1_IPS0)
2789 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2790 if (cfg1 & NVM_CFG1_IPS1)
2791 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2792 sc->sc_ctrl_ext |=
2793 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2794 CTRL_EXT_SWDPIO_SHIFT;
2795 sc->sc_ctrl_ext |=
2796 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2797 CTRL_EXT_SWDPINS_SHIFT;
2798 } else {
2799 sc->sc_ctrl_ext |=
2800 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2801 CTRL_EXT_SWDPIO_SHIFT;
2802 }
2803 #endif
2804
2805 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2806 #if 0
2807 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2808 #endif
2809
2810 if (sc->sc_type == WM_T_PCH) {
2811 uint16_t val;
2812
2813 /* Save the NVM K1 bit setting */
2814 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2815
2816 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2817 sc->sc_nvm_k1_enabled = 1;
2818 else
2819 sc->sc_nvm_k1_enabled = 0;
2820 }
2821
2822 /* Determine if we're GMII, TBI, SERDES or SGMII mode */
2823 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2824 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2825 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2826 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
2827 || sc->sc_type == WM_T_82573
2828 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2829 /* Copper only */
2830 } else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2831 || (sc->sc_type ==WM_T_82580) || (sc->sc_type ==WM_T_I350)
2832 || (sc->sc_type ==WM_T_I354) || (sc->sc_type ==WM_T_I210)
2833 || (sc->sc_type ==WM_T_I211)) {
2834 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2835 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2836 switch (link_mode) {
2837 case CTRL_EXT_LINK_MODE_1000KX:
2838 aprint_normal_dev(sc->sc_dev, "1000KX\n");
2839 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2840 break;
2841 case CTRL_EXT_LINK_MODE_SGMII:
2842 if (wm_sgmii_uses_mdio(sc)) {
2843 aprint_normal_dev(sc->sc_dev,
2844 "SGMII(MDIO)\n");
2845 sc->sc_flags |= WM_F_SGMII;
2846 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2847 break;
2848 }
2849 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2850 /*FALLTHROUGH*/
2851 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2852 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2853 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2854 if (link_mode
2855 == CTRL_EXT_LINK_MODE_SGMII) {
2856 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2857 sc->sc_flags |= WM_F_SGMII;
2858 aprint_verbose_dev(sc->sc_dev,
2859 "SGMII\n");
2860 } else {
2861 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2862 aprint_verbose_dev(sc->sc_dev,
2863 "SERDES\n");
2864 }
2865 break;
2866 }
2867 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2868 aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
2869 else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2870 aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
2871 sc->sc_flags |= WM_F_SGMII;
2872 }
2873 /* Do not change link mode for 100BaseFX */
2874 if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
2875 break;
2876
2877 /* Change current link mode setting */
2878 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2879 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2880 reg |= CTRL_EXT_LINK_MODE_SGMII;
2881 else
2882 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2883 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2884 break;
2885 case CTRL_EXT_LINK_MODE_GMII:
2886 default:
2887 aprint_normal_dev(sc->sc_dev, "Copper\n");
2888 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2889 break;
2890 }
2891
2892 reg &= ~CTRL_EXT_I2C_ENA;
2893 if ((sc->sc_flags & WM_F_SGMII) != 0)
2894 reg |= CTRL_EXT_I2C_ENA;
2895 else
2896 reg &= ~CTRL_EXT_I2C_ENA;
2897 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2898 if ((sc->sc_flags & WM_F_SGMII) != 0) {
2899 if (!wm_sgmii_uses_mdio(sc))
2900 wm_gmii_setup_phytype(sc, 0, 0);
2901 wm_reset_mdicnfg_82580(sc);
2902 }
2903 } else if (sc->sc_type < WM_T_82543 ||
2904 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2905 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2906 aprint_error_dev(sc->sc_dev,
2907 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2908 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2909 }
2910 } else {
2911 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
2912 aprint_error_dev(sc->sc_dev,
2913 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2914 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2915 }
2916 }
2917
2918 if (sc->sc_type >= WM_T_PCH2)
2919 sc->sc_flags |= WM_F_EEE;
2920 else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
2921 && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
2922 /* XXX: Need special handling for I354. (not yet) */
2923 if (sc->sc_type != WM_T_I354)
2924 sc->sc_flags |= WM_F_EEE;
2925 }
2926
2927 /*
2928 * The I350 has a bug where it always strips the CRC whether
2929 * asked to or not. So ask for the stripped CRC here and cope with it in rxeof.
2930 */
2931 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
2932 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
2933 sc->sc_flags |= WM_F_CRC_STRIP;
2934
2935 /* Set device properties (macflags) */
2936 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2937
2938 if (sc->sc_flags != 0) {
2939 snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
2940 aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
2941 }
2942
2943 #ifdef WM_MPSAFE
2944 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2945 #else
2946 sc->sc_core_lock = NULL;
2947 #endif
2948
2949 /* Initialize the media structures accordingly. */
2950 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2951 wm_gmii_mediainit(sc, wmp->wmp_product);
2952 else
2953 wm_tbi_mediainit(sc); /* All others */
2954
2955 ifp = &sc->sc_ethercom.ec_if;
2956 xname = device_xname(sc->sc_dev);
2957 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2958 ifp->if_softc = sc;
2959 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2960 #ifdef WM_MPSAFE
2961 ifp->if_extflags = IFEF_MPSAFE;
2962 #endif
2963 ifp->if_ioctl = wm_ioctl;
2964 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2965 ifp->if_start = wm_nq_start;
2966 /*
2967 * When the number of CPUs is one and the controller can use
2968 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
2969 * That is, wm(4) uses two interrupts: one for Tx/Rx
2970 * and the other for link status changes.
2971 * In this situation, wm_nq_transmit() is disadvantageous
2972 * because of wm_select_txqueue() and pcq(9) overhead.
2973 */
2974 if (wm_is_using_multiqueue(sc))
2975 ifp->if_transmit = wm_nq_transmit;
2976 } else {
2977 ifp->if_start = wm_start;
2978 /*
2979 * wm_transmit() has the same disadvantage as wm_nq_transmit().
2980 */
2981 if (wm_is_using_multiqueue(sc))
2982 ifp->if_transmit = wm_transmit;
2983 }
2984 /* wm(4) does not use ifp->if_watchdog; it uses wm_tick as the watchdog. */
2985 ifp->if_init = wm_init;
2986 ifp->if_stop = wm_stop;
2987 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
2988 IFQ_SET_READY(&ifp->if_snd);
2989
2990 /* Check for jumbo frame */
2991 switch (sc->sc_type) {
2992 case WM_T_82573:
2993 /* XXX limited to 9234 if ASPM is disabled */
2994 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2995 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2996 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2997 break;
2998 case WM_T_82571:
2999 case WM_T_82572:
3000 case WM_T_82574:
3001 case WM_T_82583:
3002 case WM_T_82575:
3003 case WM_T_82576:
3004 case WM_T_82580:
3005 case WM_T_I350:
3006 case WM_T_I354:
3007 case WM_T_I210:
3008 case WM_T_I211:
3009 case WM_T_80003:
3010 case WM_T_ICH9:
3011 case WM_T_ICH10:
3012 case WM_T_PCH2: /* PCH2 supports 9K frame size */
3013 case WM_T_PCH_LPT:
3014 case WM_T_PCH_SPT:
3015 case WM_T_PCH_CNP:
3016 /* XXX limited to 9234 */
3017 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3018 break;
3019 case WM_T_PCH:
3020 /* XXX limited to 4096 */
3021 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3022 break;
3023 case WM_T_82542_2_0:
3024 case WM_T_82542_2_1:
3025 case WM_T_ICH8:
3026 /* No support for jumbo frame */
3027 break;
3028 default:
3029 /* ETHER_MAX_LEN_JUMBO */
3030 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3031 break;
3032 }
3033
3034 /* If we're an i82543 or greater, we can support VLANs. */
3035 if (sc->sc_type >= WM_T_82543) {
3036 sc->sc_ethercom.ec_capabilities |=
3037 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
3038 sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
3039 }
3040
3041 if ((sc->sc_flags & WM_F_EEE) != 0)
3042 sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
3043
3044 /*
3045 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
3046 * on i82543 and later.
3047 */
3048 if (sc->sc_type >= WM_T_82543) {
3049 ifp->if_capabilities |=
3050 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
3051 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
3052 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
3053 IFCAP_CSUM_TCPv6_Tx |
3054 IFCAP_CSUM_UDPv6_Tx;
3055 }
3056
3057 /*
3058 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
3059 *
3060 * 82541GI (8086:1076) ... no
3061 * 82572EI (8086:10b9) ... yes
3062 */
3063 if (sc->sc_type >= WM_T_82571) {
3064 ifp->if_capabilities |=
3065 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3066 }
3067
3068 /*
3069 * If we're an i82544 or greater (except i82547), we can do
3070 * TCP segmentation offload.
3071 */
3072 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
3073 ifp->if_capabilities |= IFCAP_TSOv4;
3074 }
3075
3076 if (sc->sc_type >= WM_T_82571) {
3077 ifp->if_capabilities |= IFCAP_TSOv6;
3078 }
3079
3080 sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
3081 sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
3082 sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
3083 sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
3084
3085 /* Attach the interface. */
3086 error = if_initialize(ifp);
3087 if (error != 0) {
3088 aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
3089 error);
3090 return; /* Error */
3091 }
3092 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
3093 ether_ifattach(ifp, enaddr);
3094 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
3095 if_register(ifp);
3096 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
3097 RND_FLAG_DEFAULT);
3098
3099 #ifdef WM_EVENT_COUNTERS
3100 /* Attach event counters. */
3101 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
3102 NULL, xname, "linkintr");
3103
3104 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
3105 NULL, xname, "tx_xoff");
3106 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
3107 NULL, xname, "tx_xon");
3108 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
3109 NULL, xname, "rx_xoff");
3110 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
3111 NULL, xname, "rx_xon");
3112 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
3113 NULL, xname, "rx_macctl");
3114 #endif /* WM_EVENT_COUNTERS */
3115
3116 sc->sc_txrx_use_workqueue = false;
3117
3118 if (wm_phy_need_linkdown_discard(sc))
3119 wm_set_linkdown_discard(sc);
3120
3121 wm_init_sysctls(sc);
3122
3123 if (pmf_device_register(self, wm_suspend, wm_resume))
3124 pmf_class_network_register(self, ifp);
3125 else
3126 aprint_error_dev(self, "couldn't establish power handler\n");
3127
3128 sc->sc_flags |= WM_F_ATTACHED;
3129 out:
3130 return;
3131 }
3132
3133 /* The detach function (ca_detach) */
3134 static int
3135 wm_detach(device_t self, int flags __unused)
3136 {
3137 struct wm_softc *sc = device_private(self);
3138 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3139 int i;
3140
3141 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
3142 return 0;
3143
3144 /* Stop the interface. Callouts are stopped in it. */
3145 wm_stop(ifp, 1);
3146
3147 pmf_device_deregister(self);
3148
3149 sysctl_teardown(&sc->sc_sysctllog);
3150
3151 #ifdef WM_EVENT_COUNTERS
3152 evcnt_detach(&sc->sc_ev_linkintr);
3153
3154 evcnt_detach(&sc->sc_ev_tx_xoff);
3155 evcnt_detach(&sc->sc_ev_tx_xon);
3156 evcnt_detach(&sc->sc_ev_rx_xoff);
3157 evcnt_detach(&sc->sc_ev_rx_xon);
3158 evcnt_detach(&sc->sc_ev_rx_macctl);
3159 #endif /* WM_EVENT_COUNTERS */
3160
3161 rnd_detach_source(&sc->rnd_source);
3162
3163 /* Tell the firmware about the release */
3164 WM_CORE_LOCK(sc);
3165 wm_release_manageability(sc);
3166 wm_release_hw_control(sc);
3167 wm_enable_wakeup(sc);
3168 WM_CORE_UNLOCK(sc);
3169
3170 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3171
3172 ether_ifdetach(ifp);
3173 if_detach(ifp);
3174 if_percpuq_destroy(sc->sc_ipq);
3175
3176 /* Delete all remaining media. */
3177 ifmedia_fini(&sc->sc_mii.mii_media);
3178
3179 /* Unload RX dmamaps and free mbufs */
3180 for (i = 0; i < sc->sc_nqueues; i++) {
3181 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
3182 mutex_enter(rxq->rxq_lock);
3183 wm_rxdrain(rxq);
3184 mutex_exit(rxq->rxq_lock);
3185 }
3186 /* Must unlock here */
3187
3188 /* Disestablish the interrupt handler */
3189 for (i = 0; i < sc->sc_nintrs; i++) {
3190 if (sc->sc_ihs[i] != NULL) {
3191 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
3192 sc->sc_ihs[i] = NULL;
3193 }
3194 }
3195 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
3196
3197 	/* wm_stop() ensures the workqueue is stopped. */
3198 workqueue_destroy(sc->sc_queue_wq);
3199
3200 for (i = 0; i < sc->sc_nqueues; i++)
3201 softint_disestablish(sc->sc_queue[i].wmq_si);
3202
3203 wm_free_txrx_queues(sc);
3204
3205 /* Unmap the registers */
3206 if (sc->sc_ss) {
3207 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
3208 sc->sc_ss = 0;
3209 }
3210 if (sc->sc_ios) {
3211 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
3212 sc->sc_ios = 0;
3213 }
3214 if (sc->sc_flashs) {
3215 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
3216 sc->sc_flashs = 0;
3217 }
3218
3219 if (sc->sc_core_lock)
3220 mutex_obj_free(sc->sc_core_lock);
3221 if (sc->sc_ich_phymtx)
3222 mutex_obj_free(sc->sc_ich_phymtx);
3223 if (sc->sc_ich_nvmmtx)
3224 mutex_obj_free(sc->sc_ich_nvmmtx);
3225
3226 return 0;
3227 }
3228
3229 static bool
3230 wm_suspend(device_t self, const pmf_qual_t *qual)
3231 {
3232 struct wm_softc *sc = device_private(self);
3233
3234 wm_release_manageability(sc);
3235 wm_release_hw_control(sc);
3236 wm_enable_wakeup(sc);
3237
3238 return true;
3239 }
3240
3241 static bool
3242 wm_resume(device_t self, const pmf_qual_t *qual)
3243 {
3244 struct wm_softc *sc = device_private(self);
3245 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3246 pcireg_t reg;
3247 char buf[256];
3248
3249 reg = CSR_READ(sc, WMREG_WUS);
3250 if (reg != 0) {
3251 snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
3252 device_printf(sc->sc_dev, "wakeup status %s\n", buf);
3253 CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
3254 }
3255
3256 if (sc->sc_type >= WM_T_PCH2)
3257 wm_resume_workarounds_pchlan(sc);
3258 if ((ifp->if_flags & IFF_UP) == 0) {
3259 wm_reset(sc);
3260 /* Non-AMT based hardware can now take control from firmware */
3261 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
3262 wm_get_hw_control(sc);
3263 wm_init_manageability(sc);
3264 } else {
3265 /*
3266 * We called pmf_class_network_register(), so if_init() is
3267 * automatically called when IFF_UP. wm_reset(),
3268 * wm_get_hw_control() and wm_init_manageability() are called
3269 * via wm_init().
3270 */
3271 }
3272
3273 return true;
3274 }
3275
3276 /*
3277 * wm_watchdog: [ifnet interface function]
3278 *
3279 * Watchdog timer handler.
3280 */
3281 static void
3282 wm_watchdog(struct ifnet *ifp)
3283 {
3284 int qid;
3285 struct wm_softc *sc = ifp->if_softc;
3286 	uint16_t hang_queue = 0; /* The max number of queues for wm(4) is 16 (82576). */
3287
3288 for (qid = 0; qid < sc->sc_nqueues; qid++) {
3289 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
3290
3291 wm_watchdog_txq(ifp, txq, &hang_queue);
3292 }
3293
3294 	/* If any of the queues hung up, reset the interface. */
3295 if (hang_queue != 0) {
3296 (void)wm_init(ifp);
3297
3298 /*
3299 		 * Some upper layer code paths still call ifp->if_start()
3300 		 * directly, e.g. ALTQ or single-CPU systems.
3301 */
3302 /* Try to get more packets going. */
3303 ifp->if_start(ifp);
3304 }
3305 }
3306
3307
3308 static void
3309 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3310 {
3311
3312 mutex_enter(txq->txq_lock);
3313 if (txq->txq_sending &&
3314 time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
3315 wm_watchdog_txq_locked(ifp, txq, hang);
3316
3317 mutex_exit(txq->txq_lock);
3318 }
3319
3320 static void
3321 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
3322 uint16_t *hang)
3323 {
3324 struct wm_softc *sc = ifp->if_softc;
3325 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
3326
3327 KASSERT(mutex_owned(txq->txq_lock));
3328
3329 /*
3330 * Since we're using delayed interrupts, sweep up
3331 * before we report an error.
3332 */
3333 wm_txeof(txq, UINT_MAX);
3334
3335 if (txq->txq_sending)
3336 *hang |= __BIT(wmq->wmq_id);
3337
3338 if (txq->txq_free == WM_NTXDESC(txq)) {
3339 log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
3340 device_xname(sc->sc_dev));
3341 } else {
3342 #ifdef WM_DEBUG
3343 int i, j;
3344 struct wm_txsoft *txs;
3345 #endif
3346 log(LOG_ERR,
3347 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3348 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
3349 txq->txq_next);
3350 if_statinc(ifp, if_oerrors);
3351 #ifdef WM_DEBUG
3352 for (i = txq->txq_sdirty; i != txq->txq_snext;
3353 i = WM_NEXTTXS(txq, i)) {
3354 txs = &txq->txq_soft[i];
3355 printf("txs %d tx %d -> %d\n",
3356 i, txs->txs_firstdesc, txs->txs_lastdesc);
3357 for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
3358 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3359 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3360 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
3361 printf("\t %#08x%08x\n",
3362 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
3363 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
3364 } else {
3365 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3366 (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
3367 txq->txq_descs[j].wtx_addr.wa_low);
3368 printf("\t %#04x%02x%02x%08x\n",
3369 txq->txq_descs[j].wtx_fields.wtxu_vlan,
3370 txq->txq_descs[j].wtx_fields.wtxu_options,
3371 txq->txq_descs[j].wtx_fields.wtxu_status,
3372 txq->txq_descs[j].wtx_cmdlen);
3373 }
3374 if (j == txs->txs_lastdesc)
3375 break;
3376 }
3377 }
3378 #endif
3379 }
3380 }
3381
3382 /*
3383 * wm_tick:
3384 *
3385 * One second timer, used to check link status, sweep up
3386 * completed transmit jobs, etc.
3387 */
3388 static void
3389 wm_tick(void *arg)
3390 {
3391 struct wm_softc *sc = arg;
3392 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3393 #ifndef WM_MPSAFE
3394 int s = splnet();
3395 #endif
3396
3397 WM_CORE_LOCK(sc);
3398
3399 if (sc->sc_core_stopping) {
3400 WM_CORE_UNLOCK(sc);
3401 #ifndef WM_MPSAFE
3402 splx(s);
3403 #endif
3404 return;
3405 }
3406
3407 if (sc->sc_type >= WM_T_82542_2_1) {
3408 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3409 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3410 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3411 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3412 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3413 }
3414
3415 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
3416 if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
3417 if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
3418 + CSR_READ(sc, WMREG_CRCERRS)
3419 + CSR_READ(sc, WMREG_ALGNERRC)
3420 + CSR_READ(sc, WMREG_SYMERRC)
3421 + CSR_READ(sc, WMREG_RXERRC)
3422 + CSR_READ(sc, WMREG_SEC)
3423 + CSR_READ(sc, WMREG_CEXTERR)
3424 + CSR_READ(sc, WMREG_RLEC));
3425 /*
3426 	 * WMREG_RNBC is incremented when no receive buffers are available
3427 	 * in host memory. It is not the number of dropped packets, because
3428 	 * the ethernet controller can still receive packets in that case as
3429 	 * long as there is space in the PHY's FIFO.
3430 	 *
3431 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT of your own
3432 	 * instead of if_iqdrops.
3433 */
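	/*
	 * A minimal sketch of such a dedicated counter, assuming a
	 * hypothetical sc_ev_rnbc field (not part of this driver) and
	 * following the existing WM_EVENT_COUNTERS style:
	 *
	 *	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
	 *	    NULL, xname, "rnbc");
	 *	...
	 *	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
	 */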
3434 if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
3435 IF_STAT_PUTREF(ifp);
3436
3437 if (sc->sc_flags & WM_F_HAS_MII)
3438 mii_tick(&sc->sc_mii);
3439 else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
3440 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3441 wm_serdes_tick(sc);
3442 else
3443 wm_tbi_tick(sc);
3444
3445 WM_CORE_UNLOCK(sc);
3446
3447 wm_watchdog(ifp);
3448
3449 callout_schedule(&sc->sc_tick_ch, hz);
3450 }
3451
3452 static int
3453 wm_ifflags_cb(struct ethercom *ec)
3454 {
3455 struct ifnet *ifp = &ec->ec_if;
3456 struct wm_softc *sc = ifp->if_softc;
3457 u_short iffchange;
3458 int ecchange;
3459 bool needreset = false;
3460 int rc = 0;
3461
3462 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3463 device_xname(sc->sc_dev), __func__));
3464
3465 WM_CORE_LOCK(sc);
3466
3467 /*
3468 * Check for if_flags.
3469 * Main usage is to prevent linkdown when opening bpf.
3470 */
3471 iffchange = ifp->if_flags ^ sc->sc_if_flags;
3472 sc->sc_if_flags = ifp->if_flags;
3473 if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3474 needreset = true;
3475 goto ec;
3476 }
3477
3478 /* iff related updates */
3479 if ((iffchange & IFF_PROMISC) != 0)
3480 wm_set_filter(sc);
3481
3482 wm_set_vlan(sc);
3483
3484 ec:
3485 /* Check for ec_capenable. */
3486 ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
3487 sc->sc_ec_capenable = ec->ec_capenable;
3488 if ((ecchange & ~ETHERCAP_EEE) != 0) {
3489 needreset = true;
3490 goto out;
3491 }
3492
3493 /* ec related updates */
3494 wm_set_eee(sc);
3495
3496 out:
3497 if (needreset)
3498 rc = ENETRESET;
3499 WM_CORE_UNLOCK(sc);
3500
3501 return rc;
3502 }
3503
3504 static bool
3505 wm_phy_need_linkdown_discard(struct wm_softc *sc)
3506 {
3507
3508 	switch (sc->sc_phytype) {
3509 case WMPHY_82577: /* ihphy */
3510 case WMPHY_82578: /* atphy */
3511 case WMPHY_82579: /* ihphy */
3512 case WMPHY_I217: /* ihphy */
3513 case WMPHY_82580: /* ihphy */
3514 case WMPHY_I350: /* ihphy */
3515 return true;
3516 default:
3517 return false;
3518 }
3519 }
3520
3521 static void
3522 wm_set_linkdown_discard(struct wm_softc *sc)
3523 {
3524
3525 for (int i = 0; i < sc->sc_nqueues; i++) {
3526 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3527
3528 mutex_enter(txq->txq_lock);
3529 txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
3530 mutex_exit(txq->txq_lock);
3531 }
3532 }
3533
3534 static void
3535 wm_clear_linkdown_discard(struct wm_softc *sc)
3536 {
3537
3538 for (int i = 0; i < sc->sc_nqueues; i++) {
3539 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3540
3541 mutex_enter(txq->txq_lock);
3542 txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
3543 mutex_exit(txq->txq_lock);
3544 }
3545 }
3546
3547 /*
3548 * wm_ioctl: [ifnet interface function]
3549 *
3550 * Handle control requests from the operator.
3551 */
3552 static int
3553 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3554 {
3555 struct wm_softc *sc = ifp->if_softc;
3556 struct ifreq *ifr = (struct ifreq *)data;
3557 struct ifaddr *ifa = (struct ifaddr *)data;
3558 struct sockaddr_dl *sdl;
3559 int s, error;
3560
3561 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3562 device_xname(sc->sc_dev), __func__));
3563
3564 #ifndef WM_MPSAFE
3565 s = splnet();
3566 #endif
3567 switch (cmd) {
3568 case SIOCSIFMEDIA:
3569 WM_CORE_LOCK(sc);
3570 /* Flow control requires full-duplex mode. */
3571 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3572 (ifr->ifr_media & IFM_FDX) == 0)
3573 ifr->ifr_media &= ~IFM_ETH_FMASK;
3574 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3575 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3576 /* We can do both TXPAUSE and RXPAUSE. */
3577 ifr->ifr_media |=
3578 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3579 }
3580 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3581 }
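		/*
		 * A hypothetical usage example (not taken from this driver):
		 * a command such as
		 * "ifconfig wm0 media 1000baseT mediaopt full-duplex,flowcontrol"
		 * reaches this path with IFM_FLOW set, which the code above
		 * expands to both IFM_ETH_TXPAUSE and IFM_ETH_RXPAUSE.
		 */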
3582 WM_CORE_UNLOCK(sc);
3583 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3584 if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
3585 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE)
3586 wm_set_linkdown_discard(sc);
3587 else
3588 wm_clear_linkdown_discard(sc);
3589 }
3590 break;
3591 case SIOCINITIFADDR:
3592 WM_CORE_LOCK(sc);
3593 if (ifa->ifa_addr->sa_family == AF_LINK) {
3594 sdl = satosdl(ifp->if_dl->ifa_addr);
3595 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3596 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3597 /* Unicast address is the first multicast entry */
3598 wm_set_filter(sc);
3599 error = 0;
3600 WM_CORE_UNLOCK(sc);
3601 break;
3602 }
3603 WM_CORE_UNLOCK(sc);
3604 if (((ifp->if_flags & IFF_UP) == 0) && wm_phy_need_linkdown_discard(sc))
3605 wm_clear_linkdown_discard(sc);
3606 /*FALLTHROUGH*/
3607 default:
3608 if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
3609 if (((ifp->if_flags & IFF_UP) == 0) && ((ifr->ifr_flags & IFF_UP) != 0)) {
3610 wm_clear_linkdown_discard(sc);
3611 } else if (((ifp->if_flags & IFF_UP) != 0) && ((ifr->ifr_flags & IFF_UP) == 0)) {
3612 wm_set_linkdown_discard(sc);
3613 }
3614 }
3615 #ifdef WM_MPSAFE
3616 s = splnet();
3617 #endif
3618 /* It may call wm_start, so unlock here */
3619 error = ether_ioctl(ifp, cmd, data);
3620 #ifdef WM_MPSAFE
3621 splx(s);
3622 #endif
3623 if (error != ENETRESET)
3624 break;
3625
3626 error = 0;
3627
3628 if (cmd == SIOCSIFCAP)
3629 error = (*ifp->if_init)(ifp);
3630 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3631 ;
3632 else if (ifp->if_flags & IFF_RUNNING) {
3633 /*
3634 * Multicast list has changed; set the hardware filter
3635 * accordingly.
3636 */
3637 WM_CORE_LOCK(sc);
3638 wm_set_filter(sc);
3639 WM_CORE_UNLOCK(sc);
3640 }
3641 break;
3642 }
3643
3644 #ifndef WM_MPSAFE
3645 splx(s);
3646 #endif
3647 return error;
3648 }
3649
3650 /* MAC address related */
3651
3652 /*
3653 * Get the offset of MAC address and return it.
3654  * If an error occurs, offset 0 is used.
3655 */
3656 static uint16_t
3657 wm_check_alt_mac_addr(struct wm_softc *sc)
3658 {
3659 uint16_t myea[ETHER_ADDR_LEN / 2];
3660 uint16_t offset = NVM_OFF_MACADDR;
3661
3662 /* Try to read alternative MAC address pointer */
3663 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3664 return 0;
3665
3666 	/* Check whether the pointer is valid. */
3667 if ((offset == 0x0000) || (offset == 0xffff))
3668 return 0;
3669
3670 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3671 /*
3672 	 * Check whether the alternative MAC address is valid.
3673 	 * Some cards have a pointer other than 0xffff but do not actually
3674 	 * use an alternative MAC address.
3675 *
3676 * Check whether the broadcast bit is set or not.
3677 */
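	/*
	 * For example (hypothetical values): a first octet of 0x00 passes the
	 * check below because the multicast/broadcast bit (bit 0) is clear,
	 * while 0x01 or 0xff would fail it.
	 */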
3678 if (wm_nvm_read(sc, offset, 1, myea) == 0)
3679 if (((myea[0] & 0xff) & 0x01) == 0)
3680 return offset; /* Found */
3681
3682 /* Not found */
3683 return 0;
3684 }
3685
3686 static int
3687 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3688 {
3689 uint16_t myea[ETHER_ADDR_LEN / 2];
3690 uint16_t offset = NVM_OFF_MACADDR;
3691 int do_invert = 0;
3692
3693 switch (sc->sc_type) {
3694 case WM_T_82580:
3695 case WM_T_I350:
3696 case WM_T_I354:
3697 /* EEPROM Top Level Partitioning */
3698 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3699 break;
3700 case WM_T_82571:
3701 case WM_T_82575:
3702 case WM_T_82576:
3703 case WM_T_80003:
3704 case WM_T_I210:
3705 case WM_T_I211:
3706 offset = wm_check_alt_mac_addr(sc);
3707 if (offset == 0)
3708 if ((sc->sc_funcid & 0x01) == 1)
3709 do_invert = 1;
3710 break;
3711 default:
3712 if ((sc->sc_funcid & 0x01) == 1)
3713 do_invert = 1;
3714 break;
3715 }
3716
3717 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
3718 goto bad;
3719
3720 enaddr[0] = myea[0] & 0xff;
3721 enaddr[1] = myea[0] >> 8;
3722 enaddr[2] = myea[1] & 0xff;
3723 enaddr[3] = myea[1] >> 8;
3724 enaddr[4] = myea[2] & 0xff;
3725 enaddr[5] = myea[2] >> 8;
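	/*
	 * For example (hypothetical NVM contents): words 0x2211, 0x4433 and
	 * 0x6655 unpack to the MAC address 11:22:33:44:55:66, since each word
	 * stores its low byte first.
	 */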
3726
3727 /*
3728 * Toggle the LSB of the MAC address on the second port
3729 * of some dual port cards.
3730 */
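	/*
	 * For example (hypothetical address): 11:22:33:44:55:66 on the first
	 * port would become 11:22:33:44:55:67 on the second.
	 */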
3731 if (do_invert != 0)
3732 enaddr[5] ^= 1;
3733
3734 return 0;
3735
3736 bad:
3737 return -1;
3738 }
3739
3740 /*
3741 * wm_set_ral:
3742 *
3743  *	Set an entry in the receive address list.
3744 */
3745 static void
3746 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3747 {
3748 uint32_t ral_lo, ral_hi, addrl, addrh;
3749 uint32_t wlock_mac;
3750 int rv;
3751
3752 if (enaddr != NULL) {
3753 ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
3754 ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
3755 ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
3756 ral_hi |= RAL_AV;
3757 } else {
3758 ral_lo = 0;
3759 ral_hi = 0;
3760 }
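	/*
	 * For example (hypothetical address): 00:11:22:33:44:55 is packed as
	 * ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV.
	 */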
3761
3762 switch (sc->sc_type) {
3763 case WM_T_82542_2_0:
3764 case WM_T_82542_2_1:
3765 case WM_T_82543:
3766 CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
3767 CSR_WRITE_FLUSH(sc);
3768 CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
3769 CSR_WRITE_FLUSH(sc);
3770 break;
3771 case WM_T_PCH2:
3772 case WM_T_PCH_LPT:
3773 case WM_T_PCH_SPT:
3774 case WM_T_PCH_CNP:
3775 if (idx == 0) {
3776 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3777 CSR_WRITE_FLUSH(sc);
3778 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3779 CSR_WRITE_FLUSH(sc);
3780 return;
3781 }
3782 if (sc->sc_type != WM_T_PCH2) {
3783 wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
3784 FWSM_WLOCK_MAC);
3785 addrl = WMREG_SHRAL(idx - 1);
3786 addrh = WMREG_SHRAH(idx - 1);
3787 } else {
3788 wlock_mac = 0;
3789 addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
3790 addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
3791 }
3792
3793 if ((wlock_mac == 0) || (idx <= wlock_mac)) {
3794 rv = wm_get_swflag_ich8lan(sc);
3795 if (rv != 0)
3796 return;
3797 CSR_WRITE(sc, addrl, ral_lo);
3798 CSR_WRITE_FLUSH(sc);
3799 CSR_WRITE(sc, addrh, ral_hi);
3800 CSR_WRITE_FLUSH(sc);
3801 wm_put_swflag_ich8lan(sc);
3802 }
3803
3804 break;
3805 default:
3806 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3807 CSR_WRITE_FLUSH(sc);
3808 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3809 CSR_WRITE_FLUSH(sc);
3810 break;
3811 }
3812 }
3813
3814 /*
3815 * wm_mchash:
3816 *
3817 * Compute the hash of the multicast address for the 4096-bit
3818 * multicast filter.
3819 */
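/*
 * A worked example (hypothetical address, non-ICH chip, sc_mchash_type 0):
 * for 01:00:5e:00:00:01, enaddr[4] = 0x00 and enaddr[5] = 0x01, so
 * hash = (0x00 >> 4) | (0x01 << 4) = 0x010.
 */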
3820 static uint32_t
3821 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3822 {
3823 static const int lo_shift[4] = { 4, 3, 2, 0 };
3824 static const int hi_shift[4] = { 4, 5, 6, 8 };
3825 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3826 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3827 uint32_t hash;
3828
3829 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3830 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3831 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3832 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
3833 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3834 (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3835 return (hash & 0x3ff);
3836 }
3837 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3838 (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3839
3840 return (hash & 0xfff);
3841 }
3842
3843 /*
3844  * wm_rar_count:
3845  *	Return the number of receive address (RAL/RAR) entries for this chip.
3846  */
3847 static int
3848 wm_rar_count(struct wm_softc *sc)
3849 {
3850 int size;
3851
3852 switch (sc->sc_type) {
3853 case WM_T_ICH8:
3854 		size = WM_RAL_TABSIZE_ICH8 - 1;
3855 break;
3856 case WM_T_ICH9:
3857 case WM_T_ICH10:
3858 case WM_T_PCH:
3859 size = WM_RAL_TABSIZE_ICH8;
3860 break;
3861 case WM_T_PCH2:
3862 size = WM_RAL_TABSIZE_PCH2;
3863 break;
3864 case WM_T_PCH_LPT:
3865 case WM_T_PCH_SPT:
3866 case WM_T_PCH_CNP:
3867 size = WM_RAL_TABSIZE_PCH_LPT;
3868 break;
3869 case WM_T_82575:
3870 case WM_T_I210:
3871 case WM_T_I211:
3872 size = WM_RAL_TABSIZE_82575;
3873 break;
3874 case WM_T_82576:
3875 case WM_T_82580:
3876 size = WM_RAL_TABSIZE_82576;
3877 break;
3878 case WM_T_I350:
3879 case WM_T_I354:
3880 size = WM_RAL_TABSIZE_I350;
3881 break;
3882 default:
3883 size = WM_RAL_TABSIZE;
3884 }
3885
3886 return size;
3887 }
3888
3889 /*
3890 * wm_set_filter:
3891 *
3892 * Set up the receive filter.
3893 */
3894 static void
3895 wm_set_filter(struct wm_softc *sc)
3896 {
3897 struct ethercom *ec = &sc->sc_ethercom;
3898 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3899 struct ether_multi *enm;
3900 struct ether_multistep step;
3901 bus_addr_t mta_reg;
3902 uint32_t hash, reg, bit;
3903 int i, size, ralmax, rv;
3904
3905 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3906 device_xname(sc->sc_dev), __func__));
3907
3908 if (sc->sc_type >= WM_T_82544)
3909 mta_reg = WMREG_CORDOVA_MTA;
3910 else
3911 mta_reg = WMREG_MTA;
3912
3913 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3914
3915 if (ifp->if_flags & IFF_BROADCAST)
3916 sc->sc_rctl |= RCTL_BAM;
3917 if (ifp->if_flags & IFF_PROMISC) {
3918 sc->sc_rctl |= RCTL_UPE;
3919 ETHER_LOCK(ec);
3920 ec->ec_flags |= ETHER_F_ALLMULTI;
3921 ETHER_UNLOCK(ec);
3922 goto allmulti;
3923 }
3924
3925 /*
3926 * Set the station address in the first RAL slot, and
3927 * clear the remaining slots.
3928 */
3929 size = wm_rar_count(sc);
3930 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3931
3932 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
3933 || (sc->sc_type == WM_T_PCH_CNP)) {
3934 i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3935 switch (i) {
3936 case 0:
3937 /* We can use all entries */
3938 ralmax = size;
3939 break;
3940 case 1:
3941 /* Only RAR[0] */
3942 ralmax = 1;
3943 break;
3944 default:
3945 /* Available SHRA + RAR[0] */
3946 ralmax = i + 1;
3947 }
3948 } else
3949 ralmax = size;
3950 for (i = 1; i < size; i++) {
3951 if (i < ralmax)
3952 wm_set_ral(sc, NULL, i);
3953 }
3954
3955 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3956 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3957 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3958 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
3959 size = WM_ICH8_MC_TABSIZE;
3960 else
3961 size = WM_MC_TABSIZE;
3962 /* Clear out the multicast table. */
3963 for (i = 0; i < size; i++) {
3964 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3965 CSR_WRITE_FLUSH(sc);
3966 }
3967
3968 ETHER_LOCK(ec);
3969 ETHER_FIRST_MULTI(step, ec, enm);
3970 while (enm != NULL) {
3971 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3972 ec->ec_flags |= ETHER_F_ALLMULTI;
3973 ETHER_UNLOCK(ec);
3974 /*
3975 * We must listen to a range of multicast addresses.
3976 * For now, just accept all multicasts, rather than
3977 * trying to set only those filter bits needed to match
3978 * the range. (At this time, the only use of address
3979 * ranges is for IP multicast routing, for which the
3980 * range is big enough to require all bits set.)
3981 */
3982 goto allmulti;
3983 }
3984
3985 hash = wm_mchash(sc, enm->enm_addrlo);
3986
3987 reg = (hash >> 5);
3988 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3989 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3990 || (sc->sc_type == WM_T_PCH2)
3991 || (sc->sc_type == WM_T_PCH_LPT)
3992 || (sc->sc_type == WM_T_PCH_SPT)
3993 || (sc->sc_type == WM_T_PCH_CNP))
3994 reg &= 0x1f;
3995 else
3996 reg &= 0x7f;
3997 bit = hash & 0x1f;
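		/*
		 * For example (hypothetical hash value): hash 0x123 selects
		 * MTA register 0x123 >> 5 = 9 and bit 0x123 & 0x1f = 3
		 * within it.
		 */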
3998
3999 hash = CSR_READ(sc, mta_reg + (reg << 2));
4000 hash |= 1U << bit;
4001
4002 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
4003 /*
4004 * 82544 Errata 9: Certain register cannot be written
4005 * with particular alignments in PCI-X bus operation
4006 * (FCAH, MTA and VFTA).
4007 */
4008 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4009 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4010 CSR_WRITE_FLUSH(sc);
4011 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4012 CSR_WRITE_FLUSH(sc);
4013 } else {
4014 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4015 CSR_WRITE_FLUSH(sc);
4016 }
4017
4018 ETHER_NEXT_MULTI(step, enm);
4019 }
4020 ec->ec_flags &= ~ETHER_F_ALLMULTI;
4021 ETHER_UNLOCK(ec);
4022
4023 goto setit;
4024
4025 allmulti:
4026 sc->sc_rctl |= RCTL_MPE;
4027
4028 setit:
4029 if (sc->sc_type >= WM_T_PCH2) {
4030 if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4031 && (ifp->if_mtu > ETHERMTU))
4032 rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
4033 else
4034 rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
4035 if (rv != 0)
4036 device_printf(sc->sc_dev,
4037 "Failed to do workaround for jumbo frame.\n");
4038 }
4039
4040 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4041 }
4042
4043 /* Reset and init related */
4044
4045 static void
4046 wm_set_vlan(struct wm_softc *sc)
4047 {
4048
4049 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4050 device_xname(sc->sc_dev), __func__));
4051
4052 /* Deal with VLAN enables. */
4053 if (VLAN_ATTACHED(&sc->sc_ethercom))
4054 sc->sc_ctrl |= CTRL_VME;
4055 else
4056 sc->sc_ctrl &= ~CTRL_VME;
4057
4058 /* Write the control registers. */
4059 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4060 }
4061
4062 static void
4063 wm_set_pcie_completion_timeout(struct wm_softc *sc)
4064 {
4065 uint32_t gcr;
4066 pcireg_t ctrl2;
4067
4068 gcr = CSR_READ(sc, WMREG_GCR);
4069
4070 /* Only take action if timeout value is defaulted to 0 */
4071 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
4072 goto out;
4073
4074 if ((gcr & GCR_CAP_VER2) == 0) {
4075 gcr |= GCR_CMPL_TMOUT_10MS;
4076 goto out;
4077 }
4078
4079 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
4080 sc->sc_pcixe_capoff + PCIE_DCSR2);
4081 ctrl2 |= WM_PCIE_DCSR2_16MS;
4082 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
4083 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
4084
4085 out:
4086 /* Disable completion timeout resend */
4087 gcr &= ~GCR_CMPL_TMOUT_RESEND;
4088
4089 CSR_WRITE(sc, WMREG_GCR, gcr);
4090 }
4091
4092 void
4093 wm_get_auto_rd_done(struct wm_softc *sc)
4094 {
4095 int i;
4096
4097 /* wait for eeprom to reload */
4098 switch (sc->sc_type) {
4099 case WM_T_82571:
4100 case WM_T_82572:
4101 case WM_T_82573:
4102 case WM_T_82574:
4103 case WM_T_82583:
4104 case WM_T_82575:
4105 case WM_T_82576:
4106 case WM_T_82580:
4107 case WM_T_I350:
4108 case WM_T_I354:
4109 case WM_T_I210:
4110 case WM_T_I211:
4111 case WM_T_80003:
4112 case WM_T_ICH8:
4113 case WM_T_ICH9:
4114 for (i = 0; i < 10; i++) {
4115 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4116 break;
4117 delay(1000);
4118 }
4119 if (i == 10) {
4120 log(LOG_ERR, "%s: auto read from eeprom failed to "
4121 "complete\n", device_xname(sc->sc_dev));
4122 }
4123 break;
4124 default:
4125 break;
4126 }
4127 }
4128
4129 void
4130 wm_lan_init_done(struct wm_softc *sc)
4131 {
4132 uint32_t reg = 0;
4133 int i;
4134
4135 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4136 device_xname(sc->sc_dev), __func__));
4137
4138 /* Wait for eeprom to reload */
4139 switch (sc->sc_type) {
4140 case WM_T_ICH10:
4141 case WM_T_PCH:
4142 case WM_T_PCH2:
4143 case WM_T_PCH_LPT:
4144 case WM_T_PCH_SPT:
4145 case WM_T_PCH_CNP:
4146 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4147 reg = CSR_READ(sc, WMREG_STATUS);
4148 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4149 break;
4150 delay(100);
4151 }
4152 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4153 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4154 "complete\n", device_xname(sc->sc_dev), __func__);
4155 }
4156 break;
4157 default:
4158 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4159 __func__);
4160 break;
4161 }
4162
4163 reg &= ~STATUS_LAN_INIT_DONE;
4164 CSR_WRITE(sc, WMREG_STATUS, reg);
4165 }
4166
4167 void
4168 wm_get_cfg_done(struct wm_softc *sc)
4169 {
4170 int mask;
4171 uint32_t reg;
4172 int i;
4173
4174 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4175 device_xname(sc->sc_dev), __func__));
4176
4177 /* Wait for eeprom to reload */
4178 switch (sc->sc_type) {
4179 case WM_T_82542_2_0:
4180 case WM_T_82542_2_1:
4181 /* null */
4182 break;
4183 case WM_T_82543:
4184 case WM_T_82544:
4185 case WM_T_82540:
4186 case WM_T_82545:
4187 case WM_T_82545_3:
4188 case WM_T_82546:
4189 case WM_T_82546_3:
4190 case WM_T_82541:
4191 case WM_T_82541_2:
4192 case WM_T_82547:
4193 case WM_T_82547_2:
4194 case WM_T_82573:
4195 case WM_T_82574:
4196 case WM_T_82583:
4197 /* generic */
4198 delay(10*1000);
4199 break;
4200 case WM_T_80003:
4201 case WM_T_82571:
4202 case WM_T_82572:
4203 case WM_T_82575:
4204 case WM_T_82576:
4205 case WM_T_82580:
4206 case WM_T_I350:
4207 case WM_T_I354:
4208 case WM_T_I210:
4209 case WM_T_I211:
4210 if (sc->sc_type == WM_T_82571) {
4211 /* Only 82571 shares port 0 */
4212 mask = EEMNGCTL_CFGDONE_0;
4213 } else
4214 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
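		/*
		 * For example, PCI function 1 ends up polling
		 * EEMNGCTL_CFGDONE_0 << 1, i.e. the configuration-done bit
		 * for function 1.
		 */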
4215 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4216 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4217 break;
4218 delay(1000);
4219 }
4220 if (i >= WM_PHY_CFG_TIMEOUT)
4221 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
4222 device_xname(sc->sc_dev), __func__));
4223 break;
4224 case WM_T_ICH8:
4225 case WM_T_ICH9:
4226 case WM_T_ICH10:
4227 case WM_T_PCH:
4228 case WM_T_PCH2:
4229 case WM_T_PCH_LPT:
4230 case WM_T_PCH_SPT:
4231 case WM_T_PCH_CNP:
4232 delay(10*1000);
4233 if (sc->sc_type >= WM_T_ICH10)
4234 wm_lan_init_done(sc);
4235 else
4236 wm_get_auto_rd_done(sc);
4237
4238 /* Clear PHY Reset Asserted bit */
4239 reg = CSR_READ(sc, WMREG_STATUS);
4240 if ((reg & STATUS_PHYRA) != 0)
4241 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
4242 break;
4243 default:
4244 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4245 __func__);
4246 break;
4247 }
4248 }
4249
4250 int
4251 wm_phy_post_reset(struct wm_softc *sc)
4252 {
4253 device_t dev = sc->sc_dev;
4254 uint16_t reg;
4255 int rv = 0;
4256
4257 /* This function is only for ICH8 and newer. */
4258 if (sc->sc_type < WM_T_ICH8)
4259 return 0;
4260
4261 if (wm_phy_resetisblocked(sc)) {
4262 /* XXX */
4263 device_printf(dev, "PHY is blocked\n");
4264 return -1;
4265 }
4266
4267 /* Allow time for h/w to get to quiescent state after reset */
4268 delay(10*1000);
4269
4270 /* Perform any necessary post-reset workarounds */
4271 if (sc->sc_type == WM_T_PCH)
4272 rv = wm_hv_phy_workarounds_ich8lan(sc);
4273 else if (sc->sc_type == WM_T_PCH2)
4274 rv = wm_lv_phy_workarounds_ich8lan(sc);
4275 if (rv != 0)
4276 return rv;
4277
4278 /* Clear the host wakeup bit after lcd reset */
4279 if (sc->sc_type >= WM_T_PCH) {
4280 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
4281 reg &= ~BM_WUC_HOST_WU_BIT;
4282 wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
4283 }
4284
4285 /* Configure the LCD with the extended configuration region in NVM */
4286 if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
4287 return rv;
4288
4289 /* Configure the LCD with the OEM bits in NVM */
4290 rv = wm_oem_bits_config_ich8lan(sc, true);
4291
4292 if (sc->sc_type == WM_T_PCH2) {
4293 /* Ungate automatic PHY configuration on non-managed 82579 */
4294 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
4295 delay(10 * 1000);
4296 wm_gate_hw_phy_config_ich8lan(sc, false);
4297 }
4298 /* Set EEE LPI Update Timer to 200usec */
4299 rv = sc->phy.acquire(sc);
4300 if (rv)
4301 return rv;
4302 rv = wm_write_emi_reg_locked(dev,
4303 I82579_LPI_UPDATE_TIMER, 0x1387);
4304 sc->phy.release(sc);
4305 }
4306
4307 return rv;
4308 }
4309
4310 /* Only for PCH and newer */
4311 static int
4312 wm_write_smbus_addr(struct wm_softc *sc)
4313 {
4314 uint32_t strap, freq;
4315 uint16_t phy_data;
4316 int rv;
4317
4318 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4319 device_xname(sc->sc_dev), __func__));
4320 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
4321
4322 strap = CSR_READ(sc, WMREG_STRAP);
4323 freq = __SHIFTOUT(strap, STRAP_FREQ);
4324
4325 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
4326 if (rv != 0)
4327 return -1;
4328
4329 phy_data &= ~HV_SMB_ADDR_ADDR;
4330 phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
4331 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
4332
4333 if (sc->sc_phytype == WMPHY_I217) {
4334 /* Restore SMBus frequency */
4335 		if (freq--) {
4336 phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
4337 | HV_SMB_ADDR_FREQ_HIGH);
4338 phy_data |= __SHIFTIN((freq & 0x01) != 0,
4339 HV_SMB_ADDR_FREQ_LOW);
4340 phy_data |= __SHIFTIN((freq & 0x02) != 0,
4341 HV_SMB_ADDR_FREQ_HIGH);
4342 } else
4343 DPRINTF(sc, WM_DEBUG_INIT,
4344 ("%s: %s Unsupported SMB frequency in PHY\n",
4345 device_xname(sc->sc_dev), __func__));
4346 }
4347
4348 return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
4349 phy_data);
4350 }
4351
4352 static int
4353 wm_init_lcd_from_nvm(struct wm_softc *sc)
4354 {
4355 uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
4356 uint16_t phy_page = 0;
4357 int rv = 0;
4358
4359 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4360 device_xname(sc->sc_dev), __func__));
4361
4362 switch (sc->sc_type) {
4363 case WM_T_ICH8:
4364 if ((sc->sc_phytype == WMPHY_UNKNOWN)
4365 || (sc->sc_phytype != WMPHY_IGP_3))
4366 return 0;
4367
4368 if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
4369 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
4370 sw_cfg_mask = FEXTNVM_SW_CONFIG;
4371 break;
4372 }
4373 /* FALLTHROUGH */
4374 case WM_T_PCH:
4375 case WM_T_PCH2:
4376 case WM_T_PCH_LPT:
4377 case WM_T_PCH_SPT:
4378 case WM_T_PCH_CNP:
4379 sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
4380 break;
4381 default:
4382 return 0;
4383 }
4384
4385 if ((rv = sc->phy.acquire(sc)) != 0)
4386 return rv;
4387
4388 reg = CSR_READ(sc, WMREG_FEXTNVM);
4389 if ((reg & sw_cfg_mask) == 0)
4390 goto release;
4391
4392 /*
4393 * Make sure HW does not configure LCD from PHY extended configuration
4394 * before SW configuration
4395 */
4396 extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
4397 if ((sc->sc_type < WM_T_PCH2)
4398 && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
4399 goto release;
4400
4401 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
4402 device_xname(sc->sc_dev), __func__));
4403 /* word_addr is in DWORD */
4404 word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
4405
4406 reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
4407 cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
4408 if (cnf_size == 0)
4409 goto release;
4410
4411 if (((sc->sc_type == WM_T_PCH)
4412 && ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
4413 || (sc->sc_type > WM_T_PCH)) {
4414 /*
4415 * HW configures the SMBus address and LEDs when the OEM and
4416 * LCD Write Enable bits are set in the NVM. When both NVM bits
4417 * are cleared, SW will configure them instead.
4418 */
4419 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
4420 device_xname(sc->sc_dev), __func__));
4421 if ((rv = wm_write_smbus_addr(sc)) != 0)
4422 goto release;
4423
4424 reg = CSR_READ(sc, WMREG_LEDCTL);
4425 rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
4426 (uint16_t)reg);
4427 if (rv != 0)
4428 goto release;
4429 }
4430
4431 /* Configure LCD from extended configuration region. */
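	/*
	 * Each entry is a (data, register address) word pair, so entry i
	 * lives at NVM words word_addr + 2 * i and word_addr + 2 * i + 1.
	 */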
4432 for (i = 0; i < cnf_size; i++) {
4433 uint16_t reg_data, reg_addr;
4434
4435 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
4436 goto release;
4437
4438 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
4439 goto release;
4440
4441 if (reg_addr == IGPHY_PAGE_SELECT)
4442 phy_page = reg_data;
4443
4444 reg_addr &= IGPHY_MAXREGADDR;
4445 reg_addr |= phy_page;
4446
4447 KASSERT(sc->phy.writereg_locked != NULL);
4448 rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
4449 reg_data);
4450 }
4451
4452 release:
4453 sc->phy.release(sc);
4454 return rv;
4455 }
4456
4457 /*
4458 * wm_oem_bits_config_ich8lan - SW-based LCD Configuration
4459 * @sc: pointer to the HW structure
4460 * @d0_state: boolean if entering d0 or d3 device state
4461 *
4462 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
4463 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
4464 * in NVM determines whether HW should configure LPLU and Gbe Disable.
4465 */
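/*
 * Usage sketch (not exhaustive): wm_phy_post_reset() calls this with
 * d0_state = true after an LCD reset; a path entering D3 (e.g. suspend
 * with wakeup enabled) would, hypothetically, pass d0_state = false.
 */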
4466 int
4467 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
4468 {
4469 uint32_t mac_reg;
4470 uint16_t oem_reg;
4471 int rv;
4472
4473 if (sc->sc_type < WM_T_PCH)
4474 return 0;
4475
4476 rv = sc->phy.acquire(sc);
4477 if (rv != 0)
4478 return rv;
4479
4480 if (sc->sc_type == WM_T_PCH) {
4481 mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4482 if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
4483 goto release;
4484 }
4485
4486 mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
4487 if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
4488 goto release;
4489
4490 mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
4491
4492 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
4493 if (rv != 0)
4494 goto release;
4495 oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
4496
4497 if (d0_state) {
4498 if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
4499 oem_reg |= HV_OEM_BITS_A1KDIS;
4500 if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
4501 oem_reg |= HV_OEM_BITS_LPLU;
4502 } else {
4503 if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
4504 != 0)
4505 oem_reg |= HV_OEM_BITS_A1KDIS;
4506 if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
4507 != 0)
4508 oem_reg |= HV_OEM_BITS_LPLU;
4509 }
4510
4511 /* Set Restart auto-neg to activate the bits */
4512 if ((d0_state || (sc->sc_type != WM_T_PCH))
4513 && (wm_phy_resetisblocked(sc) == false))
4514 oem_reg |= HV_OEM_BITS_ANEGNOW;
4515
4516 rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
4517
4518 release:
4519 sc->phy.release(sc);
4520
4521 return rv;
4522 }
4523
4524 /* Init hardware bits */
4525 void
4526 wm_initialize_hardware_bits(struct wm_softc *sc)
4527 {
4528 uint32_t tarc0, tarc1, reg;
4529
4530 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4531 device_xname(sc->sc_dev), __func__));
4532
4533 /* For 82571 variant, 80003 and ICHs */
4534 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
4535 || (sc->sc_type >= WM_T_80003)) {
4536
4537 /* Transmit Descriptor Control 0 */
4538 reg = CSR_READ(sc, WMREG_TXDCTL(0));
4539 reg |= TXDCTL_COUNT_DESC;
4540 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
4541
4542 /* Transmit Descriptor Control 1 */
4543 reg = CSR_READ(sc, WMREG_TXDCTL(1));
4544 reg |= TXDCTL_COUNT_DESC;
4545 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
4546
4547 /* TARC0 */
4548 tarc0 = CSR_READ(sc, WMREG_TARC0);
4549 switch (sc->sc_type) {
4550 case WM_T_82571:
4551 case WM_T_82572:
4552 case WM_T_82573:
4553 case WM_T_82574:
4554 case WM_T_82583:
4555 case WM_T_80003:
4556 /* Clear bits 30..27 */
4557 tarc0 &= ~__BITS(30, 27);
4558 break;
4559 default:
4560 break;
4561 }
4562
4563 switch (sc->sc_type) {
4564 case WM_T_82571:
4565 case WM_T_82572:
4566 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
4567
4568 tarc1 = CSR_READ(sc, WMREG_TARC1);
4569 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
4570 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
4571 /* 8257[12] Errata No.7 */
4572 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
4573
4574 /* TARC1 bit 28 */
4575 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4576 tarc1 &= ~__BIT(28);
4577 else
4578 tarc1 |= __BIT(28);
4579 CSR_WRITE(sc, WMREG_TARC1, tarc1);
4580
4581 /*
4582 * 8257[12] Errata No.13
4583 			 * Disable Dynamic Clock Gating.
4584 */
4585 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4586 reg &= ~CTRL_EXT_DMA_DYN_CLK;
4587 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4588 break;
4589 case WM_T_82573:
4590 case WM_T_82574:
4591 case WM_T_82583:
4592 if ((sc->sc_type == WM_T_82574)
4593 || (sc->sc_type == WM_T_82583))
4594 tarc0 |= __BIT(26); /* TARC0 bit 26 */
4595
4596 /* Extended Device Control */
4597 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4598 reg &= ~__BIT(23); /* Clear bit 23 */
4599 reg |= __BIT(22); /* Set bit 22 */
4600 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4601
4602 /* Device Control */
4603 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
4604 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4605
4606 /* PCIe Control Register */
4607 /*
4608 * 82573 Errata (unknown).
4609 *
4610 * 82574 Errata 25 and 82583 Errata 12
4611 * "Dropped Rx Packets":
4612 			 * NVM image version 2.1.4 and newer does not have this bug.
4613 */
4614 reg = CSR_READ(sc, WMREG_GCR);
4615 reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
4616 CSR_WRITE(sc, WMREG_GCR, reg);
4617
4618 if ((sc->sc_type == WM_T_82574)
4619 || (sc->sc_type == WM_T_82583)) {
4620 /*
4621 * Document says this bit must be set for
4622 * proper operation.
4623 */
4624 reg = CSR_READ(sc, WMREG_GCR);
4625 reg |= __BIT(22);
4626 CSR_WRITE(sc, WMREG_GCR, reg);
4627
4628 /*
4629 				 * Apply a workaround for a hardware erratum
4630 				 * documented in the errata sheets. It fixes an
4631 				 * issue where some error-prone or unreliable
4632 				 * PCIe completions occur, particularly with
4633 				 * ASPM enabled. Without the fix, the issue can
4634 				 * cause Tx timeouts.
4635 */
4636 reg = CSR_READ(sc, WMREG_GCR2);
4637 reg |= __BIT(0);
4638 CSR_WRITE(sc, WMREG_GCR2, reg);
4639 }
4640 break;
4641 case WM_T_80003:
4642 /* TARC0 */
4643 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
4644 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
4645 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
4646
4647 /* TARC1 bit 28 */
4648 tarc1 = CSR_READ(sc, WMREG_TARC1);
4649 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4650 tarc1 &= ~__BIT(28);
4651 else
4652 tarc1 |= __BIT(28);
4653 CSR_WRITE(sc, WMREG_TARC1, tarc1);
4654 break;
4655 case WM_T_ICH8:
4656 case WM_T_ICH9:
4657 case WM_T_ICH10:
4658 case WM_T_PCH:
4659 case WM_T_PCH2:
4660 case WM_T_PCH_LPT:
4661 case WM_T_PCH_SPT:
4662 case WM_T_PCH_CNP:
4663 /* TARC0 */
4664 if (sc->sc_type == WM_T_ICH8) {
4665 /* Set TARC0 bits 29 and 28 */
4666 tarc0 |= __BITS(29, 28);
4667 } else if (sc->sc_type == WM_T_PCH_SPT) {
4668 tarc0 |= __BIT(29);
4669 /*
4670 * Drop bit 28. From Linux.
4671 * See I218/I219 spec update
4672 * "5. Buffer Overrun While the I219 is
4673 * Processing DMA Transactions"
4674 */
4675 tarc0 &= ~__BIT(28);
4676 }
4677 /* Set TARC0 bits 23,24,26,27 */
4678 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
4679
4680 /* CTRL_EXT */
4681 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4682 reg |= __BIT(22); /* Set bit 22 */
4683 /*
4684 * Enable PHY low-power state when MAC is at D3
4685 * w/o WoL
4686 */
4687 if (sc->sc_type >= WM_T_PCH)
4688 reg |= CTRL_EXT_PHYPDEN;
4689 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4690
4691 /* TARC1 */
4692 tarc1 = CSR_READ(sc, WMREG_TARC1);
4693 /* bit 28 */
4694 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4695 tarc1 &= ~__BIT(28);
4696 else
4697 tarc1 |= __BIT(28);
4698 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
4699 CSR_WRITE(sc, WMREG_TARC1, tarc1);
4700
4701 /* Device Status */
4702 if (sc->sc_type == WM_T_ICH8) {
4703 reg = CSR_READ(sc, WMREG_STATUS);
4704 reg &= ~__BIT(31);
4705 CSR_WRITE(sc, WMREG_STATUS, reg);
4706
4707 }
4708
4709 /* IOSFPC */
4710 if (sc->sc_type == WM_T_PCH_SPT) {
4711 reg = CSR_READ(sc, WMREG_IOSFPC);
4712 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
4713 CSR_WRITE(sc, WMREG_IOSFPC, reg);
4714 }
4715 /*
4716 			 * To work around a descriptor data corruption issue
4717 			 * seen with NFS v2 UDP traffic, simply disable the NFS
4718 			 * filtering capability.
4719 */
4720 reg = CSR_READ(sc, WMREG_RFCTL);
4721 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
4722 CSR_WRITE(sc, WMREG_RFCTL, reg);
4723 break;
4724 default:
4725 break;
4726 }
4727 CSR_WRITE(sc, WMREG_TARC0, tarc0);
4728
4729 switch (sc->sc_type) {
4730 /*
4731 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
4732 * Avoid RSS Hash Value bug.
4733 */
4734 case WM_T_82571:
4735 case WM_T_82572:
4736 case WM_T_82573:
4737 case WM_T_80003:
4738 case WM_T_ICH8:
4739 reg = CSR_READ(sc, WMREG_RFCTL);
4740 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
4741 CSR_WRITE(sc, WMREG_RFCTL, reg);
4742 break;
4743 case WM_T_82574:
4744 			/* Use extended Rx descriptors. */
4745 reg = CSR_READ(sc, WMREG_RFCTL);
4746 reg |= WMREG_RFCTL_EXSTEN;
4747 CSR_WRITE(sc, WMREG_RFCTL, reg);
4748 break;
4749 default:
4750 break;
4751 }
4752 } else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
4753 /*
4754 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
4755 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
4756 * "Certain Malformed IPv6 Extension Headers are Not Processed
4757 * Correctly by the Device"
4758 *
4759 * I354(C2000) Errata AVR53:
4760 * "Malformed IPv6 Extension Headers May Result in LAN Device
4761 * Hang"
4762 */
4763 reg = CSR_READ(sc, WMREG_RFCTL);
4764 reg |= WMREG_RFCTL_IPV6EXDIS;
4765 CSR_WRITE(sc, WMREG_RFCTL, reg);
4766 }
4767 }
4768
4769 static uint32_t
4770 wm_rxpbs_adjust_82580(uint32_t val)
4771 {
4772 uint32_t rv = 0;
4773
4774 if (val < __arraycount(wm_82580_rxpbs_table))
4775 rv = wm_82580_rxpbs_table[val];
4776
4777 return rv;
4778 }
4779
4780 /*
4781 * wm_reset_phy:
4782 *
4783 * generic PHY reset function.
4784 * Same as e1000_phy_hw_reset_generic()
4785 */
4786 static int
4787 wm_reset_phy(struct wm_softc *sc)
4788 {
4789 uint32_t reg;
4790
4791 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4792 device_xname(sc->sc_dev), __func__));
4793 if (wm_phy_resetisblocked(sc))
4794 return -1;
4795
4796 sc->phy.acquire(sc);
4797
4798 reg = CSR_READ(sc, WMREG_CTRL);
4799 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
4800 CSR_WRITE_FLUSH(sc);
4801
4802 delay(sc->phy.reset_delay_us);
4803
4804 CSR_WRITE(sc, WMREG_CTRL, reg);
4805 CSR_WRITE_FLUSH(sc);
4806
4807 delay(150);
4808
4809 sc->phy.release(sc);
4810
4811 wm_get_cfg_done(sc);
4812 wm_phy_post_reset(sc);
4813
4814 return 0;
4815 }
4816
4817 /*
4818 * Only used by WM_T_PCH_SPT which does not use multiqueue,
4819 * so it is enough to check sc->sc_queue[0] only.
4820 */
4821 static void
4822 wm_flush_desc_rings(struct wm_softc *sc)
4823 {
4824 pcireg_t preg;
4825 uint32_t reg;
4826 struct wm_txqueue *txq;
4827 wiseman_txdesc_t *txd;
4828 int nexttx;
4829 uint32_t rctl;
4830
4831 /* First, disable MULR fix in FEXTNVM11 */
4832 reg = CSR_READ(sc, WMREG_FEXTNVM11);
4833 reg |= FEXTNVM11_DIS_MULRFIX;
4834 CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
4835
4836 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
4837 reg = CSR_READ(sc, WMREG_TDLEN(0));
4838 if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
4839 return;
4840
4841 /* TX */
4842 device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
4843 preg, reg);
4844 reg = CSR_READ(sc, WMREG_TCTL);
4845 CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
4846
4847 txq = &sc->sc_queue[0].wmq_txq;
4848 nexttx = txq->txq_next;
4849 txd = &txq->txq_descs[nexttx];
4850 wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
4851 txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
4852 txd->wtx_fields.wtxu_status = 0;
4853 txd->wtx_fields.wtxu_options = 0;
4854 txd->wtx_fields.wtxu_vlan = 0;
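	/*
	 * The dummy descriptor built above (IFCS plus a 512-byte length, with
	 * the buffer address reusing the descriptor ring's own DMA area)
	 * exists only to let the hardware make forward progress and drain
	 * the stuck Tx ring.
	 */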
4855
4856 bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
4857 BUS_SPACE_BARRIER_WRITE);
4858
4859 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
4860 CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
4861 bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
4862 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
4863 delay(250);
4864
4865 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
4866 if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
4867 return;
4868
4869 /* RX */
4870 device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
4871 rctl = CSR_READ(sc, WMREG_RCTL);
4872 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
4873 CSR_WRITE_FLUSH(sc);
4874 delay(150);
4875
4876 reg = CSR_READ(sc, WMREG_RXDCTL(0));
4877 /* Zero the lower 14 bits (prefetch and host thresholds) */
4878 reg &= 0xffffc000;
4879 /*
4880 * Update thresholds: prefetch threshold to 31, host threshold
4881 * to 1 and make sure the granularity is "descriptors" and not
4882 * "cache lines"
4883 */
4884 reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
4885 CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
4886
4887 /* Momentarily enable the RX ring for the changes to take effect */
4888 CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
4889 CSR_WRITE_FLUSH(sc);
4890 delay(150);
4891 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
4892 }
4893
4894 /*
4895 * wm_reset:
4896 *
4897 * Reset the i82542 chip.
4898 */
4899 static void
4900 wm_reset(struct wm_softc *sc)
4901 {
4902 int phy_reset = 0;
4903 int i, error = 0;
4904 uint32_t reg;
4905 uint16_t kmreg;
4906 int rv;
4907
4908 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4909 device_xname(sc->sc_dev), __func__));
4910 KASSERT(sc->sc_type != 0);
4911
4912 /*
4913 * Allocate on-chip memory according to the MTU size.
4914 * The Packet Buffer Allocation register must be written
4915 * before the chip is reset.
4916 */
4917 switch (sc->sc_type) {
4918 case WM_T_82547:
4919 case WM_T_82547_2:
4920 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4921 PBA_22K : PBA_30K;
4922 for (i = 0; i < sc->sc_nqueues; i++) {
4923 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4924 txq->txq_fifo_head = 0;
4925 txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4926 txq->txq_fifo_size =
4927 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4928 txq->txq_fifo_stall = 0;
4929 }
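		/*
		 * For example, with the default 1500-byte MTU, sc_pba is
		 * PBA_30K, so the Tx FIFO gets the remaining
		 * (PBA_40K - PBA_30K) << PBA_BYTE_SHIFT = 10KB of on-chip
		 * packet buffer.
		 */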
4930 break;
4931 case WM_T_82571:
4932 case WM_T_82572:
4933 case WM_T_82575: /* XXX need special handing for jumbo frames */
4934 case WM_T_80003:
4935 sc->sc_pba = PBA_32K;
4936 break;
4937 case WM_T_82573:
4938 sc->sc_pba = PBA_12K;
4939 break;
4940 case WM_T_82574:
4941 case WM_T_82583:
4942 sc->sc_pba = PBA_20K;
4943 break;
4944 case WM_T_82576:
4945 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
4946 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
4947 break;
4948 case WM_T_82580:
4949 case WM_T_I350:
4950 case WM_T_I354:
4951 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
4952 break;
4953 case WM_T_I210:
4954 case WM_T_I211:
4955 sc->sc_pba = PBA_34K;
4956 break;
4957 case WM_T_ICH8:
4958 /* Workaround for a bit corruption issue in FIFO memory */
4959 sc->sc_pba = PBA_8K;
4960 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4961 break;
4962 case WM_T_ICH9:
4963 case WM_T_ICH10:
4964 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
4965 PBA_14K : PBA_10K;
4966 break;
4967 case WM_T_PCH:
4968 case WM_T_PCH2: /* XXX 14K? */
4969 case WM_T_PCH_LPT:
4970 case WM_T_PCH_SPT:
4971 case WM_T_PCH_CNP:
4972 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
4973 PBA_12K : PBA_26K;
4974 break;
4975 default:
4976 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4977 PBA_40K : PBA_48K;
4978 break;
4979 }
4980 /*
4981 * Only old or non-multiqueue devices have the PBA register
4982 * XXX Need special handling for 82575.
4983 */
4984 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4985 || (sc->sc_type == WM_T_82575))
4986 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4987
4988 /* Prevent the PCI-E bus from sticking */
4989 if (sc->sc_flags & WM_F_PCIE) {
4990 int timeout = 800;
4991
4992 sc->sc_ctrl |= CTRL_GIO_M_DIS;
4993 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4994
4995 while (timeout--) {
4996 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4997 == 0)
4998 break;
4999 delay(100);
5000 }
5001 if (timeout == 0)
5002 device_printf(sc->sc_dev,
5003 "failed to disable busmastering\n");
5004 }
5005
5006 /* Set the completion timeout for interface */
5007 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
5008 || (sc->sc_type == WM_T_82580)
5009 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5010 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
5011 wm_set_pcie_completion_timeout(sc);
5012
5013 /* Clear interrupt */
5014 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5015 if (wm_is_using_msix(sc)) {
5016 if (sc->sc_type != WM_T_82574) {
5017 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5018 CSR_WRITE(sc, WMREG_EIAC, 0);
5019 } else
5020 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5021 }
5022
5023 /* Stop the transmit and receive processes. */
5024 CSR_WRITE(sc, WMREG_RCTL, 0);
5025 sc->sc_rctl &= ~RCTL_EN;
5026 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
5027 CSR_WRITE_FLUSH(sc);
5028
5029 /* XXX set_tbi_sbp_82543() */
5030
5031 delay(10*1000);
5032
5033 /* Must acquire the MDIO ownership before MAC reset */
5034 switch (sc->sc_type) {
5035 case WM_T_82573:
5036 case WM_T_82574:
5037 case WM_T_82583:
5038 error = wm_get_hw_semaphore_82573(sc);
5039 break;
5040 default:
5041 break;
5042 }
5043
5044 /*
5045 * 82541 Errata 29? & 82547 Errata 28?
5046 * See also the description about PHY_RST bit in CTRL register
5047 * in 8254x_GBe_SDM.pdf.
5048 */
5049 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
5050 CSR_WRITE(sc, WMREG_CTRL,
5051 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
5052 CSR_WRITE_FLUSH(sc);
5053 delay(5000);
5054 }
5055
5056 switch (sc->sc_type) {
5057 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
5058 case WM_T_82541:
5059 case WM_T_82541_2:
5060 case WM_T_82547:
5061 case WM_T_82547_2:
5062 /*
5063 * On some chipsets, a reset through a memory-mapped write
5064 * cycle can cause the chip to reset before completing the
5065 		 * write cycle. This causes a major headache that can be avoided
5066 * by issuing the reset via indirect register writes through
5067 * I/O space.
5068 *
5069 * So, if we successfully mapped the I/O BAR at attach time,
5070 * use that. Otherwise, try our luck with a memory-mapped
5071 * reset.
5072 */
5073 if (sc->sc_flags & WM_F_IOH_VALID)
5074 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
5075 else
5076 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
5077 break;
5078 case WM_T_82545_3:
5079 case WM_T_82546_3:
5080 /* Use the shadow control register on these chips. */
5081 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
5082 break;
5083 case WM_T_80003:
5084 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5085 sc->phy.acquire(sc);
5086 CSR_WRITE(sc, WMREG_CTRL, reg);
5087 sc->phy.release(sc);
5088 break;
5089 case WM_T_ICH8:
5090 case WM_T_ICH9:
5091 case WM_T_ICH10:
5092 case WM_T_PCH:
5093 case WM_T_PCH2:
5094 case WM_T_PCH_LPT:
5095 case WM_T_PCH_SPT:
5096 case WM_T_PCH_CNP:
5097 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5098 if (wm_phy_resetisblocked(sc) == false) {
5099 /*
5100 * Gate automatic PHY configuration by hardware on
5101 * non-managed 82579
5102 */
5103 if ((sc->sc_type == WM_T_PCH2)
5104 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
5105 == 0))
5106 wm_gate_hw_phy_config_ich8lan(sc, true);
5107
5108 reg |= CTRL_PHY_RESET;
5109 phy_reset = 1;
5110 } else
5111 device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
5112 sc->phy.acquire(sc);
5113 CSR_WRITE(sc, WMREG_CTRL, reg);
5114 		/* Don't insert a completion barrier during reset */
5115 delay(20*1000);
5116 mutex_exit(sc->sc_ich_phymtx);
5117 break;
5118 case WM_T_82580:
5119 case WM_T_I350:
5120 case WM_T_I354:
5121 case WM_T_I210:
5122 case WM_T_I211:
5123 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5124 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
5125 CSR_WRITE_FLUSH(sc);
5126 delay(5000);
5127 break;
5128 case WM_T_82542_2_0:
5129 case WM_T_82542_2_1:
5130 case WM_T_82543:
5131 case WM_T_82540:
5132 case WM_T_82545:
5133 case WM_T_82546:
5134 case WM_T_82571:
5135 case WM_T_82572:
5136 case WM_T_82573:
5137 case WM_T_82574:
5138 case WM_T_82575:
5139 case WM_T_82576:
5140 case WM_T_82583:
5141 default:
5142 /* Everything else can safely use the documented method. */
5143 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5144 break;
5145 }
5146
5147 /* Must release the MDIO ownership after MAC reset */
5148 switch (sc->sc_type) {
5149 case WM_T_82573:
5150 case WM_T_82574:
5151 case WM_T_82583:
5152 if (error == 0)
5153 wm_put_hw_semaphore_82573(sc);
5154 break;
5155 default:
5156 break;
5157 }
5158
5159 /* Set Phy Config Counter to 50msec */
5160 if (sc->sc_type == WM_T_PCH2) {
5161 reg = CSR_READ(sc, WMREG_FEXTNVM3);
5162 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
5163 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
5164 CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
5165 }
5166
5167 if (phy_reset != 0)
5168 wm_get_cfg_done(sc);
5169
5170 /* Reload EEPROM */
5171 switch (sc->sc_type) {
5172 case WM_T_82542_2_0:
5173 case WM_T_82542_2_1:
5174 case WM_T_82543:
5175 case WM_T_82544:
5176 delay(10);
5177 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5178 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5179 CSR_WRITE_FLUSH(sc);
5180 delay(2000);
5181 break;
5182 case WM_T_82540:
5183 case WM_T_82545:
5184 case WM_T_82545_3:
5185 case WM_T_82546:
5186 case WM_T_82546_3:
5187 delay(5*1000);
5188 /* XXX Disable HW ARPs on ASF enabled adapters */
5189 break;
5190 case WM_T_82541:
5191 case WM_T_82541_2:
5192 case WM_T_82547:
5193 case WM_T_82547_2:
5194 delay(20000);
5195 /* XXX Disable HW ARPs on ASF enabled adapters */
5196 break;
5197 case WM_T_82571:
5198 case WM_T_82572:
5199 case WM_T_82573:
5200 case WM_T_82574:
5201 case WM_T_82583:
5202 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
5203 delay(10);
5204 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5205 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5206 CSR_WRITE_FLUSH(sc);
5207 }
5208 /* check EECD_EE_AUTORD */
5209 wm_get_auto_rd_done(sc);
5210 /*
5211 		 * PHY configuration from NVM starts just after EECD_AUTO_RD
5212 * is set.
5213 */
5214 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
5215 || (sc->sc_type == WM_T_82583))
5216 delay(25*1000);
5217 break;
5218 case WM_T_82575:
5219 case WM_T_82576:
5220 case WM_T_82580:
5221 case WM_T_I350:
5222 case WM_T_I354:
5223 case WM_T_I210:
5224 case WM_T_I211:
5225 case WM_T_80003:
5226 /* check EECD_EE_AUTORD */
5227 wm_get_auto_rd_done(sc);
5228 break;
5229 case WM_T_ICH8:
5230 case WM_T_ICH9:
5231 case WM_T_ICH10:
5232 case WM_T_PCH:
5233 case WM_T_PCH2:
5234 case WM_T_PCH_LPT:
5235 case WM_T_PCH_SPT:
5236 case WM_T_PCH_CNP:
5237 break;
5238 default:
5239 panic("%s: unknown type\n", __func__);
5240 }
5241
5242 /* Check whether EEPROM is present or not */
5243 switch (sc->sc_type) {
5244 case WM_T_82575:
5245 case WM_T_82576:
5246 case WM_T_82580:
5247 case WM_T_I350:
5248 case WM_T_I354:
5249 case WM_T_ICH8:
5250 case WM_T_ICH9:
5251 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
5252 /* Not found */
5253 sc->sc_flags |= WM_F_EEPROM_INVALID;
5254 if (sc->sc_type == WM_T_82575)
5255 wm_reset_init_script_82575(sc);
5256 }
5257 break;
5258 default:
5259 break;
5260 }
5261
5262 if (phy_reset != 0)
5263 wm_phy_post_reset(sc);
5264
5265 if ((sc->sc_type == WM_T_82580)
5266 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
5267 /* Clear global device reset status bit */
5268 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
5269 }
5270
5271 /* Clear any pending interrupt events. */
5272 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5273 reg = CSR_READ(sc, WMREG_ICR);
5274 if (wm_is_using_msix(sc)) {
5275 if (sc->sc_type != WM_T_82574) {
5276 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5277 CSR_WRITE(sc, WMREG_EIAC, 0);
5278 } else
5279 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5280 }
5281
5282 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5283 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5284 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5285 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
5286 reg = CSR_READ(sc, WMREG_KABGTXD);
5287 reg |= KABGTXD_BGSQLBIAS;
5288 CSR_WRITE(sc, WMREG_KABGTXD, reg);
5289 }
5290
5291 /* Reload sc_ctrl */
5292 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5293
5294 wm_set_eee(sc);
5295
5296 /*
5297 	 * For PCH, this write makes sure that any noise is detected as a
5298 	 * CRC error and dropped rather than showing up as a bad packet to
5299 	 * the DMA engine.
5300 */
5301 if (sc->sc_type == WM_T_PCH)
5302 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
5303
5304 if (sc->sc_type >= WM_T_82544)
5305 CSR_WRITE(sc, WMREG_WUC, 0);
5306
5307 if (sc->sc_type < WM_T_82575)
5308 wm_disable_aspm(sc); /* Workaround for some chips */
5309
5310 wm_reset_mdicnfg_82580(sc);
5311
5312 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
5313 wm_pll_workaround_i210(sc);
5314
5315 if (sc->sc_type == WM_T_80003) {
5316 /* Default to TRUE to enable the MDIC W/A */
5317 sc->sc_flags |= WM_F_80003_MDIC_WA;
5318
5319 rv = wm_kmrn_readreg(sc,
5320 KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
5321 if (rv == 0) {
5322 if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
5323 == KUMCTRLSTA_OPMODE_INBAND_MDIO)
5324 sc->sc_flags &= ~WM_F_80003_MDIC_WA;
5325 else
5326 sc->sc_flags |= WM_F_80003_MDIC_WA;
5327 }
5328 }
5329 }
5330
5331 /*
5332 * wm_add_rxbuf:
5333 *
5334  * Add a receive buffer to the indicated descriptor.
5335 */
5336 static int
5337 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
5338 {
5339 struct wm_softc *sc = rxq->rxq_sc;
5340 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
5341 struct mbuf *m;
5342 int error;
5343
5344 KASSERT(mutex_owned(rxq->rxq_lock));
5345
5346 MGETHDR(m, M_DONTWAIT, MT_DATA);
5347 if (m == NULL)
5348 return ENOBUFS;
5349
5350 MCLGET(m, M_DONTWAIT);
5351 if ((m->m_flags & M_EXT) == 0) {
5352 m_freem(m);
5353 return ENOBUFS;
5354 }
5355
5356 if (rxs->rxs_mbuf != NULL)
5357 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5358
5359 rxs->rxs_mbuf = m;
5360
5361 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5362 /*
5363 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
5364 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
5365 */
5366 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
5367 m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
5368 if (error) {
5369 /* XXX XXX XXX */
5370 aprint_error_dev(sc->sc_dev,
5371 "unable to load rx DMA map %d, error = %d\n", idx, error);
5372 panic("wm_add_rxbuf");
5373 }
5374
5375 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5376 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5377
5378 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5379 if ((sc->sc_rctl & RCTL_EN) != 0)
5380 wm_init_rxdesc(rxq, idx);
5381 } else
5382 wm_init_rxdesc(rxq, idx);
5383
5384 return 0;
5385 }
5386
5387 /*
5388 * wm_rxdrain:
5389 *
5390 * Drain the receive queue.
5391 */
5392 static void
5393 wm_rxdrain(struct wm_rxqueue *rxq)
5394 {
5395 struct wm_softc *sc = rxq->rxq_sc;
5396 struct wm_rxsoft *rxs;
5397 int i;
5398
5399 KASSERT(mutex_owned(rxq->rxq_lock));
5400
5401 for (i = 0; i < WM_NRXDESC; i++) {
5402 rxs = &rxq->rxq_soft[i];
5403 if (rxs->rxs_mbuf != NULL) {
5404 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5405 m_freem(rxs->rxs_mbuf);
5406 rxs->rxs_mbuf = NULL;
5407 }
5408 }
5409 }
5410
5411 /*
5412  * Set up registers for RSS.
5413  *
5414  * XXX VMDq is not yet supported.
5415 */
5416 static void
5417 wm_init_rss(struct wm_softc *sc)
5418 {
5419 uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
5420 int i;
5421
5422 CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
5423
5424 for (i = 0; i < RETA_NUM_ENTRIES; i++) {
5425 unsigned int qid, reta_ent;
5426
5427 qid = i % sc->sc_nqueues;
5428 switch (sc->sc_type) {
5429 case WM_T_82574:
5430 reta_ent = __SHIFTIN(qid,
5431 RETA_ENT_QINDEX_MASK_82574);
5432 break;
5433 case WM_T_82575:
5434 reta_ent = __SHIFTIN(qid,
5435 RETA_ENT_QINDEX1_MASK_82575);
5436 break;
5437 default:
5438 reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
5439 break;
5440 }
5441
5442 reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
5443 reta_reg &= ~RETA_ENTRY_MASK_Q(i);
5444 reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
5445 CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
5446 }
5447
5448 rss_getkey((uint8_t *)rss_key);
5449 for (i = 0; i < RSSRK_NUM_REGS; i++)
5450 CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
5451
5452 if (sc->sc_type == WM_T_82574)
5453 mrqc = MRQC_ENABLE_RSS_MQ_82574;
5454 else
5455 mrqc = MRQC_ENABLE_RSS_MQ;
5456
5457 /*
5458 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
5459 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
5460 */
5461 mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
5462 mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
5463 #if 0
5464 mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
5465 mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
5466 #endif
5467 mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
5468
5469 CSR_WRITE(sc, WMREG_MRQC, mrqc);
5470 }
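
/*
 * Example of how the redirection table above spreads load (illustrative
 * numbers): with sc_nqueues == 4, the RETA entries are programmed
 * 0, 1, 2, 3, 0, 1, 2, 3, ... (i % sc_nqueues), so the low-order bits
 * of a received packet's RSS hash pick one of the four queues in
 * round-robin fashion, and the key written to the RSSRK registers
 * seeds the RSS (Toeplitz) hash.
 */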
5471
5472 /*
5473  * Adjust the numbers of TX and RX queues which the system actually uses.
5474  *
5475  * The numbers are affected by the following parameters:
5476  * - The number of hardware queues
5477 * - The number of MSI-X vectors (= "nvectors" argument)
5478 * - ncpu
5479 */
5480 static void
5481 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
5482 {
5483 int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
5484
5485 if (nvectors < 2) {
5486 sc->sc_nqueues = 1;
5487 return;
5488 }
5489
5490 switch (sc->sc_type) {
5491 case WM_T_82572:
5492 hw_ntxqueues = 2;
5493 hw_nrxqueues = 2;
5494 break;
5495 case WM_T_82574:
5496 hw_ntxqueues = 2;
5497 hw_nrxqueues = 2;
5498 break;
5499 case WM_T_82575:
5500 hw_ntxqueues = 4;
5501 hw_nrxqueues = 4;
5502 break;
5503 case WM_T_82576:
5504 hw_ntxqueues = 16;
5505 hw_nrxqueues = 16;
5506 break;
5507 case WM_T_82580:
5508 case WM_T_I350:
5509 case WM_T_I354:
5510 hw_ntxqueues = 8;
5511 hw_nrxqueues = 8;
5512 break;
5513 case WM_T_I210:
5514 hw_ntxqueues = 4;
5515 hw_nrxqueues = 4;
5516 break;
5517 case WM_T_I211:
5518 hw_ntxqueues = 2;
5519 hw_nrxqueues = 2;
5520 break;
5521 /*
5522 	 * The following Ethernet controllers do not support MSI-X, so
5523 	 * this driver restricts them to a single queue:
5524 * - WM_T_80003
5525 * - WM_T_ICH8
5526 * - WM_T_ICH9
5527 * - WM_T_ICH10
5528 * - WM_T_PCH
5529 * - WM_T_PCH2
5530 * - WM_T_PCH_LPT
5531 */
5532 default:
5533 hw_ntxqueues = 1;
5534 hw_nrxqueues = 1;
5535 break;
5536 }
5537
5538 hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
5539
5540 /*
5541 	 * Using more queues than MSI-X vectors cannot improve scaling, so
5542 	 * limit the number of queues actually used.
5543 */
5544 if (nvectors < hw_nqueues + 1)
5545 sc->sc_nqueues = nvectors - 1;
5546 else
5547 sc->sc_nqueues = hw_nqueues;
5548
5549 /*
5550 	 * Using more queues than CPUs cannot improve scaling, so limit
5551 	 * the number of queues actually used.
5552 */
5553 if (ncpu < sc->sc_nqueues)
5554 sc->sc_nqueues = ncpu;
5555 }
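
/*
 * Worked example for wm_adjust_qnum() (illustrative numbers): an 82576
 * (16 hardware queue pairs) attached with nvectors == 5 MSI-X vectors
 * on an 8-CPU machine gets sc_nqueues = min(16, 5 - 1, 8) = 4; the
 * remaining vector is used for the link interrupt (see wm_setup_msix()).
 */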
5556
5557 static inline bool
5558 wm_is_using_msix(struct wm_softc *sc)
5559 {
5560
5561 return (sc->sc_nintrs > 1);
5562 }
5563
5564 static inline bool
5565 wm_is_using_multiqueue(struct wm_softc *sc)
5566 {
5567
5568 return (sc->sc_nqueues > 1);
5569 }
5570
5571 static int
5572 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
5573 {
5574 struct wm_queue *wmq = &sc->sc_queue[qidx];
5575
5576 wmq->wmq_id = qidx;
5577 wmq->wmq_intr_idx = intr_idx;
5578 wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
5579 wm_handle_queue, wmq);
5580 if (wmq->wmq_si != NULL)
5581 return 0;
5582
5583 aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
5584 wmq->wmq_id);
5585 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
5586 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5587 return ENOMEM;
5588 }
5589
5590 /*
5591 * Both single interrupt MSI and INTx can use this function.
5592 */
5593 static int
5594 wm_setup_legacy(struct wm_softc *sc)
5595 {
5596 pci_chipset_tag_t pc = sc->sc_pc;
5597 const char *intrstr = NULL;
5598 char intrbuf[PCI_INTRSTR_LEN];
5599 int error;
5600
5601 error = wm_alloc_txrx_queues(sc);
5602 if (error) {
5603 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5604 error);
5605 return ENOMEM;
5606 }
5607 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
5608 sizeof(intrbuf));
5609 #ifdef WM_MPSAFE
5610 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
5611 #endif
5612 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
5613 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
5614 if (sc->sc_ihs[0] == NULL) {
5615 aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
5616 (pci_intr_type(pc, sc->sc_intrs[0])
5617 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
5618 return ENOMEM;
5619 }
5620
5621 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5622 sc->sc_nintrs = 1;
5623
5624 return wm_softint_establish_queue(sc, 0, 0);
5625 }
5626
5627 static int
5628 wm_setup_msix(struct wm_softc *sc)
5629 {
5630 void *vih;
5631 kcpuset_t *affinity;
5632 int qidx, error, intr_idx, txrx_established;
5633 pci_chipset_tag_t pc = sc->sc_pc;
5634 const char *intrstr = NULL;
5635 char intrbuf[PCI_INTRSTR_LEN];
5636 char intr_xname[INTRDEVNAMEBUF];
5637
5638 if (sc->sc_nqueues < ncpu) {
5639 /*
5640 		 * To avoid colliding with other devices' interrupts, the
5641 		 * affinity of the Tx/Rx interrupts starts from CPU#1.
5642 */
5643 sc->sc_affinity_offset = 1;
5644 } else {
5645 /*
5646 		 * In this case, this device uses all CPUs. For readability, we
5647 		 * make the affinitized cpu_index match the MSI-X vector number.
5648 */
5649 sc->sc_affinity_offset = 0;
5650 }
5651
5652 error = wm_alloc_txrx_queues(sc);
5653 if (error) {
5654 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5655 error);
5656 return ENOMEM;
5657 }
5658
5659 kcpuset_create(&affinity, false);
5660 intr_idx = 0;
5661
5662 /*
5663 * TX and RX
5664 */
5665 txrx_established = 0;
5666 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5667 struct wm_queue *wmq = &sc->sc_queue[qidx];
5668 int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
5669
5670 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5671 sizeof(intrbuf));
5672 #ifdef WM_MPSAFE
5673 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
5674 PCI_INTR_MPSAFE, true);
5675 #endif
5676 memset(intr_xname, 0, sizeof(intr_xname));
5677 snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
5678 device_xname(sc->sc_dev), qidx);
5679 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5680 IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
5681 if (vih == NULL) {
5682 aprint_error_dev(sc->sc_dev,
5683 "unable to establish MSI-X(for TX and RX)%s%s\n",
5684 intrstr ? " at " : "",
5685 intrstr ? intrstr : "");
5686
5687 goto fail;
5688 }
5689 kcpuset_zero(affinity);
5690 /* Round-robin affinity */
5691 kcpuset_set(affinity, affinity_to);
5692 error = interrupt_distribute(vih, affinity, NULL);
5693 if (error == 0) {
5694 aprint_normal_dev(sc->sc_dev,
5695 "for TX and RX interrupting at %s affinity to %u\n",
5696 intrstr, affinity_to);
5697 } else {
5698 aprint_normal_dev(sc->sc_dev,
5699 "for TX and RX interrupting at %s\n", intrstr);
5700 }
5701 sc->sc_ihs[intr_idx] = vih;
5702 if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
5703 goto fail;
5704 txrx_established++;
5705 intr_idx++;
5706 }
5707
5708 /* LINK */
5709 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5710 sizeof(intrbuf));
5711 #ifdef WM_MPSAFE
5712 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
5713 #endif
5714 memset(intr_xname, 0, sizeof(intr_xname));
5715 snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
5716 device_xname(sc->sc_dev));
5717 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5718 IPL_NET, wm_linkintr_msix, sc, intr_xname);
5719 if (vih == NULL) {
5720 aprint_error_dev(sc->sc_dev,
5721 "unable to establish MSI-X(for LINK)%s%s\n",
5722 intrstr ? " at " : "",
5723 intrstr ? intrstr : "");
5724
5725 goto fail;
5726 }
5727 /* Keep default affinity to LINK interrupt */
5728 aprint_normal_dev(sc->sc_dev,
5729 "for LINK interrupting at %s\n", intrstr);
5730 sc->sc_ihs[intr_idx] = vih;
5731 sc->sc_link_intr_idx = intr_idx;
5732
5733 sc->sc_nintrs = sc->sc_nqueues + 1;
5734 kcpuset_destroy(affinity);
5735 return 0;
5736
5737 fail:
5738 for (qidx = 0; qidx < txrx_established; qidx++) {
5739 struct wm_queue *wmq = &sc->sc_queue[qidx];
5740 pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
5741 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5742 }
5743
5744 kcpuset_destroy(affinity);
5745 return ENOMEM;
5746 }
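
/*
 * Affinity example for the routine above (illustrative numbers): with
 * sc_nqueues == 4 on an 8-CPU machine, sc_affinity_offset is 1, so the
 * TXRX0..TXRX3 vectors are bound to CPU#1..CPU#4 and the LINK vector
 * keeps its default affinity.
 */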
5747
5748 static void
5749 wm_unset_stopping_flags(struct wm_softc *sc)
5750 {
5751 int i;
5752
5753 KASSERT(WM_CORE_LOCKED(sc));
5754
5755 /* Must unset stopping flags in ascending order. */
5756 for (i = 0; i < sc->sc_nqueues; i++) {
5757 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5758 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5759
5760 mutex_enter(txq->txq_lock);
5761 txq->txq_stopping = false;
5762 mutex_exit(txq->txq_lock);
5763
5764 mutex_enter(rxq->rxq_lock);
5765 rxq->rxq_stopping = false;
5766 mutex_exit(rxq->rxq_lock);
5767 }
5768
5769 sc->sc_core_stopping = false;
5770 }
5771
5772 static void
5773 wm_set_stopping_flags(struct wm_softc *sc)
5774 {
5775 int i;
5776
5777 KASSERT(WM_CORE_LOCKED(sc));
5778
5779 sc->sc_core_stopping = true;
5780
5781 /* Must set stopping flags in ascending order. */
5782 for (i = 0; i < sc->sc_nqueues; i++) {
5783 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5784 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5785
5786 mutex_enter(rxq->rxq_lock);
5787 rxq->rxq_stopping = true;
5788 mutex_exit(rxq->rxq_lock);
5789
5790 mutex_enter(txq->txq_lock);
5791 txq->txq_stopping = true;
5792 mutex_exit(txq->txq_lock);
5793 }
5794 }
5795
5796 /*
5797 * Write interrupt interval value to ITR or EITR
5798 */
5799 static void
5800 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
5801 {
5802
5803 if (!wmq->wmq_set_itr)
5804 return;
5805
5806 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5807 uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
5808
5809 /*
5810 		 * The 82575 doesn't have the CNT_INGR field,
5811 		 * so overwrite the counter field in software.
5812 */
5813 if (sc->sc_type == WM_T_82575)
5814 eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
5815 else
5816 eitr |= EITR_CNT_INGR;
5817
5818 CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
5819 } else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
5820 /*
5821 		 * The 82574 has both ITR and EITR. Set EITR when we use
5822 		 * the multiqueue feature with MSI-X.
5823 */
5824 CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
5825 wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
5826 } else {
5827 KASSERT(wmq->wmq_id == 0);
5828 CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
5829 }
5830
5831 wmq->wmq_set_itr = false;
5832 }
5833
5834 /*
5835 * TODO
5836  * The dynamic ITR calculation below is almost the same as Linux igb's,
5837  * but it does not fit wm(4) well, so AIM stays disabled until we find
5838  * an appropriate ITR calculation.
5839 */
5840 /*
5841  * Calculate the interrupt interval value that wm_itrs_writereg() will
5842  * write later. This function does not write the ITR/EITR register itself.
5843 */
5844 static void
5845 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
5846 {
5847 #ifdef NOTYET
5848 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
5849 struct wm_txqueue *txq = &wmq->wmq_txq;
5850 uint32_t avg_size = 0;
5851 uint32_t new_itr;
5852
5853 if (rxq->rxq_packets)
5854 avg_size = rxq->rxq_bytes / rxq->rxq_packets;
5855 if (txq->txq_packets)
5856 avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
5857
5858 if (avg_size == 0) {
5859 new_itr = 450; /* restore default value */
5860 goto out;
5861 }
5862
5863 /* Add 24 bytes to size to account for CRC, preamble, and gap */
5864 avg_size += 24;
5865
5866 /* Don't starve jumbo frames */
5867 avg_size = uimin(avg_size, 3000);
5868
5869 /* Give a little boost to mid-size frames */
5870 if ((avg_size > 300) && (avg_size < 1200))
5871 new_itr = avg_size / 3;
5872 else
5873 new_itr = avg_size / 2;
5874
5875 out:
5876 /*
5877 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
5878 * controllers. See sc->sc_itr_init setting in wm_init_locked().
5879 */
5880 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
5881 new_itr *= 4;
5882
5883 if (new_itr != wmq->wmq_itr) {
5884 wmq->wmq_itr = new_itr;
5885 wmq->wmq_set_itr = true;
5886 } else
5887 wmq->wmq_set_itr = false;
5888
5889 rxq->rxq_packets = 0;
5890 rxq->rxq_bytes = 0;
5891 txq->txq_packets = 0;
5892 txq->txq_bytes = 0;
5893 #endif
5894 }
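
/*
 * A rough trace of the disabled heuristic above (hypothetical numbers):
 * a queue averaging 1000-byte packets gives avg_size = 1024 after the
 * 24-byte overhead, which falls in the 300..1200 "mid-size" window, so
 * new_itr = 1024 / 3 = 341; for everything except the 82575 this is
 * then multiplied by 4, giving 1364.
 */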
5895
5896 static void
5897 wm_init_sysctls(struct wm_softc *sc)
5898 {
5899 struct sysctllog **log;
5900 const struct sysctlnode *rnode, *qnode, *cnode;
5901 int i, rv;
5902 const char *dvname;
5903
5904 log = &sc->sc_sysctllog;
5905 dvname = device_xname(sc->sc_dev);
5906
5907 rv = sysctl_createv(log, 0, NULL, &rnode,
5908 0, CTLTYPE_NODE, dvname,
5909 SYSCTL_DESCR("wm information and settings"),
5910 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
5911 if (rv != 0)
5912 goto err;
5913
5914 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
5915 CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
5916 NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
5917 if (rv != 0)
5918 goto teardown;
5919
5920 for (i = 0; i < sc->sc_nqueues; i++) {
5921 struct wm_queue *wmq = &sc->sc_queue[i];
5922 struct wm_txqueue *txq = &wmq->wmq_txq;
5923 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
5924
5925 snprintf(sc->sc_queue[i].sysctlname,
5926 sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
5927
5928 if (sysctl_createv(log, 0, &rnode, &qnode,
5929 0, CTLTYPE_NODE,
5930 sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
5931 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
5932 break;
5933 if (sysctl_createv(log, 0, &qnode, &cnode,
5934 CTLFLAG_READONLY, CTLTYPE_INT,
5935 "txq_free", SYSCTL_DESCR("TX queue free"),
5936 NULL, 0, &txq->txq_free,
5937 0, CTL_CREATE, CTL_EOL) != 0)
5938 break;
5939 if (sysctl_createv(log, 0, &qnode, &cnode,
5940 CTLFLAG_READONLY, CTLTYPE_INT,
5941 "txq_next", SYSCTL_DESCR("TX queue next"),
5942 NULL, 0, &txq->txq_next,
5943 0, CTL_CREATE, CTL_EOL) != 0)
5944 break;
5945
5946 if (sysctl_createv(log, 0, &qnode, &cnode,
5947 CTLFLAG_READONLY, CTLTYPE_INT,
5948 "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
5949 NULL, 0, &rxq->rxq_ptr,
5950 0, CTL_CREATE, CTL_EOL) != 0)
5951 break;
5952 }
5953
5954 #ifdef WM_DEBUG
5955 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
5956 CTLTYPE_INT, "debug_flags",
5957 SYSCTL_DESCR(
5958 "Debug flags:\n" \
5959 "\t0x01 LINK\n" \
5960 "\t0x02 TX\n" \
5961 "\t0x04 RX\n" \
5962 "\t0x08 GMII\n" \
5963 "\t0x10 MANAGE\n" \
5964 "\t0x20 NVM\n" \
5965 "\t0x40 INIT\n" \
5966 "\t0x80 LOCK"),
5967 wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
5968 if (rv != 0)
5969 goto teardown;
5970 #endif
5971
5972 return;
5973
5974 teardown:
5975 sysctl_teardown(log);
5976 err:
5977 sc->sc_sysctllog = NULL;
5978 device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
5979 __func__, rv);
5980 }
5981
5982 /*
5983 * wm_init: [ifnet interface function]
5984 *
5985 * Initialize the interface.
5986 */
5987 static int
5988 wm_init(struct ifnet *ifp)
5989 {
5990 struct wm_softc *sc = ifp->if_softc;
5991 int ret;
5992
5993 WM_CORE_LOCK(sc);
5994 ret = wm_init_locked(ifp);
5995 WM_CORE_UNLOCK(sc);
5996
5997 return ret;
5998 }
5999
6000 static int
6001 wm_init_locked(struct ifnet *ifp)
6002 {
6003 struct wm_softc *sc = ifp->if_softc;
6004 struct ethercom *ec = &sc->sc_ethercom;
6005 int i, j, trynum, error = 0;
6006 uint32_t reg, sfp_mask = 0;
6007
6008 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
6009 device_xname(sc->sc_dev), __func__));
6010 KASSERT(WM_CORE_LOCKED(sc));
6011
6012 /*
6013 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
6014 	 * On such platforms there is a small but measurable benefit, for
6015 	 * normal MTU, to avoiding the adjustment of the descriptor so
6016 	 * that the headers are aligned. One possibility is that the DMA
6017 	 * itself is slightly more efficient if the front of the entire
6018 	 * packet (instead of the front of the headers) is aligned.
6019 *
6020 * Note we must always set align_tweak to 0 if we are using
6021 * jumbo frames.
6022 */
6023 #ifdef __NO_STRICT_ALIGNMENT
6024 sc->sc_align_tweak = 0;
6025 #else
6026 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
6027 sc->sc_align_tweak = 0;
6028 else
6029 sc->sc_align_tweak = 2;
6030 #endif /* __NO_STRICT_ALIGNMENT */
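	/*
	 * (This is the usual 2-byte receive-buffer offset trick: with
	 * sc_align_tweak == 2 the 14-byte Ethernet header is shifted so
	 * that the following IP header ends up 4-byte aligned on
	 * strict-alignment platforms.)
	 */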
6031
6032 /* Cancel any pending I/O. */
6033 wm_stop_locked(ifp, false, false);
6034
6035 /* Update statistics before reset */
6036 if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
6037 if_ierrors, CSR_READ(sc, WMREG_RXERRC));
6038
6039 /* PCH_SPT hardware workaround */
6040 if (sc->sc_type == WM_T_PCH_SPT)
6041 wm_flush_desc_rings(sc);
6042
6043 /* Reset the chip to a known state. */
6044 wm_reset(sc);
6045
6046 /*
6047 	 * AMT-based hardware can now take control from firmware.
6048 * Do this after reset.
6049 */
6050 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
6051 wm_get_hw_control(sc);
6052
6053 if ((sc->sc_type >= WM_T_PCH_SPT) &&
6054 pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
6055 wm_legacy_irq_quirk_spt(sc);
6056
6057 /* Init hardware bits */
6058 wm_initialize_hardware_bits(sc);
6059
6060 /* Reset the PHY. */
6061 if (sc->sc_flags & WM_F_HAS_MII)
6062 wm_gmii_reset(sc);
6063
6064 if (sc->sc_type >= WM_T_ICH8) {
6065 reg = CSR_READ(sc, WMREG_GCR);
6066 /*
6067 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
6068 * default after reset.
6069 */
6070 if (sc->sc_type == WM_T_ICH8)
6071 reg |= GCR_NO_SNOOP_ALL;
6072 else
6073 reg &= ~GCR_NO_SNOOP_ALL;
6074 CSR_WRITE(sc, WMREG_GCR, reg);
6075 }
6076
6077 if ((sc->sc_type >= WM_T_ICH8)
6078 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
6079 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
6080
6081 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6082 reg |= CTRL_EXT_RO_DIS;
6083 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6084 }
6085
6086 /* Calculate (E)ITR value */
6087 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
6088 /*
6089 		 * For NEWQUEUE's EITR (except for the 82575).
6090 		 * The 82575's EITR should be set to the same throttling value
6091 		 * as other old controllers' ITR because the interrupts/sec
6092 		 * calculation is the same, i.e. 1,000,000,000 / (N * 256).
6093 		 *
6094 		 * The 82574's EITR should be set to the same value as the ITR.
6095 		 *
6096 		 * For N interrupts/sec, set this value to 1,000,000 / N,
6097 		 * in contrast to the ITR throttling value.
6098 		 */
6099 sc->sc_itr_init = 450;
6100 } else if (sc->sc_type >= WM_T_82543) {
6101 /*
6102 		 * Set up the interrupt throttling register (units of 256ns).
6103 		 * Note that a footnote in Intel's documentation says this
6104 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
6105 		 * or 10Mbit mode. Empirically, this appears to be true also
6106 		 * for the 1024ns units of the other
6107 * interrupt-related timer registers -- so, really, we ought
6108 * to divide this value by 4 when the link speed is low.
6109 *
6110 * XXX implement this division at link speed change!
6111 */
6112
6113 /*
6114 * For N interrupts/sec, set this value to:
6115 * 1,000,000,000 / (N * 256). Note that we set the
6116 * absolute and packet timer values to this value
6117 * divided by 4 to get "simple timer" behavior.
6118 */
6119 sc->sc_itr_init = 1500; /* 2604 ints/sec */
6120 }
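	/*
	 * For example, the legacy default of 1500 above corresponds to a
	 * minimum interval of 1500 * 256ns = 384us between interrupts,
	 * i.e. 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec, and
	 * the NEWQUEUE default of 450 to roughly 1,000,000 / 450 ~= 2222
	 * interrupts/sec, per the formulas noted above.
	 */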
6121
6122 error = wm_init_txrx_queues(sc);
6123 if (error)
6124 goto out;
6125
6126 if (((sc->sc_flags & WM_F_SGMII) == 0) &&
6127 (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
6128 (sc->sc_type >= WM_T_82575))
6129 wm_serdes_power_up_link_82575(sc);
6130
6131 /* Clear out the VLAN table -- we don't use it (yet). */
6132 CSR_WRITE(sc, WMREG_VET, 0);
6133 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
6134 trynum = 10; /* Due to hw errata */
6135 else
6136 trynum = 1;
6137 for (i = 0; i < WM_VLAN_TABSIZE; i++)
6138 for (j = 0; j < trynum; j++)
6139 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
6140
6141 /*
6142 * Set up flow-control parameters.
6143 *
6144 * XXX Values could probably stand some tuning.
6145 */
6146 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
6147 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
6148 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
6149 && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
6150 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
6151 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
6152 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
6153 }
6154
6155 sc->sc_fcrtl = FCRTL_DFLT;
6156 if (sc->sc_type < WM_T_82543) {
6157 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
6158 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
6159 } else {
6160 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
6161 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
6162 }
6163
6164 if (sc->sc_type == WM_T_80003)
6165 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
6166 else
6167 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
6168
6169 /* Writes the control register. */
6170 wm_set_vlan(sc);
6171
6172 if (sc->sc_flags & WM_F_HAS_MII) {
6173 uint16_t kmreg;
6174
6175 switch (sc->sc_type) {
6176 case WM_T_80003:
6177 case WM_T_ICH8:
6178 case WM_T_ICH9:
6179 case WM_T_ICH10:
6180 case WM_T_PCH:
6181 case WM_T_PCH2:
6182 case WM_T_PCH_LPT:
6183 case WM_T_PCH_SPT:
6184 case WM_T_PCH_CNP:
6185 /*
6186 * Set the mac to wait the maximum time between each
6187 * iteration and increase the max iterations when
6188 * polling the phy; this fixes erroneous timeouts at
6189 * 10Mbps.
6190 */
6191 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
6192 0xFFFF);
6193 wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6194 &kmreg);
6195 kmreg |= 0x3F;
6196 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6197 kmreg);
6198 break;
6199 default:
6200 break;
6201 }
6202
6203 if (sc->sc_type == WM_T_80003) {
6204 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6205 reg &= ~CTRL_EXT_LINK_MODE_MASK;
6206 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6207
6208 /* Bypass RX and TX FIFO's */
6209 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
6210 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
6211 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
6212 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
6213 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
6214 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
6215 }
6216 }
6217 #if 0
6218 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
6219 #endif
6220
6221 /* Set up checksum offload parameters. */
6222 reg = CSR_READ(sc, WMREG_RXCSUM);
6223 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
6224 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
6225 reg |= RXCSUM_IPOFL;
6226 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
6227 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
6228 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
6229 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
6230 CSR_WRITE(sc, WMREG_RXCSUM, reg);
6231
6232 /* Set registers about MSI-X */
6233 if (wm_is_using_msix(sc)) {
6234 uint32_t ivar, qintr_idx;
6235 struct wm_queue *wmq;
6236 unsigned int qid;
6237
6238 if (sc->sc_type == WM_T_82575) {
6239 /* Interrupt control */
6240 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6241 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
6242 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6243
6244 /* TX and RX */
6245 for (i = 0; i < sc->sc_nqueues; i++) {
6246 wmq = &sc->sc_queue[i];
6247 CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
6248 EITR_TX_QUEUE(wmq->wmq_id)
6249 | EITR_RX_QUEUE(wmq->wmq_id));
6250 }
6251 /* Link status */
6252 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
6253 EITR_OTHER);
6254 } else if (sc->sc_type == WM_T_82574) {
6255 /* Interrupt control */
6256 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6257 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
6258 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6259
6260 /*
6261 			 * Work around an issue with spurious interrupts
6262 			 * in MSI-X mode.
6263 			 * At wm_initialize_hardware_bits() time, sc_nintrs has
6264 			 * not been initialized yet, so re-initialize WMREG_RFCTL here.
6265 */
6266 reg = CSR_READ(sc, WMREG_RFCTL);
6267 reg |= WMREG_RFCTL_ACKDIS;
6268 CSR_WRITE(sc, WMREG_RFCTL, reg);
6269
6270 ivar = 0;
6271 /* TX and RX */
6272 for (i = 0; i < sc->sc_nqueues; i++) {
6273 wmq = &sc->sc_queue[i];
6274 qid = wmq->wmq_id;
6275 qintr_idx = wmq->wmq_intr_idx;
6276
6277 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6278 IVAR_TX_MASK_Q_82574(qid));
6279 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6280 IVAR_RX_MASK_Q_82574(qid));
6281 }
6282 /* Link status */
6283 ivar |= __SHIFTIN((IVAR_VALID_82574
6284 | sc->sc_link_intr_idx), IVAR_OTHER_MASK);
6285 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
6286 } else {
6287 /* Interrupt control */
6288 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
6289 | GPIE_EIAME | GPIE_PBA);
6290
6291 switch (sc->sc_type) {
6292 case WM_T_82580:
6293 case WM_T_I350:
6294 case WM_T_I354:
6295 case WM_T_I210:
6296 case WM_T_I211:
6297 /* TX and RX */
6298 for (i = 0; i < sc->sc_nqueues; i++) {
6299 wmq = &sc->sc_queue[i];
6300 qid = wmq->wmq_id;
6301 qintr_idx = wmq->wmq_intr_idx;
6302
6303 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
6304 ivar &= ~IVAR_TX_MASK_Q(qid);
6305 ivar |= __SHIFTIN((qintr_idx
6306 | IVAR_VALID),
6307 IVAR_TX_MASK_Q(qid));
6308 ivar &= ~IVAR_RX_MASK_Q(qid);
6309 ivar |= __SHIFTIN((qintr_idx
6310 | IVAR_VALID),
6311 IVAR_RX_MASK_Q(qid));
6312 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
6313 }
6314 break;
6315 case WM_T_82576:
6316 /* TX and RX */
6317 for (i = 0; i < sc->sc_nqueues; i++) {
6318 wmq = &sc->sc_queue[i];
6319 qid = wmq->wmq_id;
6320 qintr_idx = wmq->wmq_intr_idx;
6321
6322 ivar = CSR_READ(sc,
6323 WMREG_IVAR_Q_82576(qid));
6324 ivar &= ~IVAR_TX_MASK_Q_82576(qid);
6325 ivar |= __SHIFTIN((qintr_idx
6326 | IVAR_VALID),
6327 IVAR_TX_MASK_Q_82576(qid));
6328 ivar &= ~IVAR_RX_MASK_Q_82576(qid);
6329 ivar |= __SHIFTIN((qintr_idx
6330 | IVAR_VALID),
6331 IVAR_RX_MASK_Q_82576(qid));
6332 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
6333 ivar);
6334 }
6335 break;
6336 default:
6337 break;
6338 }
6339
6340 /* Link status */
6341 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
6342 IVAR_MISC_OTHER);
6343 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
6344 }
6345
6346 if (wm_is_using_multiqueue(sc)) {
6347 wm_init_rss(sc);
6348
6349 /*
6350 			 * NOTE: Receive Full-Packet Checksum Offload is
6351 			 * mutually exclusive with multiqueue. However, this
6352 			 * is not the same as the TCP/IP checksum offloads,
6353 			 * which still work.
6354 			 */
6355 reg = CSR_READ(sc, WMREG_RXCSUM);
6356 reg |= RXCSUM_PCSD;
6357 CSR_WRITE(sc, WMREG_RXCSUM, reg);
6358 }
6359 }
6360
6361 /* Set up the interrupt registers. */
6362 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6363
6364 /* Enable SFP module insertion interrupt if it's required */
6365 if ((sc->sc_flags & WM_F_SFP) != 0) {
6366 sc->sc_ctrl |= CTRL_EXTLINK_EN;
6367 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6368 sfp_mask = ICR_GPI(0);
6369 }
6370
6371 if (wm_is_using_msix(sc)) {
6372 uint32_t mask;
6373 struct wm_queue *wmq;
6374
6375 switch (sc->sc_type) {
6376 case WM_T_82574:
6377 mask = 0;
6378 for (i = 0; i < sc->sc_nqueues; i++) {
6379 wmq = &sc->sc_queue[i];
6380 mask |= ICR_TXQ(wmq->wmq_id);
6381 mask |= ICR_RXQ(wmq->wmq_id);
6382 }
6383 mask |= ICR_OTHER;
6384 CSR_WRITE(sc, WMREG_EIAC_82574, mask);
6385 CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
6386 break;
6387 default:
6388 if (sc->sc_type == WM_T_82575) {
6389 mask = 0;
6390 for (i = 0; i < sc->sc_nqueues; i++) {
6391 wmq = &sc->sc_queue[i];
6392 mask |= EITR_TX_QUEUE(wmq->wmq_id);
6393 mask |= EITR_RX_QUEUE(wmq->wmq_id);
6394 }
6395 mask |= EITR_OTHER;
6396 } else {
6397 mask = 0;
6398 for (i = 0; i < sc->sc_nqueues; i++) {
6399 wmq = &sc->sc_queue[i];
6400 mask |= 1 << wmq->wmq_intr_idx;
6401 }
6402 mask |= 1 << sc->sc_link_intr_idx;
6403 }
6404 CSR_WRITE(sc, WMREG_EIAC, mask);
6405 CSR_WRITE(sc, WMREG_EIAM, mask);
6406 CSR_WRITE(sc, WMREG_EIMS, mask);
6407
6408 /* For other interrupts */
6409 CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
6410 break;
6411 }
6412 } else {
6413 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
6414 ICR_RXO | ICR_RXT0 | sfp_mask;
6415 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
6416 }
6417
6418 /* Set up the inter-packet gap. */
6419 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6420
6421 if (sc->sc_type >= WM_T_82543) {
6422 for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6423 struct wm_queue *wmq = &sc->sc_queue[qidx];
6424 wm_itrs_writereg(sc, wmq);
6425 }
6426 /*
6427 		 * Link interrupts occur much less frequently than TX and
6428 		 * RX interrupts, so we don't tune the
6429 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
6430 		 * if_igb does.
6431 */
6432 }
6433
6434 /* Set the VLAN ethernetype. */
6435 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
6436
6437 /*
6438 * Set up the transmit control register; we start out with
6439 	 * a collision distance suitable for FDX, but update it when
6440 * we resolve the media type.
6441 */
6442 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
6443 | TCTL_CT(TX_COLLISION_THRESHOLD)
6444 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6445 if (sc->sc_type >= WM_T_82571)
6446 sc->sc_tctl |= TCTL_MULR;
6447 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6448
6449 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6450 /* Write TDT after TCTL.EN is set. See the document. */
6451 CSR_WRITE(sc, WMREG_TDT(0), 0);
6452 }
6453
6454 if (sc->sc_type == WM_T_80003) {
6455 reg = CSR_READ(sc, WMREG_TCTL_EXT);
6456 reg &= ~TCTL_EXT_GCEX_MASK;
6457 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
6458 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
6459 }
6460
6461 /* Set the media. */
6462 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
6463 goto out;
6464
6465 /* Configure for OS presence */
6466 wm_init_manageability(sc);
6467
6468 /*
6469 * Set up the receive control register; we actually program the
6470 * register when we set the receive filter. Use multicast address
6471 * offset type 0.
6472 *
6473 * Only the i82544 has the ability to strip the incoming CRC, so we
6474 * don't enable that feature.
6475 */
6476 sc->sc_mchash_type = 0;
6477 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
6478 | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
6479
6480 	/* The 82574 uses the one-buffer extended Rx descriptor format. */
6481 if (sc->sc_type == WM_T_82574)
6482 sc->sc_rctl |= RCTL_DTYP_ONEBUF;
6483
6484 if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
6485 sc->sc_rctl |= RCTL_SECRC;
6486
6487 if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
6488 && (ifp->if_mtu > ETHERMTU)) {
6489 sc->sc_rctl |= RCTL_LPE;
6490 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6491 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
6492 }
6493
6494 if (MCLBYTES == 2048)
6495 sc->sc_rctl |= RCTL_2k;
6496 else {
6497 if (sc->sc_type >= WM_T_82543) {
6498 switch (MCLBYTES) {
6499 case 4096:
6500 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
6501 break;
6502 case 8192:
6503 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
6504 break;
6505 case 16384:
6506 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
6507 break;
6508 default:
6509 panic("wm_init: MCLBYTES %d unsupported",
6510 MCLBYTES);
6511 break;
6512 }
6513 } else
6514 panic("wm_init: i82542 requires MCLBYTES = 2048");
6515 }
6516
6517 /* Enable ECC */
6518 switch (sc->sc_type) {
6519 case WM_T_82571:
6520 reg = CSR_READ(sc, WMREG_PBA_ECC);
6521 reg |= PBA_ECC_CORR_EN;
6522 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
6523 break;
6524 case WM_T_PCH_LPT:
6525 case WM_T_PCH_SPT:
6526 case WM_T_PCH_CNP:
6527 reg = CSR_READ(sc, WMREG_PBECCSTS);
6528 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
6529 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
6530
6531 sc->sc_ctrl |= CTRL_MEHE;
6532 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6533 break;
6534 default:
6535 break;
6536 }
6537
6538 /*
6539 * Set the receive filter.
6540 *
6541 * For 82575 and 82576, the RX descriptors must be initialized after
6542 * the setting of RCTL.EN in wm_set_filter()
6543 */
6544 wm_set_filter(sc);
6545
6546 	/* On 82575 and later, set RDT only if RX is enabled */
6547 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6548 int qidx;
6549 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6550 struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
6551 for (i = 0; i < WM_NRXDESC; i++) {
6552 mutex_enter(rxq->rxq_lock);
6553 wm_init_rxdesc(rxq, i);
6554 mutex_exit(rxq->rxq_lock);
6556 }
6557 }
6558 }
6559
6560 wm_unset_stopping_flags(sc);
6561
6562 /* Start the one second link check clock. */
6563 callout_schedule(&sc->sc_tick_ch, hz);
6564
6565 /* ...all done! */
6566 ifp->if_flags |= IFF_RUNNING;
6567
6568 out:
6569 /* Save last flags for the callback */
6570 sc->sc_if_flags = ifp->if_flags;
6571 sc->sc_ec_capenable = ec->ec_capenable;
6572 if (error)
6573 log(LOG_ERR, "%s: interface not running\n",
6574 device_xname(sc->sc_dev));
6575 return error;
6576 }
6577
6578 /*
6579 * wm_stop: [ifnet interface function]
6580 *
6581 * Stop transmission on the interface.
6582 */
6583 static void
6584 wm_stop(struct ifnet *ifp, int disable)
6585 {
6586 struct wm_softc *sc = ifp->if_softc;
6587
6588 ASSERT_SLEEPABLE();
6589
6590 WM_CORE_LOCK(sc);
6591 wm_stop_locked(ifp, disable ? true : false, true);
6592 WM_CORE_UNLOCK(sc);
6593
6594 /*
6595 	 * After wm_set_stopping_flags(), it is guaranteed that
6596 	 * wm_handle_queue_work() does not call workqueue_enqueue().
6597 	 * However, workqueue_wait() cannot be called in wm_stop_locked()
6598 	 * because it can sleep,
6599 	 * so call workqueue_wait() here.
6600 */
6601 for (int i = 0; i < sc->sc_nqueues; i++)
6602 workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
6603 }
6604
6605 static void
6606 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
6607 {
6608 struct wm_softc *sc = ifp->if_softc;
6609 struct wm_txsoft *txs;
6610 int i, qidx;
6611
6612 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
6613 device_xname(sc->sc_dev), __func__));
6614 KASSERT(WM_CORE_LOCKED(sc));
6615
6616 wm_set_stopping_flags(sc);
6617
6618 if (sc->sc_flags & WM_F_HAS_MII) {
6619 /* Down the MII. */
6620 mii_down(&sc->sc_mii);
6621 } else {
6622 #if 0
6623 /* Should we clear PHY's status properly? */
6624 wm_reset(sc);
6625 #endif
6626 }
6627
6628 /* Stop the transmit and receive processes. */
6629 CSR_WRITE(sc, WMREG_TCTL, 0);
6630 CSR_WRITE(sc, WMREG_RCTL, 0);
6631 sc->sc_rctl &= ~RCTL_EN;
6632
6633 /*
6634 * Clear the interrupt mask to ensure the device cannot assert its
6635 * interrupt line.
6636 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
6637 * service any currently pending or shared interrupt.
6638 */
6639 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6640 sc->sc_icr = 0;
6641 if (wm_is_using_msix(sc)) {
6642 if (sc->sc_type != WM_T_82574) {
6643 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
6644 CSR_WRITE(sc, WMREG_EIAC, 0);
6645 } else
6646 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
6647 }
6648
6649 /*
6650 * Stop callouts after interrupts are disabled; if we have
6651 * to wait for them, we will be releasing the CORE_LOCK
6652 * briefly, which will unblock interrupts on the current CPU.
6653 */
6654
6655 /* Stop the one second clock. */
6656 if (wait)
6657 callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
6658 else
6659 callout_stop(&sc->sc_tick_ch);
6660
6661 /* Stop the 82547 Tx FIFO stall check timer. */
6662 if (sc->sc_type == WM_T_82547) {
6663 if (wait)
6664 callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
6665 else
6666 callout_stop(&sc->sc_txfifo_ch);
6667 }
6668
6669 /* Release any queued transmit buffers. */
6670 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6671 struct wm_queue *wmq = &sc->sc_queue[qidx];
6672 struct wm_txqueue *txq = &wmq->wmq_txq;
6673 struct mbuf *m;
6674
6675 mutex_enter(txq->txq_lock);
6676 txq->txq_sending = false; /* Ensure watchdog disabled */
6677 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6678 txs = &txq->txq_soft[i];
6679 if (txs->txs_mbuf != NULL) {
6680 bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
6681 m_freem(txs->txs_mbuf);
6682 txs->txs_mbuf = NULL;
6683 }
6684 }
6685 /* Drain txq_interq */
6686 while ((m = pcq_get(txq->txq_interq)) != NULL)
6687 m_freem(m);
6688 mutex_exit(txq->txq_lock);
6689 }
6690
6691 /* Mark the interface as down and cancel the watchdog timer. */
6692 ifp->if_flags &= ~IFF_RUNNING;
6693
6694 if (disable) {
6695 for (i = 0; i < sc->sc_nqueues; i++) {
6696 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6697 mutex_enter(rxq->rxq_lock);
6698 wm_rxdrain(rxq);
6699 mutex_exit(rxq->rxq_lock);
6700 }
6701 }
6702
6703 #if 0 /* notyet */
6704 if (sc->sc_type >= WM_T_82544)
6705 CSR_WRITE(sc, WMREG_WUC, 0);
6706 #endif
6707 }
6708
6709 static void
6710 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
6711 {
6712 struct mbuf *m;
6713 int i;
6714
6715 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
6716 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
6717 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
6718 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
6719 m->m_data, m->m_len, m->m_flags);
6720 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
6721 i, i == 1 ? "" : "s");
6722 }
6723
6724 /*
6725 * wm_82547_txfifo_stall:
6726 *
6727 * Callout used to wait for the 82547 Tx FIFO to drain,
6728 * reset the FIFO pointers, and restart packet transmission.
6729 */
6730 static void
6731 wm_82547_txfifo_stall(void *arg)
6732 {
6733 struct wm_softc *sc = arg;
6734 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6735
6736 mutex_enter(txq->txq_lock);
6737
6738 if (txq->txq_stopping)
6739 goto out;
6740
6741 if (txq->txq_fifo_stall) {
6742 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
6743 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
6744 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
6745 /*
6746 * Packets have drained. Stop transmitter, reset
6747 * FIFO pointers, restart transmitter, and kick
6748 * the packet queue.
6749 */
6750 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
6751 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
6752 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
6753 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
6754 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
6755 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
6756 CSR_WRITE(sc, WMREG_TCTL, tctl);
6757 CSR_WRITE_FLUSH(sc);
6758
6759 txq->txq_fifo_head = 0;
6760 txq->txq_fifo_stall = 0;
6761 wm_start_locked(&sc->sc_ethercom.ec_if);
6762 } else {
6763 /*
6764 * Still waiting for packets to drain; try again in
6765 * another tick.
6766 */
6767 callout_schedule(&sc->sc_txfifo_ch, 1);
6768 }
6769 }
6770
6771 out:
6772 mutex_exit(txq->txq_lock);
6773 }
6774
6775 /*
6776 * wm_82547_txfifo_bugchk:
6777 *
6778 * Check for bug condition in the 82547 Tx FIFO. We need to
6779 * prevent enqueueing a packet that would wrap around the end
6780  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
6781 *
6782 * We do this by checking the amount of space before the end
6783 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
6784 * the Tx FIFO, wait for all remaining packets to drain, reset
6785 * the internal FIFO pointers to the beginning, and restart
6786 * transmission on the interface.
6787 */
6788 #define WM_FIFO_HDR 0x10
6789 #define WM_82547_PAD_LEN 0x3e0
6790 static int
6791 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
6792 {
6793 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6794 int space = txq->txq_fifo_size - txq->txq_fifo_head;
6795 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
6796
6797 /* Just return if already stalled. */
6798 if (txq->txq_fifo_stall)
6799 return 1;
6800
6801 if (sc->sc_mii.mii_media_active & IFM_FDX) {
6802 /* Stall only occurs in half-duplex mode. */
6803 goto send_packet;
6804 }
6805
6806 if (len >= WM_82547_PAD_LEN + space) {
6807 txq->txq_fifo_stall = 1;
6808 callout_schedule(&sc->sc_txfifo_ch, 1);
6809 return 1;
6810 }
6811
6812 send_packet:
6813 txq->txq_fifo_head += len;
6814 if (txq->txq_fifo_head >= txq->txq_fifo_size)
6815 txq->txq_fifo_head -= txq->txq_fifo_size;
6816
6817 return 0;
6818 }
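
/*
 * Worked example for the check above (hypothetical FIFO state): in
 * half-duplex mode with only 256 bytes left before the end of the FIFO,
 * a 1400-byte packet rounds up to len = 1424 (including the 16-byte
 * FIFO header), and 1424 >= WM_82547_PAD_LEN (992) + 256, so the queue
 * stalls and the drain callout is scheduled. With 2048 bytes of space
 * the same packet is sent and txq_fifo_head advances by 1424, wrapping
 * at txq_fifo_size.
 */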
6819
6820 static int
6821 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6822 {
6823 int error;
6824
6825 /*
6826 * Allocate the control data structures, and create and load the
6827 * DMA map for it.
6828 *
6829 * NOTE: All Tx descriptors must be in the same 4G segment of
6830 * memory. So must Rx descriptors. We simplify by allocating
6831 * both sets within the same 4G segment.
6832 */
6833 if (sc->sc_type < WM_T_82544)
6834 WM_NTXDESC(txq) = WM_NTXDESC_82542;
6835 else
6836 WM_NTXDESC(txq) = WM_NTXDESC_82544;
6837 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6838 txq->txq_descsize = sizeof(nq_txdesc_t);
6839 else
6840 txq->txq_descsize = sizeof(wiseman_txdesc_t);
6841
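	/*
	 * The 4GB boundary argument to bus_dmamem_alloc() below keeps the
	 * returned segment from crossing a 0x100000000 boundary, so the
	 * descriptor ring fits entirely within one 4G region as the NOTE
	 * above requires.
	 */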
6842 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
6843 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
6844 1, &txq->txq_desc_rseg, 0)) != 0) {
6845 aprint_error_dev(sc->sc_dev,
6846 "unable to allocate TX control data, error = %d\n",
6847 error);
6848 goto fail_0;
6849 }
6850
6851 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
6852 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
6853 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
6854 aprint_error_dev(sc->sc_dev,
6855 "unable to map TX control data, error = %d\n", error);
6856 goto fail_1;
6857 }
6858
6859 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
6860 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
6861 aprint_error_dev(sc->sc_dev,
6862 "unable to create TX control data DMA map, error = %d\n",
6863 error);
6864 goto fail_2;
6865 }
6866
6867 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
6868 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
6869 aprint_error_dev(sc->sc_dev,
6870 "unable to load TX control data DMA map, error = %d\n",
6871 error);
6872 goto fail_3;
6873 }
6874
6875 return 0;
6876
6877 fail_3:
6878 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6879 fail_2:
6880 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6881 WM_TXDESCS_SIZE(txq));
6882 fail_1:
6883 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
6884 fail_0:
6885 return error;
6886 }
6887
6888 static void
6889 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6890 {
6891
6892 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
6893 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6894 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6895 WM_TXDESCS_SIZE(txq));
6896 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
6897 }
6898
6899 static int
6900 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
6901 {
6902 int error;
6903 size_t rxq_descs_size;
6904
6905 /*
6906 * Allocate the control data structures, and create and load the
6907 * DMA map for it.
6908 *
6909 * NOTE: All Tx descriptors must be in the same 4G segment of
6910 * memory. So must Rx descriptors. We simplify by allocating
6911 * both sets within the same 4G segment.
6912 */
6913 rxq->rxq_ndesc = WM_NRXDESC;
6914 if (sc->sc_type == WM_T_82574)
6915 rxq->rxq_descsize = sizeof(ext_rxdesc_t);
6916 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6917 rxq->rxq_descsize = sizeof(nq_rxdesc_t);
6918 else
6919 rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
6920 rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
6921
6922 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
6923 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
6924 1, &rxq->rxq_desc_rseg, 0)) != 0) {
6925 aprint_error_dev(sc->sc_dev,
6926 "unable to allocate RX control data, error = %d\n",
6927 error);
6928 goto fail_0;
6929 }
6930
6931 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
6932 rxq->rxq_desc_rseg, rxq_descs_size,
6933 (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
6934 aprint_error_dev(sc->sc_dev,
6935 "unable to map RX control data, error = %d\n", error);
6936 goto fail_1;
6937 }
6938
6939 if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
6940 rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
6941 aprint_error_dev(sc->sc_dev,
6942 "unable to create RX control data DMA map, error = %d\n",
6943 error);
6944 goto fail_2;
6945 }
6946
6947 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
6948 rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
6949 aprint_error_dev(sc->sc_dev,
6950 "unable to load RX control data DMA map, error = %d\n",
6951 error);
6952 goto fail_3;
6953 }
6954
6955 return 0;
6956
6957 fail_3:
6958 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
6959 fail_2:
6960 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
6961 rxq_descs_size);
6962 fail_1:
6963 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
6964 fail_0:
6965 return error;
6966 }
6967
6968 static void
6969 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
6970 {
6971
6972 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
6973 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
6974 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
6975 rxq->rxq_descsize * rxq->rxq_ndesc);
6976 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
6977 }
6978
6979
6980 static int
6981 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
6982 {
6983 int i, error;
6984
6985 /* Create the transmit buffer DMA maps. */
6986 WM_TXQUEUELEN(txq) =
6987 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
6988 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
6989 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6990 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
6991 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
6992 &txq->txq_soft[i].txs_dmamap)) != 0) {
6993 aprint_error_dev(sc->sc_dev,
6994 "unable to create Tx DMA map %d, error = %d\n",
6995 i, error);
6996 goto fail;
6997 }
6998 }
6999
7000 return 0;
7001
7002 fail:
7003 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7004 if (txq->txq_soft[i].txs_dmamap != NULL)
7005 bus_dmamap_destroy(sc->sc_dmat,
7006 txq->txq_soft[i].txs_dmamap);
7007 }
7008 return error;
7009 }
7010
7011 static void
7012 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7013 {
7014 int i;
7015
7016 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7017 if (txq->txq_soft[i].txs_dmamap != NULL)
7018 bus_dmamap_destroy(sc->sc_dmat,
7019 txq->txq_soft[i].txs_dmamap);
7020 }
7021 }
7022
7023 static int
7024 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7025 {
7026 int i, error;
7027
7028 /* Create the receive buffer DMA maps. */
7029 for (i = 0; i < rxq->rxq_ndesc; i++) {
7030 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
7031 MCLBYTES, 0, 0,
7032 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
7033 aprint_error_dev(sc->sc_dev,
7034 			    "unable to create Rx DMA map %d, error = %d\n",
7035 i, error);
7036 goto fail;
7037 }
7038 rxq->rxq_soft[i].rxs_mbuf = NULL;
7039 }
7040
7041 return 0;
7042
7043 fail:
7044 for (i = 0; i < rxq->rxq_ndesc; i++) {
7045 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7046 bus_dmamap_destroy(sc->sc_dmat,
7047 rxq->rxq_soft[i].rxs_dmamap);
7048 }
7049 return error;
7050 }
7051
7052 static void
7053 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7054 {
7055 int i;
7056
7057 for (i = 0; i < rxq->rxq_ndesc; i++) {
7058 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7059 bus_dmamap_destroy(sc->sc_dmat,
7060 rxq->rxq_soft[i].rxs_dmamap);
7061 }
7062 }
7063
7064 /*
7065  * wm_alloc_txrx_queues:
7066  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
7067 */
7068 static int
7069 wm_alloc_txrx_queues(struct wm_softc *sc)
7070 {
7071 int i, error, tx_done, rx_done;
7072
7073 sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
7074 KM_SLEEP);
7075 if (sc->sc_queue == NULL) {
7076 aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
7077 error = ENOMEM;
7078 goto fail_0;
7079 }
7080
7081 /* For transmission */
7082 error = 0;
7083 tx_done = 0;
7084 for (i = 0; i < sc->sc_nqueues; i++) {
7085 #ifdef WM_EVENT_COUNTERS
7086 int j;
7087 const char *xname;
7088 #endif
7089 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7090 txq->txq_sc = sc;
7091 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7092
7093 error = wm_alloc_tx_descs(sc, txq);
7094 if (error)
7095 break;
7096 error = wm_alloc_tx_buffer(sc, txq);
7097 if (error) {
7098 wm_free_tx_descs(sc, txq);
7099 break;
7100 }
7101 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
7102 if (txq->txq_interq == NULL) {
7103 wm_free_tx_descs(sc, txq);
7104 wm_free_tx_buffer(sc, txq);
7105 error = ENOMEM;
7106 break;
7107 }
7108
7109 #ifdef WM_EVENT_COUNTERS
7110 xname = device_xname(sc->sc_dev);
7111
7112 WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
7113 WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
7114 WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
7115 WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
7116 WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
7117 WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
7118 WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
7119 WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
7120 WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
7121 WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
7122 WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
7123
7124 for (j = 0; j < WM_NTXSEGS; j++) {
7125 snprintf(txq->txq_txseg_evcnt_names[j],
7126 sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
7127 evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
7128 NULL, xname, txq->txq_txseg_evcnt_names[j]);
7129 }
7130
7131 WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
7132 WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
7133 WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
7134 WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
7135 WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
7136 WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
7137 #endif /* WM_EVENT_COUNTERS */
7138
7139 tx_done++;
7140 }
7141 if (error)
7142 goto fail_1;
7143
7144 /* For receive */
7145 error = 0;
7146 rx_done = 0;
7147 for (i = 0; i < sc->sc_nqueues; i++) {
7148 #ifdef WM_EVENT_COUNTERS
7149 const char *xname;
7150 #endif
7151 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7152 rxq->rxq_sc = sc;
7153 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7154
7155 error = wm_alloc_rx_descs(sc, rxq);
7156 if (error)
7157 break;
7158
7159 error = wm_alloc_rx_buffer(sc, rxq);
7160 if (error) {
7161 wm_free_rx_descs(sc, rxq);
7162 break;
7163 }
7164
7165 #ifdef WM_EVENT_COUNTERS
7166 xname = device_xname(sc->sc_dev);
7167
7168 WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
7169 WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
7170
7171 WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
7172 WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
7173 #endif /* WM_EVENT_COUNTERS */
7174
7175 rx_done++;
7176 }
7177 if (error)
7178 goto fail_2;
7179
7180 return 0;
7181
7182 fail_2:
7183 for (i = 0; i < rx_done; i++) {
7184 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7185 wm_free_rx_buffer(sc, rxq);
7186 wm_free_rx_descs(sc, rxq);
7187 if (rxq->rxq_lock)
7188 mutex_obj_free(rxq->rxq_lock);
7189 }
7190 fail_1:
7191 for (i = 0; i < tx_done; i++) {
7192 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7193 pcq_destroy(txq->txq_interq);
7194 wm_free_tx_buffer(sc, txq);
7195 wm_free_tx_descs(sc, txq);
7196 if (txq->txq_lock)
7197 mutex_obj_free(txq->txq_lock);
7198 }
7199
7200 kmem_free(sc->sc_queue,
7201 sizeof(struct wm_queue) * sc->sc_nqueues);
7202 fail_0:
7203 return error;
7204 }
7205
7206 /*
7207  * wm_free_txrx_queues:
7208  *	Free {tx,rx} descriptors and {tx,rx} buffers
7209 */
7210 static void
7211 wm_free_txrx_queues(struct wm_softc *sc)
7212 {
7213 int i;
7214
7215 for (i = 0; i < sc->sc_nqueues; i++) {
7216 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7217
7218 #ifdef WM_EVENT_COUNTERS
7219 WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
7220 WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
7221 WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
7222 WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
7223 #endif /* WM_EVENT_COUNTERS */
7224
7225 wm_free_rx_buffer(sc, rxq);
7226 wm_free_rx_descs(sc, rxq);
7227 if (rxq->rxq_lock)
7228 mutex_obj_free(rxq->rxq_lock);
7229 }
7230
7231 for (i = 0; i < sc->sc_nqueues; i++) {
7232 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7233 struct mbuf *m;
7234 #ifdef WM_EVENT_COUNTERS
7235 int j;
7236
7237 WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
7238 WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
7239 WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
7240 WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
7241 WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
7242 WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
7243 WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
7244 WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
7245 WM_Q_EVCNT_DETACH(txq, tso, txq, i);
7246 WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
7247 WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
7248
7249 for (j = 0; j < WM_NTXSEGS; j++)
7250 evcnt_detach(&txq->txq_ev_txseg[j]);
7251
7252 WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
7253 WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
7254 WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
7255 WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
7256 WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
7257 WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
7258 #endif /* WM_EVENT_COUNTERS */
7259
7260 /* Drain txq_interq */
7261 while ((m = pcq_get(txq->txq_interq)) != NULL)
7262 m_freem(m);
7263 pcq_destroy(txq->txq_interq);
7264
7265 wm_free_tx_buffer(sc, txq);
7266 wm_free_tx_descs(sc, txq);
7267 if (txq->txq_lock)
7268 mutex_obj_free(txq->txq_lock);
7269 }
7270
7271 kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
7272 }
7273
7274 static void
7275 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7276 {
7277
7278 KASSERT(mutex_owned(txq->txq_lock));
7279
7280 /* Initialize the transmit descriptor ring. */
7281 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
7282 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
7283 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7284 txq->txq_free = WM_NTXDESC(txq);
7285 txq->txq_next = 0;
7286 }
7287
7288 static void
7289 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7290 struct wm_txqueue *txq)
7291 {
7292
7293 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7294 device_xname(sc->sc_dev), __func__));
7295 KASSERT(mutex_owned(txq->txq_lock));
7296
7297 if (sc->sc_type < WM_T_82543) {
7298 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
7299 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
7300 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
7301 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
7302 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
7303 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
7304 } else {
7305 int qid = wmq->wmq_id;
7306
7307 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
7308 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
7309 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
7310 CSR_WRITE(sc, WMREG_TDH(qid), 0);
7311
7312 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7313 /*
7314 * Don't write TDT before TCTL.EN is set.
7315 			 * See the documentation.
7316 */
7317 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
7318 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
7319 | TXDCTL_WTHRESH(0));
7320 else {
7321 /* XXX should update with AIM? */
7322 CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
7323 if (sc->sc_type >= WM_T_82540) {
7324 				/* TADV should be set to the same value as TIDV */
7325 CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
7326 }
7327
7328 CSR_WRITE(sc, WMREG_TDT(qid), 0);
7329 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
7330 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
7331 }
7332 }
7333 }
7334
7335 static void
7336 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7337 {
7338 int i;
7339
7340 KASSERT(mutex_owned(txq->txq_lock));
7341
7342 /* Initialize the transmit job descriptors. */
7343 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
7344 txq->txq_soft[i].txs_mbuf = NULL;
7345 txq->txq_sfree = WM_TXQUEUELEN(txq);
7346 txq->txq_snext = 0;
7347 txq->txq_sdirty = 0;
7348 }
7349
7350 static void
7351 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7352 struct wm_txqueue *txq)
7353 {
7354
7355 KASSERT(mutex_owned(txq->txq_lock));
7356
7357 /*
7358 * Set up some register offsets that are different between
7359 * the i82542 and the i82543 and later chips.
7360 */
7361 if (sc->sc_type < WM_T_82543)
7362 txq->txq_tdt_reg = WMREG_OLD_TDT;
7363 else
7364 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
7365
7366 wm_init_tx_descs(sc, txq);
7367 wm_init_tx_regs(sc, wmq, txq);
7368 wm_init_tx_buffer(sc, txq);
7369
7370 txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
7371 txq->txq_sending = false;
7372 }
7373
7374 static void
7375 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7376 struct wm_rxqueue *rxq)
7377 {
7378
7379 KASSERT(mutex_owned(rxq->rxq_lock));
7380
7381 /*
7382 * Initialize the receive descriptor and receive job
7383 * descriptor rings.
7384 */
7385 if (sc->sc_type < WM_T_82543) {
7386 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
7387 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
7388 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
7389 rxq->rxq_descsize * rxq->rxq_ndesc);
7390 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
7391 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
7392 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
7393
7394 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
7395 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
7396 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
7397 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
7398 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
7399 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
7400 } else {
7401 int qid = wmq->wmq_id;
7402
7403 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
7404 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
7405 CSR_WRITE(sc, WMREG_RDLEN(qid),
7406 rxq->rxq_descsize * rxq->rxq_ndesc);
7407
7408 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
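			/*
			 * The buffer size programmed into SRRCTL below is in
			 * units of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, so
			 * MCLBYTES must be a multiple of that granularity.
			 */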
7409 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
7410 panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
7411
7412 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
7413 CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
7414 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
7415 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
7416 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
7417 | RXDCTL_WTHRESH(1));
7418 CSR_WRITE(sc, WMREG_RDH(qid), 0);
7419 CSR_WRITE(sc, WMREG_RDT(qid), 0);
7420 } else {
7421 CSR_WRITE(sc, WMREG_RDH(qid), 0);
7422 CSR_WRITE(sc, WMREG_RDT(qid), 0);
7423 /* XXX should update with AIM? */
7424 CSR_WRITE(sc, WMREG_RDTR,
7425 (wmq->wmq_itr / 4) | RDTR_FPD);
7426 			/* RADV MUST be set to the same value as RDTR */
7427 CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
7428 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
7429 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
7430 }
7431 }
7432 }
7433
7434 static int
7435 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7436 {
7437 struct wm_rxsoft *rxs;
7438 int error, i;
7439
7440 KASSERT(mutex_owned(rxq->rxq_lock));
7441
7442 for (i = 0; i < rxq->rxq_ndesc; i++) {
7443 rxs = &rxq->rxq_soft[i];
7444 if (rxs->rxs_mbuf == NULL) {
7445 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
7446 log(LOG_ERR, "%s: unable to allocate or map "
7447 "rx buffer %d, error = %d\n",
7448 device_xname(sc->sc_dev), i, error);
7449 /*
7450 * XXX Should attempt to run with fewer receive
7451 * XXX buffers instead of just failing.
7452 */
7453 wm_rxdrain(rxq);
7454 return ENOMEM;
7455 }
7456 } else {
7457 /*
7458 * For 82575 and 82576, the RX descriptors must be
7459 * initialized after the setting of RCTL.EN in
7460 * wm_set_filter()
7461 */
7462 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
7463 wm_init_rxdesc(rxq, i);
7464 }
7465 }
7466 rxq->rxq_ptr = 0;
7467 rxq->rxq_discard = 0;
7468 WM_RXCHAIN_RESET(rxq);
7469
7470 return 0;
7471 }
7472
7473 static int
7474 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7475 struct wm_rxqueue *rxq)
7476 {
7477
7478 KASSERT(mutex_owned(rxq->rxq_lock));
7479
7480 /*
7481 * Set up some register offsets that are different between
7482 * the i82542 and the i82543 and later chips.
7483 */
7484 if (sc->sc_type < WM_T_82543)
7485 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
7486 else
7487 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
7488
7489 wm_init_rx_regs(sc, wmq, rxq);
7490 return wm_init_rx_buffer(sc, rxq);
7491 }
7492
7493 /*
7494  * wm_init_txrx_queues:
7495 * Initialize {tx,rx}descs and {tx,rx} buffers
7496 */
7497 static int
7498 wm_init_txrx_queues(struct wm_softc *sc)
7499 {
7500 int i, error = 0;
7501
7502 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7503 device_xname(sc->sc_dev), __func__));
7504
7505 for (i = 0; i < sc->sc_nqueues; i++) {
7506 struct wm_queue *wmq = &sc->sc_queue[i];
7507 struct wm_txqueue *txq = &wmq->wmq_txq;
7508 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
7509
7510 /*
7511 * TODO
7512 		 * Currently, a constant value is used instead of AIM.
7513 		 * Furthermore, the interrupt interval for multiqueue, which
7514 		 * uses polling mode, is lower than the default value.
7515 		 * More tuning, and AIM, are required.
7516 */
7517 if (wm_is_using_multiqueue(sc))
7518 wmq->wmq_itr = 50;
7519 else
7520 wmq->wmq_itr = sc->sc_itr_init;
7521 wmq->wmq_set_itr = true;
7522
7523 mutex_enter(txq->txq_lock);
7524 wm_init_tx_queue(sc, wmq, txq);
7525 mutex_exit(txq->txq_lock);
7526
7527 mutex_enter(rxq->rxq_lock);
7528 error = wm_init_rx_queue(sc, wmq, rxq);
7529 mutex_exit(rxq->rxq_lock);
7530 if (error)
7531 break;
7532 }
7533
7534 return error;
7535 }
7536
7537 /*
7538 * wm_tx_offload:
7539 *
7540 * Set up TCP/IP checksumming parameters for the
7541 * specified packet.
7542 */
7543 static void
7544 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
7545 struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
7546 {
7547 struct mbuf *m0 = txs->txs_mbuf;
7548 struct livengood_tcpip_ctxdesc *t;
7549 uint32_t ipcs, tucs, cmd, cmdlen, seg;
7550 uint32_t ipcse;
7551 struct ether_header *eh;
7552 int offset, iphl;
7553 uint8_t fields;
7554
7555 /*
7556 * XXX It would be nice if the mbuf pkthdr had offset
7557 * fields for the protocol headers.
7558 */
7559
7560 eh = mtod(m0, struct ether_header *);
7561 switch (htons(eh->ether_type)) {
7562 case ETHERTYPE_IP:
7563 case ETHERTYPE_IPV6:
7564 offset = ETHER_HDR_LEN;
7565 break;
7566
7567 case ETHERTYPE_VLAN:
7568 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
7569 break;
7570
7571 default:
7572 /* Don't support this protocol or encapsulation. */
7573 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
7574 txq->txq_last_hw_ipcs = 0;
7575 txq->txq_last_hw_tucs = 0;
7576 *fieldsp = 0;
7577 *cmdp = 0;
7578 return;
7579 }
7580
7581 if ((m0->m_pkthdr.csum_flags &
7582 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
7583 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
7584 } else
7585 iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
7586
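	/* IPCSE is the offset of the last byte of the IP header (inclusive). */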
7587 ipcse = offset + iphl - 1;
7588
7589 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
7590 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
7591 seg = 0;
7592 fields = 0;
7593
7594 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
7595 int hlen = offset + iphl;
7596 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
7597
7598 if (__predict_false(m0->m_len <
7599 (hlen + sizeof(struct tcphdr)))) {
7600 /*
7601 * TCP/IP headers are not in the first mbuf; we need
7602 * to do this the slow and painful way. Let's just
7603 * hope this doesn't happen very often.
7604 */
7605 struct tcphdr th;
7606
7607 WM_Q_EVCNT_INCR(txq, tsopain);
7608
7609 m_copydata(m0, hlen, sizeof(th), &th);
7610 if (v4) {
7611 struct ip ip;
7612
7613 m_copydata(m0, offset, sizeof(ip), &ip);
7614 ip.ip_len = 0;
7615 m_copyback(m0,
7616 offset + offsetof(struct ip, ip_len),
7617 sizeof(ip.ip_len), &ip.ip_len);
7618 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
7619 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
7620 } else {
7621 struct ip6_hdr ip6;
7622
7623 m_copydata(m0, offset, sizeof(ip6), &ip6);
7624 ip6.ip6_plen = 0;
7625 m_copyback(m0,
7626 offset + offsetof(struct ip6_hdr, ip6_plen),
7627 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
7628 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
7629 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
7630 }
7631 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
7632 sizeof(th.th_sum), &th.th_sum);
7633
7634 hlen += th.th_off << 2;
7635 } else {
7636 /*
7637 * TCP/IP headers are in the first mbuf; we can do
7638 * this the easy way.
7639 */
7640 struct tcphdr *th;
7641
7642 if (v4) {
7643 struct ip *ip =
7644 (void *)(mtod(m0, char *) + offset);
7645 th = (void *)(mtod(m0, char *) + hlen);
7646
7647 ip->ip_len = 0;
7648 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
7649 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
7650 } else {
7651 struct ip6_hdr *ip6 =
7652 (void *)(mtod(m0, char *) + offset);
7653 th = (void *)(mtod(m0, char *) + hlen);
7654
7655 ip6->ip6_plen = 0;
7656 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
7657 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
7658 }
7659 hlen += th->th_off << 2;
7660 }
7661
7662 if (v4) {
7663 WM_Q_EVCNT_INCR(txq, tso);
7664 cmdlen |= WTX_TCPIP_CMD_IP;
7665 } else {
7666 WM_Q_EVCNT_INCR(txq, tso6);
7667 ipcse = 0;
7668 }
7669 cmd |= WTX_TCPIP_CMD_TSE;
7670 cmdlen |= WTX_TCPIP_CMD_TSE |
7671 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
7672 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
7673 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
7674 }
7675
7676 /*
7677 * NOTE: Even if we're not using the IP or TCP/UDP checksum
7678 * offload feature, if we load the context descriptor, we
7679 * MUST provide valid values for IPCSS and TUCSS fields.
7680 */
7681
7682 ipcs = WTX_TCPIP_IPCSS(offset) |
7683 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
7684 WTX_TCPIP_IPCSE(ipcse);
7685 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
7686 WM_Q_EVCNT_INCR(txq, ipsum);
7687 fields |= WTX_IXSM;
7688 }
7689
7690 offset += iphl;
7691
7692 if (m0->m_pkthdr.csum_flags &
7693 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
7694 WM_Q_EVCNT_INCR(txq, tusum);
7695 fields |= WTX_TXSM;
7696 tucs = WTX_TCPIP_TUCSS(offset) |
7697 WTX_TCPIP_TUCSO(offset +
7698 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
7699 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7700 } else if ((m0->m_pkthdr.csum_flags &
7701 (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
7702 WM_Q_EVCNT_INCR(txq, tusum6);
7703 fields |= WTX_TXSM;
7704 tucs = WTX_TCPIP_TUCSS(offset) |
7705 WTX_TCPIP_TUCSO(offset +
7706 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
7707 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7708 } else {
7709 /* Just initialize it to a valid TCP context. */
7710 tucs = WTX_TCPIP_TUCSS(offset) |
7711 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
7712 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7713 }
7714
7715 *cmdp = cmd;
7716 *fieldsp = fields;
7717
7718 /*
7719 	 * We don't have to write a context descriptor for every packet,
7720 	 * except on the 82574. On the 82574 we must write a context
7721 	 * descriptor for every packet when we use two descriptor queues.
7722 	 *
7723 	 * The 82574L can only remember the *last* context used,
7724 	 * regardless of the queue it was used for. We cannot reuse
7725 	 * contexts on this hardware platform and must generate a new
7726 	 * context every time. See the 82574L hardware spec, section
7727 	 * 7.2.6, second note.
7728 */
7729 if (sc->sc_nqueues < 2) {
7730 /*
7731 		 *
7732 		 * Setting up a new checksum offload context for every
7733 		 * frame takes a lot of processing time in hardware, and
7734 		 * it also hurts performance noticeably for small frames,
7735 		 * so avoid it when the driver can reuse the previously
7736 		 * configured checksum offload context.
7737 		 * For TSO, in theory we could reuse the same TSO context
7738 		 * as long as the frame has the same type (IP/TCP) and the
7739 		 * same MSS. However, checking whether a frame has the same
7740 		 * IP/TCP structure is hard, so ignore that and always
7741 		 * establish a new TSO context.
7742 */
7743 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
7744 == 0) {
7745 if (txq->txq_last_hw_cmd == cmd &&
7746 txq->txq_last_hw_fields == fields &&
7747 txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
7748 txq->txq_last_hw_tucs == (tucs & 0xffff)) {
7749 WM_Q_EVCNT_INCR(txq, skipcontext);
7750 return;
7751 }
7752 }
7753
7754 txq->txq_last_hw_cmd = cmd;
7755 txq->txq_last_hw_fields = fields;
7756 txq->txq_last_hw_ipcs = (ipcs & 0xffff);
7757 txq->txq_last_hw_tucs = (tucs & 0xffff);
7758 }
7759
7760 /* Fill in the context descriptor. */
7761 t = (struct livengood_tcpip_ctxdesc *)
7762 &txq->txq_descs[txq->txq_next];
7763 t->tcpip_ipcs = htole32(ipcs);
7764 t->tcpip_tucs = htole32(tucs);
7765 t->tcpip_cmdlen = htole32(cmdlen);
7766 t->tcpip_seg = htole32(seg);
7767 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
7768
7769 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
7770 txs->txs_ndesc++;
7771 }
7772
7773 static inline int
7774 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
7775 {
7776 struct wm_softc *sc = ifp->if_softc;
7777 u_int cpuid = cpu_index(curcpu());
7778
7779 /*
7780 	 * Currently a simple distribution strategy is used.
7781 	 * TODO:
7782 	 * Distribute by flow id (e.g. the RSS hash value).
7783 */
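	/*
	 * The CPU index is rotated by sc_affinity_offset and then folded
	 * onto the number of Tx queues, so each CPU always maps to the
	 * same queue.
	 */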
7784 return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
7785 }
7786
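/*
 * wm_linkdown_discard:
 *
 *	Return true if the queue is marked to silently discard packets
 *	while the link is down (WM_TXQ_LINKDOWN_DISCARD is set).
 */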
7787 static inline bool
7788 wm_linkdown_discard(struct wm_txqueue *txq)
7789 {
7790
7791 if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
7792 return true;
7793
7794 return false;
7795 }
7796
7797 /*
7798 * wm_start: [ifnet interface function]
7799 *
7800 * Start packet transmission on the interface.
7801 */
7802 static void
7803 wm_start(struct ifnet *ifp)
7804 {
7805 struct wm_softc *sc = ifp->if_softc;
7806 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7807
7808 #ifdef WM_MPSAFE
7809 KASSERT(if_is_mpsafe(ifp));
7810 #endif
7811 /*
7812 * if_obytes and if_omcasts are added in if_transmit()@if.c.
7813 */
7814
7815 mutex_enter(txq->txq_lock);
7816 if (!txq->txq_stopping)
7817 wm_start_locked(ifp);
7818 mutex_exit(txq->txq_lock);
7819 }
7820
7821 static void
7822 wm_start_locked(struct ifnet *ifp)
7823 {
7824 struct wm_softc *sc = ifp->if_softc;
7825 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7826
7827 wm_send_common_locked(ifp, txq, false);
7828 }
7829
7830 static int
7831 wm_transmit(struct ifnet *ifp, struct mbuf *m)
7832 {
7833 int qid;
7834 struct wm_softc *sc = ifp->if_softc;
7835 struct wm_txqueue *txq;
7836
7837 qid = wm_select_txqueue(ifp, m);
7838 txq = &sc->sc_queue[qid].wmq_txq;
7839
7840 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
7841 m_freem(m);
7842 WM_Q_EVCNT_INCR(txq, pcqdrop);
7843 return ENOBUFS;
7844 }
7845
7846 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
7847 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
7848 if (m->m_flags & M_MCAST)
7849 if_statinc_ref(nsr, if_omcasts);
7850 IF_STAT_PUTREF(ifp);
7851
7852 if (mutex_tryenter(txq->txq_lock)) {
7853 if (!txq->txq_stopping)
7854 wm_transmit_locked(ifp, txq);
7855 mutex_exit(txq->txq_lock);
7856 }
7857
7858 return 0;
7859 }
7860
7861 static void
7862 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
7863 {
7864
7865 wm_send_common_locked(ifp, txq, true);
7866 }
7867
7868 static void
7869 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
7870 bool is_transmit)
7871 {
7872 struct wm_softc *sc = ifp->if_softc;
7873 struct mbuf *m0;
7874 struct wm_txsoft *txs;
7875 bus_dmamap_t dmamap;
7876 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
7877 bus_addr_t curaddr;
7878 bus_size_t seglen, curlen;
7879 uint32_t cksumcmd;
7880 uint8_t cksumfields;
7881 bool remap = true;
7882
7883 KASSERT(mutex_owned(txq->txq_lock));
7884
7885 if ((ifp->if_flags & IFF_RUNNING) == 0)
7886 return;
7887 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
7888 return;
7889
7890 if (__predict_false(wm_linkdown_discard(txq))) {
7891 do {
7892 if (is_transmit)
7893 m0 = pcq_get(txq->txq_interq);
7894 else
7895 IFQ_DEQUEUE(&ifp->if_snd, m0);
7896 /*
7897 			 * Increment the output packet counter as if the packet
7898 			 * had been sent and then discarded by the link-down PHY.
7899 */
7900 if (m0 != NULL)
7901 if_statinc(ifp, if_opackets);
7902 m_freem(m0);
7903 } while (m0 != NULL);
7904 return;
7905 }
7906
7907 /* Remember the previous number of free descriptors. */
7908 ofree = txq->txq_free;
7909
7910 /*
7911 * Loop through the send queue, setting up transmit descriptors
7912 * until we drain the queue, or use up all available transmit
7913 * descriptors.
7914 */
7915 for (;;) {
7916 m0 = NULL;
7917
7918 /* Get a work queue entry. */
7919 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
7920 wm_txeof(txq, UINT_MAX);
7921 if (txq->txq_sfree == 0) {
7922 DPRINTF(sc, WM_DEBUG_TX,
7923 ("%s: TX: no free job descriptors\n",
7924 device_xname(sc->sc_dev)));
7925 WM_Q_EVCNT_INCR(txq, txsstall);
7926 break;
7927 }
7928 }
7929
7930 /* Grab a packet off the queue. */
7931 if (is_transmit)
7932 m0 = pcq_get(txq->txq_interq);
7933 else
7934 IFQ_DEQUEUE(&ifp->if_snd, m0);
7935 if (m0 == NULL)
7936 break;
7937
7938 DPRINTF(sc, WM_DEBUG_TX,
7939 ("%s: TX: have packet to transmit: %p\n",
7940 device_xname(sc->sc_dev), m0));
7941
7942 txs = &txq->txq_soft[txq->txq_snext];
7943 dmamap = txs->txs_dmamap;
7944
7945 use_tso = (m0->m_pkthdr.csum_flags &
7946 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
7947
7948 /*
7949 * So says the Linux driver:
7950 * The controller does a simple calculation to make sure
7951 * there is enough room in the FIFO before initiating the
7952 * DMA for each buffer. The calc is:
7953 * 4 = ceil(buffer len / MSS)
7954 * To make sure we don't overrun the FIFO, adjust the max
7955 * buffer len if the MSS drops.
7956 */
7957 dmamap->dm_maxsegsz =
7958 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
7959 ? m0->m_pkthdr.segsz << 2
7960 : WTX_MAX_LEN;
7961
7962 /*
7963 * Load the DMA map. If this fails, the packet either
7964 * didn't fit in the allotted number of segments, or we
7965 * were short on resources. For the too-many-segments
7966 * case, we simply report an error and drop the packet,
7967 * since we can't sanely copy a jumbo packet to a single
7968 * buffer.
7969 */
7970 retry:
7971 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
7972 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
7973 if (__predict_false(error)) {
7974 if (error == EFBIG) {
7975 if (remap == true) {
7976 struct mbuf *m;
7977
7978 remap = false;
7979 m = m_defrag(m0, M_NOWAIT);
7980 if (m != NULL) {
7981 WM_Q_EVCNT_INCR(txq, defrag);
7982 m0 = m;
7983 goto retry;
7984 }
7985 }
7986 WM_Q_EVCNT_INCR(txq, toomanyseg);
7987 log(LOG_ERR, "%s: Tx packet consumes too many "
7988 "DMA segments, dropping...\n",
7989 device_xname(sc->sc_dev));
7990 wm_dump_mbuf_chain(sc, m0);
7991 m_freem(m0);
7992 continue;
7993 }
7994 /* Short on resources, just stop for now. */
7995 DPRINTF(sc, WM_DEBUG_TX,
7996 ("%s: TX: dmamap load failed: %d\n",
7997 device_xname(sc->sc_dev), error));
7998 break;
7999 }
8000
8001 segs_needed = dmamap->dm_nsegs;
8002 if (use_tso) {
8003 /* For sentinel descriptor; see below. */
8004 segs_needed++;
8005 }
8006
8007 /*
8008 * Ensure we have enough descriptors free to describe
8009 * the packet. Note, we always reserve one descriptor
8010 * at the end of the ring due to the semantics of the
8011 * TDT register, plus one more in the event we need
8012 * to load offload context.
8013 */
8014 if (segs_needed > txq->txq_free - 2) {
8015 /*
8016 * Not enough free descriptors to transmit this
8017 * packet. We haven't committed anything yet,
8018 * so just unload the DMA map, put the packet
8019 			 * back on the queue, and punt. Notify the upper
8020 * layer that there are no more slots left.
8021 */
8022 DPRINTF(sc, WM_DEBUG_TX,
8023 ("%s: TX: need %d (%d) descriptors, have %d\n",
8024 device_xname(sc->sc_dev), dmamap->dm_nsegs,
8025 segs_needed, txq->txq_free - 1));
8026 txq->txq_flags |= WM_TXQ_NO_SPACE;
8027 bus_dmamap_unload(sc->sc_dmat, dmamap);
8028 WM_Q_EVCNT_INCR(txq, txdstall);
8029 break;
8030 }
8031
8032 /*
8033 * Check for 82547 Tx FIFO bug. We need to do this
8034 * once we know we can transmit the packet, since we
8035 * do some internal FIFO space accounting here.
8036 */
8037 if (sc->sc_type == WM_T_82547 &&
8038 wm_82547_txfifo_bugchk(sc, m0)) {
8039 DPRINTF(sc, WM_DEBUG_TX,
8040 ("%s: TX: 82547 Tx FIFO bug detected\n",
8041 device_xname(sc->sc_dev)));
8042 txq->txq_flags |= WM_TXQ_NO_SPACE;
8043 bus_dmamap_unload(sc->sc_dmat, dmamap);
8044 WM_Q_EVCNT_INCR(txq, fifo_stall);
8045 break;
8046 }
8047
8048 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
8049
8050 DPRINTF(sc, WM_DEBUG_TX,
8051 ("%s: TX: packet has %d (%d) DMA segments\n",
8052 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
8053
8054 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
8055
8056 /*
8057 * Store a pointer to the packet so that we can free it
8058 * later.
8059 *
8060 * Initially, we consider the number of descriptors the
8061 		 * packet uses to be the number of DMA segments. This may be
8062 * incremented by 1 if we do checksum offload (a descriptor
8063 * is used to set the checksum context).
8064 */
8065 txs->txs_mbuf = m0;
8066 txs->txs_firstdesc = txq->txq_next;
8067 txs->txs_ndesc = segs_needed;
8068
8069 /* Set up offload parameters for this packet. */
8070 if (m0->m_pkthdr.csum_flags &
8071 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
8072 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8073 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
8074 wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
8075 } else {
8076 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
8077 txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
8078 cksumcmd = 0;
8079 cksumfields = 0;
8080 }
8081
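		/*
		 * Ask for an interrupt delay (IDE) and FCS insertion (IFCS)
		 * on every data descriptor of this packet.
		 */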
8082 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
8083
8084 /* Sync the DMA map. */
8085 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
8086 BUS_DMASYNC_PREWRITE);
8087
8088 /* Initialize the transmit descriptor. */
8089 for (nexttx = txq->txq_next, seg = 0;
8090 seg < dmamap->dm_nsegs; seg++) {
8091 for (seglen = dmamap->dm_segs[seg].ds_len,
8092 curaddr = dmamap->dm_segs[seg].ds_addr;
8093 seglen != 0;
8094 curaddr += curlen, seglen -= curlen,
8095 nexttx = WM_NEXTTX(txq, nexttx)) {
8096 curlen = seglen;
8097
8098 /*
8099 * So says the Linux driver:
8100 * Work around for premature descriptor
8101 * write-backs in TSO mode. Append a
8102 * 4-byte sentinel descriptor.
8103 */
8104 if (use_tso && seg == dmamap->dm_nsegs - 1 &&
8105 curlen > 8)
8106 curlen -= 4;
8107
8108 wm_set_dma_addr(
8109 &txq->txq_descs[nexttx].wtx_addr, curaddr);
8110 txq->txq_descs[nexttx].wtx_cmdlen
8111 = htole32(cksumcmd | curlen);
8112 txq->txq_descs[nexttx].wtx_fields.wtxu_status
8113 = 0;
8114 txq->txq_descs[nexttx].wtx_fields.wtxu_options
8115 = cksumfields;
8116 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
8117 lasttx = nexttx;
8118
8119 DPRINTF(sc, WM_DEBUG_TX,
8120 ("%s: TX: desc %d: low %#" PRIx64 ", "
8121 "len %#04zx\n",
8122 device_xname(sc->sc_dev), nexttx,
8123 (uint64_t)curaddr, curlen));
8124 }
8125 }
8126
8127 KASSERT(lasttx != -1);
8128
8129 /*
8130 * Set up the command byte on the last descriptor of
8131 * the packet. If we're in the interrupt delay window,
8132 * delay the interrupt.
8133 */
8134 txq->txq_descs[lasttx].wtx_cmdlen |=
8135 htole32(WTX_CMD_EOP | WTX_CMD_RS);
8136
8137 /*
8138 * If VLANs are enabled and the packet has a VLAN tag, set
8139 * up the descriptor to encapsulate the packet for us.
8140 *
8141 * This is only valid on the last descriptor of the packet.
8142 */
8143 if (vlan_has_tag(m0)) {
8144 txq->txq_descs[lasttx].wtx_cmdlen |=
8145 htole32(WTX_CMD_VLE);
8146 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
8147 = htole16(vlan_get_tag(m0));
8148 }
8149
8150 txs->txs_lastdesc = lasttx;
8151
8152 DPRINTF(sc, WM_DEBUG_TX,
8153 ("%s: TX: desc %d: cmdlen 0x%08x\n",
8154 device_xname(sc->sc_dev),
8155 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
8156
8157 /* Sync the descriptors we're using. */
8158 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
8159 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8160
8161 /* Give the packet to the chip. */
8162 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
8163
8164 DPRINTF(sc, WM_DEBUG_TX,
8165 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
8166
8167 DPRINTF(sc, WM_DEBUG_TX,
8168 ("%s: TX: finished transmitting packet, job %d\n",
8169 device_xname(sc->sc_dev), txq->txq_snext));
8170
8171 /* Advance the tx pointer. */
8172 txq->txq_free -= txs->txs_ndesc;
8173 txq->txq_next = nexttx;
8174
8175 txq->txq_sfree--;
8176 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
8177
8178 /* Pass the packet to any BPF listeners. */
8179 bpf_mtap(ifp, m0, BPF_D_OUT);
8180 }
8181
8182 if (m0 != NULL) {
8183 txq->txq_flags |= WM_TXQ_NO_SPACE;
8184 WM_Q_EVCNT_INCR(txq, descdrop);
8185 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
8186 __func__));
8187 m_freem(m0);
8188 }
8189
8190 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
8191 /* No more slots; notify upper layer. */
8192 txq->txq_flags |= WM_TXQ_NO_SPACE;
8193 }
8194
8195 if (txq->txq_free != ofree) {
8196 /* Set a watchdog timer in case the chip flakes out. */
8197 txq->txq_lastsent = time_uptime;
8198 txq->txq_sending = true;
8199 }
8200 }
8201
8202 /*
8203 * wm_nq_tx_offload:
8204 *
8205 * Set up TCP/IP checksumming parameters for the
8206 * specified packet, for NEWQUEUE devices
8207 */
8208 static void
8209 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8210 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
8211 {
8212 struct mbuf *m0 = txs->txs_mbuf;
8213 uint32_t vl_len, mssidx, cmdc;
8214 struct ether_header *eh;
8215 int offset, iphl;
8216
8217 /*
8218 * XXX It would be nice if the mbuf pkthdr had offset
8219 * fields for the protocol headers.
8220 */
8221 *cmdlenp = 0;
8222 *fieldsp = 0;
8223
8224 eh = mtod(m0, struct ether_header *);
8225 switch (htons(eh->ether_type)) {
8226 case ETHERTYPE_IP:
8227 case ETHERTYPE_IPV6:
8228 offset = ETHER_HDR_LEN;
8229 break;
8230
8231 case ETHERTYPE_VLAN:
8232 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8233 break;
8234
8235 default:
8236 /* Don't support this protocol or encapsulation. */
8237 *do_csum = false;
8238 return;
8239 }
8240 *do_csum = true;
8241 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
8242 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
8243
8244 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
8245 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
8246
8247 if ((m0->m_pkthdr.csum_flags &
8248 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8249 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8250 } else {
8251 iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
8252 }
8253 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
8254 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
8255
8256 if (vlan_has_tag(m0)) {
8257 vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
8258 << NQTXC_VLLEN_VLAN_SHIFT);
8259 *cmdlenp |= NQTX_CMD_VLE;
8260 }
8261
8262 mssidx = 0;
8263
8264 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8265 int hlen = offset + iphl;
8266 int tcp_hlen;
8267 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
8268
8269 if (__predict_false(m0->m_len <
8270 (hlen + sizeof(struct tcphdr)))) {
8271 /*
8272 * TCP/IP headers are not in the first mbuf; we need
8273 * to do this the slow and painful way. Let's just
8274 * hope this doesn't happen very often.
8275 */
8276 struct tcphdr th;
8277
8278 WM_Q_EVCNT_INCR(txq, tsopain);
8279
8280 m_copydata(m0, hlen, sizeof(th), &th);
8281 if (v4) {
8282 struct ip ip;
8283
8284 m_copydata(m0, offset, sizeof(ip), &ip);
8285 ip.ip_len = 0;
8286 m_copyback(m0,
8287 offset + offsetof(struct ip, ip_len),
8288 sizeof(ip.ip_len), &ip.ip_len);
8289 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8290 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8291 } else {
8292 struct ip6_hdr ip6;
8293
8294 m_copydata(m0, offset, sizeof(ip6), &ip6);
8295 ip6.ip6_plen = 0;
8296 m_copyback(m0,
8297 offset + offsetof(struct ip6_hdr, ip6_plen),
8298 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8299 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8300 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8301 }
8302 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8303 sizeof(th.th_sum), &th.th_sum);
8304
8305 tcp_hlen = th.th_off << 2;
8306 } else {
8307 /*
8308 * TCP/IP headers are in the first mbuf; we can do
8309 * this the easy way.
8310 */
8311 struct tcphdr *th;
8312
8313 if (v4) {
8314 struct ip *ip =
8315 (void *)(mtod(m0, char *) + offset);
8316 th = (void *)(mtod(m0, char *) + hlen);
8317
8318 ip->ip_len = 0;
8319 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8320 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8321 } else {
8322 struct ip6_hdr *ip6 =
8323 (void *)(mtod(m0, char *) + offset);
8324 th = (void *)(mtod(m0, char *) + hlen);
8325
8326 ip6->ip6_plen = 0;
8327 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8328 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8329 }
8330 tcp_hlen = th->th_off << 2;
8331 }
8332 hlen += tcp_hlen;
8333 *cmdlenp |= NQTX_CMD_TSE;
8334
8335 if (v4) {
8336 WM_Q_EVCNT_INCR(txq, tso);
8337 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
8338 } else {
8339 WM_Q_EVCNT_INCR(txq, tso6);
8340 *fieldsp |= NQTXD_FIELDS_TUXSM;
8341 }
8342 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
8343 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8344 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
8345 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
8346 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
8347 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
8348 } else {
8349 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
8350 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8351 }
8352
8353 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
8354 *fieldsp |= NQTXD_FIELDS_IXSM;
8355 cmdc |= NQTXC_CMD_IP4;
8356 }
8357
8358 if (m0->m_pkthdr.csum_flags &
8359 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
8360 WM_Q_EVCNT_INCR(txq, tusum);
8361 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
8362 cmdc |= NQTXC_CMD_TCP;
8363 else
8364 cmdc |= NQTXC_CMD_UDP;
8365
8366 cmdc |= NQTXC_CMD_IP4;
8367 *fieldsp |= NQTXD_FIELDS_TUXSM;
8368 }
8369 if (m0->m_pkthdr.csum_flags &
8370 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
8371 WM_Q_EVCNT_INCR(txq, tusum6);
8372 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
8373 cmdc |= NQTXC_CMD_TCP;
8374 else
8375 cmdc |= NQTXC_CMD_UDP;
8376
8377 cmdc |= NQTXC_CMD_IP6;
8378 *fieldsp |= NQTXD_FIELDS_TUXSM;
8379 }
8380
8381 /*
8382 	 * We don't have to write a context descriptor for every packet on
8383 	 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354,
8384 	 * I210 and I211; writing one once per Tx queue is enough for these
8385 	 * controllers.
8386 	 * Writing a context descriptor for every packet adds overhead,
8387 	 * but it does not cause problems.
8388 */
8389 /* Fill in the context descriptor. */
8390 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
8391 htole32(vl_len);
8392 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
8393 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
8394 htole32(cmdc);
8395 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
8396 htole32(mssidx);
8397 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8398 DPRINTF(sc, WM_DEBUG_TX,
8399 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
8400 txq->txq_next, 0, vl_len));
8401 DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
8402 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8403 txs->txs_ndesc++;
8404 }
8405
8406 /*
8407 * wm_nq_start: [ifnet interface function]
8408 *
8409 * Start packet transmission on the interface for NEWQUEUE devices
8410 */
8411 static void
8412 wm_nq_start(struct ifnet *ifp)
8413 {
8414 struct wm_softc *sc = ifp->if_softc;
8415 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8416
8417 #ifdef WM_MPSAFE
8418 KASSERT(if_is_mpsafe(ifp));
8419 #endif
8420 /*
8421 * if_obytes and if_omcasts are added in if_transmit()@if.c.
8422 */
8423
8424 mutex_enter(txq->txq_lock);
8425 if (!txq->txq_stopping)
8426 wm_nq_start_locked(ifp);
8427 mutex_exit(txq->txq_lock);
8428 }
8429
8430 static void
8431 wm_nq_start_locked(struct ifnet *ifp)
8432 {
8433 struct wm_softc *sc = ifp->if_softc;
8434 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8435
8436 wm_nq_send_common_locked(ifp, txq, false);
8437 }
8438
8439 static int
8440 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
8441 {
8442 int qid;
8443 struct wm_softc *sc = ifp->if_softc;
8444 struct wm_txqueue *txq;
8445
8446 qid = wm_select_txqueue(ifp, m);
8447 txq = &sc->sc_queue[qid].wmq_txq;
8448
8449 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8450 m_freem(m);
8451 WM_Q_EVCNT_INCR(txq, pcqdrop);
8452 return ENOBUFS;
8453 }
8454
8455 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
8456 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
8457 if (m->m_flags & M_MCAST)
8458 if_statinc_ref(nsr, if_omcasts);
8459 IF_STAT_PUTREF(ifp);
8460
8461 /*
8462 	 * There are two situations in which this mutex_tryenter() can fail
8463 	 * at run time:
8464 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
8465 	 *     (2) contention with the deferred if_start softint
8466 	 *         (wm_handle_queue())
8467 	 * In case (1), the last packet enqueued on txq->txq_interq is
8468 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
8469 	 * In case (2), the last packet enqueued on txq->txq_interq is also
8470 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck either.
8471 */
8472 if (mutex_tryenter(txq->txq_lock)) {
8473 if (!txq->txq_stopping)
8474 wm_nq_transmit_locked(ifp, txq);
8475 mutex_exit(txq->txq_lock);
8476 }
8477
8478 return 0;
8479 }
8480
8481 static void
8482 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
8483 {
8484
8485 wm_nq_send_common_locked(ifp, txq, true);
8486 }
8487
8488 static void
8489 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
8490 bool is_transmit)
8491 {
8492 struct wm_softc *sc = ifp->if_softc;
8493 struct mbuf *m0;
8494 struct wm_txsoft *txs;
8495 bus_dmamap_t dmamap;
8496 int error, nexttx, lasttx = -1, seg, segs_needed;
8497 bool do_csum, sent;
8498 bool remap = true;
8499
8500 KASSERT(mutex_owned(txq->txq_lock));
8501
8502 if ((ifp->if_flags & IFF_RUNNING) == 0)
8503 return;
8504 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
8505 return;
8506
8507 if (__predict_false(wm_linkdown_discard(txq))) {
8508 do {
8509 if (is_transmit)
8510 m0 = pcq_get(txq->txq_interq);
8511 else
8512 IFQ_DEQUEUE(&ifp->if_snd, m0);
8513 /*
8514 			 * Increment the output packet counter as if the packet
8515 			 * had been sent and then discarded by the link-down PHY.
8516 */
8517 if (m0 != NULL)
8518 if_statinc(ifp, if_opackets);
8519 m_freem(m0);
8520 } while (m0 != NULL);
8521 return;
8522 }
8523
8524 sent = false;
8525
8526 /*
8527 * Loop through the send queue, setting up transmit descriptors
8528 * until we drain the queue, or use up all available transmit
8529 * descriptors.
8530 */
8531 for (;;) {
8532 m0 = NULL;
8533
8534 /* Get a work queue entry. */
8535 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
8536 wm_txeof(txq, UINT_MAX);
8537 if (txq->txq_sfree == 0) {
8538 DPRINTF(sc, WM_DEBUG_TX,
8539 ("%s: TX: no free job descriptors\n",
8540 device_xname(sc->sc_dev)));
8541 WM_Q_EVCNT_INCR(txq, txsstall);
8542 break;
8543 }
8544 }
8545
8546 /* Grab a packet off the queue. */
8547 if (is_transmit)
8548 m0 = pcq_get(txq->txq_interq);
8549 else
8550 IFQ_DEQUEUE(&ifp->if_snd, m0);
8551 if (m0 == NULL)
8552 break;
8553
8554 DPRINTF(sc, WM_DEBUG_TX,
8555 ("%s: TX: have packet to transmit: %p\n",
8556 device_xname(sc->sc_dev), m0));
8557
8558 txs = &txq->txq_soft[txq->txq_snext];
8559 dmamap = txs->txs_dmamap;
8560
8561 /*
8562 * Load the DMA map. If this fails, the packet either
8563 * didn't fit in the allotted number of segments, or we
8564 * were short on resources. For the too-many-segments
8565 * case, we simply report an error and drop the packet,
8566 * since we can't sanely copy a jumbo packet to a single
8567 * buffer.
8568 */
8569 retry:
8570 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
8571 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
8572 if (__predict_false(error)) {
8573 if (error == EFBIG) {
8574 if (remap == true) {
8575 struct mbuf *m;
8576
8577 remap = false;
8578 m = m_defrag(m0, M_NOWAIT);
8579 if (m != NULL) {
8580 WM_Q_EVCNT_INCR(txq, defrag);
8581 m0 = m;
8582 goto retry;
8583 }
8584 }
8585 WM_Q_EVCNT_INCR(txq, toomanyseg);
8586 log(LOG_ERR, "%s: Tx packet consumes too many "
8587 "DMA segments, dropping...\n",
8588 device_xname(sc->sc_dev));
8589 wm_dump_mbuf_chain(sc, m0);
8590 m_freem(m0);
8591 continue;
8592 }
8593 /* Short on resources, just stop for now. */
8594 DPRINTF(sc, WM_DEBUG_TX,
8595 ("%s: TX: dmamap load failed: %d\n",
8596 device_xname(sc->sc_dev), error));
8597 break;
8598 }
8599
8600 segs_needed = dmamap->dm_nsegs;
8601
8602 /*
8603 * Ensure we have enough descriptors free to describe
8604 * the packet. Note, we always reserve one descriptor
8605 * at the end of the ring due to the semantics of the
8606 * TDT register, plus one more in the event we need
8607 * to load offload context.
8608 */
8609 if (segs_needed > txq->txq_free - 2) {
8610 /*
8611 * Not enough free descriptors to transmit this
8612 * packet. We haven't committed anything yet,
8613 * so just unload the DMA map, put the packet
8614 			 * back on the queue, and punt. Notify the upper
8615 * layer that there are no more slots left.
8616 */
8617 DPRINTF(sc, WM_DEBUG_TX,
8618 ("%s: TX: need %d (%d) descriptors, have %d\n",
8619 device_xname(sc->sc_dev), dmamap->dm_nsegs,
8620 segs_needed, txq->txq_free - 1));
8621 txq->txq_flags |= WM_TXQ_NO_SPACE;
8622 bus_dmamap_unload(sc->sc_dmat, dmamap);
8623 WM_Q_EVCNT_INCR(txq, txdstall);
8624 break;
8625 }
8626
8627 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
8628
8629 DPRINTF(sc, WM_DEBUG_TX,
8630 ("%s: TX: packet has %d (%d) DMA segments\n",
8631 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
8632
8633 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
8634
8635 /*
8636 * Store a pointer to the packet so that we can free it
8637 * later.
8638 *
8639 * Initially, we consider the number of descriptors the
8640 		 * packet uses to be the number of DMA segments. This may be
8641 * incremented by 1 if we do checksum offload (a descriptor
8642 * is used to set the checksum context).
8643 */
8644 txs->txs_mbuf = m0;
8645 txs->txs_firstdesc = txq->txq_next;
8646 txs->txs_ndesc = segs_needed;
8647
8648 /* Set up offload parameters for this packet. */
8649 uint32_t cmdlen, fields, dcmdlen;
8650 if (m0->m_pkthdr.csum_flags &
8651 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
8652 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8653 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
8654 wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
8655 &do_csum);
8656 } else {
8657 do_csum = false;
8658 cmdlen = 0;
8659 fields = 0;
8660 }
8661
8662 /* Sync the DMA map. */
8663 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
8664 BUS_DMASYNC_PREWRITE);
8665
8666 /* Initialize the first transmit descriptor. */
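		/*
		 * If no offload was requested, a plain legacy descriptor is
		 * sufficient; otherwise use the advanced data descriptor
		 * format that matches the context descriptor written above.
		 */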
8667 nexttx = txq->txq_next;
8668 if (!do_csum) {
8669 /* Setup a legacy descriptor */
8670 wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
8671 dmamap->dm_segs[0].ds_addr);
8672 txq->txq_descs[nexttx].wtx_cmdlen =
8673 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
8674 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
8675 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
8676 if (vlan_has_tag(m0)) {
8677 txq->txq_descs[nexttx].wtx_cmdlen |=
8678 htole32(WTX_CMD_VLE);
8679 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
8680 htole16(vlan_get_tag(m0));
8681 } else
8682 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
8683
8684 dcmdlen = 0;
8685 } else {
8686 /* Setup an advanced data descriptor */
8687 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
8688 htole64(dmamap->dm_segs[0].ds_addr);
8689 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
8690 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
8691 htole32(dmamap->dm_segs[0].ds_len | cmdlen);
8692 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
8693 htole32(fields);
8694 DPRINTF(sc, WM_DEBUG_TX,
8695 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
8696 device_xname(sc->sc_dev), nexttx,
8697 (uint64_t)dmamap->dm_segs[0].ds_addr));
8698 DPRINTF(sc, WM_DEBUG_TX,
8699 ("\t 0x%08x%08x\n", fields,
8700 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
8701 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
8702 }
8703
8704 lasttx = nexttx;
8705 nexttx = WM_NEXTTX(txq, nexttx);
8706 /*
8707 		 * Fill in the remaining descriptors; the layout is the same
8708 		 * here for the legacy and advanced formats.
8709 */
8710 for (seg = 1; seg < dmamap->dm_nsegs;
8711 seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
8712 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
8713 htole64(dmamap->dm_segs[seg].ds_addr);
8714 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
8715 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
8716 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
8717 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
8718 lasttx = nexttx;
8719
8720 DPRINTF(sc, WM_DEBUG_TX,
8721 ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
8722 device_xname(sc->sc_dev), nexttx,
8723 (uint64_t)dmamap->dm_segs[seg].ds_addr,
8724 dmamap->dm_segs[seg].ds_len));
8725 }
8726
8727 KASSERT(lasttx != -1);
8728
8729 /*
8730 * Set up the command byte on the last descriptor of
8731 * the packet. If we're in the interrupt delay window,
8732 * delay the interrupt.
8733 */
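		/* EOP and RS occupy the same bit positions in both formats. */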
8734 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
8735 (NQTX_CMD_EOP | NQTX_CMD_RS));
8736 txq->txq_descs[lasttx].wtx_cmdlen |=
8737 htole32(WTX_CMD_EOP | WTX_CMD_RS);
8738
8739 txs->txs_lastdesc = lasttx;
8740
8741 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
8742 device_xname(sc->sc_dev),
8743 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
8744
8745 /* Sync the descriptors we're using. */
8746 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
8747 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8748
8749 /* Give the packet to the chip. */
8750 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
8751 sent = true;
8752
8753 DPRINTF(sc, WM_DEBUG_TX,
8754 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
8755
8756 DPRINTF(sc, WM_DEBUG_TX,
8757 ("%s: TX: finished transmitting packet, job %d\n",
8758 device_xname(sc->sc_dev), txq->txq_snext));
8759
8760 /* Advance the tx pointer. */
8761 txq->txq_free -= txs->txs_ndesc;
8762 txq->txq_next = nexttx;
8763
8764 txq->txq_sfree--;
8765 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
8766
8767 /* Pass the packet to any BPF listeners. */
8768 bpf_mtap(ifp, m0, BPF_D_OUT);
8769 }
8770
8771 if (m0 != NULL) {
8772 txq->txq_flags |= WM_TXQ_NO_SPACE;
8773 WM_Q_EVCNT_INCR(txq, descdrop);
8774 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
8775 __func__));
8776 m_freem(m0);
8777 }
8778
8779 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
8780 /* No more slots; notify upper layer. */
8781 txq->txq_flags |= WM_TXQ_NO_SPACE;
8782 }
8783
8784 if (sent) {
8785 /* Set a watchdog timer in case the chip flakes out. */
8786 txq->txq_lastsent = time_uptime;
8787 txq->txq_sending = true;
8788 }
8789 }
8790
8791 static void
8792 wm_deferred_start_locked(struct wm_txqueue *txq)
8793 {
8794 struct wm_softc *sc = txq->txq_sc;
8795 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8796 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
8797 int qid = wmq->wmq_id;
8798
8799 KASSERT(mutex_owned(txq->txq_lock));
8800
8801 if (txq->txq_stopping) {
8802 mutex_exit(txq->txq_lock);
8803 return;
8804 }
8805
8806 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
8807 		/* XXX Needed for ALTQ or single-CPU systems */
8808 if (qid == 0)
8809 wm_nq_start_locked(ifp);
8810 wm_nq_transmit_locked(ifp, txq);
8811 } else {
8812 		/* XXX Needed for ALTQ or single-CPU systems */
8813 if (qid == 0)
8814 wm_start_locked(ifp);
8815 wm_transmit_locked(ifp, txq);
8816 }
8817 }
8818
8819 /* Interrupt */
8820
8821 /*
8822 * wm_txeof:
8823 *
8824 * Helper; handle transmit interrupts.
8825 */
8826 static bool
8827 wm_txeof(struct wm_txqueue *txq, u_int limit)
8828 {
8829 struct wm_softc *sc = txq->txq_sc;
8830 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8831 struct wm_txsoft *txs;
8832 int count = 0;
8833 int i;
8834 uint8_t status;
8835 bool more = false;
8836
8837 KASSERT(mutex_owned(txq->txq_lock));
8838
8839 if (txq->txq_stopping)
8840 return false;
8841
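	/*
	 * Clear the no-space flag; reclaiming descriptors below may make
	 * room for the transmit path again.
	 */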
8842 txq->txq_flags &= ~WM_TXQ_NO_SPACE;
8843
8844 /*
8845 * Go through the Tx list and free mbufs for those
8846 * frames which have been transmitted.
8847 */
8848 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
8849 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
8850 if (limit-- == 0) {
8851 more = true;
8852 DPRINTF(sc, WM_DEBUG_TX,
8853 ("%s: TX: loop limited, job %d is not processed\n",
8854 device_xname(sc->sc_dev), i));
8855 break;
8856 }
8857
8858 txs = &txq->txq_soft[i];
8859
8860 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
8861 device_xname(sc->sc_dev), i));
8862
8863 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
8864 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
8865
8866 status =
8867 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
8868 if ((status & WTX_ST_DD) == 0) {
8869 wm_cdtxsync(txq, txs->txs_lastdesc, 1,
8870 BUS_DMASYNC_PREREAD);
8871 break;
8872 }
8873
8874 count++;
8875 DPRINTF(sc, WM_DEBUG_TX,
8876 ("%s: TX: job %d done: descs %d..%d\n",
8877 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
8878 txs->txs_lastdesc));
8879
8880 /*
8881 * XXX We should probably be using the statistics
8882 * XXX registers, but I don't know if they exist
8883 * XXX on chips before the i82544.
8884 */
8885
8886 #ifdef WM_EVENT_COUNTERS
8887 if (status & WTX_ST_TU)
8888 WM_Q_EVCNT_INCR(txq, underrun);
8889 #endif /* WM_EVENT_COUNTERS */
8890
8891 /*
8892 		 * The documentation for the 82574 and newer says the status
8893 		 * field has neither an EC (Excessive Collision) bit nor an LC
8894 		 * (Late Collision) bit (both are reserved). See the "PCIe GbE
8895 		 * Controller Open Source Software Developer's Manual", the
8896 		 * 82574 datasheet and newer.
8897 		 *
8898 		 * XXX The LC bit has been seen set on an I218 even though the
8899 		 * media was full duplex, so the bit might have another meaning.
8900 */
8901
8902 if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
8903 && ((sc->sc_type < WM_T_82574)
8904 || (sc->sc_type == WM_T_80003))) {
8905 if_statinc(ifp, if_oerrors);
8906 if (status & WTX_ST_LC)
8907 log(LOG_WARNING, "%s: late collision\n",
8908 device_xname(sc->sc_dev));
8909 else if (status & WTX_ST_EC) {
8910 if_statadd(ifp, if_collisions,
8911 TX_COLLISION_THRESHOLD + 1);
8912 log(LOG_WARNING, "%s: excessive collisions\n",
8913 device_xname(sc->sc_dev));
8914 }
8915 } else
8916 if_statinc(ifp, if_opackets);
8917
8918 txq->txq_packets++;
8919 txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
8920
8921 txq->txq_free += txs->txs_ndesc;
8922 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
8923 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
8924 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
8925 m_freem(txs->txs_mbuf);
8926 txs->txs_mbuf = NULL;
8927 }
8928
8929 /* Update the dirty transmit buffer pointer. */
8930 txq->txq_sdirty = i;
8931 DPRINTF(sc, WM_DEBUG_TX,
8932 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
8933
8934 if (count != 0)
8935 rnd_add_uint32(&sc->rnd_source, count);
8936
8937 /*
8938 * If there are no more pending transmissions, cancel the watchdog
8939 * timer.
8940 */
8941 if (txq->txq_sfree == WM_TXQUEUELEN(txq))
8942 txq->txq_sending = false;
8943
8944 return more;
8945 }
8946
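/*
 * The following inline helpers hide the differences between the three
 * receive descriptor formats: the legacy format, the 82574 extended
 * format and the NEWQUEUE (82575 and later) advanced format.
 */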
8947 static inline uint32_t
8948 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
8949 {
8950 struct wm_softc *sc = rxq->rxq_sc;
8951
8952 if (sc->sc_type == WM_T_82574)
8953 return EXTRXC_STATUS(
8954 le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
8955 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8956 return NQRXC_STATUS(
8957 le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
8958 else
8959 return rxq->rxq_descs[idx].wrx_status;
8960 }
8961
8962 static inline uint32_t
8963 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
8964 {
8965 struct wm_softc *sc = rxq->rxq_sc;
8966
8967 if (sc->sc_type == WM_T_82574)
8968 return EXTRXC_ERROR(
8969 le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
8970 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8971 return NQRXC_ERROR(
8972 le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
8973 else
8974 return rxq->rxq_descs[idx].wrx_errors;
8975 }
8976
8977 static inline uint16_t
8978 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
8979 {
8980 struct wm_softc *sc = rxq->rxq_sc;
8981
8982 if (sc->sc_type == WM_T_82574)
8983 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
8984 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8985 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
8986 else
8987 return rxq->rxq_descs[idx].wrx_special;
8988 }
8989
8990 static inline int
8991 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
8992 {
8993 struct wm_softc *sc = rxq->rxq_sc;
8994
8995 if (sc->sc_type == WM_T_82574)
8996 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
8997 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8998 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
8999 else
9000 return rxq->rxq_descs[idx].wrx_len;
9001 }
9002
9003 #ifdef WM_DEBUG
9004 static inline uint32_t
9005 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
9006 {
9007 struct wm_softc *sc = rxq->rxq_sc;
9008
9009 if (sc->sc_type == WM_T_82574)
9010 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
9011 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9012 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
9013 else
9014 return 0;
9015 }
9016
9017 static inline uint8_t
9018 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
9019 {
9020 struct wm_softc *sc = rxq->rxq_sc;
9021
9022 if (sc->sc_type == WM_T_82574)
9023 return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
9024 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9025 return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
9026 else
9027 return 0;
9028 }
9029 #endif /* WM_DEBUG */
9030
9031 static inline bool
9032 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
9033 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9034 {
9035
9036 if (sc->sc_type == WM_T_82574)
9037 return (status & ext_bit) != 0;
9038 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9039 return (status & nq_bit) != 0;
9040 else
9041 return (status & legacy_bit) != 0;
9042 }
9043
9044 static inline bool
9045 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
9046 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9047 {
9048
9049 if (sc->sc_type == WM_T_82574)
9050 return (error & ext_bit) != 0;
9051 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9052 return (error & nq_bit) != 0;
9053 else
9054 return (error & legacy_bit) != 0;
9055 }
9056
9057 static inline bool
9058 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
9059 {
9060
9061 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
9062 WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
9063 return true;
9064 else
9065 return false;
9066 }
9067
9068 static inline bool
9069 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
9070 {
9071 struct wm_softc *sc = rxq->rxq_sc;
9072
9073 /* XXX missing error bit for newqueue? */
9074 if (wm_rxdesc_is_set_error(sc, errors,
9075 WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
9076 EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
9077 | EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
9078 NQRXC_ERROR_RXE)) {
9079 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
9080 EXTRXC_ERROR_SE, 0))
9081 log(LOG_WARNING, "%s: symbol error\n",
9082 device_xname(sc->sc_dev));
9083 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
9084 EXTRXC_ERROR_SEQ, 0))
9085 log(LOG_WARNING, "%s: receive sequence error\n",
9086 device_xname(sc->sc_dev));
9087 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
9088 EXTRXC_ERROR_CE, 0))
9089 log(LOG_WARNING, "%s: CRC error\n",
9090 device_xname(sc->sc_dev));
9091 return true;
9092 }
9093
9094 return false;
9095 }
9096
9097 static inline bool
9098 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
9099 {
9100 struct wm_softc *sc = rxq->rxq_sc;
9101
9102 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
9103 NQRXC_STATUS_DD)) {
9104 /* We have processed all of the receive descriptors. */
9105 wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
9106 return false;
9107 }
9108
9109 return true;
9110 }
9111
9112 static inline bool
9113 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
9114 uint16_t vlantag, struct mbuf *m)
9115 {
9116
9117 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
9118 WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
9119 vlan_set_tag(m, le16toh(vlantag));
9120 }
9121
9122 return true;
9123 }
9124
9125 static inline void
9126 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
9127 uint32_t errors, struct mbuf *m)
9128 {
9129 struct wm_softc *sc = rxq->rxq_sc;
9130
9131 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
9132 if (wm_rxdesc_is_set_status(sc, status,
9133 WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
9134 WM_Q_EVCNT_INCR(rxq, ipsum);
9135 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
9136 if (wm_rxdesc_is_set_error(sc, errors,
9137 WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
9138 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
9139 }
9140 if (wm_rxdesc_is_set_status(sc, status,
9141 WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
9142 /*
9143 * Note: we don't know if this was TCP or UDP,
9144 * so we just set both bits, and expect the
9145 * upper layers to deal.
9146 */
9147 WM_Q_EVCNT_INCR(rxq, tusum);
9148 m->m_pkthdr.csum_flags |=
9149 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9150 M_CSUM_TCPv6 | M_CSUM_UDPv6;
9151 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
9152 EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
9153 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
9154 }
9155 }
9156 }
9157
9158 /*
9159 * wm_rxeof:
9160 *
9161 * Helper; handle receive interrupts.
9162 */
9163 static bool
9164 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
9165 {
9166 struct wm_softc *sc = rxq->rxq_sc;
9167 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9168 struct wm_rxsoft *rxs;
9169 struct mbuf *m;
9170 int i, len;
9171 int count = 0;
9172 uint32_t status, errors;
9173 uint16_t vlantag;
9174 bool more = false;
9175
9176 KASSERT(mutex_owned(rxq->rxq_lock));
9177
9178 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
9179 if (limit-- == 0) {
9180 more = true;
9181 DPRINTF(sc, WM_DEBUG_RX,
9182 ("%s: RX: loop limited, descriptor %d is not processed\n",
9183 device_xname(sc->sc_dev), i));
9184 break;
9185 }
9186
9187 rxs = &rxq->rxq_soft[i];
9188
9189 DPRINTF(sc, WM_DEBUG_RX,
9190 ("%s: RX: checking descriptor %d\n",
9191 device_xname(sc->sc_dev), i));
9192 wm_cdrxsync(rxq, i,
9193 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9194
9195 status = wm_rxdesc_get_status(rxq, i);
9196 errors = wm_rxdesc_get_errors(rxq, i);
9197 len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
9198 vlantag = wm_rxdesc_get_vlantag(rxq, i);
9199 #ifdef WM_DEBUG
9200 uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
9201 uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
9202 #endif
9203
9204 if (!wm_rxdesc_dd(rxq, i, status)) {
9205 break;
9206 }
9207
9208 count++;
9209 if (__predict_false(rxq->rxq_discard)) {
9210 DPRINTF(sc, WM_DEBUG_RX,
9211 ("%s: RX: discarding contents of descriptor %d\n",
9212 device_xname(sc->sc_dev), i));
9213 wm_init_rxdesc(rxq, i);
9214 if (wm_rxdesc_is_eop(rxq, status)) {
9215 /* Reset our state. */
9216 DPRINTF(sc, WM_DEBUG_RX,
9217 ("%s: RX: resetting rxdiscard -> 0\n",
9218 device_xname(sc->sc_dev)));
9219 rxq->rxq_discard = 0;
9220 }
9221 continue;
9222 }
9223
9224 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9225 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
9226
9227 m = rxs->rxs_mbuf;
9228
9229 /*
9230 * Add a new receive buffer to the ring, unless of
9231 * course the length is zero. Treat the latter as a
9232 * failed mapping.
9233 */
9234 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
9235 /*
9236 * Failed, throw away what we've done so
9237 * far, and discard the rest of the packet.
9238 */
9239 if_statinc(ifp, if_ierrors);
9240 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9241 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
9242 wm_init_rxdesc(rxq, i);
9243 if (!wm_rxdesc_is_eop(rxq, status))
9244 rxq->rxq_discard = 1;
9245 if (rxq->rxq_head != NULL)
9246 m_freem(rxq->rxq_head);
9247 WM_RXCHAIN_RESET(rxq);
9248 DPRINTF(sc, WM_DEBUG_RX,
9249 ("%s: RX: Rx buffer allocation failed, "
9250 "dropping packet%s\n", device_xname(sc->sc_dev),
9251 rxq->rxq_discard ? " (discard)" : ""));
9252 continue;
9253 }
9254
9255 m->m_len = len;
9256 rxq->rxq_len += len;
9257 DPRINTF(sc, WM_DEBUG_RX,
9258 ("%s: RX: buffer at %p len %d\n",
9259 device_xname(sc->sc_dev), m->m_data, len));
9260
9261 /* If this is not the end of the packet, keep looking. */
9262 if (!wm_rxdesc_is_eop(rxq, status)) {
9263 WM_RXCHAIN_LINK(rxq, m);
9264 DPRINTF(sc, WM_DEBUG_RX,
9265 ("%s: RX: not yet EOP, rxlen -> %d\n",
9266 device_xname(sc->sc_dev), rxq->rxq_len));
9267 continue;
9268 }
9269
9270 		/*
9271 		 * Okay, we have the entire packet now. The chip is configured
9272 		 * to include the FCS except on I35[04] and I21[01] (not all
9273 		 * chips can be configured to strip it), so we need to trim it.
9274 		 * Those chips have an erratum where the RCTL_SECRC bit in the
9275 		 * RCTL register is always set, so we don't trim the FCS for
9276 		 * them. PCH2 and newer chips also don't include the FCS when
9277 		 * jumbo frames are used, to work around an erratum.
9278 		 * We may need to adjust the length of the previous mbuf in the
9279 		 * chain if the current mbuf is too short.
9280 		 */
9281 if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
9282 if (m->m_len < ETHER_CRC_LEN) {
9283 rxq->rxq_tail->m_len
9284 -= (ETHER_CRC_LEN - m->m_len);
9285 m->m_len = 0;
9286 } else
9287 m->m_len -= ETHER_CRC_LEN;
9288 len = rxq->rxq_len - ETHER_CRC_LEN;
9289 } else
9290 len = rxq->rxq_len;
9291
9292 WM_RXCHAIN_LINK(rxq, m);
9293
9294 *rxq->rxq_tailp = NULL;
9295 m = rxq->rxq_head;
9296
9297 WM_RXCHAIN_RESET(rxq);
9298
9299 DPRINTF(sc, WM_DEBUG_RX,
9300 ("%s: RX: have entire packet, len -> %d\n",
9301 device_xname(sc->sc_dev), len));
9302
9303 /* If an error occurred, update stats and drop the packet. */
9304 if (wm_rxdesc_has_errors(rxq, errors)) {
9305 m_freem(m);
9306 continue;
9307 }
9308
9309 /* No errors. Receive the packet. */
9310 m_set_rcvif(m, ifp);
9311 m->m_pkthdr.len = len;
9312 		/*
9313 		 * TODO
9314 		 * We should save the rsshash and rsstype in this mbuf.
9315 		 */
9316 DPRINTF(sc, WM_DEBUG_RX,
9317 ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
9318 device_xname(sc->sc_dev), rsstype, rsshash));
9319
9320 /*
9321 * If VLANs are enabled, VLAN packets have been unwrapped
9322 * for us. Associate the tag with the packet.
9323 */
9324 if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
9325 continue;
9326
9327 /* Set up checksum info for this packet. */
9328 wm_rxdesc_ensure_checksum(rxq, status, errors, m);
9329
9330 rxq->rxq_packets++;
9331 rxq->rxq_bytes += len;
9332 /* Pass it on. */
9333 if_percpuq_enqueue(sc->sc_ipq, m);
9334
9335 if (rxq->rxq_stopping)
9336 break;
9337 }
9338 rxq->rxq_ptr = i;
9339
9340 if (count != 0)
9341 rnd_add_uint32(&sc->rnd_source, count);
9342
9343 DPRINTF(sc, WM_DEBUG_RX,
9344 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
9345
9346 return more;
9347 }
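/*
 * Illustrative sketch only (kept disabled): how callers are expected to
 * use the "more" return value of wm_rxeof().  It processes at most
 * "limit" descriptors, and when it returns true the caller reschedules
 * the queue handler instead of re-enabling the queue interrupt; see
 * wm_txrxintr_msix() and wm_handle_queue() below for the real code.
 */
#if 0
	rxmore = wm_rxeof(rxq, rxlimit);
	if (rxmore)
		wm_sched_handle_queue(sc, wmq);	/* More work: defer it. */
	else
		wm_txrxintr_enable(wmq);	/* Done: unmask the interrupt. */
#endif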
9348
9349 /*
9350 * wm_linkintr_gmii:
9351 *
9352 * Helper; handle link interrupts for GMII.
9353 */
9354 static void
9355 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
9356 {
9357 device_t dev = sc->sc_dev;
9358 uint32_t status, reg;
9359 bool link;
9360 int rv;
9361
9362 KASSERT(WM_CORE_LOCKED(sc));
9363
9364 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
9365 __func__));
9366
9367 if ((icr & ICR_LSC) == 0) {
9368 if (icr & ICR_RXSEQ)
9369 DPRINTF(sc, WM_DEBUG_LINK,
9370 ("%s: LINK Receive sequence error\n",
9371 device_xname(dev)));
9372 return;
9373 }
9374
9375 /* Link status changed */
9376 status = CSR_READ(sc, WMREG_STATUS);
9377 link = status & STATUS_LU;
9378 if (link) {
9379 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
9380 device_xname(dev),
9381 (status & STATUS_FD) ? "FDX" : "HDX"));
9382 if (wm_phy_need_linkdown_discard(sc))
9383 wm_clear_linkdown_discard(sc);
9384 } else {
9385 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9386 device_xname(dev)));
9387 if (wm_phy_need_linkdown_discard(sc))
9388 wm_set_linkdown_discard(sc);
9389 }
9390 if ((sc->sc_type == WM_T_ICH8) && (link == false))
9391 wm_gig_downshift_workaround_ich8lan(sc);
9392
9393 if ((sc->sc_type == WM_T_ICH8)
9394 && (sc->sc_phytype == WMPHY_IGP_3)) {
9395 wm_kmrn_lock_loss_workaround_ich8lan(sc);
9396 }
9397 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
9398 device_xname(dev)));
9399 mii_pollstat(&sc->sc_mii);
9400 if (sc->sc_type == WM_T_82543) {
9401 int miistatus, active;
9402
9403 /*
9404 * With 82543, we need to force speed and
9405 * duplex on the MAC equal to what the PHY
9406 * speed and duplex configuration is.
9407 */
9408 miistatus = sc->sc_mii.mii_media_status;
9409
9410 if (miistatus & IFM_ACTIVE) {
9411 active = sc->sc_mii.mii_media_active;
9412 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
9413 switch (IFM_SUBTYPE(active)) {
9414 case IFM_10_T:
9415 sc->sc_ctrl |= CTRL_SPEED_10;
9416 break;
9417 case IFM_100_TX:
9418 sc->sc_ctrl |= CTRL_SPEED_100;
9419 break;
9420 case IFM_1000_T:
9421 sc->sc_ctrl |= CTRL_SPEED_1000;
9422 break;
9423 default:
9424 /*
9425 * Fiber?
9426 				 * Should not enter here.
9427 */
9428 device_printf(dev, "unknown media (%x)\n",
9429 active);
9430 break;
9431 }
9432 if (active & IFM_FDX)
9433 sc->sc_ctrl |= CTRL_FD;
9434 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9435 }
9436 } else if (sc->sc_type == WM_T_PCH) {
9437 wm_k1_gig_workaround_hv(sc,
9438 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
9439 }
9440
9441 /*
9442 * When connected at 10Mbps half-duplex, some parts are excessively
9443 * aggressive resulting in many collisions. To avoid this, increase
9444 * the IPG and reduce Rx latency in the PHY.
9445 */
9446 if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
9447 && link) {
9448 uint32_t tipg_reg;
9449 uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
9450 bool fdx;
9451 uint16_t emi_addr, emi_val;
9452
9453 tipg_reg = CSR_READ(sc, WMREG_TIPG);
9454 tipg_reg &= ~TIPG_IPGT_MASK;
9455 fdx = status & STATUS_FD;
9456
9457 if (!fdx && (speed == STATUS_SPEED_10)) {
9458 tipg_reg |= 0xff;
9459 /* Reduce Rx latency in analog PHY */
9460 emi_val = 0;
9461 } else if ((sc->sc_type >= WM_T_PCH_SPT) &&
9462 fdx && speed != STATUS_SPEED_1000) {
9463 tipg_reg |= 0xc;
9464 emi_val = 1;
9465 } else {
9466 /* Roll back the default values */
9467 tipg_reg |= 0x08;
9468 emi_val = 1;
9469 }
9470
9471 CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
9472
9473 rv = sc->phy.acquire(sc);
9474 if (rv)
9475 return;
9476
9477 if (sc->sc_type == WM_T_PCH2)
9478 emi_addr = I82579_RX_CONFIG;
9479 else
9480 emi_addr = I217_RX_CONFIG;
9481 rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
9482
9483 if (sc->sc_type >= WM_T_PCH_LPT) {
9484 uint16_t phy_reg;
9485
9486 sc->phy.readreg_locked(dev, 2,
9487 I217_PLL_CLOCK_GATE_REG, &phy_reg);
9488 phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
9489 if (speed == STATUS_SPEED_100
9490 || speed == STATUS_SPEED_10)
9491 phy_reg |= 0x3e8;
9492 else
9493 phy_reg |= 0xfa;
9494 sc->phy.writereg_locked(dev, 2,
9495 I217_PLL_CLOCK_GATE_REG, phy_reg);
9496
9497 if (speed == STATUS_SPEED_1000) {
9498 sc->phy.readreg_locked(dev, 2,
9499 HV_PM_CTRL, &phy_reg);
9500
9501 phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
9502
9503 sc->phy.writereg_locked(dev, 2,
9504 HV_PM_CTRL, phy_reg);
9505 }
9506 }
9507 sc->phy.release(sc);
9508
9509 if (rv)
9510 return;
9511
9512 if (sc->sc_type >= WM_T_PCH_SPT) {
9513 uint16_t data, ptr_gap;
9514
9515 if (speed == STATUS_SPEED_1000) {
9516 rv = sc->phy.acquire(sc);
9517 if (rv)
9518 return;
9519
9520 rv = sc->phy.readreg_locked(dev, 2,
9521 I82579_UNKNOWN1, &data);
9522 if (rv) {
9523 sc->phy.release(sc);
9524 return;
9525 }
9526
9527 ptr_gap = (data & (0x3ff << 2)) >> 2;
9528 if (ptr_gap < 0x18) {
9529 data &= ~(0x3ff << 2);
9530 data |= (0x18 << 2);
9531 rv = sc->phy.writereg_locked(dev,
9532 2, I82579_UNKNOWN1, data);
9533 }
9534 sc->phy.release(sc);
9535 if (rv)
9536 return;
9537 } else {
9538 rv = sc->phy.acquire(sc);
9539 if (rv)
9540 return;
9541
9542 rv = sc->phy.writereg_locked(dev, 2,
9543 I82579_UNKNOWN1, 0xc023);
9544 sc->phy.release(sc);
9545 if (rv)
9546 return;
9547
9548 }
9549 }
9550 }
9551
9552 /*
9553 * I217 Packet Loss issue:
9554 * ensure that FEXTNVM4 Beacon Duration is set correctly
9555 * on power up.
9556 * Set the Beacon Duration for I217 to 8 usec
9557 */
9558 if (sc->sc_type >= WM_T_PCH_LPT) {
9559 reg = CSR_READ(sc, WMREG_FEXTNVM4);
9560 reg &= ~FEXTNVM4_BEACON_DURATION;
9561 reg |= FEXTNVM4_BEACON_DURATION_8US;
9562 CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
9563 }
9564
9565 /* Work-around I218 hang issue */
9566 if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
9567 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
9568 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
9569 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
9570 wm_k1_workaround_lpt_lp(sc, link);
9571
9572 if (sc->sc_type >= WM_T_PCH_LPT) {
9573 /*
9574 * Set platform power management values for Latency
9575 * Tolerance Reporting (LTR)
9576 */
9577 wm_platform_pm_pch_lpt(sc,
9578 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
9579 }
9580
9581 /* Clear link partner's EEE ability */
9582 sc->eee_lp_ability = 0;
9583
9584 /* FEXTNVM6 K1-off workaround */
9585 if (sc->sc_type == WM_T_PCH_SPT) {
9586 reg = CSR_READ(sc, WMREG_FEXTNVM6);
9587 if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
9588 reg |= FEXTNVM6_K1_OFF_ENABLE;
9589 else
9590 reg &= ~FEXTNVM6_K1_OFF_ENABLE;
9591 CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
9592 }
9593
9594 if (!link)
9595 return;
9596
9597 switch (sc->sc_type) {
9598 case WM_T_PCH2:
9599 wm_k1_workaround_lv(sc);
9600 /* FALLTHROUGH */
9601 case WM_T_PCH:
9602 if (sc->sc_phytype == WMPHY_82578)
9603 wm_link_stall_workaround_hv(sc);
9604 break;
9605 default:
9606 break;
9607 }
9608
9609 /* Enable/Disable EEE after link up */
9610 if (sc->sc_phytype > WMPHY_82579)
9611 wm_set_eee_pchlan(sc);
9612 }
9613
9614 /*
9615 * wm_linkintr_tbi:
9616 *
9617 * Helper; handle link interrupts for TBI mode.
9618 */
9619 static void
9620 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
9621 {
9622 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9623 uint32_t status;
9624
9625 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
9626 __func__));
9627
9628 status = CSR_READ(sc, WMREG_STATUS);
9629 if (icr & ICR_LSC) {
9630 wm_check_for_link(sc);
9631 if (status & STATUS_LU) {
9632 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
9633 device_xname(sc->sc_dev),
9634 (status & STATUS_FD) ? "FDX" : "HDX"));
9635 /*
9636 * NOTE: CTRL will update TFCE and RFCE automatically,
9637 * so we should update sc->sc_ctrl
9638 */
9639
9640 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9641 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9642 sc->sc_fcrtl &= ~FCRTL_XONE;
9643 if (status & STATUS_FD)
9644 sc->sc_tctl |=
9645 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9646 else
9647 sc->sc_tctl |=
9648 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9649 if (sc->sc_ctrl & CTRL_TFCE)
9650 sc->sc_fcrtl |= FCRTL_XONE;
9651 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9652 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9653 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
9654 sc->sc_tbi_linkup = 1;
9655 if_link_state_change(ifp, LINK_STATE_UP);
9656 } else {
9657 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9658 device_xname(sc->sc_dev)));
9659 sc->sc_tbi_linkup = 0;
9660 if_link_state_change(ifp, LINK_STATE_DOWN);
9661 }
9662 /* Update LED */
9663 wm_tbi_serdes_set_linkled(sc);
9664 } else if (icr & ICR_RXSEQ)
9665 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
9666 device_xname(sc->sc_dev)));
9667 }
9668
9669 /*
9670 * wm_linkintr_serdes:
9671 *
9672  *	Helper; handle link interrupts for SERDES mode.
9673 */
9674 static void
9675 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
9676 {
9677 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9678 struct mii_data *mii = &sc->sc_mii;
9679 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9680 uint32_t pcs_adv, pcs_lpab, reg;
9681
9682 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
9683 __func__));
9684
9685 if (icr & ICR_LSC) {
9686 /* Check PCS */
9687 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9688 if ((reg & PCS_LSTS_LINKOK) != 0) {
9689 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
9690 device_xname(sc->sc_dev)));
9691 mii->mii_media_status |= IFM_ACTIVE;
9692 sc->sc_tbi_linkup = 1;
9693 if_link_state_change(ifp, LINK_STATE_UP);
9694 } else {
9695 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9696 device_xname(sc->sc_dev)));
9697 mii->mii_media_status |= IFM_NONE;
9698 sc->sc_tbi_linkup = 0;
9699 if_link_state_change(ifp, LINK_STATE_DOWN);
9700 wm_tbi_serdes_set_linkled(sc);
9701 return;
9702 }
9703 mii->mii_media_active |= IFM_1000_SX;
9704 if ((reg & PCS_LSTS_FDX) != 0)
9705 mii->mii_media_active |= IFM_FDX;
9706 else
9707 mii->mii_media_active |= IFM_HDX;
9708 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
9709 /* Check flow */
9710 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9711 if ((reg & PCS_LSTS_AN_COMP) == 0) {
9712 DPRINTF(sc, WM_DEBUG_LINK,
9713 ("XXX LINKOK but not ACOMP\n"));
9714 return;
9715 }
9716 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
9717 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
9718 DPRINTF(sc, WM_DEBUG_LINK,
9719 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
9720 if ((pcs_adv & TXCW_SYM_PAUSE)
9721 && (pcs_lpab & TXCW_SYM_PAUSE)) {
9722 mii->mii_media_active |= IFM_FLOW
9723 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
9724 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
9725 && (pcs_adv & TXCW_ASYM_PAUSE)
9726 && (pcs_lpab & TXCW_SYM_PAUSE)
9727 && (pcs_lpab & TXCW_ASYM_PAUSE))
9728 mii->mii_media_active |= IFM_FLOW
9729 | IFM_ETH_TXPAUSE;
9730 else if ((pcs_adv & TXCW_SYM_PAUSE)
9731 && (pcs_adv & TXCW_ASYM_PAUSE)
9732 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
9733 && (pcs_lpab & TXCW_ASYM_PAUSE))
9734 mii->mii_media_active |= IFM_FLOW
9735 | IFM_ETH_RXPAUSE;
9736 }
9737 /* Update LED */
9738 wm_tbi_serdes_set_linkled(sc);
9739 } else
9740 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
9741 device_xname(sc->sc_dev)));
9742 }
9743
9744 /*
9745 * wm_linkintr:
9746 *
9747 * Helper; handle link interrupts.
9748 */
9749 static void
9750 wm_linkintr(struct wm_softc *sc, uint32_t icr)
9751 {
9752
9753 KASSERT(WM_CORE_LOCKED(sc));
9754
9755 if (sc->sc_flags & WM_F_HAS_MII)
9756 wm_linkintr_gmii(sc, icr);
9757 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9758 && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
9759 wm_linkintr_serdes(sc, icr);
9760 else
9761 wm_linkintr_tbi(sc, icr);
9762 }
9763
9764
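/*
 * wm_sched_handle_queue:
 *
 *	Defer Tx/Rx processing for a queue: either enqueue the work on the
 *	per-device workqueue (bound to the current CPU) or schedule the
 *	queue's softint, depending on wmq_txrx_use_workqueue.
 */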
9765 static inline void
9766 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
9767 {
9768
9769 if (wmq->wmq_txrx_use_workqueue)
9770 workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
9771 else
9772 softint_schedule(wmq->wmq_si);
9773 }
9774
9775 /*
9776 * wm_intr_legacy:
9777 *
9778 * Interrupt service routine for INTx and MSI.
9779 */
9780 static int
9781 wm_intr_legacy(void *arg)
9782 {
9783 struct wm_softc *sc = arg;
9784 struct wm_queue *wmq = &sc->sc_queue[0];
9785 struct wm_txqueue *txq = &wmq->wmq_txq;
9786 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9787 uint32_t icr, rndval = 0;
9788 int handled = 0;
9789
9790 while (1 /* CONSTCOND */) {
9791 icr = CSR_READ(sc, WMREG_ICR);
9792 if ((icr & sc->sc_icr) == 0)
9793 break;
9794 if (handled == 0)
9795 DPRINTF(sc, WM_DEBUG_TX,
9796 ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
9797 if (rndval == 0)
9798 rndval = icr;
9799
9800 mutex_enter(rxq->rxq_lock);
9801
9802 if (rxq->rxq_stopping) {
9803 mutex_exit(rxq->rxq_lock);
9804 break;
9805 }
9806
9807 handled = 1;
9808
9809 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
9810 if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
9811 DPRINTF(sc, WM_DEBUG_RX,
9812 ("%s: RX: got Rx intr 0x%08x\n",
9813 device_xname(sc->sc_dev),
9814 icr & (ICR_RXDMT0 | ICR_RXT0)));
9815 WM_Q_EVCNT_INCR(rxq, intr);
9816 }
9817 #endif
9818 		/*
9819 		 * wm_rxeof() does *not* call upper layer functions directly
9820 		 * because if_percpuq_enqueue() just calls softint_schedule().
9821 		 * So, we can call wm_rxeof() in interrupt context.
9822 		 */
9823 wm_rxeof(rxq, UINT_MAX);
9824
9825 mutex_exit(rxq->rxq_lock);
9826 mutex_enter(txq->txq_lock);
9827
9828 if (txq->txq_stopping) {
9829 mutex_exit(txq->txq_lock);
9830 break;
9831 }
9832
9833 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
9834 if (icr & ICR_TXDW) {
9835 DPRINTF(sc, WM_DEBUG_TX,
9836 ("%s: TX: got TXDW interrupt\n",
9837 device_xname(sc->sc_dev)));
9838 WM_Q_EVCNT_INCR(txq, txdw);
9839 }
9840 #endif
9841 wm_txeof(txq, UINT_MAX);
9842
9843 mutex_exit(txq->txq_lock);
9844 WM_CORE_LOCK(sc);
9845
9846 if (sc->sc_core_stopping) {
9847 WM_CORE_UNLOCK(sc);
9848 break;
9849 }
9850
9851 if (icr & (ICR_LSC | ICR_RXSEQ)) {
9852 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
9853 wm_linkintr(sc, icr);
9854 }
9855 if ((icr & ICR_GPI(0)) != 0)
9856 device_printf(sc->sc_dev, "got module interrupt\n");
9857
9858 WM_CORE_UNLOCK(sc);
9859
9860 if (icr & ICR_RXO) {
9861 #if defined(WM_DEBUG)
9862 log(LOG_WARNING, "%s: Receive overrun\n",
9863 device_xname(sc->sc_dev));
9864 #endif /* defined(WM_DEBUG) */
9865 }
9866 }
9867
9868 rnd_add_uint32(&sc->rnd_source, rndval);
9869
9870 if (handled) {
9871 /* Try to get more packets going. */
9872 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
9873 wm_sched_handle_queue(sc, wmq);
9874 }
9875
9876 return handled;
9877 }
9878
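/*
 * Mask (below) or unmask (wm_txrxintr_enable()) this queue's Tx/Rx
 * interrupt.  The 82574 uses the legacy IMS/IMC registers with per-queue
 * ICR_TXQ/ICR_RXQ bits, the 82575 uses EIMS/EIMC with EITR_TX_QUEUE and
 * EITR_RX_QUEUE bits, and the other multiqueue chips use EIMS/EIMC with
 * one bit per MSI-X vector.
 */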
9879 static inline void
9880 wm_txrxintr_disable(struct wm_queue *wmq)
9881 {
9882 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
9883
9884 if (sc->sc_type == WM_T_82574)
9885 CSR_WRITE(sc, WMREG_IMC,
9886 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
9887 else if (sc->sc_type == WM_T_82575)
9888 CSR_WRITE(sc, WMREG_EIMC,
9889 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
9890 else
9891 CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
9892 }
9893
9894 static inline void
9895 wm_txrxintr_enable(struct wm_queue *wmq)
9896 {
9897 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
9898
9899 wm_itrs_calculate(sc, wmq);
9900
9901 	/*
9902 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled here.
9903 	 * There is no need to care about which of RXQ(0) and RXQ(1) enables
9904 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled while
9905 	 * each wm_handle_queue(wmq) is running.
9906 	 */
9907 if (sc->sc_type == WM_T_82574)
9908 CSR_WRITE(sc, WMREG_IMS,
9909 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
9910 else if (sc->sc_type == WM_T_82575)
9911 CSR_WRITE(sc, WMREG_EIMS,
9912 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
9913 else
9914 CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
9915 }
9916
9917 static int
9918 wm_txrxintr_msix(void *arg)
9919 {
9920 struct wm_queue *wmq = arg;
9921 struct wm_txqueue *txq = &wmq->wmq_txq;
9922 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9923 struct wm_softc *sc = txq->txq_sc;
9924 u_int txlimit = sc->sc_tx_intr_process_limit;
9925 u_int rxlimit = sc->sc_rx_intr_process_limit;
9926 bool txmore;
9927 bool rxmore;
9928
9929 KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
9930
9931 DPRINTF(sc, WM_DEBUG_TX,
9932 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
9933
9934 wm_txrxintr_disable(wmq);
9935
9936 mutex_enter(txq->txq_lock);
9937
9938 if (txq->txq_stopping) {
9939 mutex_exit(txq->txq_lock);
9940 return 0;
9941 }
9942
9943 WM_Q_EVCNT_INCR(txq, txdw);
9944 txmore = wm_txeof(txq, txlimit);
9945 /* wm_deferred start() is done in wm_handle_queue(). */
9946 mutex_exit(txq->txq_lock);
9947
9948 DPRINTF(sc, WM_DEBUG_RX,
9949 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
9950 mutex_enter(rxq->rxq_lock);
9951
9952 if (rxq->rxq_stopping) {
9953 mutex_exit(rxq->rxq_lock);
9954 return 0;
9955 }
9956
9957 WM_Q_EVCNT_INCR(rxq, intr);
9958 rxmore = wm_rxeof(rxq, rxlimit);
9959 mutex_exit(rxq->rxq_lock);
9960
9961 wm_itrs_writereg(sc, wmq);
9962
9963 if (txmore || rxmore) {
9964 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
9965 wm_sched_handle_queue(sc, wmq);
9966 } else
9967 wm_txrxintr_enable(wmq);
9968
9969 return 1;
9970 }
9971
9972 static void
9973 wm_handle_queue(void *arg)
9974 {
9975 struct wm_queue *wmq = arg;
9976 struct wm_txqueue *txq = &wmq->wmq_txq;
9977 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9978 struct wm_softc *sc = txq->txq_sc;
9979 u_int txlimit = sc->sc_tx_process_limit;
9980 u_int rxlimit = sc->sc_rx_process_limit;
9981 bool txmore;
9982 bool rxmore;
9983
9984 mutex_enter(txq->txq_lock);
9985 if (txq->txq_stopping) {
9986 mutex_exit(txq->txq_lock);
9987 return;
9988 }
9989 txmore = wm_txeof(txq, txlimit);
9990 wm_deferred_start_locked(txq);
9991 mutex_exit(txq->txq_lock);
9992
9993 mutex_enter(rxq->rxq_lock);
9994 if (rxq->rxq_stopping) {
9995 mutex_exit(rxq->rxq_lock);
9996 return;
9997 }
9998 WM_Q_EVCNT_INCR(rxq, defer);
9999 rxmore = wm_rxeof(rxq, rxlimit);
10000 mutex_exit(rxq->rxq_lock);
10001
10002 if (txmore || rxmore) {
10003 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10004 wm_sched_handle_queue(sc, wmq);
10005 } else
10006 wm_txrxintr_enable(wmq);
10007 }
10008
10009 static void
10010 wm_handle_queue_work(struct work *wk, void *context)
10011 {
10012 struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
10013
10014 /*
10015 * "enqueued flag" is not required here.
10016 */
10017 wm_handle_queue(wmq);
10018 }
10019
10020 /*
10021 * wm_linkintr_msix:
10022 *
10023 * Interrupt service routine for link status change for MSI-X.
10024 */
10025 static int
10026 wm_linkintr_msix(void *arg)
10027 {
10028 struct wm_softc *sc = arg;
10029 uint32_t reg;
10030 bool has_rxo;
10031
10032 reg = CSR_READ(sc, WMREG_ICR);
10033 WM_CORE_LOCK(sc);
10034 DPRINTF(sc, WM_DEBUG_LINK,
10035 ("%s: LINK: got link intr. ICR = %08x\n",
10036 device_xname(sc->sc_dev), reg));
10037
10038 if (sc->sc_core_stopping)
10039 goto out;
10040
10041 if ((reg & ICR_LSC) != 0) {
10042 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10043 wm_linkintr(sc, ICR_LSC);
10044 }
10045 if ((reg & ICR_GPI(0)) != 0)
10046 device_printf(sc->sc_dev, "got module interrupt\n");
10047
10048 	/*
10049 	 * XXX 82574 MSI-X mode workaround
10050 	 *
10051 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises an interrupt
10052 	 * on the ICR_OTHER MSI-X vector, and it raises neither the ICR_RXQ(0)
10053 	 * nor the ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
10054 	 * interrupts by writing WMREG_ICS to process receive packets.
10055 	 */
10056 if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
10057 #if defined(WM_DEBUG)
10058 log(LOG_WARNING, "%s: Receive overrun\n",
10059 device_xname(sc->sc_dev));
10060 #endif /* defined(WM_DEBUG) */
10061
10062 has_rxo = true;
10063 		/*
10064 		 * The RXO interrupt rate is very high when the receive traffic
10065 		 * rate is high. We use polling mode for ICR_OTHER, as we do for
10066 		 * the Tx/Rx interrupts. ICR_OTHER will be re-enabled at the end
10067 		 * of wm_txrxintr_msix(), which is kicked by both the ICR_RXQ(0)
10068 		 * and ICR_RXQ(1) interrupts.
10069 		 */
10070 CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
10071
10072 CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
10073 }
10074
10075
10076
10077 out:
10078 WM_CORE_UNLOCK(sc);
10079
10080 if (sc->sc_type == WM_T_82574) {
10081 if (!has_rxo)
10082 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
10083 else
10084 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
10085 } else if (sc->sc_type == WM_T_82575)
10086 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
10087 else
10088 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
10089
10090 return 1;
10091 }
10092
10093 /*
10094 * Media related.
10095 * GMII, SGMII, TBI (and SERDES)
10096 */
10097
10098 /* Common */
10099
10100 /*
10101 * wm_tbi_serdes_set_linkled:
10102 *
10103 * Update the link LED on TBI and SERDES devices.
10104 */
10105 static void
10106 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
10107 {
10108
10109 if (sc->sc_tbi_linkup)
10110 sc->sc_ctrl |= CTRL_SWDPIN(0);
10111 else
10112 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
10113
10114 /* 82540 or newer devices are active low */
10115 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
10116
10117 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10118 }
10119
10120 /* GMII related */
10121
10122 /*
10123 * wm_gmii_reset:
10124 *
10125 * Reset the PHY.
10126 */
10127 static void
10128 wm_gmii_reset(struct wm_softc *sc)
10129 {
10130 uint32_t reg;
10131 int rv;
10132
10133 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
10134 device_xname(sc->sc_dev), __func__));
10135
10136 rv = sc->phy.acquire(sc);
10137 if (rv != 0) {
10138 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10139 __func__);
10140 return;
10141 }
10142
10143 switch (sc->sc_type) {
10144 case WM_T_82542_2_0:
10145 case WM_T_82542_2_1:
10146 /* null */
10147 break;
10148 case WM_T_82543:
10149 /*
10150 * With 82543, we need to force speed and duplex on the MAC
10151 * equal to what the PHY speed and duplex configuration is.
10152 * In addition, we need to perform a hardware reset on the PHY
10153 * to take it out of reset.
10154 */
10155 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
10156 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10157
10158 /* The PHY reset pin is active-low. */
10159 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10160 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
10161 CTRL_EXT_SWDPIN(4));
10162 reg |= CTRL_EXT_SWDPIO(4);
10163
10164 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10165 CSR_WRITE_FLUSH(sc);
10166 delay(10*1000);
10167
10168 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
10169 CSR_WRITE_FLUSH(sc);
10170 delay(150);
10171 #if 0
10172 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
10173 #endif
10174 delay(20*1000); /* XXX extra delay to get PHY ID? */
10175 break;
10176 case WM_T_82544: /* Reset 10000us */
10177 case WM_T_82540:
10178 case WM_T_82545:
10179 case WM_T_82545_3:
10180 case WM_T_82546:
10181 case WM_T_82546_3:
10182 case WM_T_82541:
10183 case WM_T_82541_2:
10184 case WM_T_82547:
10185 case WM_T_82547_2:
10186 case WM_T_82571: /* Reset 100us */
10187 case WM_T_82572:
10188 case WM_T_82573:
10189 case WM_T_82574:
10190 case WM_T_82575:
10191 case WM_T_82576:
10192 case WM_T_82580:
10193 case WM_T_I350:
10194 case WM_T_I354:
10195 case WM_T_I210:
10196 case WM_T_I211:
10197 case WM_T_82583:
10198 case WM_T_80003:
10199 /* Generic reset */
10200 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10201 CSR_WRITE_FLUSH(sc);
10202 delay(20000);
10203 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10204 CSR_WRITE_FLUSH(sc);
10205 delay(20000);
10206
10207 if ((sc->sc_type == WM_T_82541)
10208 || (sc->sc_type == WM_T_82541_2)
10209 || (sc->sc_type == WM_T_82547)
10210 || (sc->sc_type == WM_T_82547_2)) {
10211 			/* Workarounds for IGP are done in igp_reset() */
10212 /* XXX add code to set LED after phy reset */
10213 }
10214 break;
10215 case WM_T_ICH8:
10216 case WM_T_ICH9:
10217 case WM_T_ICH10:
10218 case WM_T_PCH:
10219 case WM_T_PCH2:
10220 case WM_T_PCH_LPT:
10221 case WM_T_PCH_SPT:
10222 case WM_T_PCH_CNP:
10223 /* Generic reset */
10224 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10225 CSR_WRITE_FLUSH(sc);
10226 delay(100);
10227 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10228 CSR_WRITE_FLUSH(sc);
10229 delay(150);
10230 break;
10231 default:
10232 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
10233 __func__);
10234 break;
10235 }
10236
10237 sc->phy.release(sc);
10238
10239 /* get_cfg_done */
10240 wm_get_cfg_done(sc);
10241
10242 /* Extra setup */
10243 switch (sc->sc_type) {
10244 case WM_T_82542_2_0:
10245 case WM_T_82542_2_1:
10246 case WM_T_82543:
10247 case WM_T_82544:
10248 case WM_T_82540:
10249 case WM_T_82545:
10250 case WM_T_82545_3:
10251 case WM_T_82546:
10252 case WM_T_82546_3:
10253 case WM_T_82541_2:
10254 case WM_T_82547_2:
10255 case WM_T_82571:
10256 case WM_T_82572:
10257 case WM_T_82573:
10258 case WM_T_82574:
10259 case WM_T_82583:
10260 case WM_T_82575:
10261 case WM_T_82576:
10262 case WM_T_82580:
10263 case WM_T_I350:
10264 case WM_T_I354:
10265 case WM_T_I210:
10266 case WM_T_I211:
10267 case WM_T_80003:
10268 /* Null */
10269 break;
10270 case WM_T_82541:
10271 case WM_T_82547:
10272 		/* XXX Actively configure the LED after PHY reset */
10273 break;
10274 case WM_T_ICH8:
10275 case WM_T_ICH9:
10276 case WM_T_ICH10:
10277 case WM_T_PCH:
10278 case WM_T_PCH2:
10279 case WM_T_PCH_LPT:
10280 case WM_T_PCH_SPT:
10281 case WM_T_PCH_CNP:
10282 wm_phy_post_reset(sc);
10283 break;
10284 default:
10285 panic("%s: unknown type\n", __func__);
10286 break;
10287 }
10288 }
10289
10290 /*
10291  * Set up sc_phytype and mii_{read|write}reg.
10292  *
10293  *  To identify the PHY type, the correct read/write functions must be
10294  * selected, and to select the correct read/write functions, the PCI ID
10295  * or MAC type is needed without accessing PHY registers.
10296  *
10297  *  On the first call of this function, the PHY ID is not known yet, so
10298  * check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
10299  * so the result might be incorrect.
10300  *
10301  *  In the second call, the PHY OUI and model are used to identify the
10302  * PHY type. It might not be perfect because some entries are missing
10303  * from the comparison, but it is better than the result of the first call.
10304  *
10305  *  If the newly detected result differs from the previous assumption,
10306  * a diagnostic message is printed.
10307  */
10308 static void
10309 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
10310 uint16_t phy_model)
10311 {
10312 device_t dev = sc->sc_dev;
10313 struct mii_data *mii = &sc->sc_mii;
10314 uint16_t new_phytype = WMPHY_UNKNOWN;
10315 uint16_t doubt_phytype = WMPHY_UNKNOWN;
10316 mii_readreg_t new_readreg;
10317 mii_writereg_t new_writereg;
10318 bool dodiag = true;
10319
10320 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
10321 device_xname(sc->sc_dev), __func__));
10322
10323 	/*
10324 	 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is always
10325 	 * incorrect, so don't print diag output on the second call.
10326 	 */
10327 if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
10328 dodiag = false;
10329
10330 if (mii->mii_readreg == NULL) {
10331 /*
10332 * This is the first call of this function. For ICH and PCH
10333 * variants, it's difficult to determine the PHY access method
10334 * by sc_type, so use the PCI product ID for some devices.
10335 */
10336
10337 switch (sc->sc_pcidevid) {
10338 case PCI_PRODUCT_INTEL_PCH_M_LM:
10339 case PCI_PRODUCT_INTEL_PCH_M_LC:
10340 /* 82577 */
10341 new_phytype = WMPHY_82577;
10342 break;
10343 case PCI_PRODUCT_INTEL_PCH_D_DM:
10344 case PCI_PRODUCT_INTEL_PCH_D_DC:
10345 /* 82578 */
10346 new_phytype = WMPHY_82578;
10347 break;
10348 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
10349 case PCI_PRODUCT_INTEL_PCH2_LV_V:
10350 /* 82579 */
10351 new_phytype = WMPHY_82579;
10352 break;
10353 case PCI_PRODUCT_INTEL_82801H_82567V_3:
10354 case PCI_PRODUCT_INTEL_82801I_BM:
10355 case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
10356 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
10357 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
10358 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
10359 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
10360 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
10361 /* ICH8, 9, 10 with 82567 */
10362 new_phytype = WMPHY_BM;
10363 break;
10364 default:
10365 break;
10366 }
10367 } else {
10368 /* It's not the first call. Use PHY OUI and model */
10369 switch (phy_oui) {
10370 case MII_OUI_ATTANSIC: /* XXX ??? */
10371 switch (phy_model) {
10372 case 0x0004: /* XXX */
10373 new_phytype = WMPHY_82578;
10374 break;
10375 default:
10376 break;
10377 }
10378 break;
10379 case MII_OUI_xxMARVELL:
10380 switch (phy_model) {
10381 case MII_MODEL_xxMARVELL_I210:
10382 new_phytype = WMPHY_I210;
10383 break;
10384 case MII_MODEL_xxMARVELL_E1011:
10385 case MII_MODEL_xxMARVELL_E1000_3:
10386 case MII_MODEL_xxMARVELL_E1000_5:
10387 case MII_MODEL_xxMARVELL_E1112:
10388 new_phytype = WMPHY_M88;
10389 break;
10390 case MII_MODEL_xxMARVELL_E1149:
10391 new_phytype = WMPHY_BM;
10392 break;
10393 case MII_MODEL_xxMARVELL_E1111:
10394 case MII_MODEL_xxMARVELL_I347:
10395 case MII_MODEL_xxMARVELL_E1512:
10396 case MII_MODEL_xxMARVELL_E1340M:
10397 case MII_MODEL_xxMARVELL_E1543:
10398 new_phytype = WMPHY_M88;
10399 break;
10400 case MII_MODEL_xxMARVELL_I82563:
10401 new_phytype = WMPHY_GG82563;
10402 break;
10403 default:
10404 break;
10405 }
10406 break;
10407 case MII_OUI_INTEL:
10408 switch (phy_model) {
10409 case MII_MODEL_INTEL_I82577:
10410 new_phytype = WMPHY_82577;
10411 break;
10412 case MII_MODEL_INTEL_I82579:
10413 new_phytype = WMPHY_82579;
10414 break;
10415 case MII_MODEL_INTEL_I217:
10416 new_phytype = WMPHY_I217;
10417 break;
10418 case MII_MODEL_INTEL_I82580:
10419 new_phytype = WMPHY_82580;
10420 break;
10421 case MII_MODEL_INTEL_I350:
10422 new_phytype = WMPHY_I350;
10423 break;
10425 default:
10426 break;
10427 }
10428 break;
10429 case MII_OUI_yyINTEL:
10430 switch (phy_model) {
10431 case MII_MODEL_yyINTEL_I82562G:
10432 case MII_MODEL_yyINTEL_I82562EM:
10433 case MII_MODEL_yyINTEL_I82562ET:
10434 new_phytype = WMPHY_IFE;
10435 break;
10436 case MII_MODEL_yyINTEL_IGP01E1000:
10437 new_phytype = WMPHY_IGP;
10438 break;
10439 case MII_MODEL_yyINTEL_I82566:
10440 new_phytype = WMPHY_IGP_3;
10441 break;
10442 default:
10443 break;
10444 }
10445 break;
10446 default:
10447 break;
10448 }
10449
10450 if (dodiag) {
10451 if (new_phytype == WMPHY_UNKNOWN)
10452 aprint_verbose_dev(dev,
10453 "%s: Unknown PHY model. OUI=%06x, "
10454 "model=%04x\n", __func__, phy_oui,
10455 phy_model);
10456
10457 if ((sc->sc_phytype != WMPHY_UNKNOWN)
10458 && (sc->sc_phytype != new_phytype)) {
10459 aprint_error_dev(dev, "Previously assumed PHY "
10460 "type(%u) was incorrect. PHY type from PHY"
10461 "ID = %u\n", sc->sc_phytype, new_phytype);
10462 }
10463 }
10464 }
10465
10466 /* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
10467 if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
10468 /* SGMII */
10469 new_readreg = wm_sgmii_readreg;
10470 new_writereg = wm_sgmii_writereg;
10471 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
10472 /* BM2 (phyaddr == 1) */
10473 if ((sc->sc_phytype != WMPHY_UNKNOWN)
10474 && (new_phytype != WMPHY_BM)
10475 && (new_phytype != WMPHY_UNKNOWN))
10476 doubt_phytype = new_phytype;
10477 new_phytype = WMPHY_BM;
10478 new_readreg = wm_gmii_bm_readreg;
10479 new_writereg = wm_gmii_bm_writereg;
10480 } else if (sc->sc_type >= WM_T_PCH) {
10481 /* All PCH* use _hv_ */
10482 new_readreg = wm_gmii_hv_readreg;
10483 new_writereg = wm_gmii_hv_writereg;
10484 } else if (sc->sc_type >= WM_T_ICH8) {
10485 /* non-82567 ICH8, 9 and 10 */
10486 new_readreg = wm_gmii_i82544_readreg;
10487 new_writereg = wm_gmii_i82544_writereg;
10488 } else if (sc->sc_type >= WM_T_80003) {
10489 /* 80003 */
10490 if ((sc->sc_phytype != WMPHY_UNKNOWN)
10491 && (new_phytype != WMPHY_GG82563)
10492 && (new_phytype != WMPHY_UNKNOWN))
10493 doubt_phytype = new_phytype;
10494 new_phytype = WMPHY_GG82563;
10495 new_readreg = wm_gmii_i80003_readreg;
10496 new_writereg = wm_gmii_i80003_writereg;
10497 } else if (sc->sc_type >= WM_T_I210) {
10498 /* I210 and I211 */
10499 if ((sc->sc_phytype != WMPHY_UNKNOWN)
10500 && (new_phytype != WMPHY_I210)
10501 && (new_phytype != WMPHY_UNKNOWN))
10502 doubt_phytype = new_phytype;
10503 new_phytype = WMPHY_I210;
10504 new_readreg = wm_gmii_gs40g_readreg;
10505 new_writereg = wm_gmii_gs40g_writereg;
10506 } else if (sc->sc_type >= WM_T_82580) {
10507 /* 82580, I350 and I354 */
10508 new_readreg = wm_gmii_82580_readreg;
10509 new_writereg = wm_gmii_82580_writereg;
10510 } else if (sc->sc_type >= WM_T_82544) {
10511 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
10512 new_readreg = wm_gmii_i82544_readreg;
10513 new_writereg = wm_gmii_i82544_writereg;
10514 } else {
10515 new_readreg = wm_gmii_i82543_readreg;
10516 new_writereg = wm_gmii_i82543_writereg;
10517 }
10518
10519 if (new_phytype == WMPHY_BM) {
10520 /* All BM use _bm_ */
10521 new_readreg = wm_gmii_bm_readreg;
10522 new_writereg = wm_gmii_bm_writereg;
10523 }
10524 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
10525 /* All PCH* use _hv_ */
10526 new_readreg = wm_gmii_hv_readreg;
10527 new_writereg = wm_gmii_hv_writereg;
10528 }
10529
10530 /* Diag output */
10531 if (dodiag) {
10532 if (doubt_phytype != WMPHY_UNKNOWN)
10533 aprint_error_dev(dev, "Assumed new PHY type was "
10534 "incorrect. old = %u, new = %u\n", sc->sc_phytype,
10535 new_phytype);
10536 else if ((sc->sc_phytype != WMPHY_UNKNOWN)
10537 && (sc->sc_phytype != new_phytype))
10538 			aprint_error_dev(dev, "Previously assumed PHY type(%u) "
10539 "was incorrect. New PHY type = %u\n",
10540 sc->sc_phytype, new_phytype);
10541
10542 if ((mii->mii_readreg != NULL) &&
10543 (new_phytype == WMPHY_UNKNOWN))
10544 aprint_error_dev(dev, "PHY type is still unknown.\n");
10545
10546 if ((mii->mii_readreg != NULL) &&
10547 (mii->mii_readreg != new_readreg))
10548 aprint_error_dev(dev, "Previously assumed PHY "
10549 "read/write function was incorrect.\n");
10550 }
10551
10552 /* Update now */
10553 sc->sc_phytype = new_phytype;
10554 mii->mii_readreg = new_readreg;
10555 mii->mii_writereg = new_writereg;
10556 if (new_readreg == wm_gmii_hv_readreg) {
10557 sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
10558 sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
10559 } else if (new_readreg == wm_sgmii_readreg) {
10560 sc->phy.readreg_locked = wm_sgmii_readreg_locked;
10561 sc->phy.writereg_locked = wm_sgmii_writereg_locked;
10562 } else if (new_readreg == wm_gmii_i82544_readreg) {
10563 sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
10564 sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
10565 }
10566 }
10567
10568 /*
10569 * wm_get_phy_id_82575:
10570 *
10571 * Return PHY ID. Return -1 if it failed.
10572 */
10573 static int
10574 wm_get_phy_id_82575(struct wm_softc *sc)
10575 {
10576 uint32_t reg;
10577 int phyid = -1;
10578
10579 /* XXX */
10580 if ((sc->sc_flags & WM_F_SGMII) == 0)
10581 return -1;
10582
10583 if (wm_sgmii_uses_mdio(sc)) {
10584 switch (sc->sc_type) {
10585 case WM_T_82575:
10586 case WM_T_82576:
10587 reg = CSR_READ(sc, WMREG_MDIC);
10588 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
10589 break;
10590 case WM_T_82580:
10591 case WM_T_I350:
10592 case WM_T_I354:
10593 case WM_T_I210:
10594 case WM_T_I211:
10595 reg = CSR_READ(sc, WMREG_MDICNFG);
10596 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
10597 break;
10598 default:
10599 return -1;
10600 }
10601 }
10602
10603 return phyid;
10604 }
10605
10606 /*
10607 * wm_gmii_mediainit:
10608 *
10609 * Initialize media for use on 1000BASE-T devices.
10610 */
10611 static void
10612 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
10613 {
10614 device_t dev = sc->sc_dev;
10615 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10616 struct mii_data *mii = &sc->sc_mii;
10617
10618 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
10619 device_xname(sc->sc_dev), __func__));
10620
10621 /* We have GMII. */
10622 sc->sc_flags |= WM_F_HAS_MII;
10623
10624 if (sc->sc_type == WM_T_80003)
10625 sc->sc_tipg = TIPG_1000T_80003_DFLT;
10626 else
10627 sc->sc_tipg = TIPG_1000T_DFLT;
10628
10629 /*
10630 * Let the chip set speed/duplex on its own based on
10631 * signals from the PHY.
10632 * XXXbouyer - I'm not sure this is right for the 80003,
10633 * the em driver only sets CTRL_SLU here - but it seems to work.
10634 */
10635 sc->sc_ctrl |= CTRL_SLU;
10636 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10637
10638 /* Initialize our media structures and probe the GMII. */
10639 mii->mii_ifp = ifp;
10640
10641 mii->mii_statchg = wm_gmii_statchg;
10642
10643 /* get PHY control from SMBus to PCIe */
10644 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
10645 || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
10646 || (sc->sc_type == WM_T_PCH_CNP))
10647 wm_init_phy_workarounds_pchlan(sc);
10648
10649 wm_gmii_reset(sc);
10650
10651 sc->sc_ethercom.ec_mii = &sc->sc_mii;
10652 ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
10653 wm_gmii_mediastatus, sc->sc_core_lock);
10654
10655 /* Setup internal SGMII PHY for SFP */
10656 wm_sgmii_sfp_preconfig(sc);
10657
10658 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
10659 || (sc->sc_type == WM_T_82580)
10660 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
10661 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
10662 if ((sc->sc_flags & WM_F_SGMII) == 0) {
10663 /* Attach only one port */
10664 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
10665 MII_OFFSET_ANY, MIIF_DOPAUSE);
10666 } else {
10667 int i, id;
10668 uint32_t ctrl_ext;
10669
10670 id = wm_get_phy_id_82575(sc);
10671 if (id != -1) {
10672 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
10673 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
10674 }
10675 if ((id == -1)
10676 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
10677 /* Power on sgmii phy if it is disabled */
10678 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10679 CSR_WRITE(sc, WMREG_CTRL_EXT,
10680 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
10681 CSR_WRITE_FLUSH(sc);
10682 delay(300*1000); /* XXX too long */
10683
10684 				/*
10685 				 * Try PHY addresses 1 through 7.
10686 				 *
10687 				 * I2C access may fail with the I2C register's
10688 				 * ERROR bit set, so suppress the error message
10689 				 * while scanning.
10690 				 */
10691 sc->phy.no_errprint = true;
10692 for (i = 1; i < 8; i++)
10693 mii_attach(sc->sc_dev, &sc->sc_mii,
10694 0xffffffff, i, MII_OFFSET_ANY,
10695 MIIF_DOPAUSE);
10696 sc->phy.no_errprint = false;
10697
10698 /* Restore previous sfp cage power state */
10699 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
10700 }
10701 }
10702 } else
10703 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10704 MII_OFFSET_ANY, MIIF_DOPAUSE);
10705
10706 	/*
10707 	 * If the MAC is PCH2 or newer and it failed to detect the MII PHY,
10708 	 * call wm_set_mdio_slow_mode_hv() as a workaround and retry.
10709 	 */
10710 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
10711 || (sc->sc_type == WM_T_PCH_SPT)
10712 || (sc->sc_type == WM_T_PCH_CNP))
10713 && (LIST_FIRST(&mii->mii_phys) == NULL)) {
10714 wm_set_mdio_slow_mode_hv(sc);
10715 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10716 MII_OFFSET_ANY, MIIF_DOPAUSE);
10717 }
10718
10719 /*
10720 * (For ICH8 variants)
10721 * If PHY detection failed, use BM's r/w function and retry.
10722 */
10723 if (LIST_FIRST(&mii->mii_phys) == NULL) {
10724 /* if failed, retry with *_bm_* */
10725 aprint_verbose_dev(dev, "Assumed PHY access function "
10726 "(type = %d) might be incorrect. Use BM and retry.\n",
10727 sc->sc_phytype);
10728 sc->sc_phytype = WMPHY_BM;
10729 mii->mii_readreg = wm_gmii_bm_readreg;
10730 mii->mii_writereg = wm_gmii_bm_writereg;
10731
10732 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10733 MII_OFFSET_ANY, MIIF_DOPAUSE);
10734 }
10735
10736 if (LIST_FIRST(&mii->mii_phys) == NULL) {
10737 		/* No PHY was found */
10738 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
10739 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
10740 sc->sc_phytype = WMPHY_NONE;
10741 } else {
10742 struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
10743
10744 		/*
10745 		 * PHY found! Check the PHY type again with the second call of
10746 		 * wm_gmii_setup_phytype().
10747 		 */
10748 wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
10749 child->mii_mpd_model);
10750
10751 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
10752 }
10753 }
10754
10755 /*
10756 * wm_gmii_mediachange: [ifmedia interface function]
10757 *
10758 * Set hardware to newly-selected media on a 1000BASE-T device.
10759 */
10760 static int
10761 wm_gmii_mediachange(struct ifnet *ifp)
10762 {
10763 struct wm_softc *sc = ifp->if_softc;
10764 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
10765 uint32_t reg;
10766 int rc;
10767
10768 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
10769 device_xname(sc->sc_dev), __func__));
10770 if ((ifp->if_flags & IFF_UP) == 0)
10771 return 0;
10772
10773 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
10774 if ((sc->sc_type == WM_T_82580)
10775 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
10776 || (sc->sc_type == WM_T_I211)) {
10777 reg = CSR_READ(sc, WMREG_PHPM);
10778 reg &= ~PHPM_GO_LINK_D;
10779 CSR_WRITE(sc, WMREG_PHPM, reg);
10780 }
10781
10782 /* Disable D0 LPLU. */
10783 wm_lplu_d0_disable(sc);
10784
10785 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
10786 sc->sc_ctrl |= CTRL_SLU;
10787 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
10788 || (sc->sc_type > WM_T_82543)) {
10789 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
10790 } else {
10791 sc->sc_ctrl &= ~CTRL_ASDE;
10792 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
10793 if (ife->ifm_media & IFM_FDX)
10794 sc->sc_ctrl |= CTRL_FD;
10795 switch (IFM_SUBTYPE(ife->ifm_media)) {
10796 case IFM_10_T:
10797 sc->sc_ctrl |= CTRL_SPEED_10;
10798 break;
10799 case IFM_100_TX:
10800 sc->sc_ctrl |= CTRL_SPEED_100;
10801 break;
10802 case IFM_1000_T:
10803 sc->sc_ctrl |= CTRL_SPEED_1000;
10804 break;
10805 case IFM_NONE:
10806 /* There is no specific setting for IFM_NONE */
10807 break;
10808 default:
10809 panic("wm_gmii_mediachange: bad media 0x%x",
10810 ife->ifm_media);
10811 }
10812 }
10813 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10814 CSR_WRITE_FLUSH(sc);
10815
10816 if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
10817 wm_serdes_mediachange(ifp);
10818
10819 if (sc->sc_type <= WM_T_82543)
10820 wm_gmii_reset(sc);
10821 else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
10822 && ((sc->sc_flags & WM_F_SGMII) != 0)) {
10823 		/* Allow time for the SFP cage to power up the PHY */
10824 delay(300 * 1000);
10825 wm_gmii_reset(sc);
10826 }
10827
10828 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
10829 return 0;
10830 return rc;
10831 }
10832
10833 /*
10834 * wm_gmii_mediastatus: [ifmedia interface function]
10835 *
10836 * Get the current interface media status on a 1000BASE-T device.
10837 */
10838 static void
10839 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
10840 {
10841 struct wm_softc *sc = ifp->if_softc;
10842
10843 ether_mediastatus(ifp, ifmr);
10844 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
10845 | sc->sc_flowflags;
10846 }
10847
10848 #define MDI_IO CTRL_SWDPIN(2)
10849 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
10850 #define MDI_CLK CTRL_SWDPIN(3)
10851
10852 static void
10853 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
10854 {
10855 uint32_t i, v;
10856
10857 v = CSR_READ(sc, WMREG_CTRL);
10858 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
10859 v |= MDI_DIR | CTRL_SWDPIO(3);
10860
10861 for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
10862 if (data & i)
10863 v |= MDI_IO;
10864 else
10865 v &= ~MDI_IO;
10866 CSR_WRITE(sc, WMREG_CTRL, v);
10867 CSR_WRITE_FLUSH(sc);
10868 delay(10);
10869 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10870 CSR_WRITE_FLUSH(sc);
10871 delay(10);
10872 CSR_WRITE(sc, WMREG_CTRL, v);
10873 CSR_WRITE_FLUSH(sc);
10874 delay(10);
10875 }
10876 }
10877
10878 static uint16_t
10879 wm_i82543_mii_recvbits(struct wm_softc *sc)
10880 {
10881 uint32_t v, i;
10882 uint16_t data = 0;
10883
10884 v = CSR_READ(sc, WMREG_CTRL);
10885 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
10886 v |= CTRL_SWDPIO(3);
10887
10888 CSR_WRITE(sc, WMREG_CTRL, v);
10889 CSR_WRITE_FLUSH(sc);
10890 delay(10);
10891 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10892 CSR_WRITE_FLUSH(sc);
10893 delay(10);
10894 CSR_WRITE(sc, WMREG_CTRL, v);
10895 CSR_WRITE_FLUSH(sc);
10896 delay(10);
10897
10898 for (i = 0; i < 16; i++) {
10899 data <<= 1;
10900 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10901 CSR_WRITE_FLUSH(sc);
10902 delay(10);
10903 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
10904 data |= 1;
10905 CSR_WRITE(sc, WMREG_CTRL, v);
10906 CSR_WRITE_FLUSH(sc);
10907 delay(10);
10908 }
10909
10910 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10911 CSR_WRITE_FLUSH(sc);
10912 delay(10);
10913 CSR_WRITE(sc, WMREG_CTRL, v);
10914 CSR_WRITE_FLUSH(sc);
10915 delay(10);
10916
10917 return data;
10918 }
10919
10920 #undef MDI_IO
10921 #undef MDI_DIR
10922 #undef MDI_CLK
10923
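/*
 * The bit-bang helpers above clock a standard IEEE 802.3 clause 22 MDIO
 * frame on the software-definable pins: a 32-bit preamble of ones, a
 * 2-bit start, a 2-bit opcode, 5 bits of PHY address, 5 bits of register
 * address, a turnaround, and 16 data bits.  The shift constants used by
 * wm_gmii_i82543_readreg()/wm_gmii_i82543_writereg() below encode that
 * frame layout.
 */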
10924 /*
10925 * wm_gmii_i82543_readreg: [mii interface function]
10926 *
10927 * Read a PHY register on the GMII (i82543 version).
10928 */
10929 static int
10930 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
10931 {
10932 struct wm_softc *sc = device_private(dev);
10933
10934 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
10935 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
10936 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
10937 *val = wm_i82543_mii_recvbits(sc) & 0xffff;
10938
10939 DPRINTF(sc, WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
10940 device_xname(dev), phy, reg, *val));
10941
10942 return 0;
10943 }
10944
10945 /*
10946 * wm_gmii_i82543_writereg: [mii interface function]
10947 *
10948 * Write a PHY register on the GMII (i82543 version).
10949 */
10950 static int
10951 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
10952 {
10953 struct wm_softc *sc = device_private(dev);
10954
10955 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
10956 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
10957 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
10958 (MII_COMMAND_START << 30), 32);
10959
10960 return 0;
10961 }
10962
10963 /*
10964 * wm_gmii_mdic_readreg: [mii interface function]
10965 *
10966 * Read a PHY register on the GMII.
10967 */
10968 static int
10969 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
10970 {
10971 struct wm_softc *sc = device_private(dev);
10972 uint32_t mdic = 0;
10973 int i;
10974
10975 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
10976 && (reg > MII_ADDRMASK)) {
10977 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
10978 __func__, sc->sc_phytype, reg);
10979 reg &= MII_ADDRMASK;
10980 }
10981
10982 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
10983 MDIC_REGADD(reg));
10984
10985 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
10986 delay(50);
10987 mdic = CSR_READ(sc, WMREG_MDIC);
10988 if (mdic & MDIC_READY)
10989 break;
10990 }
10991
10992 if ((mdic & MDIC_READY) == 0) {
10993 DPRINTF(sc, WM_DEBUG_GMII,
10994 ("%s: MDIC read timed out: phy %d reg %d\n",
10995 device_xname(dev), phy, reg));
10996 return ETIMEDOUT;
10997 } else if (mdic & MDIC_E) {
10998 /* This is normal if no PHY is present. */
10999 DPRINTF(sc, WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
11000 device_xname(sc->sc_dev), phy, reg));
11001 return -1;
11002 } else
11003 *val = MDIC_DATA(mdic);
11004
11005 /*
11006 * Allow some time after each MDIC transaction to avoid
11007 * reading duplicate data in the next MDIC transaction.
11008 */
11009 if (sc->sc_type == WM_T_PCH2)
11010 delay(100);
11011
11012 return 0;
11013 }
11014
11015 /*
11016 * wm_gmii_mdic_writereg: [mii interface function]
11017 *
11018 * Write a PHY register on the GMII.
11019 */
11020 static int
11021 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
11022 {
11023 struct wm_softc *sc = device_private(dev);
11024 uint32_t mdic = 0;
11025 int i;
11026
11027 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
11028 && (reg > MII_ADDRMASK)) {
11029 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11030 __func__, sc->sc_phytype, reg);
11031 reg &= MII_ADDRMASK;
11032 }
11033
11034 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
11035 MDIC_REGADD(reg) | MDIC_DATA(val));
11036
11037 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
11038 delay(50);
11039 mdic = CSR_READ(sc, WMREG_MDIC);
11040 if (mdic & MDIC_READY)
11041 break;
11042 }
11043
11044 if ((mdic & MDIC_READY) == 0) {
11045 DPRINTF(sc, WM_DEBUG_GMII,
11046 ("%s: MDIC write timed out: phy %d reg %d\n",
11047 device_xname(dev), phy, reg));
11048 return ETIMEDOUT;
11049 } else if (mdic & MDIC_E) {
11050 DPRINTF(sc, WM_DEBUG_GMII,
11051 ("%s: MDIC write error: phy %d reg %d\n",
11052 device_xname(dev), phy, reg));
11053 return -1;
11054 }
11055
11056 /*
11057 * Allow some time after each MDIC transaction to avoid
11058 * reading duplicate data in the next MDIC transaction.
11059 */
11060 if (sc->sc_type == WM_T_PCH2)
11061 delay(100);
11062
11063 return 0;
11064 }
11065
11066 /*
11067 * wm_gmii_i82544_readreg: [mii interface function]
11068 *
11069 * Read a PHY register on the GMII.
11070 */
11071 static int
11072 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
11073 {
11074 struct wm_softc *sc = device_private(dev);
11075 int rv;
11076
11077 if (sc->phy.acquire(sc)) {
11078 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11079 return -1;
11080 }
11081
11082 rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
11083
11084 sc->phy.release(sc);
11085
11086 return rv;
11087 }
11088
11089 static int
11090 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
11091 {
11092 struct wm_softc *sc = device_private(dev);
11093 int rv;
11094
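/*
 * Register numbers above the standard 0x1f range require a page
 * select write first on IGP-family PHYs. For other PHY types this
 * case is only reported when WM_DEBUG is enabled.
 */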
11095 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11096 switch (sc->sc_phytype) {
11097 case WMPHY_IGP:
11098 case WMPHY_IGP_2:
11099 case WMPHY_IGP_3:
11100 rv = wm_gmii_mdic_writereg(dev, phy,
11101 IGPHY_PAGE_SELECT, reg);
11102 if (rv != 0)
11103 return rv;
11104 break;
11105 default:
11106 #ifdef WM_DEBUG
11107 device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
11108 __func__, sc->sc_phytype, reg);
11109 #endif
11110 break;
11111 }
11112 }
11113
11114 return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11115 }
11116
11117 /*
11118 * wm_gmii_i82544_writereg: [mii interface function]
11119 *
11120 * Write a PHY register on the GMII.
11121 */
11122 static int
11123 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
11124 {
11125 struct wm_softc *sc = device_private(dev);
11126 int rv;
11127
11128 if (sc->phy.acquire(sc)) {
11129 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11130 return -1;
11131 }
11132
11133 rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
11134 sc->phy.release(sc);
11135
11136 return rv;
11137 }
11138
11139 static int
11140 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
11141 {
11142 struct wm_softc *sc = device_private(dev);
11143 int rv;
11144
11145 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11146 switch (sc->sc_phytype) {
11147 case WMPHY_IGP:
11148 case WMPHY_IGP_2:
11149 case WMPHY_IGP_3:
11150 rv = wm_gmii_mdic_writereg(dev, phy,
11151 IGPHY_PAGE_SELECT, reg);
11152 if (rv != 0)
11153 return rv;
11154 break;
11155 default:
11156 #ifdef WM_DEBUG
11157 device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
11158 __func__, sc->sc_phytype, reg);
11159 #endif
11160 break;
11161 }
11162 }
11163
11164 return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11165 }
11166
11167 /*
11168 * wm_gmii_i80003_readreg: [mii interface function]
11169 *
11170 * Read a PHY register on the 80003's GG82563 PHY.
11171 * This could be handled by the PHY layer if we didn't have to lock the
11172 * resource ...
11173 */
11174 static int
11175 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
11176 {
11177 struct wm_softc *sc = device_private(dev);
11178 int page_select;
11179 uint16_t temp, temp2;
11180 int rv = 0;
11181
11182 if (phy != 1) /* Only one PHY on kumeran bus */
11183 return -1;
11184
11185 if (sc->phy.acquire(sc)) {
11186 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11187 return -1;
11188 }
11189
11190 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11191 page_select = GG82563_PHY_PAGE_SELECT;
11192 else {
11193 /*
11194 * Use Alternative Page Select register to access registers
11195 * 30 and 31.
11196 */
11197 page_select = GG82563_PHY_PAGE_SELECT_ALT;
11198 }
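/* The upper bits of 'reg' carry the GG82563 page number. */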
11199 temp = reg >> GG82563_PAGE_SHIFT;
11200 if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11201 goto out;
11202
11203 if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
11204 /*
11205 * Wait an additional 200us to work around a bug affecting
11206 * the ready bit in the MDIC register.
11207 */
11208 delay(200);
11209 rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11210 if ((rv != 0) || (temp2 != temp)) {
11211 device_printf(dev, "%s failed\n", __func__);
11212 rv = -1;
11213 goto out;
11214 }
11215 delay(200);
11216 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11217 delay(200);
11218 } else
11219 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11220
11221 out:
11222 sc->phy.release(sc);
11223 return rv;
11224 }
11225
11226 /*
11227 * wm_gmii_i80003_writereg: [mii interface function]
11228 *
11229 * Write a PHY register on the 80003's GG82563 PHY.
11230 * This could be handled by the PHY layer if we didn't have to lock the
11231 * resource ...
11232 */
11233 static int
11234 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
11235 {
11236 struct wm_softc *sc = device_private(dev);
11237 int page_select, rv;
11238 uint16_t temp, temp2;
11239
11240 if (phy != 1) /* Only one PHY on kumeran bus */
11241 return -1;
11242
11243 if (sc->phy.acquire(sc)) {
11244 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11245 return -1;
11246 }
11247
11248 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11249 page_select = GG82563_PHY_PAGE_SELECT;
11250 else {
11251 /*
11252 * Use Alternative Page Select register to access registers
11253 * 30 and 31.
11254 */
11255 page_select = GG82563_PHY_PAGE_SELECT_ALT;
11256 }
11257 temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
11258 if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11259 goto out;
11260
11261 if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
11262 /*
11263 * Wait an additional 200us to work around a bug affecting
11264 * the ready bit in the MDIC register.
11265 */
11266 delay(200);
11267 rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11268 if ((rv != 0) || (temp2 != temp)) {
11269 device_printf(dev, "%s failed\n", __func__);
11270 rv = -1;
11271 goto out;
11272 }
11273 delay(200);
11274 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11275 delay(200);
11276 } else
11277 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11278
11279 out:
11280 sc->phy.release(sc);
11281 return rv;
11282 }
11283
11284 /*
11285 * wm_gmii_bm_readreg: [mii interface function]
11286 *
11287 * Read a PHY register on the BM PHY.
11288 * This could be handled by the PHY layer if we didn't have to lock the
11289 * resource ...
11290 */
11291 static int
11292 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
11293 {
11294 struct wm_softc *sc = device_private(dev);
11295 uint16_t page = reg >> BME1000_PAGE_SHIFT;
11296 int rv;
11297
11298 if (sc->phy.acquire(sc)) {
11299 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11300 return -1;
11301 }
11302
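/*
 * On devices other than the 82574 and 82583, the page select,
 * port control and wakeup registers are only reachable at PHY
 * address 1, so redirect such accesses there.
 */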
11303 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11304 phy = ((page >= 768) || ((page == 0) && (reg == 25))
11305 || (reg == 31)) ? 1 : phy;
11306 /* Page 800 works differently than the rest so it has its own func */
11307 if (page == BM_WUC_PAGE) {
11308 rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
11309 goto release;
11310 }
11311
11312 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11313 if ((phy == 1) && (sc->sc_type != WM_T_82574)
11314 && (sc->sc_type != WM_T_82583))
11315 rv = wm_gmii_mdic_writereg(dev, phy,
11316 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11317 else
11318 rv = wm_gmii_mdic_writereg(dev, phy,
11319 BME1000_PHY_PAGE_SELECT, page);
11320 if (rv != 0)
11321 goto release;
11322 }
11323
11324 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11325
11326 release:
11327 sc->phy.release(sc);
11328 return rv;
11329 }
11330
11331 /*
11332 * wm_gmii_bm_writereg: [mii interface function]
11333 *
11334 * Write a PHY register on the BM PHY.
11335 * This could be handled by the PHY layer if we didn't have to lock the
11336 * resource ...
11337 */
11338 static int
11339 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
11340 {
11341 struct wm_softc *sc = device_private(dev);
11342 uint16_t page = reg >> BME1000_PAGE_SHIFT;
11343 int rv;
11344
11345 if (sc->phy.acquire(sc)) {
11346 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11347 return -1;
11348 }
11349
11350 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11351 phy = ((page >= 768) || ((page == 0) && (reg == 25))
11352 || (reg == 31)) ? 1 : phy;
11353 /* Page 800 works differently than the rest so it has its own func */
11354 if (page == BM_WUC_PAGE) {
11355 rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
11356 goto release;
11357 }
11358
11359 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11360 if ((phy == 1) && (sc->sc_type != WM_T_82574)
11361 && (sc->sc_type != WM_T_82583))
11362 rv = wm_gmii_mdic_writereg(dev, phy,
11363 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11364 else
11365 rv = wm_gmii_mdic_writereg(dev, phy,
11366 BME1000_PHY_PAGE_SELECT, page);
11367 if (rv != 0)
11368 goto release;
11369 }
11370
11371 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11372
11373 release:
11374 sc->phy.release(sc);
11375 return rv;
11376 }
11377
11378 /*
11379 * wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
11380 * @dev: device handle
11381 * @phy_regp: pointer in which to store the original BM_WUC_ENABLE_REG contents
11382 *
11383 * Assumes semaphore already acquired and phy_regp points to a valid memory
11384 * address in which to store the contents of the BM_WUC_ENABLE_REG register.
11385 */
11386 static int
11387 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
11388 {
11389 #ifdef WM_DEBUG
11390 struct wm_softc *sc = device_private(dev);
11391 #endif
11392 uint16_t temp;
11393 int rv;
11394
11395 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11396 device_xname(dev), __func__));
11397
11398 if (!phy_regp)
11399 return -1;
11400
11401 /* All page select, port ctrl and wakeup registers use phy address 1 */
11402
11403 /* Select Port Control Registers page */
11404 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11405 BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
11406 if (rv != 0)
11407 return rv;
11408
11409 /* Read WUCE and save it */
11410 rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
11411 if (rv != 0)
11412 return rv;
11413
11414 /* Enable both PHY wakeup mode and Wakeup register page writes.
11415 * Prevent a power state change by disabling ME and Host PHY wakeup.
11416 */
11417 temp = *phy_regp;
11418 temp |= BM_WUC_ENABLE_BIT;
11419 temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
11420
11421 if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
11422 return rv;
11423
11424 /* Select Host Wakeup Registers page - caller now able to write
11425 * registers on the Wakeup registers page
11426 */
11427 return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11428 BM_WUC_PAGE << IGP3_PAGE_SHIFT);
11429 }
11430
11431 /*
11432 * wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
11433 * @dev: device handle
11434 * @phy_regp: pointer to the original contents of BM_WUC_ENABLE_REG
11435 *
11436 * Restore BM_WUC_ENABLE_REG to its original value.
11437 *
11438 * Assumes semaphore already acquired and *phy_regp is the contents of the
11439 * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
11440 * caller.
11441 */
11442 static int
11443 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
11444 {
11445 #ifdef WM_DEBUG
11446 struct wm_softc *sc = device_private(dev);
11447 #endif
11448
11449 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11450 device_xname(dev), __func__));
11451
11452 if (!phy_regp)
11453 return -1;
11454
11455 /* Select Port Control Registers page */
11456 wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11457 BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
11458
11459 /* Restore 769.17 to its original value */
11460 wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
11461
11462 return 0;
11463 }
11464
11465 /*
11466 * wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
11467 * @dev: device handle
11468 * @offset: register offset to be read or written
11469 * @val: pointer to the data to read or write
11470 * @rd: determines if operation is read or write
11471 * @page_set: BM_WUC_PAGE already set and access enabled
11472 *
11473 * Read the PHY register at offset and store the result in *val, or
11474 * write *val to the PHY register at offset. Note the procedure to
11475 * access the PHY wakeup registers is different than reading the other PHY
11476 * registers. It works as such:
11477 * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1
11478 * 2) Set page to 800 for the host (801 is used by manageability firmware)
11479 * 3) Write the address using the address opcode (0x11)
11480 * 4) Read or write the data using the data opcode (0x12)
11481 * 5) Restore 769.17.2 to its original value
11482 *
11483 * Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
11484 * step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
11485 *
11486 * Assumes semaphore is already acquired. When page_set==TRUE, assumes
11487 * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
11488 * is responsible for calls to wm_{enable,disable}_phy_wakeup_reg_access_bm()).
11489 */
11490 static int
11491 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd,
11492 bool page_set)
11493 {
11494 struct wm_softc *sc = device_private(dev);
11495 uint16_t regnum = BM_PHY_REG_NUM(offset);
11496 uint16_t page = BM_PHY_REG_PAGE(offset);
11497 uint16_t wuce;
11498 int rv = 0;
11499
11500 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11501 device_xname(dev), __func__));
11502 /* XXX Gig must be disabled for MDIO accesses to page 800 */
11503 if ((sc->sc_type == WM_T_PCH)
11504 && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
11505 device_printf(dev,
11506 "Attempting to access page %d while gig enabled.\n", page);
11507 }
11508
11509 if (!page_set) {
11510 /* Enable access to PHY wakeup registers */
11511 rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
11512 if (rv != 0) {
11513 device_printf(dev,
11514 "%s: Could not enable PHY wakeup reg access\n",
11515 __func__);
11516 return rv;
11517 }
11518 }
11519 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
11520 device_xname(sc->sc_dev), __func__, page, regnum));
11521
11522 /*
11523 * Steps 3) and 4): access the PHY wakeup register using the
11524 * address and data opcodes described in the comment above.
11525 */
11526
11527 /* Write the Wakeup register page offset value using opcode 0x11 */
11528 rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
11529 if (rv != 0)
11530 return rv;
11531
11532 if (rd) {
11533 /* Read the Wakeup register page value using opcode 0x12 */
11534 rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
11535 } else {
11536 /* Write the Wakeup register page value using opcode 0x12 */
11537 rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
11538 }
11539 if (rv != 0)
11540 return rv;
11541
11542 if (!page_set)
11543 rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
11544
11545 return rv;
11546 }
11547
11548 /*
11549 * wm_gmii_hv_readreg: [mii interface function]
11550 *
11551 * Read a PHY register on the HV (PCH family) PHY.
11552 * This could be handled by the PHY layer if we didn't have to lock the
11553 * resource ...
11554 */
11555 static int
11556 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
11557 {
11558 struct wm_softc *sc = device_private(dev);
11559 int rv;
11560
11561 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11562 device_xname(dev), __func__));
11563 if (sc->phy.acquire(sc)) {
11564 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11565 return -1;
11566 }
11567
11568 rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
11569 sc->phy.release(sc);
11570 return rv;
11571 }
11572
11573 static int
11574 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
11575 {
11576 uint16_t page = BM_PHY_REG_PAGE(reg);
11577 uint16_t regnum = BM_PHY_REG_NUM(reg);
11578 int rv;
11579
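/* Pages at or above HV_INTC_FC_PAGE_START are only reachable at PHY address 1. */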
11580 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
11581
11582 /* Page 800 works differently than the rest so it has its own func */
11583 if (page == BM_WUC_PAGE)
11584 return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
11585
11586 /*
11587 * Pages lower than 768 (other than page 0) work differently from
11588 * the rest and are not supported here.
11589 */
11590 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
11591 device_printf(dev, "%s: unsupported page %d\n", __func__, page);
11592 return -1;
11593 }
11594
11595 /*
11596 * XXX I21[789] documents say that the SMBus Address register is at
11597 * PHY address 01, Page 0 (not 768), Register 26.
11598 */
11599 if (page == HV_INTC_FC_PAGE_START)
11600 page = 0;
11601
11602 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
11603 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11604 page << BME1000_PAGE_SHIFT);
11605 if (rv != 0)
11606 return rv;
11607 }
11608
11609 return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
11610 }
11611
11612 /*
11613 * wm_gmii_hv_writereg: [mii interface function]
11614 *
11615 * Write a PHY register on the HV (PCH family) PHY.
11616 * This could be handled by the PHY layer if we didn't have to lock the
11617 * resource ...
11618 */
11619 static int
11620 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
11621 {
11622 struct wm_softc *sc = device_private(dev);
11623 int rv;
11624
11625 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11626 device_xname(dev), __func__));
11627
11628 if (sc->phy.acquire(sc)) {
11629 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11630 return -1;
11631 }
11632
11633 rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
11634 sc->phy.release(sc);
11635
11636 return rv;
11637 }
11638
11639 static int
11640 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
11641 {
11642 struct wm_softc *sc = device_private(dev);
11643 uint16_t page = BM_PHY_REG_PAGE(reg);
11644 uint16_t regnum = BM_PHY_REG_NUM(reg);
11645 int rv;
11646
11647 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
11648
11649 /* Page 800 works differently than the rest so it has its own func */
11650 if (page == BM_WUC_PAGE)
11651 return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
11652 false);
11653
11654 /*
11655 * Pages lower than 768 (other than page 0) work differently from
11656 * the rest and are not supported here.
11657 */
11658 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
11659 device_printf(dev, "%s: unsupported page %d\n", __func__, page);
11660 return -1;
11661 }
11662
11663 {
11664 /*
11665 * XXX I21[789] documents say that the SMBus Address register
11666 * is at PHY address 01, Page 0 (not 768), Register 26.
11667 */
11668 if (page == HV_INTC_FC_PAGE_START)
11669 page = 0;
11670
11671 /*
11672 * XXX Workaround MDIO accesses being disabled after entering
11673 * IEEE Power Down (whenever bit 11 of the PHY control
11674 * register is set)
11675 */
11676 if (sc->sc_phytype == WMPHY_82578) {
11677 struct mii_softc *child;
11678
11679 child = LIST_FIRST(&sc->sc_mii.mii_phys);
11680 if ((child != NULL) && (child->mii_mpd_rev >= 1)
11681 && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
11682 && ((val & (1 << 11)) != 0)) {
11683 device_printf(dev, "XXX need workaround\n");
11684 }
11685 }
11686
11687 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
11688 rv = wm_gmii_mdic_writereg(dev, 1,
11689 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11690 if (rv != 0)
11691 return rv;
11692 }
11693 }
11694
11695 return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
11696 }
11697
11698 /*
11699 * wm_gmii_82580_readreg: [mii interface function]
11700 *
11701 * Read a PHY register on the 82580 and I350.
11702 * This could be handled by the PHY layer if we didn't have to lock the
11703 * resource ...
11704 */
11705 static int
11706 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
11707 {
11708 struct wm_softc *sc = device_private(dev);
11709 int rv;
11710
11711 if (sc->phy.acquire(sc) != 0) {
11712 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11713 return -1;
11714 }
11715
11716 #ifdef DIAGNOSTIC
11717 if (reg > MII_ADDRMASK) {
11718 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11719 __func__, sc->sc_phytype, reg);
11720 reg &= MII_ADDRMASK;
11721 }
11722 #endif
11723 rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
11724
11725 sc->phy.release(sc);
11726 return rv;
11727 }
11728
11729 /*
11730 * wm_gmii_82580_writereg: [mii interface function]
11731 *
11732 * Write a PHY register on the 82580 and I350.
11733 * This could be handled by the PHY layer if we didn't have to lock the
11734 * resource ...
11735 */
11736 static int
11737 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
11738 {
11739 struct wm_softc *sc = device_private(dev);
11740 int rv;
11741
11742 if (sc->phy.acquire(sc) != 0) {
11743 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11744 return -1;
11745 }
11746
11747 #ifdef DIAGNOSTIC
11748 if (reg > MII_ADDRMASK) {
11749 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11750 __func__, sc->sc_phytype, reg);
11751 reg &= MII_ADDRMASK;
11752 }
11753 #endif
11754 rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
11755
11756 sc->phy.release(sc);
11757 return rv;
11758 }
11759
11760 /*
11761 * wm_gmii_gs40g_readreg: [mii interface function]
11762 *
11763 * Read a PHY register on the I210 and I211.
11764 * This could be handled by the PHY layer if we didn't have to lock the
11765 * resource ...
11766 */
11767 static int
11768 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
11769 {
11770 struct wm_softc *sc = device_private(dev);
11771 int page, offset;
11772 int rv;
11773
11774 /* Acquire semaphore */
11775 if (sc->phy.acquire(sc)) {
11776 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11777 return -1;
11778 }
11779
11780 /* Page select */
11781 page = reg >> GS40G_PAGE_SHIFT;
11782 rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
11783 if (rv != 0)
11784 goto release;
11785
11786 /* Read reg */
11787 offset = reg & GS40G_OFFSET_MASK;
11788 rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
11789
11790 release:
11791 sc->phy.release(sc);
11792 return rv;
11793 }
11794
11795 /*
11796 * wm_gmii_gs40g_writereg: [mii interface function]
11797 *
11798 * Write a PHY register on the I210 and I211.
11799 * This could be handled by the PHY layer if we didn't have to lock the
11800 * resource ...
11801 */
11802 static int
11803 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
11804 {
11805 struct wm_softc *sc = device_private(dev);
11806 uint16_t page;
11807 int offset, rv;
11808
11809 /* Acquire semaphore */
11810 if (sc->phy.acquire(sc)) {
11811 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11812 return -1;
11813 }
11814
11815 /* Page select */
11816 page = reg >> GS40G_PAGE_SHIFT;
11817 rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
11818 if (rv != 0)
11819 goto release;
11820
11821 /* Write reg */
11822 offset = reg & GS40G_OFFSET_MASK;
11823 rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
11824
11825 release:
11826 /* Release semaphore */
11827 sc->phy.release(sc);
11828 return rv;
11829 }
11830
11831 /*
11832 * wm_gmii_statchg: [mii interface function]
11833 *
11834 * Callback from MII layer when media changes.
11835 */
11836 static void
11837 wm_gmii_statchg(struct ifnet *ifp)
11838 {
11839 struct wm_softc *sc = ifp->if_softc;
11840 struct mii_data *mii = &sc->sc_mii;
11841
11842 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
11843 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
11844 sc->sc_fcrtl &= ~FCRTL_XONE;
11845
11846 /* Get flow control negotiation result. */
11847 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
11848 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
11849 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
11850 mii->mii_media_active &= ~IFM_ETH_FMASK;
11851 }
11852
11853 if (sc->sc_flowflags & IFM_FLOW) {
11854 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
11855 sc->sc_ctrl |= CTRL_TFCE;
11856 sc->sc_fcrtl |= FCRTL_XONE;
11857 }
11858 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
11859 sc->sc_ctrl |= CTRL_RFCE;
11860 }
11861
11862 if (mii->mii_media_active & IFM_FDX) {
11863 DPRINTF(sc, WM_DEBUG_LINK,
11864 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
11865 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
11866 } else {
11867 DPRINTF(sc, WM_DEBUG_LINK,
11868 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
11869 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
11870 }
11871
11872 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11873 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
11874 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
11875 : WMREG_FCRTL, sc->sc_fcrtl);
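/*
 * The 80003's Kumeran interface needs different half-duplex control
 * and inter-packet gap settings depending on whether the link came
 * up at 1000Mb/s or at 10/100Mb/s.
 */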
11876 if (sc->sc_type == WM_T_80003) {
11877 switch (IFM_SUBTYPE(mii->mii_media_active)) {
11878 case IFM_1000_T:
11879 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
11880 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
11881 sc->sc_tipg = TIPG_1000T_80003_DFLT;
11882 break;
11883 default:
11884 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
11885 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
11886 sc->sc_tipg = TIPG_10_100_80003_DFLT;
11887 break;
11888 }
11889 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
11890 }
11891 }
11892
11893 /* kumeran related (80003, ICH* and PCH*) */
11894
11895 /*
11896 * wm_kmrn_readreg:
11897 *
11898 * Read a kumeran register
11899 */
11900 static int
11901 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
11902 {
11903 int rv;
11904
11905 if (sc->sc_type == WM_T_80003)
11906 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11907 else
11908 rv = sc->phy.acquire(sc);
11909 if (rv != 0) {
11910 device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
11911 __func__);
11912 return rv;
11913 }
11914
11915 rv = wm_kmrn_readreg_locked(sc, reg, val);
11916
11917 if (sc->sc_type == WM_T_80003)
11918 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11919 else
11920 sc->phy.release(sc);
11921
11922 return rv;
11923 }
11924
11925 static int
11926 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
11927 {
11928
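/*
 * Request the read by writing the register offset with the REN bit
 * set; after a short delay the value is available in the low bits
 * of KUMCTRLSTA.
 */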
11929 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
11930 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
11931 KUMCTRLSTA_REN);
11932 CSR_WRITE_FLUSH(sc);
11933 delay(2);
11934
11935 *val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
11936
11937 return 0;
11938 }
11939
11940 /*
11941 * wm_kmrn_writereg:
11942 *
11943 * Write a kumeran register
11944 */
11945 static int
11946 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
11947 {
11948 int rv;
11949
11950 if (sc->sc_type == WM_T_80003)
11951 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11952 else
11953 rv = sc->phy.acquire(sc);
11954 if (rv != 0) {
11955 device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
11956 __func__);
11957 return rv;
11958 }
11959
11960 rv = wm_kmrn_writereg_locked(sc, reg, val);
11961
11962 if (sc->sc_type == WM_T_80003)
11963 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11964 else
11965 sc->phy.release(sc);
11966
11967 return rv;
11968 }
11969
11970 static int
11971 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
11972 {
11973
11974 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
11975 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
11976
11977 return 0;
11978 }
11979
11980 /*
11981 * EMI register related (82579, WMPHY_I217(PCH2 and newer))
11982 * This access method is different from IEEE MMD.
11983 */
11984 static int
11985 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
11986 {
11987 struct wm_softc *sc = device_private(dev);
11988 int rv;
11989
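/*
 * EMI registers are accessed indirectly: write the register number
 * to the EMI address register, then read or write the EMI data
 * register, both at PHY address 2.
 */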
11990 rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
11991 if (rv != 0)
11992 return rv;
11993
11994 if (rd)
11995 rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
11996 else
11997 rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
11998 return rv;
11999 }
12000
12001 static int
12002 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
12003 {
12004
12005 return wm_access_emi_reg_locked(dev, reg, val, true);
12006 }
12007
12008 static int
12009 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
12010 {
12011
12012 return wm_access_emi_reg_locked(dev, reg, &val, false);
12013 }
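
/*
 * These EMI helpers use the *_locked PHY accessors, so the caller must
 * already hold the PHY semaphore (sc->phy.acquire). A minimal usage
 * sketch, with a hypothetical register name for illustration only:
 *
 *	uint16_t data;
 *	if (sc->phy.acquire(sc) == 0) {
 *		rv = wm_read_emi_reg_locked(dev, I82579_EXAMPLE_REG, &data);
 *		sc->phy.release(sc);
 *	}
 */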
12014
12015 /* SGMII related */
12016
12017 /*
12018 * wm_sgmii_uses_mdio
12019 *
12020 * Check whether the transaction is to the internal PHY or the external
12021 * MDIO interface. Return true if it's MDIO.
12022 */
12023 static bool
12024 wm_sgmii_uses_mdio(struct wm_softc *sc)
12025 {
12026 uint32_t reg;
12027 bool ismdio = false;
12028
12029 switch (sc->sc_type) {
12030 case WM_T_82575:
12031 case WM_T_82576:
12032 reg = CSR_READ(sc, WMREG_MDIC);
12033 ismdio = ((reg & MDIC_DEST) != 0);
12034 break;
12035 case WM_T_82580:
12036 case WM_T_I350:
12037 case WM_T_I354:
12038 case WM_T_I210:
12039 case WM_T_I211:
12040 reg = CSR_READ(sc, WMREG_MDICNFG);
12041 ismdio = ((reg & MDICNFG_DEST) != 0);
12042 break;
12043 default:
12044 break;
12045 }
12046
12047 return ismdio;
12048 }
12049
12050 /* Setup internal SGMII PHY for SFP */
12051 static void
12052 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
12053 {
12054 uint16_t id1, id2, phyreg;
12055 int i, rv;
12056
12057 if (((sc->sc_flags & WM_F_SGMII) == 0)
12058 || ((sc->sc_flags & WM_F_SFP) == 0))
12059 return;
12060
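/*
 * Scan the MDIO bus for a Marvell PHY behind the SFP cage and force
 * it into SGMII (without clock) to copper mode.
 */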
12061 for (i = 0; i < MII_NPHY; i++) {
12062 sc->phy.no_errprint = true;
12063 rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
12064 if (rv != 0)
12065 continue;
12066 rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
12067 if (rv != 0)
12068 continue;
12069 if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
12070 continue;
12071 sc->phy.no_errprint = false;
12072
12073 sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
12074 phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
12075 phyreg |= ESSR_SGMII_WOC_COPPER;
12076 sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
12077 break;
12078 }
12079
12080 }
12081
12082 /*
12083 * wm_sgmii_readreg: [mii interface function]
12084 *
12085 * Read a PHY register on the SGMII
12086 * This could be handled by the PHY layer if we didn't have to lock the
12087 * resource ...
12088 */
12089 static int
12090 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
12091 {
12092 struct wm_softc *sc = device_private(dev);
12093 int rv;
12094
12095 if (sc->phy.acquire(sc)) {
12096 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12097 return -1;
12098 }
12099
12100 rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
12101
12102 sc->phy.release(sc);
12103 return rv;
12104 }
12105
12106 static int
12107 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12108 {
12109 struct wm_softc *sc = device_private(dev);
12110 uint32_t i2ccmd;
12111 int i, rv = 0;
12112
12113 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
12114 | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
12115 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12116
12117 /* Poll the ready bit */
12118 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12119 delay(50);
12120 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12121 if (i2ccmd & I2CCMD_READY)
12122 break;
12123 }
12124 if ((i2ccmd & I2CCMD_READY) == 0) {
12125 device_printf(dev, "I2CCMD Read did not complete\n");
12126 rv = ETIMEDOUT;
12127 }
12128 if ((i2ccmd & I2CCMD_ERROR) != 0) {
12129 if (!sc->phy.no_errprint)
12130 device_printf(dev, "I2CCMD Error bit set\n");
12131 rv = EIO;
12132 }
12133
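/* Data bytes come back swapped on the I2C interface; swap them into host order. */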
12134 *val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
12135
12136 return rv;
12137 }
12138
12139 /*
12140 * wm_sgmii_writereg: [mii interface function]
12141 *
12142 * Write a PHY register on the SGMII.
12143 * This could be handled by the PHY layer if we didn't have to lock the
12144 * resource ...
12145 */
12146 static int
12147 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
12148 {
12149 struct wm_softc *sc = device_private(dev);
12150 int rv;
12151
12152 if (sc->phy.acquire(sc) != 0) {
12153 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12154 return -1;
12155 }
12156
12157 rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
12158
12159 sc->phy.release(sc);
12160
12161 return rv;
12162 }
12163
12164 static int
12165 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12166 {
12167 struct wm_softc *sc = device_private(dev);
12168 uint32_t i2ccmd;
12169 uint16_t swapdata;
12170 int rv = 0;
12171 int i;
12172
12173 /* Swap the data bytes for the I2C interface */
12174 swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
12175 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
12176 | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
12177 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12178
12179 /* Poll the ready bit */
12180 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12181 delay(50);
12182 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12183 if (i2ccmd & I2CCMD_READY)
12184 break;
12185 }
12186 if ((i2ccmd & I2CCMD_READY) == 0) {
12187 device_printf(dev, "I2CCMD Write did not complete\n");
12188 rv = ETIMEDOUT;
12189 }
12190 if ((i2ccmd & I2CCMD_ERROR) != 0) {
12191 device_printf(dev, "I2CCMD Error bit set\n");
12192 rv = EIO;
12193 }
12194
12195 return rv;
12196 }
12197
12198 /* TBI related */
12199
12200 static bool
12201 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
12202 {
12203 bool sig;
12204
12205 sig = ctrl & CTRL_SWDPIN(1);
12206
12207 /*
12208 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
12209 * detect a signal, 1 if they don't.
12210 */
12211 if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
12212 sig = !sig;
12213
12214 return sig;
12215 }
12216
12217 /*
12218 * wm_tbi_mediainit:
12219 *
12220 * Initialize media for use on 1000BASE-X devices.
12221 */
12222 static void
12223 wm_tbi_mediainit(struct wm_softc *sc)
12224 {
12225 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
12226 const char *sep = "";
12227
12228 if (sc->sc_type < WM_T_82543)
12229 sc->sc_tipg = TIPG_WM_DFLT;
12230 else
12231 sc->sc_tipg = TIPG_LG_DFLT;
12232
12233 sc->sc_tbi_serdes_anegticks = 5;
12234
12235 /* Initialize our media structures */
12236 sc->sc_mii.mii_ifp = ifp;
12237 sc->sc_ethercom.ec_mii = &sc->sc_mii;
12238
12239 ifp->if_baudrate = IF_Gbps(1);
12240 if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
12241 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
12242 ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
12243 wm_serdes_mediachange, wm_serdes_mediastatus,
12244 sc->sc_core_lock);
12245 } else {
12246 ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
12247 wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
12248 }
12249
12250 /*
12251 * SWD Pins:
12252 *
12253 * 0 = Link LED (output)
12254 * 1 = Loss Of Signal (input)
12255 */
12256 sc->sc_ctrl |= CTRL_SWDPIO(0);
12257
12258 /* XXX Perhaps this is only for TBI */
12259 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12260 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
12261
12262 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
12263 sc->sc_ctrl &= ~CTRL_LRST;
12264
12265 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12266
12267 #define ADD(ss, mm, dd) \
12268 do { \
12269 aprint_normal("%s%s", sep, ss); \
12270 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
12271 sep = ", "; \
12272 } while (/*CONSTCOND*/0)
12273
12274 aprint_normal_dev(sc->sc_dev, "");
12275
12276 if (sc->sc_type == WM_T_I354) {
12277 uint32_t status;
12278
12279 status = CSR_READ(sc, WMREG_STATUS);
12280 if (((status & STATUS_2P5_SKU) != 0)
12281 && ((status & STATUS_2P5_SKU_OVER) == 0)) {
12282 ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
12283 } else
12284 ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
12285 } else if (sc->sc_type == WM_T_82545) {
12286 /* Only 82545 is LX (XXX except SFP) */
12287 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12288 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12289 } else if (sc->sc_sfptype != 0) {
12290 /* XXX wm(4) fiber/serdes don't use ifm_data */
12291 switch (sc->sc_sfptype) {
12292 default:
12293 case SFF_SFP_ETH_FLAGS_1000SX:
12294 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12295 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12296 break;
12297 case SFF_SFP_ETH_FLAGS_1000LX:
12298 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12299 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12300 break;
12301 case SFF_SFP_ETH_FLAGS_1000CX:
12302 ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
12303 ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
12304 break;
12305 case SFF_SFP_ETH_FLAGS_1000T:
12306 ADD("1000baseT", IFM_1000_T, 0);
12307 ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
12308 break;
12309 case SFF_SFP_ETH_FLAGS_100FX:
12310 ADD("100baseFX", IFM_100_FX, ANAR_TX);
12311 ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
12312 break;
12313 }
12314 } else {
12315 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12316 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12317 }
12318 ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
12319 aprint_normal("\n");
12320
12321 #undef ADD
12322
12323 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
12324 }
12325
12326 /*
12327 * wm_tbi_mediachange: [ifmedia interface function]
12328 *
12329 * Set hardware to newly-selected media on a 1000BASE-X device.
12330 */
12331 static int
12332 wm_tbi_mediachange(struct ifnet *ifp)
12333 {
12334 struct wm_softc *sc = ifp->if_softc;
12335 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
12336 uint32_t status, ctrl;
12337 bool signal;
12338 int i;
12339
12340 KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
12341 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
12342 /* XXX need some work for >= 82571 and < 82575 */
12343 if (sc->sc_type < WM_T_82575)
12344 return 0;
12345 }
12346
12347 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
12348 || (sc->sc_type >= WM_T_82575))
12349 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
12350
12351 sc->sc_ctrl &= ~CTRL_LRST;
12352 sc->sc_txcw = TXCW_ANE;
12353 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12354 sc->sc_txcw |= TXCW_FD | TXCW_HD;
12355 else if (ife->ifm_media & IFM_FDX)
12356 sc->sc_txcw |= TXCW_FD;
12357 else
12358 sc->sc_txcw |= TXCW_HD;
12359
12360 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
12361 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
12362
12363 DPRINTF(sc, WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
12364 device_xname(sc->sc_dev), sc->sc_txcw));
12365 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12366 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12367 CSR_WRITE_FLUSH(sc);
12368 delay(1000);
12369
12370 ctrl = CSR_READ(sc, WMREG_CTRL);
12371 signal = wm_tbi_havesignal(sc, ctrl);
12372
12373 DPRINTF(sc, WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
12374 signal));
12375
12376 if (signal) {
12377 /* Have signal; wait for the link to come up. */
12378 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
12379 delay(10000);
12380 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
12381 break;
12382 }
12383
12384 DPRINTF(sc, WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
12385 device_xname(sc->sc_dev), i));
12386
12387 status = CSR_READ(sc, WMREG_STATUS);
12388 DPRINTF(sc, WM_DEBUG_LINK,
12389 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
12390 device_xname(sc->sc_dev), status, STATUS_LU));
12391 if (status & STATUS_LU) {
12392 /* Link is up. */
12393 DPRINTF(sc, WM_DEBUG_LINK,
12394 ("%s: LINK: set media -> link up %s\n",
12395 device_xname(sc->sc_dev),
12396 (status & STATUS_FD) ? "FDX" : "HDX"));
12397
12398 /*
12399 * NOTE: CTRL will update TFCE and RFCE automatically,
12400 * so we should update sc->sc_ctrl
12401 */
12402 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
12403 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
12404 sc->sc_fcrtl &= ~FCRTL_XONE;
12405 if (status & STATUS_FD)
12406 sc->sc_tctl |=
12407 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
12408 else
12409 sc->sc_tctl |=
12410 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
12411 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
12412 sc->sc_fcrtl |= FCRTL_XONE;
12413 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
12414 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
12415 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
12416 sc->sc_tbi_linkup = 1;
12417 } else {
12418 if (i == WM_LINKUP_TIMEOUT)
12419 wm_check_for_link(sc);
12420 /* Link is down. */
12421 DPRINTF(sc, WM_DEBUG_LINK,
12422 ("%s: LINK: set media -> link down\n",
12423 device_xname(sc->sc_dev)));
12424 sc->sc_tbi_linkup = 0;
12425 }
12426 } else {
12427 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
12428 device_xname(sc->sc_dev)));
12429 sc->sc_tbi_linkup = 0;
12430 }
12431
12432 wm_tbi_serdes_set_linkled(sc);
12433
12434 return 0;
12435 }
12436
12437 /*
12438 * wm_tbi_mediastatus: [ifmedia interface function]
12439 *
12440 * Get the current interface media status on a 1000BASE-X device.
12441 */
12442 static void
12443 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
12444 {
12445 struct wm_softc *sc = ifp->if_softc;
12446 uint32_t ctrl, status;
12447
12448 ifmr->ifm_status = IFM_AVALID;
12449 ifmr->ifm_active = IFM_ETHER;
12450
12451 status = CSR_READ(sc, WMREG_STATUS);
12452 if ((status & STATUS_LU) == 0) {
12453 ifmr->ifm_active |= IFM_NONE;
12454 return;
12455 }
12456
12457 ifmr->ifm_status |= IFM_ACTIVE;
12458 /* Only 82545 is LX */
12459 if (sc->sc_type == WM_T_82545)
12460 ifmr->ifm_active |= IFM_1000_LX;
12461 else
12462 ifmr->ifm_active |= IFM_1000_SX;
12463 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
12464 ifmr->ifm_active |= IFM_FDX;
12465 else
12466 ifmr->ifm_active |= IFM_HDX;
12467 ctrl = CSR_READ(sc, WMREG_CTRL);
12468 if (ctrl & CTRL_RFCE)
12469 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
12470 if (ctrl & CTRL_TFCE)
12471 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
12472 }
12473
12474 /* XXX TBI only */
12475 static int
12476 wm_check_for_link(struct wm_softc *sc)
12477 {
12478 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
12479 uint32_t rxcw;
12480 uint32_t ctrl;
12481 uint32_t status;
12482 bool signal;
12483
12484 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
12485 device_xname(sc->sc_dev), __func__));
12486
12487 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
12488 /* XXX need some work for >= 82571 */
12489 if (sc->sc_type >= WM_T_82571) {
12490 sc->sc_tbi_linkup = 1;
12491 return 0;
12492 }
12493 }
12494
12495 rxcw = CSR_READ(sc, WMREG_RXCW);
12496 ctrl = CSR_READ(sc, WMREG_CTRL);
12497 status = CSR_READ(sc, WMREG_STATUS);
12498 signal = wm_tbi_havesignal(sc, ctrl);
12499
12500 DPRINTF(sc, WM_DEBUG_LINK,
12501 ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
12502 device_xname(sc->sc_dev), __func__, signal,
12503 ((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
12504
12505 /*
12506 * SWDPIN  LU  RXCW
12507 *   0     0    0
12508 *   0     0    1   (should not happen)
12509 *   0     1    0   (should not happen)
12510 *   0     1    1   (should not happen)
12511 *   1     0    0   Disable autonegotiation and force link up
12512 *   1     0    1   Got /C/ ordered sets but no link yet
12513 *   1     1    0   (link up)
12514 *   1     1    1   If IFM_AUTO, go back to autonegotiation
12515 *
12516 */
12517 if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
12518 DPRINTF(sc, WM_DEBUG_LINK,
12519 ("%s: %s: force linkup and fullduplex\n",
12520 device_xname(sc->sc_dev), __func__));
12521 sc->sc_tbi_linkup = 0;
12522 /* Disable auto-negotiation in the TXCW register */
12523 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
12524
12525 /*
12526 * Force link-up and also force full-duplex.
12527 *
12528 * NOTE: CTRL was updated TFCE and RFCE automatically,
12529 * so we should update sc->sc_ctrl
12530 */
12531 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
12532 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12533 } else if (((status & STATUS_LU) != 0)
12534 && ((rxcw & RXCW_C) != 0)
12535 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
12536 sc->sc_tbi_linkup = 1;
12537 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
12538 device_xname(sc->sc_dev),
12539 __func__));
12540 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12541 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
12542 } else if (signal && ((rxcw & RXCW_C) != 0)) {
12543 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/",
12544 device_xname(sc->sc_dev), __func__));
12545 } else {
12546 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
12547 device_xname(sc->sc_dev), __func__, rxcw, ctrl,
12548 status));
12549 }
12550
12551 return 0;
12552 }
12553
12554 /*
12555 * wm_tbi_tick:
12556 *
12557 * Check the link on TBI devices.
12558 * This function acts as mii_tick().
12559 */
12560 static void
12561 wm_tbi_tick(struct wm_softc *sc)
12562 {
12563 struct mii_data *mii = &sc->sc_mii;
12564 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
12565 uint32_t status;
12566
12567 KASSERT(WM_CORE_LOCKED(sc));
12568
12569 status = CSR_READ(sc, WMREG_STATUS);
12570
12571 /* XXX is this needed? */
12572 (void)CSR_READ(sc, WMREG_RXCW);
12573 (void)CSR_READ(sc, WMREG_CTRL);
12574
12575 /* set link status */
12576 if ((status & STATUS_LU) == 0) {
12577 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
12578 device_xname(sc->sc_dev)));
12579 sc->sc_tbi_linkup = 0;
12580 } else if (sc->sc_tbi_linkup == 0) {
12581 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
12582 device_xname(sc->sc_dev),
12583 (status & STATUS_FD) ? "FDX" : "HDX"));
12584 sc->sc_tbi_linkup = 1;
12585 sc->sc_tbi_serdes_ticks = 0;
12586 }
12587
12588 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
12589 goto setled;
12590
12591 if ((status & STATUS_LU) == 0) {
12592 sc->sc_tbi_linkup = 0;
12593 /* If the timer expired, retry autonegotiation */
12594 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12595 && (++sc->sc_tbi_serdes_ticks
12596 >= sc->sc_tbi_serdes_anegticks)) {
12597 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
12598 device_xname(sc->sc_dev), __func__));
12599 sc->sc_tbi_serdes_ticks = 0;
12600 /*
12601 * Reset the link, and let autonegotiation do
12602 * its thing
12603 */
12604 sc->sc_ctrl |= CTRL_LRST;
12605 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12606 CSR_WRITE_FLUSH(sc);
12607 delay(1000);
12608 sc->sc_ctrl &= ~CTRL_LRST;
12609 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12610 CSR_WRITE_FLUSH(sc);
12611 delay(1000);
12612 CSR_WRITE(sc, WMREG_TXCW,
12613 sc->sc_txcw & ~TXCW_ANE);
12614 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12615 }
12616 }
12617
12618 setled:
12619 wm_tbi_serdes_set_linkled(sc);
12620 }
12621
12622 /* SERDES related */
12623 static void
12624 wm_serdes_power_up_link_82575(struct wm_softc *sc)
12625 {
12626 uint32_t reg;
12627
12628 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12629 && ((sc->sc_flags & WM_F_SGMII) == 0))
12630 return;
12631
12632 /* Enable PCS to turn on link */
12633 reg = CSR_READ(sc, WMREG_PCS_CFG);
12634 reg |= PCS_CFG_PCS_EN;
12635 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
12636
12637 /* Power up the laser */
12638 reg = CSR_READ(sc, WMREG_CTRL_EXT);
12639 reg &= ~CTRL_EXT_SWDPIN(3);
12640 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12641
12642 /* Flush the write to verify completion */
12643 CSR_WRITE_FLUSH(sc);
12644 delay(1000);
12645 }
12646
12647 static int
12648 wm_serdes_mediachange(struct ifnet *ifp)
12649 {
12650 struct wm_softc *sc = ifp->if_softc;
12651 bool pcs_autoneg = true; /* XXX */
12652 uint32_t ctrl_ext, pcs_lctl, reg;
12653
12654 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12655 && ((sc->sc_flags & WM_F_SGMII) == 0))
12656 return 0;
12657
12658 /* XXX Currently, this function is not called on 8257[12] */
12659 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
12660 || (sc->sc_type >= WM_T_82575))
12661 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
12662
12663 /* Power on the sfp cage if present */
12664 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
12665 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
12666 ctrl_ext |= CTRL_EXT_I2C_ENA;
12667 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
12668
12669 sc->sc_ctrl |= CTRL_SLU;
12670
12671 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
12672 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
12673
12674 reg = CSR_READ(sc, WMREG_CONNSW);
12675 reg |= CONNSW_ENRGSRC;
12676 CSR_WRITE(sc, WMREG_CONNSW, reg);
12677 }
12678
12679 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
12680 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
12681 case CTRL_EXT_LINK_MODE_SGMII:
12682 /* SGMII mode lets the phy handle forcing speed/duplex */
12683 pcs_autoneg = true;
12684 /* Autoneg time out should be disabled for SGMII mode */
12685 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
12686 break;
12687 case CTRL_EXT_LINK_MODE_1000KX:
12688 pcs_autoneg = false;
12689 /* FALLTHROUGH */
12690 default:
12691 if ((sc->sc_type == WM_T_82575)
12692 || (sc->sc_type == WM_T_82576)) {
12693 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
12694 pcs_autoneg = false;
12695 }
12696 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
12697 | CTRL_FRCFDX;
12698
12699 /* Set speed of 1000/Full if speed/duplex is forced */
12700 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
12701 }
12702 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12703
12704 pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
12705 PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
12706
12707 if (pcs_autoneg) {
12708 /* Set PCS register for autoneg */
12709 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
12710
12711 /* Disable force flow control for autoneg */
12712 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
12713
12714 /* Configure flow control advertisement for autoneg */
12715 reg = CSR_READ(sc, WMREG_PCS_ANADV);
12716 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
12717 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
12718 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
12719 } else
12720 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
12721
12722 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
12723
12724 return 0;
12725 }
12726
12727 static void
12728 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
12729 {
12730 struct wm_softc *sc = ifp->if_softc;
12731 struct mii_data *mii = &sc->sc_mii;
12732 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
12733 uint32_t pcs_adv, pcs_lpab, reg;
12734
12735 ifmr->ifm_status = IFM_AVALID;
12736 ifmr->ifm_active = IFM_ETHER;
12737
12738 /* Check PCS */
12739 reg = CSR_READ(sc, WMREG_PCS_LSTS);
12740 if ((reg & PCS_LSTS_LINKOK) == 0) {
12741 ifmr->ifm_active |= IFM_NONE;
12742 sc->sc_tbi_linkup = 0;
12743 goto setled;
12744 }
12745
12746 sc->sc_tbi_linkup = 1;
12747 ifmr->ifm_status |= IFM_ACTIVE;
12748 if (sc->sc_type == WM_T_I354) {
12749 uint32_t status;
12750
12751 status = CSR_READ(sc, WMREG_STATUS);
12752 if (((status & STATUS_2P5_SKU) != 0)
12753 && ((status & STATUS_2P5_SKU_OVER) == 0)) {
12754 ifmr->ifm_active |= IFM_2500_KX;
12755 } else
12756 ifmr->ifm_active |= IFM_1000_KX;
12757 } else {
12758 switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
12759 case PCS_LSTS_SPEED_10:
12760 ifmr->ifm_active |= IFM_10_T; /* XXX */
12761 break;
12762 case PCS_LSTS_SPEED_100:
12763 ifmr->ifm_active |= IFM_100_FX; /* XXX */
12764 break;
12765 case PCS_LSTS_SPEED_1000:
12766 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
12767 break;
12768 default:
12769 device_printf(sc->sc_dev, "Unknown speed\n");
12770 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
12771 break;
12772 }
12773 }
12774 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
12775 if ((reg & PCS_LSTS_FDX) != 0)
12776 ifmr->ifm_active |= IFM_FDX;
12777 else
12778 ifmr->ifm_active |= IFM_HDX;
12779 mii->mii_media_active &= ~IFM_ETH_FMASK;
12780 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
12781 /* Check flow */
12782 reg = CSR_READ(sc, WMREG_PCS_LSTS);
12783 if ((reg & PCS_LSTS_AN_COMP) == 0) {
12784 DPRINTF(sc, WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
12785 goto setled;
12786 }
12787 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
12788 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
12789 DPRINTF(sc, WM_DEBUG_LINK,
12790 ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
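/*
 * Resolve flow control from the advertised and link-partner PAUSE
 * bits, following the usual IEEE 802.3 Annex 28B priority resolution.
 */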
12791 if ((pcs_adv & TXCW_SYM_PAUSE)
12792 && (pcs_lpab & TXCW_SYM_PAUSE)) {
12793 mii->mii_media_active |= IFM_FLOW
12794 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
12795 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
12796 && (pcs_adv & TXCW_ASYM_PAUSE)
12797 && (pcs_lpab & TXCW_SYM_PAUSE)
12798 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
12799 mii->mii_media_active |= IFM_FLOW
12800 | IFM_ETH_TXPAUSE;
12801 } else if ((pcs_adv & TXCW_SYM_PAUSE)
12802 && (pcs_adv & TXCW_ASYM_PAUSE)
12803 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
12804 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
12805 mii->mii_media_active |= IFM_FLOW
12806 | IFM_ETH_RXPAUSE;
12807 }
12808 }
12809 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
12810 | (mii->mii_media_active & IFM_ETH_FMASK);
12811 setled:
12812 wm_tbi_serdes_set_linkled(sc);
12813 }
12814
12815 /*
12816 * wm_serdes_tick:
12817 *
12818 * Check the link on serdes devices.
12819 */
12820 static void
12821 wm_serdes_tick(struct wm_softc *sc)
12822 {
12823 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
12824 struct mii_data *mii = &sc->sc_mii;
12825 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
12826 uint32_t reg;
12827
12828 KASSERT(WM_CORE_LOCKED(sc));
12829
12830 mii->mii_media_status = IFM_AVALID;
12831 mii->mii_media_active = IFM_ETHER;
12832
12833 /* Check PCS */
12834 reg = CSR_READ(sc, WMREG_PCS_LSTS);
12835 if ((reg & PCS_LSTS_LINKOK) != 0) {
12836 mii->mii_media_status |= IFM_ACTIVE;
12837 sc->sc_tbi_linkup = 1;
12838 sc->sc_tbi_serdes_ticks = 0;
12839 mii->mii_media_active |= IFM_1000_SX; /* XXX */
12840 if ((reg & PCS_LSTS_FDX) != 0)
12841 mii->mii_media_active |= IFM_FDX;
12842 else
12843 mii->mii_media_active |= IFM_HDX;
12844 } else {
12845 mii->mii_media_active |= IFM_NONE;
12846 sc->sc_tbi_linkup = 0;
12847 /* If the timer expired, retry autonegotiation */
12848 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12849 && (++sc->sc_tbi_serdes_ticks
12850 >= sc->sc_tbi_serdes_anegticks)) {
12851 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
12852 device_xname(sc->sc_dev), __func__));
12853 sc->sc_tbi_serdes_ticks = 0;
12854 /* XXX */
12855 wm_serdes_mediachange(ifp);
12856 }
12857 }
12858
12859 wm_tbi_serdes_set_linkled(sc);
12860 }
12861
12862 /* SFP related */
12863
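/*
 * wm_sfp_read_data_byte:
 *
 * Read one byte of SFP module data at the given offset using the
 * I2CCMD interface.
 */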
12864 static int
12865 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
12866 {
12867 uint32_t i2ccmd;
12868 int i;
12869
12870 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
12871 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12872
12873 /* Poll the ready bit */
12874 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12875 delay(50);
12876 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12877 if (i2ccmd & I2CCMD_READY)
12878 break;
12879 }
12880 if ((i2ccmd & I2CCMD_READY) == 0)
12881 return -1;
12882 if ((i2ccmd & I2CCMD_ERROR) != 0)
12883 return -1;
12884
12885 *data = i2ccmd & 0x00ff;
12886
12887 return 0;
12888 }
12889
12890 static uint32_t
12891 wm_sfp_get_media_type(struct wm_softc *sc)
12892 {
12893 uint32_t ctrl_ext;
12894 uint8_t val = 0;
12895 int timeout = 3;
12896 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
12897 int rv = -1;
12898
12899 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
12900 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
12901 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
12902 CSR_WRITE_FLUSH(sc);
12903
12904 /* Read SFP module data */
12905 while (timeout) {
12906 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
12907 if (rv == 0)
12908 break;
12909 delay(100*1000); /* XXX too big */
12910 timeout--;
12911 }
12912 if (rv != 0)
12913 goto out;
12914
12915 switch (val) {
12916 case SFF_SFP_ID_SFF:
12917 aprint_normal_dev(sc->sc_dev,
12918 "Module/Connector soldered to board\n");
12919 break;
12920 case SFF_SFP_ID_SFP:
12921 sc->sc_flags |= WM_F_SFP;
12922 break;
12923 case SFF_SFP_ID_UNKNOWN:
12924 goto out;
12925 default:
12926 break;
12927 }
12928
12929 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
12930 if (rv != 0)
12931 goto out;
12932
12933 sc->sc_sfptype = val;
12934 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
12935 mediatype = WM_MEDIATYPE_SERDES;
12936 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
12937 sc->sc_flags |= WM_F_SGMII;
12938 mediatype = WM_MEDIATYPE_COPPER;
12939 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
12940 sc->sc_flags |= WM_F_SGMII;
12941 mediatype = WM_MEDIATYPE_SERDES;
12942 } else {
12943 device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
12944 __func__, sc->sc_sfptype);
12945 sc->sc_sfptype = 0; /* XXX unknown */
12946 }
12947
12948 out:
12949 /* Restore I2C interface setting */
12950 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
12951
12952 return mediatype;
12953 }
12954
12955 /*
12956 * NVM related.
12957 * Microwire, SPI (w/wo EERD) and Flash.
12958 */
12959
12960 /* Both spi and uwire */
12961
12962 /*
12963 * wm_eeprom_sendbits:
12964 *
12965 * Send a series of bits to the EEPROM.
12966 */
12967 static void
12968 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
12969 {
12970 uint32_t reg;
12971 int x;
12972
12973 reg = CSR_READ(sc, WMREG_EECD);
12974
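/*
 * Clock the bits out MSB first: present each bit on DI, pulse SK
 * high, then drop SK again.
 */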
12975 for (x = nbits; x > 0; x--) {
12976 if (bits & (1U << (x - 1)))
12977 reg |= EECD_DI;
12978 else
12979 reg &= ~EECD_DI;
12980 CSR_WRITE(sc, WMREG_EECD, reg);
12981 CSR_WRITE_FLUSH(sc);
12982 delay(2);
12983 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
12984 CSR_WRITE_FLUSH(sc);
12985 delay(2);
12986 CSR_WRITE(sc, WMREG_EECD, reg);
12987 CSR_WRITE_FLUSH(sc);
12988 delay(2);
12989 }
12990 }
12991
12992 /*
12993 * wm_eeprom_recvbits:
12994 *
12995 * Receive a series of bits from the EEPROM.
12996 */
12997 static void
12998 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
12999 {
13000 uint32_t reg, val;
13001 int x;
13002
13003 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
13004
13005 val = 0;
13006 for (x = nbits; x > 0; x--) {
13007 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
13008 CSR_WRITE_FLUSH(sc);
13009 delay(2);
13010 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
13011 val |= (1U << (x - 1));
13012 CSR_WRITE(sc, WMREG_EECD, reg);
13013 CSR_WRITE_FLUSH(sc);
13014 delay(2);
13015 }
13016 *valp = val;
13017 }
13018
13019 /* Microwire */
13020
13021 /*
13022 * wm_nvm_read_uwire:
13023 *
13024 * Read a word from the EEPROM using the MicroWire protocol.
13025 */
13026 static int
13027 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13028 {
13029 uint32_t reg, val;
13030 int i;
13031
13032 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13033 device_xname(sc->sc_dev), __func__));
13034
13035 if (sc->nvm.acquire(sc) != 0)
13036 return -1;
13037
13038 for (i = 0; i < wordcnt; i++) {
13039 /* Clear SK and DI. */
13040 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
13041 CSR_WRITE(sc, WMREG_EECD, reg);
13042
13043 /*
13044 * XXX: Workaround for a bug in qemu-0.12.x and prior
13045 * and in Xen.
13046 *
13047 * We use this workaround only for the 82540 because
13048 * qemu's e1000 acts as an 82540.
13049 */
13050 if (sc->sc_type == WM_T_82540) {
13051 reg |= EECD_SK;
13052 CSR_WRITE(sc, WMREG_EECD, reg);
13053 reg &= ~EECD_SK;
13054 CSR_WRITE(sc, WMREG_EECD, reg);
13055 CSR_WRITE_FLUSH(sc);
13056 delay(2);
13057 }
13058 /* XXX: end of workaround */
13059
13060 /* Set CHIP SELECT. */
13061 reg |= EECD_CS;
13062 CSR_WRITE(sc, WMREG_EECD, reg);
13063 CSR_WRITE_FLUSH(sc);
13064 delay(2);
13065
13066 /* Shift in the READ command. */
13067 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
13068
13069 /* Shift in address. */
13070 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
13071
13072 /* Shift out the data. */
13073 wm_eeprom_recvbits(sc, &val, 16);
13074 data[i] = val & 0xffff;
13075
13076 /* Clear CHIP SELECT. */
13077 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
13078 CSR_WRITE(sc, WMREG_EECD, reg);
13079 CSR_WRITE_FLUSH(sc);
13080 delay(2);
13081 }
13082
13083 sc->nvm.release(sc);
13084 return 0;
13085 }
13086
13087 /* SPI */
13088
13089 /*
13090 * Set SPI and FLASH related information from the EECD register.
13091 * For 82541 and 82547, the word size is taken from EEPROM.
13092 */
13093 static int
13094 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
13095 {
13096 int size;
13097 uint32_t reg;
13098 uint16_t data;
13099
13100 reg = CSR_READ(sc, WMREG_EECD);
13101 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
13102
13103 /* Read the size of NVM from EECD by default */
13104 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
13105 switch (sc->sc_type) {
13106 case WM_T_82541:
13107 case WM_T_82541_2:
13108 case WM_T_82547:
13109 case WM_T_82547_2:
13110 /* Set dummy value to access EEPROM */
13111 sc->sc_nvm_wordsize = 64;
13112 if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
13113 aprint_error_dev(sc->sc_dev,
13114 "%s: failed to read EEPROM size\n", __func__);
13115 }
13116 reg = data;
13117 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
13118 if (size == 0)
13119 size = 6; /* 64 word size */
13120 else
13121 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
13122 break;
13123 case WM_T_80003:
13124 case WM_T_82571:
13125 case WM_T_82572:
13126 case WM_T_82573: /* SPI case */
13127 case WM_T_82574: /* SPI case */
13128 case WM_T_82583: /* SPI case */
13129 size += NVM_WORD_SIZE_BASE_SHIFT;
13130 if (size > 14)
13131 size = 14;
13132 break;
13133 case WM_T_82575:
13134 case WM_T_82576:
13135 case WM_T_82580:
13136 case WM_T_I350:
13137 case WM_T_I354:
13138 case WM_T_I210:
13139 case WM_T_I211:
13140 size += NVM_WORD_SIZE_BASE_SHIFT;
13141 if (size > 15)
13142 size = 15;
13143 break;
13144 default:
13145 aprint_error_dev(sc->sc_dev,
13146 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
13147 return -1;
13149 }
13150
13151 sc->sc_nvm_wordsize = 1 << size;
13152
13153 return 0;
13154 }
13155
13156 /*
13157 * wm_nvm_ready_spi:
13158 *
13159 * Wait for a SPI EEPROM to be ready for commands.
13160 */
13161 static int
13162 wm_nvm_ready_spi(struct wm_softc *sc)
13163 {
13164 uint32_t val;
13165 int usec;
13166
13167 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13168 device_xname(sc->sc_dev), __func__));
13169
13170 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
13171 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
13172 wm_eeprom_recvbits(sc, &val, 8);
13173 if ((val & SPI_SR_RDY) == 0)
13174 break;
13175 }
13176 if (usec >= SPI_MAX_RETRIES) {
13177 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
13178 return -1;
13179 }
13180 return 0;
13181 }
13182
13183 /*
13184 * wm_nvm_read_spi:
13185 *
13186 * Read a word from the EEPROM using the SPI protocol.
13187 */
13188 static int
13189 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13190 {
13191 uint32_t reg, val;
13192 int i;
13193 uint8_t opc;
13194 int rv = 0;
13195
13196 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13197 device_xname(sc->sc_dev), __func__));
13198
13199 if (sc->nvm.acquire(sc) != 0)
13200 return -1;
13201
13202 /* Clear SK and CS. */
13203 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
13204 CSR_WRITE(sc, WMREG_EECD, reg);
13205 CSR_WRITE_FLUSH(sc);
13206 delay(2);
13207
13208 if ((rv = wm_nvm_ready_spi(sc)) != 0)
13209 goto out;
13210
13211 /* Toggle CS to flush commands. */
13212 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
13213 CSR_WRITE_FLUSH(sc);
13214 delay(2);
13215 CSR_WRITE(sc, WMREG_EECD, reg);
13216 CSR_WRITE_FLUSH(sc);
13217 delay(2);
13218
13219 opc = SPI_OPC_READ;
13220 if (sc->sc_nvm_addrbits == 8 && word >= 128)
13221 opc |= SPI_OPC_A8;
13222
13223 wm_eeprom_sendbits(sc, opc, 8);
13224 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
13225
13226 for (i = 0; i < wordcnt; i++) {
13227 wm_eeprom_recvbits(sc, &val, 16);
13228 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
13229 }
13230
13231 /* Raise CS and clear SK. */
13232 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
13233 CSR_WRITE(sc, WMREG_EECD, reg);
13234 CSR_WRITE_FLUSH(sc);
13235 delay(2);
13236
13237 out:
13238 sc->nvm.release(sc);
13239 return rv;
13240 }
13241
13242 /* Reading with the EERD register */
13243
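/*
 * wm_poll_eerd_eewr_done:
 *
 * Poll the EERD or EEWR register until the DONE bit is set.
 * Returns 0 on success or -1 on timeout.
 */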
13244 static int
13245 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
13246 {
13247 uint32_t attempts = 100000;
13248 uint32_t i, reg = 0;
13249 int32_t done = -1;
13250
13251 for (i = 0; i < attempts; i++) {
13252 reg = CSR_READ(sc, rw);
13253
13254 if (reg & EERD_DONE) {
13255 done = 0;
13256 break;
13257 }
13258 delay(5);
13259 }
13260
13261 return done;
13262 }
13263
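/*
 * wm_nvm_read_eerd:
 *
 * Read words from the EEPROM using the EERD register.
 */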
13264 static int
13265 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
13266 {
13267 int i, eerd = 0;
13268 int rv = 0;
13269
13270 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13271 device_xname(sc->sc_dev), __func__));
13272
13273 if (sc->nvm.acquire(sc) != 0)
13274 return -1;
13275
13276 for (i = 0; i < wordcnt; i++) {
13277 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
13278 CSR_WRITE(sc, WMREG_EERD, eerd);
13279 rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
13280 if (rv != 0) {
13281 aprint_error_dev(sc->sc_dev, "EERD polling failed: "
13282 "offset=%d, wordcnt=%d\n", offset, wordcnt);
13283 break;
13284 }
13285 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
13286 }
13287
13288 sc->nvm.release(sc);
13289 return rv;
13290 }
13291
13292 /* Flash */
13293
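/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 * Determine which of the two NVM banks in the ICH/PCH flash is valid,
 * either from the EECD SEC1VAL bits (ICH8/ICH9) or by checking the
 * signature byte of each bank.  Sets *bank to 0 or 1 and returns 0 on
 * success, or -1 if no valid bank was found.
 */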
13294 static int
13295 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
13296 {
13297 uint32_t eecd;
13298 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
13299 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
13300 uint32_t nvm_dword = 0;
13301 uint8_t sig_byte = 0;
13302 int rv;
13303
13304 switch (sc->sc_type) {
13305 case WM_T_PCH_SPT:
13306 case WM_T_PCH_CNP:
13307 bank1_offset = sc->sc_ich8_flash_bank_size * 2;
13308 act_offset = ICH_NVM_SIG_WORD * 2;
13309
13310 /* Set bank to 0 in case flash read fails. */
13311 *bank = 0;
13312
13313 /* Check bank 0 */
13314 rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
13315 if (rv != 0)
13316 return rv;
13317 sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13318 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13319 *bank = 0;
13320 return 0;
13321 }
13322
13323 /* Check bank 1 */
13324 rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
13325 &nvm_dword);
if (rv != 0)
return rv;
13326 sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13327 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13328 *bank = 1;
13329 return 0;
13330 }
13331 aprint_error_dev(sc->sc_dev,
13332 "%s: no valid NVM bank present (%u)\n", __func__, *bank);
13333 return -1;
13334 case WM_T_ICH8:
13335 case WM_T_ICH9:
13336 eecd = CSR_READ(sc, WMREG_EECD);
13337 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
13338 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
13339 return 0;
13340 }
13341 /* FALLTHROUGH */
13342 default:
13343 /* Default to 0 */
13344 *bank = 0;
13345
13346 /* Check bank 0 */
13347 wm_read_ich8_byte(sc, act_offset, &sig_byte);
13348 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13349 *bank = 0;
13350 return 0;
13351 }
13352
13353 /* Check bank 1 */
13354 wm_read_ich8_byte(sc, act_offset + bank1_offset,
13355 &sig_byte);
13356 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13357 *bank = 1;
13358 return 0;
13359 }
13360 }
13361
13362 DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
13363 device_xname(sc->sc_dev)));
13364 return -1;
13365 }
13366
13367 /******************************************************************************
13368 * This function does initial flash setup so that a new read/write/erase cycle
13369 * can be started.
13370 *
13371 * sc - The pointer to the hw structure
13372 ****************************************************************************/
13373 static int32_t
13374 wm_ich8_cycle_init(struct wm_softc *sc)
13375 {
13376 uint16_t hsfsts;
13377 int32_t error = 1;
13378 int32_t i = 0;
13379
13380 if (sc->sc_type >= WM_T_PCH_SPT)
13381 hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
13382 else
13383 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
13384
13385 /* Check the Flash Descriptor Valid bit in the HW status register */
13386 if ((hsfsts & HSFSTS_FLDVAL) == 0)
13387 return error;
13388
13389 /* Clear FCERR in Hw status by writing 1 */
13390 /* Clear DAEL in Hw status by writing a 1 */
13391 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
13392
13393 if (sc->sc_type >= WM_T_PCH_SPT)
13394 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
13395 else
13396 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
13397
13398 /*
13399 * Either we should have a hardware SPI cycle-in-progress bit to
13400 * check against in order to start a new cycle, or the FDONE bit
13401 * should be changed in the hardware so that it is 1 after a hardware
13402 * reset, which can then be used as an indication of whether a cycle
13403 * is in progress or has been completed.  We should also have some
13404 * software semaphore mechanism to guard FDONE or the cycle-in-progress
13405 * bit so that access to those bits by two threads is serialized, or
13406 * some way to keep two threads from starting a cycle at the same time.
13407 */
13408
13409 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
13410 /*
13411 * There is no cycle running at present, so we can start a
13412 * cycle
13413 */
13414
13415 /* Begin by setting Flash Cycle Done. */
13416 hsfsts |= HSFSTS_DONE;
13417 if (sc->sc_type >= WM_T_PCH_SPT)
13418 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13419 hsfsts & 0xffffUL);
13420 else
13421 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
13422 error = 0;
13423 } else {
13424 /*
13425 * Otherwise poll for some time so the current cycle has a
13426 * chance to end before giving up.
13427 */
13428 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
13429 if (sc->sc_type >= WM_T_PCH_SPT)
13430 hsfsts = ICH8_FLASH_READ32(sc,
13431 ICH_FLASH_HSFSTS) & 0xffffUL;
13432 else
13433 hsfsts = ICH8_FLASH_READ16(sc,
13434 ICH_FLASH_HSFSTS);
13435 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
13436 error = 0;
13437 break;
13438 }
13439 delay(1);
13440 }
13441 if (error == 0) {
13442 /*
13443 * The previous cycle ended before the timeout expired,
13444 * so now set the Flash Cycle Done.
13445 */
13446 hsfsts |= HSFSTS_DONE;
13447 if (sc->sc_type >= WM_T_PCH_SPT)
13448 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13449 hsfsts & 0xffffUL);
13450 else
13451 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
13452 hsfsts);
13453 }
13454 }
13455 return error;
13456 }
13457
13458 /******************************************************************************
13459 * This function starts a flash cycle and waits for its completion
13460 *
13461 * sc - The pointer to the hw structure
13462 ****************************************************************************/
13463 static int32_t
13464 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
13465 {
13466 uint16_t hsflctl;
13467 uint16_t hsfsts;
13468 int32_t error = 1;
13469 uint32_t i = 0;
13470
13471 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
13472 if (sc->sc_type >= WM_T_PCH_SPT)
13473 hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
13474 else
13475 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
13476 hsflctl |= HSFCTL_GO;
13477 if (sc->sc_type >= WM_T_PCH_SPT)
13478 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13479 (uint32_t)hsflctl << 16);
13480 else
13481 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
13482
13483 /* Wait till FDONE bit is set to 1 */
13484 do {
13485 if (sc->sc_type >= WM_T_PCH_SPT)
13486 hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
13487 & 0xffffUL;
13488 else
13489 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
13490 if (hsfsts & HSFSTS_DONE)
13491 break;
13492 delay(1);
13493 i++;
13494 } while (i < timeout);
13495 if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
13496 error = 0;
13497
13498 return error;
13499 }
13500
13501 /******************************************************************************
13502 * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
13503 *
13504 * sc - The pointer to the hw structure
13505 * index - The index of the byte or word to read.
13506 * size - Size of data to read, 1=byte 2=word, 4=dword
13507 * data - Pointer to the word to store the value read.
13508 *****************************************************************************/
13509 static int32_t
13510 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
13511 uint32_t size, uint32_t *data)
13512 {
13513 uint16_t hsfsts;
13514 uint16_t hsflctl;
13515 uint32_t flash_linear_address;
13516 uint32_t flash_data = 0;
13517 int32_t error = 1;
13518 int32_t count = 0;
13519
13520 if (size < 1 || size > 4 || data == 0x0 ||
13521 index > ICH_FLASH_LINEAR_ADDR_MASK)
13522 return error;
13523
13524 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
13525 sc->sc_ich8_flash_base;
13526
13527 do {
13528 delay(1);
13529 /* Steps */
13530 error = wm_ich8_cycle_init(sc);
13531 if (error)
13532 break;
13533
13534 if (sc->sc_type >= WM_T_PCH_SPT)
13535 hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
13536 >> 16;
13537 else
13538 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
13539 /* The byte count field holds size - 1 (0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes). */
13540 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
13541 & HSFCTL_BCOUNT_MASK;
13542 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
13543 if (sc->sc_type >= WM_T_PCH_SPT) {
13544 /*
13545 * On SPT, this register is in LAN memory space, not
13546 * flash, so only 32 bit accesses are supported.
13547 */
13548 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13549 (uint32_t)hsflctl << 16);
13550 } else
13551 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
13552
13553 /*
13554 * Write the last 24 bits of index into Flash Linear address
13555 * field in Flash Address
13556 */
13557 /* TODO: TBD maybe check the index against the size of flash */
13558
13559 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
13560
13561 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
13562
13563 /*
13564 * If FCERR is set, clear it and retry the whole sequence a few
13565 * more times; otherwise read the data out of the Flash Data0
13566 * register, least significant byte first.
13567 */
13569 if (error == 0) {
13570 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
13571 if (size == 1)
13572 *data = (uint8_t)(flash_data & 0x000000FF);
13573 else if (size == 2)
13574 *data = (uint16_t)(flash_data & 0x0000FFFF);
13575 else if (size == 4)
13576 *data = (uint32_t)flash_data;
13577 break;
13578 } else {
13579 /*
13580 * If we've gotten here, then things are probably
13581 * completely hosed, but if the error condition is
13582 * detected, it won't hurt to give it another try...
13583 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
13584 */
13585 if (sc->sc_type >= WM_T_PCH_SPT)
13586 hsfsts = ICH8_FLASH_READ32(sc,
13587 ICH_FLASH_HSFSTS) & 0xffffUL;
13588 else
13589 hsfsts = ICH8_FLASH_READ16(sc,
13590 ICH_FLASH_HSFSTS);
13591
13592 if (hsfsts & HSFSTS_ERR) {
13593 /* Repeat for some time before giving up. */
13594 continue;
13595 } else if ((hsfsts & HSFSTS_DONE) == 0)
13596 break;
13597 }
13598 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
13599
13600 return error;
13601 }
13602
13603 /******************************************************************************
13604 * Reads a single byte from the NVM using the ICH8 flash access registers.
13605 *
13606 * sc - pointer to wm_hw structure
13607 * index - The index of the byte to read.
13608 * data - Pointer to a byte to store the value read.
13609 *****************************************************************************/
13610 static int32_t
13611 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
13612 {
13613 int32_t status;
13614 uint32_t word = 0;
13615
13616 status = wm_read_ich8_data(sc, index, 1, &word);
13617 if (status == 0)
13618 *data = (uint8_t)word;
13619 else
13620 *data = 0;
13621
13622 return status;
13623 }
13624
13625 /******************************************************************************
13626 * Reads a word from the NVM using the ICH8 flash access registers.
13627 *
13628 * sc - pointer to wm_hw structure
13629 * index - The starting byte index of the word to read.
13630 * data - Pointer to a word to store the value read.
13631 *****************************************************************************/
13632 static int32_t
13633 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
13634 {
13635 int32_t status;
13636 uint32_t word = 0;
13637
13638 status = wm_read_ich8_data(sc, index, 2, &word);
13639 if (status == 0)
13640 *data = (uint16_t)word;
13641 else
13642 *data = 0;
13643
13644 return status;
13645 }
13646
13647 /******************************************************************************
13648 * Reads a dword from the NVM using the ICH8 flash access registers.
13649 *
13650 * sc - pointer to wm_hw structure
13651 * index - The starting byte index of the dword to read.
13652 * data - Pointer to a dword to store the value read.
13653 *****************************************************************************/
13654 static int32_t
13655 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
13656 {
13657 int32_t status;
13658
13659 status = wm_read_ich8_data(sc, index, 4, data);
13660 return status;
13661 }
13662
13663 /******************************************************************************
13664 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
13665 * register.
13666 *
13667 * sc - Struct containing variables accessed by shared code
13668 * offset - offset of word in the EEPROM to read
13669 * data - word read from the EEPROM
13670 * words - number of words to read
13671 *****************************************************************************/
13672 static int
13673 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
13674 {
13675 int32_t rv = 0;
13676 uint32_t flash_bank = 0;
13677 uint32_t act_offset = 0;
13678 uint32_t bank_offset = 0;
13679 uint16_t word = 0;
13680 uint16_t i = 0;
13681
13682 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13683 device_xname(sc->sc_dev), __func__));
13684
13685 if (sc->nvm.acquire(sc) != 0)
13686 return -1;
13687
13688 /*
13689 * We need to know which is the valid flash bank. In the event
13690 * that we didn't allocate eeprom_shadow_ram, we may not be
13691 * managing flash_bank. So it cannot be trusted and needs
13692 * to be updated with each read.
13693 */
13694 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
13695 if (rv) {
13696 DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
13697 device_xname(sc->sc_dev)));
13698 flash_bank = 0;
13699 }
13700
13701 /*
13702 * Adjust offset appropriately if we're on bank 1 - adjust for word
13703 * size
13704 */
13705 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
13706
13707 for (i = 0; i < words; i++) {
13708 /* The NVM part needs a byte offset, hence * 2 */
13709 act_offset = bank_offset + ((offset + i) * 2);
13710 rv = wm_read_ich8_word(sc, act_offset, &word);
13711 if (rv) {
13712 aprint_error_dev(sc->sc_dev,
13713 "%s: failed to read NVM\n", __func__);
13714 break;
13715 }
13716 data[i] = word;
13717 }
13718
13719 sc->nvm.release(sc);
13720 return rv;
13721 }
13722
13723 /******************************************************************************
13724 * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
13725 * register.
13726 *
13727 * sc - Struct containing variables accessed by shared code
13728 * offset - offset of word in the EEPROM to read
13729 * data - word read from the EEPROM
13730 * words - number of words to read
13731 *****************************************************************************/
13732 static int
13733 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
13734 {
13735 int32_t rv = 0;
13736 uint32_t flash_bank = 0;
13737 uint32_t act_offset = 0;
13738 uint32_t bank_offset = 0;
13739 uint32_t dword = 0;
13740 uint16_t i = 0;
13741
13742 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13743 device_xname(sc->sc_dev), __func__));
13744
13745 if (sc->nvm.acquire(sc) != 0)
13746 return -1;
13747
13748 /*
13749 * We need to know which is the valid flash bank. In the event
13750 * that we didn't allocate eeprom_shadow_ram, we may not be
13751 * managing flash_bank. So it cannot be trusted and needs
13752 * to be updated with each read.
13753 */
13754 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
13755 if (rv) {
13756 DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
13757 device_xname(sc->sc_dev)));
13758 flash_bank = 0;
13759 }
13760
13761 /*
13762 * Adjust offset appropriately if we're on bank 1 - adjust for word
13763 * size
13764 */
13765 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
13766
13767 for (i = 0; i < words; i++) {
13768 /* The NVM part needs a byte offset, hence * 2 */
13769 act_offset = bank_offset + ((offset + i) * 2);
13770 /* but we must read dword aligned, so mask ... */
13771 rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
13772 if (rv) {
13773 aprint_error_dev(sc->sc_dev,
13774 "%s: failed to read NVM\n", __func__);
13775 break;
13776 }
13777 /* ... and pick out low or high word */
13778 if ((act_offset & 0x2) == 0)
13779 data[i] = (uint16_t)(dword & 0xFFFF);
13780 else
13781 data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
13782 }
13783
13784 sc->nvm.release(sc);
13785 return rv;
13786 }
13787
13788 /* iNVM */
13789
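/*
 * wm_nvm_read_word_invm:
 *
 * Read one word from the iNVM (integrated NVM, I210/I211) by scanning
 * the iNVM data registers for a word autoload record whose address
 * matches the requested one.
 */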
13790 static int
13791 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
13792 {
13793 int32_t rv = -1; /* Assume "not found" */
13794 uint32_t invm_dword;
13795 uint16_t i;
13796 uint8_t record_type, word_address;
13797
13798 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13799 device_xname(sc->sc_dev), __func__));
13800
13801 for (i = 0; i < INVM_SIZE; i++) {
13802 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
13803 /* Get record type */
13804 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
13805 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
13806 break;
13807 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
13808 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
13809 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
13810 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
13811 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
13812 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
13813 if (word_address == address) {
13814 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
13815 rv = 0;
13816 break;
13817 }
13818 }
13819 }
13820
13821 return rv;
13822 }
13823
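/*
 * wm_nvm_read_invm:
 *
 * Read words from the iNVM.  Words that are not present in the iNVM are
 * replaced with their default values where such defaults are defined.
 */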
13824 static int
13825 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
13826 {
13827 int rv = 0;
13828 int i;
13829
13830 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13831 device_xname(sc->sc_dev), __func__));
13832
13833 if (sc->nvm.acquire(sc) != 0)
13834 return -1;
13835
13836 for (i = 0; i < words; i++) {
13837 switch (offset + i) {
13838 case NVM_OFF_MACADDR:
13839 case NVM_OFF_MACADDR1:
13840 case NVM_OFF_MACADDR2:
13841 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
13842 if (rv != 0) {
13843 data[i] = 0xffff;
13844 rv = -1;
13845 }
13846 break;
13847 case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
13848 rv = wm_nvm_read_word_invm(sc, offset, data);
13849 if (rv != 0) {
13850 *data = INVM_DEFAULT_AL;
13851 rv = 0;
13852 }
13853 break;
13854 case NVM_OFF_CFG2:
13855 rv = wm_nvm_read_word_invm(sc, offset, data);
13856 if (rv != 0) {
13857 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
13858 rv = 0;
13859 }
13860 break;
13861 case NVM_OFF_CFG4:
13862 rv = wm_nvm_read_word_invm(sc, offset, data);
13863 if (rv != 0) {
13864 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
13865 rv = 0;
13866 }
13867 break;
13868 case NVM_OFF_LED_1_CFG:
13869 rv = wm_nvm_read_word_invm(sc, offset, data);
13870 if (rv != 0) {
13871 *data = NVM_LED_1_CFG_DEFAULT_I211;
13872 rv = 0;
13873 }
13874 break;
13875 case NVM_OFF_LED_0_2_CFG:
13876 rv = wm_nvm_read_word_invm(sc, offset, data);
13877 if (rv != 0) {
13878 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
13879 rv = 0;
13880 }
13881 break;
13882 case NVM_OFF_ID_LED_SETTINGS:
13883 rv = wm_nvm_read_word_invm(sc, offset, data);
13884 if (rv != 0) {
13885 *data = ID_LED_RESERVED_FFFF;
13886 rv = 0;
13887 }
13888 break;
13889 default:
13890 DPRINTF(sc, WM_DEBUG_NVM,
13891 ("NVM word 0x%02x is not mapped.\n", offset));
13892 *data = NVM_RESERVED_WORD;
13893 break;
13894 }
13895 }
13896
13897 sc->nvm.release(sc);
13898 return rv;
13899 }
13900
13901 /* Locking, NVM type detection, checksum validation, version and read */
13902
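/*
 * wm_nvm_is_onboard_eeprom:
 *
 * Return 1 if the NVM is an on-board EEPROM, or 0 if the 82573/82574/82583
 * EECD bits indicate an external Flash device.
 */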
13903 static int
13904 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
13905 {
13906 uint32_t eecd = 0;
13907
13908 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
13909 || sc->sc_type == WM_T_82583) {
13910 eecd = CSR_READ(sc, WMREG_EECD);
13911
13912 /* Isolate bits 15 & 16 */
13913 eecd = ((eecd >> 15) & 0x03);
13914
13915 /* If both bits are set, device is Flash type */
13916 if (eecd == 0x03)
13917 return 0;
13918 }
13919 return 1;
13920 }
13921
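/*
 * wm_nvm_flash_presence_i210:
 *
 * Return 1 if the EEC register reports that a Flash device is present,
 * otherwise 0.
 */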
13922 static int
13923 wm_nvm_flash_presence_i210(struct wm_softc *sc)
13924 {
13925 uint32_t eec;
13926
13927 eec = CSR_READ(sc, WMREG_EEC);
13928 if ((eec & EEC_FLASH_DETECTED) != 0)
13929 return 1;
13930
13931 return 0;
13932 }
13933
13934 /*
13935 * wm_nvm_validate_checksum
13936 *
13937 * The checksum is defined as the sum of the first 64 (16 bit) words.
13938 */
13939 static int
13940 wm_nvm_validate_checksum(struct wm_softc *sc)
13941 {
13942 uint16_t checksum;
13943 uint16_t eeprom_data;
13944 #ifdef WM_DEBUG
13945 uint16_t csum_wordaddr, valid_checksum;
13946 #endif
13947 int i;
13948
13949 checksum = 0;
13950
13951 /* Don't check for I211 */
13952 if (sc->sc_type == WM_T_I211)
13953 return 0;
13954
13955 #ifdef WM_DEBUG
13956 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
13957 || (sc->sc_type == WM_T_PCH_CNP)) {
13958 csum_wordaddr = NVM_OFF_COMPAT;
13959 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
13960 } else {
13961 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
13962 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
13963 }
13964
13965 /* Dump EEPROM image for debug */
13966 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
13967 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
13968 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
13969 /* XXX PCH_SPT? */
13970 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
13971 if ((eeprom_data & valid_checksum) == 0)
13972 DPRINTF(sc, WM_DEBUG_NVM,
13973 ("%s: NVM need to be updated (%04x != %04x)\n",
13974 device_xname(sc->sc_dev), eeprom_data,
13975 valid_checksum));
13976 }
13977
13978 if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
13979 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
13980 for (i = 0; i < NVM_SIZE; i++) {
13981 if (wm_nvm_read(sc, i, 1, &eeprom_data))
13982 printf("XXXX ");
13983 else
13984 printf("%04hx ", eeprom_data);
13985 if (i % 8 == 7)
13986 printf("\n");
13987 }
13988 }
13989
13990 #endif /* WM_DEBUG */
13991
13992 for (i = 0; i < NVM_SIZE; i++) {
13993 if (wm_nvm_read(sc, i, 1, &eeprom_data))
13994 return 1;
13995 checksum += eeprom_data;
13996 }
13997
13998 if (checksum != (uint16_t) NVM_CHECKSUM) {
13999 #ifdef WM_DEBUG
14000 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
14001 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
14002 #endif
14003 }
14004
14005 return 0;
14006 }
14007
14008 static void
14009 wm_nvm_version_invm(struct wm_softc *sc)
14010 {
14011 uint32_t dword;
14012
14013 /*
14014 * Linux's code to decode the version is very strange, so we don't
14015 * follow that algorithm and simply use word 61 as the document
14016 * describes.  Perhaps it's not perfect though...
14017 *
14018 * Example:
14019 *
14020 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
14021 */
14022 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
14023 dword = __SHIFTOUT(dword, INVM_VER_1);
14024 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
14025 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
14026 }
14027
14028 static void
14029 wm_nvm_version(struct wm_softc *sc)
14030 {
14031 uint16_t major, minor, build, patch;
14032 uint16_t uid0, uid1;
14033 uint16_t nvm_data;
14034 uint16_t off;
14035 bool check_version = false;
14036 bool check_optionrom = false;
14037 bool have_build = false;
14038 bool have_uid = true;
14039
14040 /*
14041 * Version format:
14042 *
14043 * XYYZ
14044 * X0YZ
14045 * X0YY
14046 *
14047 * Example:
14048 *
14049 * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10)
14050 * 82571 0x50a6 5.10.6?
14051 * 82572 0x506a 5.6.10?
14052 * 82572EI 0x5069 5.6.9?
14053 * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4)
14054 * 0x2013 2.1.3?
14055 * 82583 0x10a0 1.10.0? (document says it's default value)
14056 * ICH8+82567 0x0040 0.4.0?
14057 * ICH9+82566 0x1040 1.4.0?
14058 *ICH10+82567 0x0043 0.4.3?
14059 * PCH+82577 0x00c1 0.12.1?
14060 * PCH2+82579 0x00d3 0.13.3?
14061 * 0x00d4 0.13.4?
14062 * LPT+I218 0x0023 0.2.3?
14063 * SPT+I219 0x0084 0.8.4?
14064 * CNP+I219 0x0054 0.5.4?
14065 */
14066
14067 /*
14068 * XXX
14069 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
14070 * I've never seen on real 82574 hardware with such small SPI ROM.
14071 */
14072 if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
14073 || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
14074 have_uid = false;
14075
14076 switch (sc->sc_type) {
14077 case WM_T_82571:
14078 case WM_T_82572:
14079 case WM_T_82574:
14080 case WM_T_82583:
14081 check_version = true;
14082 check_optionrom = true;
14083 have_build = true;
14084 break;
14085 case WM_T_ICH8:
14086 case WM_T_ICH9:
14087 case WM_T_ICH10:
14088 case WM_T_PCH:
14089 case WM_T_PCH2:
14090 case WM_T_PCH_LPT:
14091 case WM_T_PCH_SPT:
14092 case WM_T_PCH_CNP:
14093 check_version = true;
14094 have_build = true;
14095 have_uid = false;
14096 break;
14097 case WM_T_82575:
14098 case WM_T_82576:
14099 case WM_T_82580:
14100 if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
14101 check_version = true;
14102 break;
14103 case WM_T_I211:
14104 wm_nvm_version_invm(sc);
14105 have_uid = false;
14106 goto printver;
14107 case WM_T_I210:
14108 if (!wm_nvm_flash_presence_i210(sc)) {
14109 wm_nvm_version_invm(sc);
14110 have_uid = false;
14111 goto printver;
14112 }
14113 /* FALLTHROUGH */
14114 case WM_T_I350:
14115 case WM_T_I354:
14116 check_version = true;
14117 check_optionrom = true;
14118 break;
14119 default:
14120 return;
14121 }
14122 if (check_version
14123 && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
14124 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
14125 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
14126 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
14127 build = nvm_data & NVM_BUILD_MASK;
14128 have_build = true;
14129 } else
14130 minor = nvm_data & 0x00ff;
14131
14132 /* Decimal */
14133 minor = (minor / 16) * 10 + (minor % 16);
14134 sc->sc_nvm_ver_major = major;
14135 sc->sc_nvm_ver_minor = minor;
14136
14137 printver:
14138 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
14139 sc->sc_nvm_ver_minor);
14140 if (have_build) {
14141 sc->sc_nvm_ver_build = build;
14142 aprint_verbose(".%d", build);
14143 }
14144 }
14145
14146 /* Assume the Option ROM area is above NVM_SIZE */
14147 if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
14148 && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
14149 /* Option ROM Version */
14150 if ((off != 0x0000) && (off != 0xffff)) {
14151 int rv;
14152
14153 off += NVM_COMBO_VER_OFF;
14154 rv = wm_nvm_read(sc, off + 1, 1, &uid1);
14155 rv |= wm_nvm_read(sc, off, 1, &uid0);
14156 if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
14157 && (uid1 != 0) && (uid1 != 0xffff)) {
14158 /* 16bits */
14159 major = uid0 >> 8;
14160 build = (uid0 << 8) | (uid1 >> 8);
14161 patch = uid1 & 0x00ff;
14162 aprint_verbose(", option ROM Version %d.%d.%d",
14163 major, build, patch);
14164 }
14165 }
14166 }
14167
14168 if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
14169 aprint_verbose(", Image Unique ID %08x",
14170 ((uint32_t)uid1 << 16) | uid0);
14171 }
14172
14173 /*
14174 * wm_nvm_read:
14175 *
14176 * Read data from the serial EEPROM.
14177 */
14178 static int
14179 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
14180 {
14181 int rv;
14182
14183 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14184 device_xname(sc->sc_dev), __func__));
14185
14186 if (sc->sc_flags & WM_F_EEPROM_INVALID)
14187 return -1;
14188
14189 rv = sc->nvm.read(sc, word, wordcnt, data);
14190
14191 return rv;
14192 }
14193
14194 /*
14195 * Hardware semaphores.
14196 * Very complex...
14197 */
14198
14199 static int
14200 wm_get_null(struct wm_softc *sc)
14201 {
14202
14203 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14204 device_xname(sc->sc_dev), __func__));
14205 return 0;
14206 }
14207
14208 static void
14209 wm_put_null(struct wm_softc *sc)
14210 {
14211
14212 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14213 device_xname(sc->sc_dev), __func__));
14214 return;
14215 }
14216
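/*
 * wm_get_eecd:
 *
 * Request direct EEPROM access through the EECD register and wait for
 * the access grant bit.
 */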
14217 static int
14218 wm_get_eecd(struct wm_softc *sc)
14219 {
14220 uint32_t reg;
14221 int x;
14222
14223 DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
14224 device_xname(sc->sc_dev), __func__));
14225
14226 reg = CSR_READ(sc, WMREG_EECD);
14227
14228 /* Request EEPROM access. */
14229 reg |= EECD_EE_REQ;
14230 CSR_WRITE(sc, WMREG_EECD, reg);
14231
14232 /* ..and wait for it to be granted. */
14233 for (x = 0; x < 1000; x++) {
14234 reg = CSR_READ(sc, WMREG_EECD);
14235 if (reg & EECD_EE_GNT)
14236 break;
14237 delay(5);
14238 }
14239 if ((reg & EECD_EE_GNT) == 0) {
14240 aprint_error_dev(sc->sc_dev,
14241 "could not acquire EEPROM GNT\n");
14242 reg &= ~EECD_EE_REQ;
14243 CSR_WRITE(sc, WMREG_EECD, reg);
14244 return -1;
14245 }
14246
14247 return 0;
14248 }
14249
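/*
 * wm_nvm_eec_clock_raise/lower:
 *
 * Bit-bang helpers that toggle the EEPROM serial clock (EECD_SK),
 * using a shorter delay for SPI devices than for Microwire ones.
 */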
14250 static void
14251 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
14252 {
14253
14254 *eecd |= EECD_SK;
14255 CSR_WRITE(sc, WMREG_EECD, *eecd);
14256 CSR_WRITE_FLUSH(sc);
14257 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
14258 delay(1);
14259 else
14260 delay(50);
14261 }
14262
14263 static void
14264 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
14265 {
14266
14267 *eecd &= ~EECD_SK;
14268 CSR_WRITE(sc, WMREG_EECD, *eecd);
14269 CSR_WRITE_FLUSH(sc);
14270 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
14271 delay(1);
14272 else
14273 delay(50);
14274 }
14275
14276 static void
14277 wm_put_eecd(struct wm_softc *sc)
14278 {
14279 uint32_t reg;
14280
14281 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14282 device_xname(sc->sc_dev), __func__));
14283
14284 /* Stop nvm */
14285 reg = CSR_READ(sc, WMREG_EECD);
14286 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
14287 /* Pull CS high */
14288 reg |= EECD_CS;
14289 wm_nvm_eec_clock_lower(sc, &reg);
14290 } else {
14291 /* CS on Microwire is active-high */
14292 reg &= ~(EECD_CS | EECD_DI);
14293 CSR_WRITE(sc, WMREG_EECD, reg);
14294 wm_nvm_eec_clock_raise(sc, &reg);
14295 wm_nvm_eec_clock_lower(sc, &reg);
14296 }
14297
14298 reg = CSR_READ(sc, WMREG_EECD);
14299 reg &= ~EECD_EE_REQ;
14300 CSR_WRITE(sc, WMREG_EECD, reg);
14301
14302 return;
14303 }
14304
14305 /*
14306 * Get hardware semaphore.
14307 * Same as e1000_get_hw_semaphore_generic()
14308 */
14309 static int
14310 wm_get_swsm_semaphore(struct wm_softc *sc)
14311 {
14312 int32_t timeout;
14313 uint32_t swsm;
14314
14315 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14316 device_xname(sc->sc_dev), __func__));
14317 KASSERT(sc->sc_nvm_wordsize > 0);
14318
14319 retry:
14320 /* Get the SW semaphore. */
14321 timeout = sc->sc_nvm_wordsize + 1;
14322 while (timeout) {
14323 swsm = CSR_READ(sc, WMREG_SWSM);
14324
14325 if ((swsm & SWSM_SMBI) == 0)
14326 break;
14327
14328 delay(50);
14329 timeout--;
14330 }
14331
14332 if (timeout == 0) {
14333 if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
14334 /*
14335 * In rare circumstances, the SW semaphore may already
14336 * be held unintentionally. Clear the semaphore once
14337 * before giving up.
14338 */
14339 sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
14340 wm_put_swsm_semaphore(sc);
14341 goto retry;
14342 }
14343 aprint_error_dev(sc->sc_dev,
14344 "could not acquire SWSM SMBI\n");
14345 return 1;
14346 }
14347
14348 /* Get the FW semaphore. */
14349 timeout = sc->sc_nvm_wordsize + 1;
14350 while (timeout) {
14351 swsm = CSR_READ(sc, WMREG_SWSM);
14352 swsm |= SWSM_SWESMBI;
14353 CSR_WRITE(sc, WMREG_SWSM, swsm);
14354 /* If we managed to set the bit we got the semaphore. */
14355 swsm = CSR_READ(sc, WMREG_SWSM);
14356 if (swsm & SWSM_SWESMBI)
14357 break;
14358
14359 delay(50);
14360 timeout--;
14361 }
14362
14363 if (timeout == 0) {
14364 aprint_error_dev(sc->sc_dev,
14365 "could not acquire SWSM SWESMBI\n");
14366 /* Release semaphores */
14367 wm_put_swsm_semaphore(sc);
14368 return 1;
14369 }
14370 return 0;
14371 }
14372
14373 /*
14374 * Put hardware semaphore.
14375 * Same as e1000_put_hw_semaphore_generic()
14376 */
14377 static void
14378 wm_put_swsm_semaphore(struct wm_softc *sc)
14379 {
14380 uint32_t swsm;
14381
14382 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14383 device_xname(sc->sc_dev), __func__));
14384
14385 swsm = CSR_READ(sc, WMREG_SWSM);
14386 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
14387 CSR_WRITE(sc, WMREG_SWSM, swsm);
14388 }
14389
14390 /*
14391 * Get SW/FW semaphore.
14392 * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
14393 */
14394 static int
14395 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
14396 {
14397 uint32_t swfw_sync;
14398 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
14399 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
14400 int timeout;
14401
14402 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14403 device_xname(sc->sc_dev), __func__));
14404
14405 if (sc->sc_type == WM_T_80003)
14406 timeout = 50;
14407 else
14408 timeout = 200;
14409
14410 while (timeout) {
14411 if (wm_get_swsm_semaphore(sc)) {
14412 aprint_error_dev(sc->sc_dev,
14413 "%s: failed to get semaphore\n",
14414 __func__);
14415 return 1;
14416 }
14417 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
14418 if ((swfw_sync & (swmask | fwmask)) == 0) {
14419 swfw_sync |= swmask;
14420 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
14421 wm_put_swsm_semaphore(sc);
14422 return 0;
14423 }
14424 wm_put_swsm_semaphore(sc);
14425 delay(5000);
14426 timeout--;
14427 }
14428 device_printf(sc->sc_dev,
14429 "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
14430 mask, swfw_sync);
14431 return 1;
14432 }
14433
14434 static void
14435 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
14436 {
14437 uint32_t swfw_sync;
14438
14439 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14440 device_xname(sc->sc_dev), __func__));
14441
14442 while (wm_get_swsm_semaphore(sc) != 0)
14443 continue;
14444
14445 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
14446 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
14447 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
14448
14449 wm_put_swsm_semaphore(sc);
14450 }
14451
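/*
 * Get the NVM on 80003: take the SW/FW EEPROM semaphore and, if
 * WM_F_LOCK_EECD is set, the EECD access grant as well.
 */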
14452 static int
14453 wm_get_nvm_80003(struct wm_softc *sc)
14454 {
14455 int rv;
14456
14457 DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
14458 device_xname(sc->sc_dev), __func__));
14459
14460 if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
14461 aprint_error_dev(sc->sc_dev,
14462 "%s: failed to get semaphore(SWFW)\n", __func__);
14463 return rv;
14464 }
14465
14466 if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14467 && (rv = wm_get_eecd(sc)) != 0) {
14468 aprint_error_dev(sc->sc_dev,
14469 "%s: failed to get semaphore(EECD)\n", __func__);
14470 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
14471 return rv;
14472 }
14473
14474 return 0;
14475 }
14476
14477 static void
14478 wm_put_nvm_80003(struct wm_softc *sc)
14479 {
14480
14481 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14482 device_xname(sc->sc_dev), __func__));
14483
14484 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14485 wm_put_eecd(sc);
14486 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
14487 }
14488
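/*
 * Get the NVM on 82571 and similar devices: take the SWSM semaphore and,
 * except on 82573, the EECD access grant if WM_F_LOCK_EECD is set.
 */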
14489 static int
14490 wm_get_nvm_82571(struct wm_softc *sc)
14491 {
14492 int rv;
14493
14494 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14495 device_xname(sc->sc_dev), __func__));
14496
14497 if ((rv = wm_get_swsm_semaphore(sc)) != 0)
14498 return rv;
14499
14500 switch (sc->sc_type) {
14501 case WM_T_82573:
14502 break;
14503 default:
14504 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14505 rv = wm_get_eecd(sc);
14506 break;
14507 }
14508
14509 if (rv != 0) {
14510 aprint_error_dev(sc->sc_dev,
14511 "%s: failed to get semaphore\n",
14512 __func__);
14513 wm_put_swsm_semaphore(sc);
14514 }
14515
14516 return rv;
14517 }
14518
14519 static void
14520 wm_put_nvm_82571(struct wm_softc *sc)
14521 {
14522
14523 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14524 device_xname(sc->sc_dev), __func__));
14525
14526 switch (sc->sc_type) {
14527 case WM_T_82573:
14528 break;
14529 default:
14530 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14531 wm_put_eecd(sc);
14532 break;
14533 }
14534
14535 wm_put_swsm_semaphore(sc);
14536 }
14537
14538 static int
14539 wm_get_phy_82575(struct wm_softc *sc)
14540 {
14541
14542 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14543 device_xname(sc->sc_dev), __func__));
14544 return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
14545 }
14546
14547 static void
14548 wm_put_phy_82575(struct wm_softc *sc)
14549 {
14550
14551 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14552 device_xname(sc->sc_dev), __func__));
14553 return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
14554 }
14555
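/*
 * Get the SW/FW/HW semaphore by setting the MDIO software ownership bit
 * in EXTCNFCTR.  The ICH PHY mutex is held while the semaphore is owned.
 */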
14556 static int
14557 wm_get_swfwhw_semaphore(struct wm_softc *sc)
14558 {
14559 uint32_t ext_ctrl;
14560 int timeout = 200;
14561
14562 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14563 device_xname(sc->sc_dev), __func__));
14564
14565 mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14566 for (timeout = 0; timeout < 200; timeout++) {
14567 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14568 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
14569 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14570
14571 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14572 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
14573 return 0;
14574 delay(5000);
14575 }
14576 device_printf(sc->sc_dev,
14577 "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
14578 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14579 return 1;
14580 }
14581
14582 static void
14583 wm_put_swfwhw_semaphore(struct wm_softc *sc)
14584 {
14585 uint32_t ext_ctrl;
14586
14587 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14588 device_xname(sc->sc_dev), __func__));
14589
14590 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14591 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14592 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14593
14594 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14595 }
14596
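/*
 * Get the ICH8LAN software flag: wait for the MDIO software ownership
 * bit in EXTCNFCTR to become clear, then claim it, holding the ICH PHY
 * mutex while it is owned.
 */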
14597 static int
14598 wm_get_swflag_ich8lan(struct wm_softc *sc)
14599 {
14600 uint32_t ext_ctrl;
14601 int timeout;
14602
14603 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14604 device_xname(sc->sc_dev), __func__));
14605 mutex_enter(sc->sc_ich_phymtx);
14606 for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
14607 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14608 if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
14609 break;
14610 delay(1000);
14611 }
14612 if (timeout >= WM_PHY_CFG_TIMEOUT) {
14613 device_printf(sc->sc_dev,
14614 "SW has already locked the resource\n");
14615 goto out;
14616 }
14617
14618 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
14619 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14620 for (timeout = 0; timeout < 1000; timeout++) {
14621 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14622 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
14623 break;
14624 delay(1000);
14625 }
14626 if (timeout >= 1000) {
14627 device_printf(sc->sc_dev, "failed to acquire semaphore\n");
14628 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14629 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14630 goto out;
14631 }
14632 return 0;
14633
14634 out:
14635 mutex_exit(sc->sc_ich_phymtx);
14636 return 1;
14637 }
14638
14639 static void
14640 wm_put_swflag_ich8lan(struct wm_softc *sc)
14641 {
14642 uint32_t ext_ctrl;
14643
14644 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14645 device_xname(sc->sc_dev), __func__));
14646 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14647 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
14648 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14649 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14650 } else {
14651 device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
14652 }
14653
14654 mutex_exit(sc->sc_ich_phymtx);
14655 }
14656
14657 static int
14658 wm_get_nvm_ich8lan(struct wm_softc *sc)
14659 {
14660
14661 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14662 device_xname(sc->sc_dev), __func__));
14663 mutex_enter(sc->sc_ich_nvmmtx);
14664
14665 return 0;
14666 }
14667
14668 static void
14669 wm_put_nvm_ich8lan(struct wm_softc *sc)
14670 {
14671
14672 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14673 device_xname(sc->sc_dev), __func__));
14674 mutex_exit(sc->sc_ich_nvmmtx);
14675 }
14676
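/*
 * Get the 82573 hardware semaphore (MDIO software ownership bit in
 * EXTCNFCTR), retrying until WM_MDIO_OWNERSHIP_TIMEOUT is reached.
 */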
14677 static int
14678 wm_get_hw_semaphore_82573(struct wm_softc *sc)
14679 {
14680 int i = 0;
14681 uint32_t reg;
14682
14683 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14684 device_xname(sc->sc_dev), __func__));
14685
14686 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14687 do {
14688 CSR_WRITE(sc, WMREG_EXTCNFCTR,
14689 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
14690 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14691 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
14692 break;
14693 delay(2*1000);
14694 i++;
14695 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
14696
14697 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
14698 wm_put_hw_semaphore_82573(sc);
14699 log(LOG_ERR, "%s: Driver can't access the PHY\n",
14700 device_xname(sc->sc_dev));
14701 return -1;
14702 }
14703
14704 return 0;
14705 }
14706
14707 static void
14708 wm_put_hw_semaphore_82573(struct wm_softc *sc)
14709 {
14710 uint32_t reg;
14711
14712 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14713 device_xname(sc->sc_dev), __func__));
14714
14715 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14716 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14717 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
14718 }
14719
14720 /*
14721 * Management mode and power management related subroutines.
14722 * BMC, AMT, suspend/resume and EEE.
14723 */
14724
14725 #ifdef WM_WOL
14726 static int
14727 wm_check_mng_mode(struct wm_softc *sc)
14728 {
14729 int rv;
14730
14731 switch (sc->sc_type) {
14732 case WM_T_ICH8:
14733 case WM_T_ICH9:
14734 case WM_T_ICH10:
14735 case WM_T_PCH:
14736 case WM_T_PCH2:
14737 case WM_T_PCH_LPT:
14738 case WM_T_PCH_SPT:
14739 case WM_T_PCH_CNP:
14740 rv = wm_check_mng_mode_ich8lan(sc);
14741 break;
14742 case WM_T_82574:
14743 case WM_T_82583:
14744 rv = wm_check_mng_mode_82574(sc);
14745 break;
14746 case WM_T_82571:
14747 case WM_T_82572:
14748 case WM_T_82573:
14749 case WM_T_80003:
14750 rv = wm_check_mng_mode_generic(sc);
14751 break;
14752 default:
14753 /* Nothing to do */
14754 rv = 0;
14755 break;
14756 }
14757
14758 return rv;
14759 }
14760
14761 static int
14762 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
14763 {
14764 uint32_t fwsm;
14765
14766 fwsm = CSR_READ(sc, WMREG_FWSM);
14767
14768 if (((fwsm & FWSM_FW_VALID) != 0)
14769 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
14770 return 1;
14771
14772 return 0;
14773 }
14774
14775 static int
14776 wm_check_mng_mode_82574(struct wm_softc *sc)
14777 {
14778 uint16_t data;
14779
14780 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
14781
14782 if ((data & NVM_CFG2_MNGM_MASK) != 0)
14783 return 1;
14784
14785 return 0;
14786 }
14787
14788 static int
14789 wm_check_mng_mode_generic(struct wm_softc *sc)
14790 {
14791 uint32_t fwsm;
14792
14793 fwsm = CSR_READ(sc, WMREG_FWSM);
14794
14795 if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
14796 return 1;
14797
14798 return 0;
14799 }
14800 #endif /* WM_WOL */
14801
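/*
 * wm_enable_mng_pass_thru:
 *
 * Return 1 if management firmware is present and configured so that
 * management packets must be passed through to the host, otherwise 0.
 */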
14802 static int
14803 wm_enable_mng_pass_thru(struct wm_softc *sc)
14804 {
14805 uint32_t manc, fwsm, factps;
14806
14807 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
14808 return 0;
14809
14810 manc = CSR_READ(sc, WMREG_MANC);
14811
14812 DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
14813 device_xname(sc->sc_dev), manc));
14814 if ((manc & MANC_RECV_TCO_EN) == 0)
14815 return 0;
14816
14817 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
14818 fwsm = CSR_READ(sc, WMREG_FWSM);
14819 factps = CSR_READ(sc, WMREG_FACTPS);
14820 if (((factps & FACTPS_MNGCG) == 0)
14821 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
14822 return 1;
14823 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
14824 uint16_t data;
14825
14826 factps = CSR_READ(sc, WMREG_FACTPS);
14827 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
14828 DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
14829 device_xname(sc->sc_dev), factps, data));
14830 if (((factps & FACTPS_MNGCG) == 0)
14831 && ((data & NVM_CFG2_MNGM_MASK)
14832 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
14833 return 1;
14834 } else if (((manc & MANC_SMBUS_EN) != 0)
14835 && ((manc & MANC_ASF_EN) == 0))
14836 return 1;
14837
14838 return 0;
14839 }
14840
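/*
 * wm_phy_resetisblocked:
 *
 * Return true if the firmware currently blocks a PHY reset (FWSM
 * RSPCIPHY on ICH/PCH devices, MANC BLK_PHY_RST_ON_IDE on others).
 */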
14841 static bool
14842 wm_phy_resetisblocked(struct wm_softc *sc)
14843 {
14844 bool blocked = false;
14845 uint32_t reg;
14846 int i = 0;
14847
14848 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
14849 device_xname(sc->sc_dev), __func__));
14850
14851 switch (sc->sc_type) {
14852 case WM_T_ICH8:
14853 case WM_T_ICH9:
14854 case WM_T_ICH10:
14855 case WM_T_PCH:
14856 case WM_T_PCH2:
14857 case WM_T_PCH_LPT:
14858 case WM_T_PCH_SPT:
14859 case WM_T_PCH_CNP:
14860 do {
14861 reg = CSR_READ(sc, WMREG_FWSM);
14862 if ((reg & FWSM_RSPCIPHY) == 0) {
14863 blocked = true;
14864 delay(10*1000);
14865 continue;
14866 }
14867 blocked = false;
14868 } while (blocked && (i++ < 30));
14869 return blocked;
14871 case WM_T_82571:
14872 case WM_T_82572:
14873 case WM_T_82573:
14874 case WM_T_82574:
14875 case WM_T_82583:
14876 case WM_T_80003:
14877 reg = CSR_READ(sc, WMREG_MANC);
14878 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
14879 return true;
14880 else
14881 return false;
14883 default:
14884 /* No problem */
14885 break;
14886 }
14887
14888 return false;
14889 }
14890
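/*
 * wm_get_hw_control:
 *
 * Tell the firmware that the driver has taken control of the device by
 * setting the DRV_LOAD bit (in SWSM on 82573, in CTRL_EXT on other
 * 82571 and newer devices).
 */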
14891 static void
14892 wm_get_hw_control(struct wm_softc *sc)
14893 {
14894 uint32_t reg;
14895
14896 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14897 device_xname(sc->sc_dev), __func__));
14898
14899 if (sc->sc_type == WM_T_82573) {
14900 reg = CSR_READ(sc, WMREG_SWSM);
14901 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
14902 } else if (sc->sc_type >= WM_T_82571) {
14903 reg = CSR_READ(sc, WMREG_CTRL_EXT);
14904 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
14905 }
14906 }
14907
14908 static void
14909 wm_release_hw_control(struct wm_softc *sc)
14910 {
14911 uint32_t reg;
14912
14913 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14914 device_xname(sc->sc_dev), __func__));
14915
14916 if (sc->sc_type == WM_T_82573) {
14917 reg = CSR_READ(sc, WMREG_SWSM);
14918 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
14919 } else if (sc->sc_type >= WM_T_82571) {
14920 reg = CSR_READ(sc, WMREG_CTRL_EXT);
14921 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
14922 }
14923 }
14924
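/*
 * wm_gate_hw_phy_config_ich8lan:
 *
 * Gate or ungate automatic PHY configuration by hardware (PCH2 and
 * newer) via the EXTCNFCTR GATE_PHY_CFG bit.
 */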
14925 static void
14926 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
14927 {
14928 uint32_t reg;
14929
14930 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
14931 device_xname(sc->sc_dev), __func__));
14932
14933 if (sc->sc_type < WM_T_PCH2)
14934 return;
14935
14936 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14937
14938 if (gate)
14939 reg |= EXTCNFCTR_GATE_PHY_CFG;
14940 else
14941 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
14942
14943 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
14944 }
14945
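/*
 * wm_init_phy_workarounds_pchlan:
 *
 * PCH-family workarounds to make the PHY accessible before use: disable
 * ULP and, if the PHY cannot be reached, force the MAC-PHY interconnect
 * out of SMBus mode by toggling LANPHYPC, then reset the PHY.
 */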
14946 static int
14947 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
14948 {
14949 uint32_t fwsm, reg;
14950 int rv = 0;
14951
14952 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
14953 device_xname(sc->sc_dev), __func__));
14954
14955 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
14956 wm_gate_hw_phy_config_ich8lan(sc, true);
14957
14958 /* Disable ULP */
14959 wm_ulp_disable(sc);
14960
14961 /* Acquire PHY semaphore */
14962 rv = sc->phy.acquire(sc);
14963 if (rv != 0) {
14964 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
14965 device_xname(sc->sc_dev), __func__));
14966 return -1;
14967 }
14968
14969 /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
14970 * inaccessible and resetting the PHY is not blocked, toggle the
14971 * LANPHYPC Value bit to force the interconnect to PCIe mode.
14972 */
14973 fwsm = CSR_READ(sc, WMREG_FWSM);
14974 switch (sc->sc_type) {
14975 case WM_T_PCH_LPT:
14976 case WM_T_PCH_SPT:
14977 case WM_T_PCH_CNP:
14978 if (wm_phy_is_accessible_pchlan(sc))
14979 break;
14980
14981 /* Before toggling LANPHYPC, see if PHY is accessible by
14982 * forcing MAC to SMBus mode first.
14983 */
14984 reg = CSR_READ(sc, WMREG_CTRL_EXT);
14985 reg |= CTRL_EXT_FORCE_SMBUS;
14986 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
14987 #if 0
14988 /* XXX Isn't this required??? */
14989 CSR_WRITE_FLUSH(sc);
14990 #endif
14991 /* Wait 50 milliseconds for MAC to finish any retries
14992 * that it might be trying to perform from previous
14993 * attempts to acknowledge any phy read requests.
14994 */
14995 delay(50 * 1000);
14996 /* FALLTHROUGH */
14997 case WM_T_PCH2:
14998 if (wm_phy_is_accessible_pchlan(sc) == true)
14999 break;
15000 /* FALLTHROUGH */
15001 case WM_T_PCH:
15002 if (sc->sc_type == WM_T_PCH)
15003 if ((fwsm & FWSM_FW_VALID) != 0)
15004 break;
15005
15006 if (wm_phy_resetisblocked(sc) == true) {
15007 device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
15008 break;
15009 }
15010
15011 /* Toggle LANPHYPC Value bit */
15012 wm_toggle_lanphypc_pch_lpt(sc);
15013
15014 if (sc->sc_type >= WM_T_PCH_LPT) {
15015 if (wm_phy_is_accessible_pchlan(sc) == true)
15016 break;
15017
15018 /* Toggling LANPHYPC brings the PHY out of SMBus mode
15019 * so ensure that the MAC is also out of SMBus mode
15020 */
15021 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15022 reg &= ~CTRL_EXT_FORCE_SMBUS;
15023 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15024
15025 if (wm_phy_is_accessible_pchlan(sc) == true)
15026 break;
15027 rv = -1;
15028 }
15029 break;
15030 default:
15031 break;
15032 }
15033
15034 /* Release semaphore */
15035 sc->phy.release(sc);
15036
15037 if (rv == 0) {
15038 /* Check to see if able to reset PHY. Print error if not */
15039 if (wm_phy_resetisblocked(sc)) {
15040 device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
15041 goto out;
15042 }
15043
15044 /* Reset the PHY before any access to it. Doing so, ensures
15045 * that the PHY is in a known good state before we read/write
15046 * PHY registers. The generic reset is sufficient here,
15047 * because we haven't determined the PHY type yet.
15048 */
15049 if (wm_reset_phy(sc) != 0)
15050 goto out;
15051
		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function. If the PHY does not quiesce, print
		 * a diagnostic, as the PHY reset is still blocked.
		 */
15058 if (wm_phy_resetisblocked(sc))
15059 device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
15060 }
15061
15062 out:
15063 /* Ungate automatic PHY configuration on non-managed 82579 */
15064 if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
15065 delay(10*1000);
15066 wm_gate_hw_phy_config_ich8lan(sc, false);
15067 }
15068
15069 return 0;
15070 }
15071
15072 static void
15073 wm_init_manageability(struct wm_softc *sc)
15074 {
15075
15076 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15077 device_xname(sc->sc_dev), __func__));
15078 if (sc->sc_flags & WM_F_HAS_MANAGE) {
15079 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
15080 uint32_t manc = CSR_READ(sc, WMREG_MANC);
15081
15082 /* Disable hardware interception of ARP */
15083 manc &= ~MANC_ARP_EN;
15084
15085 /* Enable receiving management packets to the host */
15086 if (sc->sc_type >= WM_T_82571) {
15087 manc |= MANC_EN_MNG2HOST;
15088 manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
15089 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
15090 }
15091
15092 CSR_WRITE(sc, WMREG_MANC, manc);
15093 }
15094 }
15095
15096 static void
15097 wm_release_manageability(struct wm_softc *sc)
15098 {
15099
15100 if (sc->sc_flags & WM_F_HAS_MANAGE) {
15101 uint32_t manc = CSR_READ(sc, WMREG_MANC);
15102
15103 manc |= MANC_ARP_EN;
15104 if (sc->sc_type >= WM_T_82571)
15105 manc &= ~MANC_EN_MNG2HOST;
15106
15107 CSR_WRITE(sc, WMREG_MANC, manc);
15108 }
15109 }
15110
15111 static void
15112 wm_get_wakeup(struct wm_softc *sc)
15113 {
15114
15115 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
15116 switch (sc->sc_type) {
15117 case WM_T_82573:
15118 case WM_T_82583:
15119 sc->sc_flags |= WM_F_HAS_AMT;
15120 /* FALLTHROUGH */
15121 case WM_T_80003:
15122 case WM_T_82575:
15123 case WM_T_82576:
15124 case WM_T_82580:
15125 case WM_T_I350:
15126 case WM_T_I354:
15127 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
15128 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
15129 /* FALLTHROUGH */
15130 case WM_T_82541:
15131 case WM_T_82541_2:
15132 case WM_T_82547:
15133 case WM_T_82547_2:
15134 case WM_T_82571:
15135 case WM_T_82572:
15136 case WM_T_82574:
15137 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
15138 break;
15139 case WM_T_ICH8:
15140 case WM_T_ICH9:
15141 case WM_T_ICH10:
15142 case WM_T_PCH:
15143 case WM_T_PCH2:
15144 case WM_T_PCH_LPT:
15145 case WM_T_PCH_SPT:
15146 case WM_T_PCH_CNP:
15147 sc->sc_flags |= WM_F_HAS_AMT;
15148 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
15149 break;
15150 default:
15151 break;
15152 }
15153
15154 /* 1: HAS_MANAGE */
15155 if (wm_enable_mng_pass_thru(sc) != 0)
15156 sc->sc_flags |= WM_F_HAS_MANAGE;
15157
	/*
	 * Note that the WOL flag is set after the EEPROM reset code has
	 * run.
	 */
15162 }
15163
/*
 * Unconfigure Ultra Low Power mode.
 * Only for PCH_LPT (I217) and newer; some I217/I218 device IDs are
 * excluded below.
 */
15168 static int
15169 wm_ulp_disable(struct wm_softc *sc)
15170 {
15171 uint32_t reg;
15172 uint16_t phyreg;
15173 int i = 0, rv = 0;
15174
15175 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15176 device_xname(sc->sc_dev), __func__));
15177 /* Exclude old devices */
15178 if ((sc->sc_type < WM_T_PCH_LPT)
15179 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
15180 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
15181 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
15182 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
15183 return 0;
15184
15185 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
15186 /* Request ME un-configure ULP mode in the PHY */
15187 reg = CSR_READ(sc, WMREG_H2ME);
15188 reg &= ~H2ME_ULP;
15189 reg |= H2ME_ENFORCE_SETTINGS;
15190 CSR_WRITE(sc, WMREG_H2ME, reg);
15191
15192 /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
15193 while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
15194 if (i++ == 30) {
15195 device_printf(sc->sc_dev, "%s timed out\n",
15196 __func__);
15197 return -1;
15198 }
15199 delay(10 * 1000);
15200 }
15201 reg = CSR_READ(sc, WMREG_H2ME);
15202 reg &= ~H2ME_ENFORCE_SETTINGS;
15203 CSR_WRITE(sc, WMREG_H2ME, reg);
15204
15205 return 0;
15206 }
15207
15208 /* Acquire semaphore */
15209 rv = sc->phy.acquire(sc);
15210 if (rv != 0) {
15211 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
15212 device_xname(sc->sc_dev), __func__));
15213 return -1;
15214 }
15215
15216 /* Toggle LANPHYPC */
15217 wm_toggle_lanphypc_pch_lpt(sc);
15218
15219 /* Unforce SMBus mode in PHY */
15220 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
15221 if (rv != 0) {
15222 uint32_t reg2;
15223
15224 aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
15225 __func__);
15226 reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
15227 reg2 |= CTRL_EXT_FORCE_SMBUS;
15228 CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
15229 delay(50 * 1000);
15230
15231 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
15232 &phyreg);
15233 if (rv != 0)
15234 goto release;
15235 }
15236 phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
15237 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
15238
15239 /* Unforce SMBus mode in MAC */
15240 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15241 reg &= ~CTRL_EXT_FORCE_SMBUS;
15242 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15243
15244 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
15245 if (rv != 0)
15246 goto release;
15247 phyreg |= HV_PM_CTRL_K1_ENA;
15248 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
15249
15250 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
15251 &phyreg);
15252 if (rv != 0)
15253 goto release;
15254 phyreg &= ~(I218_ULP_CONFIG1_IND
15255 | I218_ULP_CONFIG1_STICKY_ULP
15256 | I218_ULP_CONFIG1_RESET_TO_SMBUS
15257 | I218_ULP_CONFIG1_WOL_HOST
15258 | I218_ULP_CONFIG1_INBAND_EXIT
15259 | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
15260 | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
15261 | I218_ULP_CONFIG1_DIS_SMB_PERST);
15262 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
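	/* Commit the ULP changes by starting auto ULP configuration */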
15263 phyreg |= I218_ULP_CONFIG1_START;
15264 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
15265
15266 reg = CSR_READ(sc, WMREG_FEXTNVM7);
15267 reg &= ~FEXTNVM7_DIS_SMB_PERST;
15268 CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
15269
15270 release:
15271 /* Release semaphore */
15272 sc->phy.release(sc);
15273 wm_gmii_reset(sc);
15274 delay(50 * 1000);
15275
15276 return rv;
15277 }
15278
15279 /* WOL in the newer chipset interfaces (pchlan) */
15280 static int
15281 wm_enable_phy_wakeup(struct wm_softc *sc)
15282 {
15283 device_t dev = sc->sc_dev;
15284 uint32_t mreg, moff;
15285 uint16_t wuce, wuc, wufc, preg;
15286 int i, rv;
15287
15288 KASSERT(sc->sc_type >= WM_T_PCH);
15289
15290 /* Copy MAC RARs to PHY RARs */
15291 wm_copy_rx_addrs_to_phy_ich8lan(sc);
15292
15293 /* Activate PHY wakeup */
15294 rv = sc->phy.acquire(sc);
15295 if (rv != 0) {
15296 device_printf(dev, "%s: failed to acquire semaphore\n",
15297 __func__);
15298 return rv;
15299 }
15300
15301 /*
15302 * Enable access to PHY wakeup registers.
15303 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
15304 */
15305 rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
15306 if (rv != 0) {
15307 device_printf(dev,
15308 "%s: Could not enable PHY wakeup reg access\n", __func__);
15309 goto release;
15310 }
15311
15312 /* Copy MAC MTA to PHY MTA */
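	/*
	 * Each 32-bit MTA entry is written to the PHY as two 16-bit
	 * halves (low word, then high word) via the BM wakeup register
	 * access helpers.
	 */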
15313 for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
15314 uint16_t lo, hi;
15315
15316 mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
15317 lo = (uint16_t)(mreg & 0xffff);
15318 hi = (uint16_t)((mreg >> 16) & 0xffff);
15319 wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
15320 wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
15321 }
15322
15323 /* Configure PHY Rx Control register */
15324 wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
15325 mreg = CSR_READ(sc, WMREG_RCTL);
15326 if (mreg & RCTL_UPE)
15327 preg |= BM_RCTL_UPE;
15328 if (mreg & RCTL_MPE)
15329 preg |= BM_RCTL_MPE;
15330 preg &= ~(BM_RCTL_MO_MASK);
15331 moff = __SHIFTOUT(mreg, RCTL_MO);
15332 if (moff != 0)
15333 preg |= moff << BM_RCTL_MO_SHIFT;
15334 if (mreg & RCTL_BAM)
15335 preg |= BM_RCTL_BAM;
15336 if (mreg & RCTL_PMCF)
15337 preg |= BM_RCTL_PMCF;
15338 mreg = CSR_READ(sc, WMREG_CTRL);
15339 if (mreg & CTRL_RFCE)
15340 preg |= BM_RCTL_RFCE;
15341 wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
15342
15343 wuc = WUC_APME | WUC_PME_EN;
15344 wufc = WUFC_MAG;
15345 /* Enable PHY wakeup in MAC register */
15346 CSR_WRITE(sc, WMREG_WUC,
15347 WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
15348 CSR_WRITE(sc, WMREG_WUFC, wufc);
15349
15350 /* Configure and enable PHY wakeup in PHY registers */
15351 wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
15352 wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
15353
15354 wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
15355 wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
15356
15357 release:
15358 sc->phy.release(sc);
15359
15360 return 0;
15361 }
15362
15363 /* Power down workaround on D3 */
15364 static void
15365 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
15366 {
15367 uint32_t reg;
15368 uint16_t phyreg;
15369 int i;
15370
15371 for (i = 0; i < 2; i++) {
15372 /* Disable link */
15373 reg = CSR_READ(sc, WMREG_PHY_CTRL);
15374 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
15375 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15376
15377 /*
15378 * Call gig speed drop workaround on Gig disable before
15379 * accessing any PHY registers
15380 */
15381 if (sc->sc_type == WM_T_ICH8)
15382 wm_gig_downshift_workaround_ich8lan(sc);
15383
15384 /* Write VR power-down enable */
15385 sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
15386 phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
15387 phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
15388 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
15389
15390 /* Read it back and test */
15391 sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
15392 phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
15393 if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
15394 break;
15395
15396 /* Issue PHY reset and repeat at most one more time */
15397 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
15398 }
15399 }
15400
15401 /*
15402 * wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
15403 * @sc: pointer to the HW structure
15404 *
15405 * During S0 to Sx transition, it is possible the link remains at gig
15406 * instead of negotiating to a lower speed. Before going to Sx, set
15407 * 'Gig Disable' to force link speed negotiation to a lower speed based on
15408 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
15409 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
15410 * needs to be written.
 * Parts that support (and are linked to a partner which supports) EEE in
15412 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
15413 * than 10Mbps w/o EEE.
15414 */
15415 static void
15416 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
15417 {
15418 device_t dev = sc->sc_dev;
15419 struct ethercom *ec = &sc->sc_ethercom;
15420 uint32_t phy_ctrl;
15421 int rv;
15422
15423 phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
15424 phy_ctrl |= PHY_CTRL_GBE_DIS;
15425
15426 KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
15427
15428 if (sc->sc_phytype == WMPHY_I217) {
15429 uint16_t devid = sc->sc_pcidevid;
15430
15431 if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
15432 (devid == PCI_PRODUCT_INTEL_I218_V) ||
15433 (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
15434 (devid == PCI_PRODUCT_INTEL_I218_V3) ||
15435 (sc->sc_type >= WM_T_PCH_SPT))
15436 CSR_WRITE(sc, WMREG_FEXTNVM6,
15437 CSR_READ(sc, WMREG_FEXTNVM6)
15438 & ~FEXTNVM6_REQ_PLL_CLK);
15439
15440 if (sc->phy.acquire(sc) != 0)
15441 goto out;
15442
15443 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
15444 uint16_t eee_advert;
15445
15446 rv = wm_read_emi_reg_locked(dev,
15447 I217_EEE_ADVERTISEMENT, &eee_advert);
15448 if (rv)
15449 goto release;
15450
15451 /*
15452 * Disable LPLU if both link partners support 100BaseT
15453 * EEE and 100Full is advertised on both ends of the
15454 * link, and enable Auto Enable LPI since there will
15455 * be no driver to enable LPI while in Sx.
15456 */
15457 if ((eee_advert & AN_EEEADVERT_100_TX) &&
15458 (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
15459 uint16_t anar, phy_reg;
15460
15461 sc->phy.readreg_locked(dev, 2, MII_ANAR,
15462 &anar);
15463 if (anar & ANAR_TX_FD) {
15464 phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
15465 PHY_CTRL_NOND0A_LPLU);
15466
15467 /* Set Auto Enable LPI after link up */
15468 sc->phy.readreg_locked(dev, 2,
15469 I217_LPI_GPIO_CTRL, &phy_reg);
15470 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
15471 sc->phy.writereg_locked(dev, 2,
15472 I217_LPI_GPIO_CTRL, phy_reg);
15473 }
15474 }
15475 }
15476
15477 /*
15478 * For i217 Intel Rapid Start Technology support,
15479 * when the system is going into Sx and no manageability engine
15480 * is present, the driver must configure proxy to reset only on
15481 * power good. LPI (Low Power Idle) state must also reset only
15482 * on power good, as well as the MTA (Multicast table array).
15483 * The SMBus release must also be disabled on LCD reset.
15484 */
15485
15486 /*
15487 * Enable MTA to reset for Intel Rapid Start Technology
15488 * Support
15489 */
15490
15491 release:
15492 sc->phy.release(sc);
15493 }
15494 out:
15495 CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
15496
15497 if (sc->sc_type == WM_T_ICH8)
15498 wm_gig_downshift_workaround_ich8lan(sc);
15499
15500 if (sc->sc_type >= WM_T_PCH) {
15501 wm_oem_bits_config_ich8lan(sc, false);
15502
15503 /* Reset PHY to activate OEM bits on 82577/8 */
15504 if (sc->sc_type == WM_T_PCH)
15505 wm_reset_phy(sc);
15506
15507 if (sc->phy.acquire(sc) != 0)
15508 return;
15509 wm_write_smbus_addr(sc);
15510 sc->phy.release(sc);
15511 }
15512 }
15513
15514 /*
15515 * wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
15516 * @sc: pointer to the HW structure
15517 *
15518 * During Sx to S0 transitions on non-managed devices or managed devices
15519 * on which PHY resets are not blocked, if the PHY registers cannot be
 * accessed properly by the s/w, toggle the LANPHYPC value to power cycle
15521 * the PHY.
15522 * On i217, setup Intel Rapid Start Technology.
15523 */
15524 static int
15525 wm_resume_workarounds_pchlan(struct wm_softc *sc)
15526 {
15527 device_t dev = sc->sc_dev;
15528 int rv;
15529
15530 if (sc->sc_type < WM_T_PCH2)
15531 return 0;
15532
15533 rv = wm_init_phy_workarounds_pchlan(sc);
15534 if (rv != 0)
15535 return -1;
15536
	/* For i217 Intel Rapid Start Technology support: when the system
	 * is transitioning from Sx and no manageability engine is present,
	 * configure SMBus to restore on reset, disable proxy, and enable
	 * the reset on MTA (Multicast table array).
	 */
15542 if (sc->sc_phytype == WMPHY_I217) {
15543 uint16_t phy_reg;
15544
15545 if (sc->phy.acquire(sc) != 0)
15546 return -1;
15547
15548 /* Clear Auto Enable LPI after link up */
15549 sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
15550 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
15551 sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
15552
15553 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
15554 /* Restore clear on SMB if no manageability engine
15555 * is present
15556 */
15557 rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
15558 &phy_reg);
15559 if (rv != 0)
15560 goto release;
15561 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
15562 sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
15563
15564 /* Disable Proxy */
15565 sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
15566 }
15567 /* Enable reset on MTA */
		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
15569 if (rv != 0)
15570 goto release;
15571 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
15572 sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
15573
15574 release:
15575 sc->phy.release(sc);
15576 return rv;
15577 }
15578
15579 return 0;
15580 }
15581
15582 static void
15583 wm_enable_wakeup(struct wm_softc *sc)
15584 {
15585 uint32_t reg, pmreg;
15586 pcireg_t pmode;
15587 int rv = 0;
15588
15589 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15590 device_xname(sc->sc_dev), __func__));
15591
15592 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
15593 &pmreg, NULL) == 0)
15594 return;
15595
15596 if ((sc->sc_flags & WM_F_WOL) == 0)
15597 goto pme;
15598
15599 /* Advertise the wakeup capability */
15600 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
15601 | CTRL_SWDPIN(3));
15602
15603 /* Keep the laser running on fiber adapters */
15604 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
15605 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
15606 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15607 reg |= CTRL_EXT_SWDPIN(3);
15608 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15609 }
15610
15611 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
15612 (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
15613 (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
15614 (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
15615 wm_suspend_workarounds_ich8lan(sc);
15616
15617 #if 0 /* For the multicast packet */
15618 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
15619 reg |= WUFC_MC;
15620 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
15621 #endif
15622
15623 if (sc->sc_type >= WM_T_PCH) {
15624 rv = wm_enable_phy_wakeup(sc);
15625 if (rv != 0)
15626 goto pme;
15627 } else {
15628 /* Enable wakeup by the MAC */
15629 CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
15630 CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
15631 }
15632
15633 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
15634 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
15635 || (sc->sc_type == WM_T_PCH2))
15636 && (sc->sc_phytype == WMPHY_IGP_3))
15637 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
15638
15639 pme:
15640 /* Request PME */
15641 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
15642 pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
15643 if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
15644 /* For WOL */
15645 pmode |= PCI_PMCSR_PME_EN;
15646 } else {
15647 /* Disable WOL */
15648 pmode &= ~PCI_PMCSR_PME_EN;
15649 }
15650 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
15651 }
15652
15653 /* Disable ASPM L0s and/or L1 for workaround */
15654 static void
15655 wm_disable_aspm(struct wm_softc *sc)
15656 {
15657 pcireg_t reg, mask = 0;
	const char *str = "";
15659
	/*
	 * Only for PCIe devices which have the PCIe capability in the PCI
	 * config space.
	 */
15664 if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
15665 return;
15666
15667 switch (sc->sc_type) {
15668 case WM_T_82571:
15669 case WM_T_82572:
15670 /*
15671 * 8257[12] Errata 13: Device Does Not Support PCIe Active
15672 * State Power management L1 State (ASPM L1).
15673 */
15674 mask = PCIE_LCSR_ASPM_L1;
15675 str = "L1 is";
15676 break;
15677 case WM_T_82573:
15678 case WM_T_82574:
15679 case WM_T_82583:
15680 /*
		 * The 82573 can disappear from the PCIe bus when ASPM L0s
		 * is enabled.
		 *
		 * The 82574 and 82583 do not support PCIe ASPM L0s with
		 * some chipsets. The 82574 and 82583 documents say that
		 * disabling L0s only with those specific chipsets is
		 * sufficient, but we follow what the Intel em driver does.
15687 *
15688 * References:
15689 * Errata 8 of the Specification Update of i82573.
15690 * Errata 20 of the Specification Update of i82574.
15691 * Errata 9 of the Specification Update of i82583.
15692 */
15693 mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
15694 str = "L0s and L1 are";
15695 break;
15696 default:
15697 return;
15698 }
15699
15700 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
15701 sc->sc_pcixe_capoff + PCIE_LCSR);
15702 reg &= ~mask;
15703 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
15704 sc->sc_pcixe_capoff + PCIE_LCSR, reg);
15705
15706 /* Print only in wm_attach() */
15707 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
15708 aprint_verbose_dev(sc->sc_dev,
15709 "ASPM %s disabled to workaround the errata.\n", str);
15710 }
15711
15712 /* LPLU */
15713
15714 static void
15715 wm_lplu_d0_disable(struct wm_softc *sc)
15716 {
15717 struct mii_data *mii = &sc->sc_mii;
15718 uint32_t reg;
15719 uint16_t phyval;
15720
15721 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15722 device_xname(sc->sc_dev), __func__));
15723
15724 if (sc->sc_phytype == WMPHY_IFE)
15725 return;
15726
15727 switch (sc->sc_type) {
15728 case WM_T_82571:
15729 case WM_T_82572:
15730 case WM_T_82573:
15731 case WM_T_82575:
15732 case WM_T_82576:
15733 mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
15734 phyval &= ~PMR_D0_LPLU;
15735 mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
15736 break;
15737 case WM_T_82580:
15738 case WM_T_I350:
15739 case WM_T_I210:
15740 case WM_T_I211:
15741 reg = CSR_READ(sc, WMREG_PHPM);
15742 reg &= ~PHPM_D0A_LPLU;
15743 CSR_WRITE(sc, WMREG_PHPM, reg);
15744 break;
15745 case WM_T_82574:
15746 case WM_T_82583:
15747 case WM_T_ICH8:
15748 case WM_T_ICH9:
15749 case WM_T_ICH10:
15750 reg = CSR_READ(sc, WMREG_PHY_CTRL);
15751 reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
15752 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15753 CSR_WRITE_FLUSH(sc);
15754 break;
15755 case WM_T_PCH:
15756 case WM_T_PCH2:
15757 case WM_T_PCH_LPT:
15758 case WM_T_PCH_SPT:
15759 case WM_T_PCH_CNP:
15760 wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
15761 phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
15762 if (wm_phy_resetisblocked(sc) == false)
15763 phyval |= HV_OEM_BITS_ANEGNOW;
15764 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
15765 break;
15766 default:
15767 break;
15768 }
15769 }
15770
15771 /* EEE */
15772
15773 static int
15774 wm_set_eee_i350(struct wm_softc *sc)
15775 {
15776 struct ethercom *ec = &sc->sc_ethercom;
15777 uint32_t ipcnfg, eeer;
15778 uint32_t ipcnfg_mask
15779 = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
15780 uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
15781
15782 KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
15783
15784 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
15785 eeer = CSR_READ(sc, WMREG_EEER);
15786
15787 /* Enable or disable per user setting */
15788 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
15789 ipcnfg |= ipcnfg_mask;
15790 eeer |= eeer_mask;
15791 } else {
15792 ipcnfg &= ~ipcnfg_mask;
15793 eeer &= ~eeer_mask;
15794 }
15795
15796 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
15797 CSR_WRITE(sc, WMREG_EEER, eeer);
15798 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
15799 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
15800
15801 return 0;
15802 }
15803
15804 static int
15805 wm_set_eee_pchlan(struct wm_softc *sc)
15806 {
15807 device_t dev = sc->sc_dev;
15808 struct ethercom *ec = &sc->sc_ethercom;
15809 uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
15810 int rv = 0;
15811
15812 switch (sc->sc_phytype) {
15813 case WMPHY_82579:
15814 lpa = I82579_EEE_LP_ABILITY;
15815 pcs_status = I82579_EEE_PCS_STATUS;
15816 adv_addr = I82579_EEE_ADVERTISEMENT;
15817 break;
15818 case WMPHY_I217:
15819 lpa = I217_EEE_LP_ABILITY;
15820 pcs_status = I217_EEE_PCS_STATUS;
15821 adv_addr = I217_EEE_ADVERTISEMENT;
15822 break;
15823 default:
15824 return 0;
15825 }
15826
15827 if (sc->phy.acquire(sc)) {
15828 device_printf(dev, "%s: failed to get semaphore\n", __func__);
15829 return 0;
15830 }
15831
15832 rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
15833 if (rv != 0)
15834 goto release;
15835
15836 /* Clear bits that enable EEE in various speeds */
15837 lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
15838
15839 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
15840 /* Save off link partner's EEE ability */
15841 rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
15842 if (rv != 0)
15843 goto release;
15844
15845 /* Read EEE advertisement */
15846 if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
15847 goto release;
15848
15849 /*
15850 * Enable EEE only for speeds in which the link partner is
15851 * EEE capable and for which we advertise EEE.
15852 */
15853 if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
15854 lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
15855 if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
15856 sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
15857 if ((data & ANLPAR_TX_FD) != 0)
15858 lpi_ctrl |= I82579_LPI_CTRL_EN_100;
15859 else {
15860 /*
15861 * EEE is not supported in 100Half, so ignore
15862 * partner's EEE in 100 ability if full-duplex
15863 * is not advertised.
15864 */
15865 sc->eee_lp_ability
15866 &= ~AN_EEEADVERT_100_TX;
15867 }
15868 }
15869 }
15870
15871 if (sc->sc_phytype == WMPHY_82579) {
15872 rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
15873 if (rv != 0)
15874 goto release;
15875
15876 data &= ~I82579_LPI_PLL_SHUT_100;
15877 rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
15878 }
15879
15880 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
15881 if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
15882 goto release;
15883
15884 rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
15885 release:
15886 sc->phy.release(sc);
15887
15888 return rv;
15889 }
15890
15891 static int
15892 wm_set_eee(struct wm_softc *sc)
15893 {
15894 struct ethercom *ec = &sc->sc_ethercom;
15895
15896 if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
15897 return 0;
15898
15899 if (sc->sc_type == WM_T_I354) {
15900 /* I354 uses an external PHY */
15901 return 0; /* not yet */
15902 } else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
15903 return wm_set_eee_i350(sc);
15904 else if (sc->sc_type >= WM_T_PCH2)
15905 return wm_set_eee_pchlan(sc);
15906
15907 return 0;
15908 }
15909
15910 /*
15911 * Workarounds (mainly PHY related).
15912 * Basically, PHY's workarounds are in the PHY drivers.
15913 */
15914
15915 /* Work-around for 82566 Kumeran PCS lock loss */
15916 static int
15917 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
15918 {
15919 struct mii_data *mii = &sc->sc_mii;
15920 uint32_t status = CSR_READ(sc, WMREG_STATUS);
15921 int i, reg, rv;
15922 uint16_t phyreg;
15923
15924 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15925 device_xname(sc->sc_dev), __func__));
15926
15927 /* If the link is not up, do nothing */
15928 if ((status & STATUS_LU) == 0)
15929 return 0;
15930
15931 /* Nothing to do if the link is other than 1Gbps */
15932 if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
15933 return 0;
15934
15935 for (i = 0; i < 10; i++) {
15936 /* read twice */
15937 rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
15938 if (rv != 0)
15939 return rv;
15940 rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
15941 if (rv != 0)
15942 return rv;
15943
15944 if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
15945 goto out; /* GOOD! */
15946
15947 /* Reset the PHY */
15948 wm_reset_phy(sc);
15949 delay(5*1000);
15950 }
15951
15952 /* Disable GigE link negotiation */
15953 reg = CSR_READ(sc, WMREG_PHY_CTRL);
15954 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
15955 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15956
15957 /*
15958 * Call gig speed drop workaround on Gig disable before accessing
15959 * any PHY registers.
15960 */
15961 wm_gig_downshift_workaround_ich8lan(sc);
15962
15963 out:
15964 return 0;
15965 }
15966
15967 /*
15968 * wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
15969 * @sc: pointer to the HW structure
15970 *
 * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
15972 * LPLU, Gig disable, MDIC PHY reset):
15973 * 1) Set Kumeran Near-end loopback
15974 * 2) Clear Kumeran Near-end loopback
15975 * Should only be called for ICH8[m] devices with any 1G Phy.
15976 */
15977 static void
15978 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
15979 {
15980 uint16_t kmreg;
15981
15982 /* Only for igp3 */
15983 if (sc->sc_phytype == WMPHY_IGP_3) {
15984 if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
15985 return;
15986 kmreg |= KUMCTRLSTA_DIAG_NELPBK;
15987 if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
15988 return;
15989 kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
15990 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
15991 }
15992 }
15993
15994 /*
15995 * Workaround for pch's PHYs
15996 * XXX should be moved to new PHY driver?
15997 */
15998 static int
15999 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
16000 {
16001 device_t dev = sc->sc_dev;
16002 struct mii_data *mii = &sc->sc_mii;
16003 struct mii_softc *child;
16004 uint16_t phy_data, phyrev = 0;
16005 int phytype = sc->sc_phytype;
16006 int rv;
16007
16008 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16009 device_xname(dev), __func__));
16010 KASSERT(sc->sc_type == WM_T_PCH);
16011
16012 /* Set MDIO slow mode before any other MDIO access */
16013 if (phytype == WMPHY_82577)
16014 if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
16015 return rv;
16016
16017 child = LIST_FIRST(&mii->mii_phys);
16018 if (child != NULL)
16019 phyrev = child->mii_mpd_rev;
16020
	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
16022 if ((child != NULL) &&
16023 (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
16024 ((phytype == WMPHY_82578) && (phyrev == 1)))) {
16025 /* Disable generation of early preamble (0x4431) */
16026 rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
16027 &phy_data);
16028 if (rv != 0)
16029 return rv;
16030 phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
16031 BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
16032 rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
16033 phy_data);
16034 if (rv != 0)
16035 return rv;
16036
16037 /* Preamble tuning for SSC */
16038 rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
16039 if (rv != 0)
16040 return rv;
16041 }
16042
16043 /* 82578 */
16044 if (phytype == WMPHY_82578) {
16045 /*
16046 * Return registers to default by doing a soft reset then
16047 * writing 0x3140 to the control register
16048 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
16049 */
16050 if ((child != NULL) && (phyrev < 2)) {
16051 PHY_RESET(child);
16052 rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
16053 if (rv != 0)
16054 return rv;
16055 }
16056 }
16057
16058 /* Select page 0 */
16059 if ((rv = sc->phy.acquire(sc)) != 0)
16060 return rv;
16061 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
16062 sc->phy.release(sc);
16063 if (rv != 0)
16064 return rv;
16065
16066 /*
16067 * Configure the K1 Si workaround during phy reset assuming there is
16068 * link so that it disables K1 if link is in 1Gbps.
16069 */
16070 if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
16071 return rv;
16072
16073 /* Workaround for link disconnects on a busy hub in half duplex */
16074 rv = sc->phy.acquire(sc);
16075 if (rv)
16076 return rv;
16077 rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
16078 if (rv)
16079 goto release;
16080 rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
16081 phy_data & 0x00ff);
16082 if (rv)
16083 goto release;
16084
16085 /* Set MSE higher to enable link to stay up when noise is high */
16086 rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
16087 release:
16088 sc->phy.release(sc);
16089
16090 return rv;
16091 }
16092
16093 /*
16094 * wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
16095 * @sc: pointer to the HW structure
16096 */
16097 static void
16098 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
16099 {
16100
16101 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16102 device_xname(sc->sc_dev), __func__));
16103
16104 if (sc->phy.acquire(sc) != 0)
16105 return;
16106
16107 wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
16108
16109 sc->phy.release(sc);
16110 }
16111
16112 static void
16113 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
16114 {
16115 device_t dev = sc->sc_dev;
16116 uint32_t mac_reg;
16117 uint16_t i, wuce;
16118 int count;
16119
16120 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16121 device_xname(dev), __func__));
16122
16123 if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
16124 return;
16125
16126 /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
16127 count = wm_rar_count(sc);
16128 for (i = 0; i < count; i++) {
16129 uint16_t lo, hi;
16130 mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
16131 lo = (uint16_t)(mac_reg & 0xffff);
16132 hi = (uint16_t)((mac_reg >> 16) & 0xffff);
16133 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
16134 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
16135
16136 mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
16137 lo = (uint16_t)(mac_reg & 0xffff);
16138 hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
16139 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
16140 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
16141 }
16142
16143 wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
16144 }
16145
16146 /*
16147 * wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
16148 * with 82579 PHY
16149 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
16150 */
16151 static int
16152 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
16153 {
16154 device_t dev = sc->sc_dev;
16155 int rar_count;
16156 int rv;
16157 uint32_t mac_reg;
16158 uint16_t dft_ctrl, data;
16159 uint16_t i;
16160
16161 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16162 device_xname(dev), __func__));
16163
16164 if (sc->sc_type < WM_T_PCH2)
16165 return 0;
16166
16167 /* Acquire PHY semaphore */
16168 rv = sc->phy.acquire(sc);
16169 if (rv != 0)
16170 return rv;
16171
16172 /* Disable Rx path while enabling/disabling workaround */
16173 rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
16174 if (rv != 0)
16175 goto out;
16176 rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
16177 dft_ctrl | (1 << 14));
16178 if (rv != 0)
16179 goto out;
16180
16181 if (enable) {
16182 /* Write Rx addresses (rar_entry_count for RAL/H, and
16183 * SHRAL/H) and initial CRC values to the MAC
16184 */
16185 rar_count = wm_rar_count(sc);
16186 for (i = 0; i < rar_count; i++) {
16187 uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
16188 uint32_t addr_high, addr_low;
16189
16190 addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
16191 if (!(addr_high & RAL_AV))
16192 continue;
16193 addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
16194 mac_addr[0] = (addr_low & 0xFF);
16195 mac_addr[1] = ((addr_low >> 8) & 0xFF);
16196 mac_addr[2] = ((addr_low >> 16) & 0xFF);
16197 mac_addr[3] = ((addr_low >> 24) & 0xFF);
16198 mac_addr[4] = (addr_high & 0xFF);
16199 mac_addr[5] = ((addr_high >> 8) & 0xFF);
16200
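			/*
			 * Program the initial CRC value for this receive
			 * address: the inverted little-endian CRC-32 of the
			 * reassembled station address.
			 */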
16201 CSR_WRITE(sc, WMREG_PCH_RAICC(i),
16202 ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
16203 }
16204
16205 /* Write Rx addresses to the PHY */
16206 wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
16207 }
16208
16209 /*
16210 * If enable ==
16211 * true: Enable jumbo frame workaround in the MAC.
16212 * false: Write MAC register values back to h/w defaults.
16213 */
16214 mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
16215 if (enable) {
16216 mac_reg &= ~(1 << 14);
16217 mac_reg |= (7 << 15);
16218 } else
16219 mac_reg &= ~(0xf << 14);
16220 CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
16221
16222 mac_reg = CSR_READ(sc, WMREG_RCTL);
16223 if (enable) {
16224 mac_reg |= RCTL_SECRC;
16225 sc->sc_rctl |= RCTL_SECRC;
16226 sc->sc_flags |= WM_F_CRC_STRIP;
16227 } else {
16228 mac_reg &= ~RCTL_SECRC;
16229 sc->sc_rctl &= ~RCTL_SECRC;
16230 sc->sc_flags &= ~WM_F_CRC_STRIP;
16231 }
16232 CSR_WRITE(sc, WMREG_RCTL, mac_reg);
16233
16234 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
16235 if (rv != 0)
16236 goto out;
16237 if (enable)
16238 data |= 1 << 0;
16239 else
16240 data &= ~(1 << 0);
16241 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
16242 if (rv != 0)
16243 goto out;
16244
16245 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
16246 if (rv != 0)
16247 goto out;
	/*
	 * XXX FreeBSD and Linux do the same thing: they set the same value
	 * in both the enable and the disable case. Is that correct?
	 */
16252 data &= ~(0xf << 8);
16253 data |= (0xb << 8);
16254 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
16255 if (rv != 0)
16256 goto out;
16257
16258 /*
16259 * If enable ==
16260 * true: Enable jumbo frame workaround in the PHY.
16261 * false: Write PHY register values back to h/w defaults.
16262 */
16263 rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
16264 if (rv != 0)
16265 goto out;
16266 data &= ~(0x7F << 5);
16267 if (enable)
16268 data |= (0x37 << 5);
16269 rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
16270 if (rv != 0)
16271 goto out;
16272
16273 rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
16274 if (rv != 0)
16275 goto out;
16276 if (enable)
16277 data &= ~(1 << 13);
16278 else
16279 data |= (1 << 13);
16280 rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
16281 if (rv != 0)
16282 goto out;
16283
16284 rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
16285 if (rv != 0)
16286 goto out;
16287 data &= ~(0x3FF << 2);
16288 if (enable)
16289 data |= (I82579_TX_PTR_GAP << 2);
16290 else
16291 data |= (0x8 << 2);
16292 rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
16293 if (rv != 0)
16294 goto out;
16295
16296 rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
16297 enable ? 0xf100 : 0x7e00);
16298 if (rv != 0)
16299 goto out;
16300
16301 rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
16302 if (rv != 0)
16303 goto out;
16304 if (enable)
16305 data |= 1 << 10;
16306 else
16307 data &= ~(1 << 10);
16308 rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
16309 if (rv != 0)
16310 goto out;
16311
16312 /* Re-enable Rx path after enabling/disabling workaround */
16313 rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
16314 dft_ctrl & ~(1 << 14));
16315
16316 out:
16317 sc->phy.release(sc);
16318
16319 return rv;
16320 }
16321
16322 /*
16323 * wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
16324 * done after every PHY reset.
16325 */
16326 static int
16327 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
16328 {
16329 device_t dev = sc->sc_dev;
16330 int rv;
16331
16332 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16333 device_xname(dev), __func__));
16334 KASSERT(sc->sc_type == WM_T_PCH2);
16335
16336 /* Set MDIO slow mode before any other MDIO access */
16337 rv = wm_set_mdio_slow_mode_hv(sc);
16338 if (rv != 0)
16339 return rv;
16340
16341 rv = sc->phy.acquire(sc);
16342 if (rv != 0)
16343 return rv;
16344 /* Set MSE higher to enable link to stay up when noise is high */
16345 rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
16346 if (rv != 0)
16347 goto release;
16348 /* Drop link after 5 times MSE threshold was reached */
16349 rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
16350 release:
16351 sc->phy.release(sc);
16352
16353 return rv;
16354 }
16355
16356 /**
16357 * wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
16358 * @link: link up bool flag
16359 *
 * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications,
 * preventing further DMA write requests. Work around the issue by disabling
 * the de-assertion of the clock request when in 1Gbps mode.
16363 * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
16364 * speeds in order to avoid Tx hangs.
16365 **/
16366 static int
16367 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
16368 {
16369 uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
16370 uint32_t status = CSR_READ(sc, WMREG_STATUS);
16371 uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
16372 uint16_t phyreg;
16373
16374 if (link && (speed == STATUS_SPEED_1000)) {
16375 sc->phy.acquire(sc);
16376 int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
16377 &phyreg);
16378 if (rv != 0)
16379 goto release;
16380 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
16381 phyreg & ~KUMCTRLSTA_K1_ENABLE);
16382 if (rv != 0)
16383 goto release;
16384 delay(20);
16385 CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
16386
16387 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
16388 &phyreg);
16389 release:
16390 sc->phy.release(sc);
16391 return rv;
16392 }
16393
16394 fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
16395
16396 struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
16397 if (((child != NULL) && (child->mii_mpd_rev > 5))
16398 || !link
16399 || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
16400 goto update_fextnvm6;
16401
16402 wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
16403
16404 /* Clear link status transmit timeout */
16405 phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
16406 if (speed == STATUS_SPEED_100) {
16407 /* Set inband Tx timeout to 5x10us for 100Half */
16408 phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
16409
16410 /* Do not extend the K1 entry latency for 100Half */
16411 fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
16412 } else {
16413 /* Set inband Tx timeout to 50x10us for 10Full/Half */
16414 phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
16415
16416 /* Extend the K1 entry latency for 10 Mbps */
16417 fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
16418 }
16419
16420 wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
16421
16422 update_fextnvm6:
16423 CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
16424 return 0;
16425 }
16426
16427 /*
16428 * wm_k1_gig_workaround_hv - K1 Si workaround
16429 * @sc: pointer to the HW structure
16430 * @link: link up bool flag
16431 *
16432 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 * from a lower speed. This workaround disables K1 whenever link is at 1Gig.
 * If link is down, the function will restore the default K1 setting located
16435 * in the NVM.
16436 */
16437 static int
16438 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
16439 {
16440 int k1_enable = sc->sc_nvm_k1_enabled;
16441
16442 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16443 device_xname(sc->sc_dev), __func__));
16444
16445 if (sc->phy.acquire(sc) != 0)
16446 return -1;
16447
16448 if (link) {
16449 k1_enable = 0;
16450
16451 /* Link stall fix for link up */
16452 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
16453 0x0100);
16454 } else {
16455 /* Link stall fix for link down */
16456 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
16457 0x4100);
16458 }
16459
16460 wm_configure_k1_ich8lan(sc, k1_enable);
16461 sc->phy.release(sc);
16462
16463 return 0;
16464 }
16465
16466 /*
16467 * wm_k1_workaround_lv - K1 Si workaround
16468 * @sc: pointer to the HW structure
16469 *
 * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
 * Disable K1 for 1000 and 100 speeds.
16472 */
16473 static int
16474 wm_k1_workaround_lv(struct wm_softc *sc)
16475 {
16476 uint32_t reg;
16477 uint16_t phyreg;
16478 int rv;
16479
16480 if (sc->sc_type != WM_T_PCH2)
16481 return 0;
16482
16483 /* Set K1 beacon duration based on 10Mbps speed */
16484 rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
16485 if (rv != 0)
16486 return rv;
16487
16488 if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
16489 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
16490 if (phyreg &
16491 (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
16492 /* LV 1G/100 Packet drop issue wa */
16493 rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
16494 &phyreg);
16495 if (rv != 0)
16496 return rv;
16497 phyreg &= ~HV_PM_CTRL_K1_ENA;
16498 rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
16499 phyreg);
16500 if (rv != 0)
16501 return rv;
16502 } else {
16503 /* For 10Mbps */
16504 reg = CSR_READ(sc, WMREG_FEXTNVM4);
16505 reg &= ~FEXTNVM4_BEACON_DURATION;
16506 reg |= FEXTNVM4_BEACON_DURATION_16US;
16507 CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
16508 }
16509 }
16510
16511 return 0;
16512 }
16513
16514 /*
16515 * wm_link_stall_workaround_hv - Si workaround
16516 * @sc: pointer to the HW structure
16517 *
16518 * This function works around a Si bug where the link partner can get
16519 * a link up indication before the PHY does. If small packets are sent
 * by the link partner, they can be placed in the packet buffer without
 * being properly accounted for by the PHY and will stall, preventing
16522 * further packets from being received. The workaround is to clear the
16523 * packet buffer after the PHY detects link up.
16524 */
16525 static int
16526 wm_link_stall_workaround_hv(struct wm_softc *sc)
16527 {
16528 uint16_t phyreg;
16529
16530 if (sc->sc_phytype != WMPHY_82578)
16531 return 0;
16532
16533 /* Do not apply workaround if in PHY loopback bit 14 set */
16534 wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
16535 if ((phyreg & BMCR_LOOP) != 0)
16536 return 0;
16537
16538 /* Check if link is up and at 1Gbps */
16539 wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
16540 phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
16541 | BM_CS_STATUS_SPEED_MASK;
16542 if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
16543 | BM_CS_STATUS_SPEED_1000))
16544 return 0;
16545
16546 delay(200 * 1000); /* XXX too big */
16547
16548 /* Flush the packets in the fifo buffer */
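	/*
	 * Briefly force the speed while pointing the mux at the MAC, then
	 * drop the force; this empties the PHY's packet buffer.
	 */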
16549 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
16550 HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
16551 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
16552 HV_MUX_DATA_CTRL_GEN_TO_MAC);
16553
16554 return 0;
16555 }
16556
16557 static int
16558 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
16559 {
16560 int rv;
16561 uint16_t reg;
16562
	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
16564 if (rv != 0)
16565 return rv;
16566
16567 return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
16568 reg | HV_KMRN_MDIO_SLOW);
16569 }
16570
16571 /*
16572 * wm_configure_k1_ich8lan - Configure K1 power state
16573 * @sc: pointer to the HW structure
16574 * @enable: K1 state to configure
16575 *
16576 * Configure the K1 power state based on the provided parameter.
16577 * Assumes semaphore already acquired.
16578 */
16579 static void
16580 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
16581 {
16582 uint32_t ctrl, ctrl_ext, tmp;
16583 uint16_t kmreg;
16584 int rv;
16585
16586 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
16587
16588 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
16589 if (rv != 0)
16590 return;
16591
16592 if (k1_enable)
16593 kmreg |= KUMCTRLSTA_K1_ENABLE;
16594 else
16595 kmreg &= ~KUMCTRLSTA_K1_ENABLE;
16596
16597 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
16598 if (rv != 0)
16599 return;
16600
16601 delay(20);
16602
16603 ctrl = CSR_READ(sc, WMREG_CTRL);
16604 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
16605
16606 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
16607 tmp |= CTRL_FRCSPD;
16608
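	/*
	 * Briefly force the MAC speed (with the speed bypass bit set in
	 * CTRL_EXT), then restore the original CTRL and CTRL_EXT values
	 * below.
	 */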
16609 CSR_WRITE(sc, WMREG_CTRL, tmp);
16610 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
16611 CSR_WRITE_FLUSH(sc);
16612 delay(20);
16613
16614 CSR_WRITE(sc, WMREG_CTRL, ctrl);
16615 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
16616 CSR_WRITE_FLUSH(sc);
16617 delay(20);
16618
16619 return;
16620 }
16621
16622 /* special case - for 82575 - need to do manual init ... */
16623 static void
16624 wm_reset_init_script_82575(struct wm_softc *sc)
16625 {
16626 /*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
16629 */
16630
16631 /* SerDes configuration via SERDESCTRL */
16632 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
16633 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
16634 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
16635 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
16636
16637 /* CCM configuration via CCMCTL register */
16638 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
16639 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
16640
16641 /* PCIe lanes configuration */
16642 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
16643 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
16644 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
16645 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
16646
16647 /* PCIe PLL Configuration */
16648 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
16649 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
16650 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
16651 }
16652
16653 static void
16654 wm_reset_mdicnfg_82580(struct wm_softc *sc)
16655 {
16656 uint32_t reg;
16657 uint16_t nvmword;
16658 int rv;
16659
16660 if (sc->sc_type != WM_T_82580)
16661 return;
16662 if ((sc->sc_flags & WM_F_SGMII) == 0)
16663 return;
16664
16665 rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
16666 + NVM_OFF_CFG3_PORTA, 1, &nvmword);
16667 if (rv != 0) {
16668 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
16669 __func__);
16670 return;
16671 }
16672
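	/*
	 * Set the external/common MDIO bits in MDICNFG according to the
	 * CFG3_PORTA word read from the NVM.
	 */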
16673 reg = CSR_READ(sc, WMREG_MDICNFG);
16674 if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
16675 reg |= MDICNFG_DEST;
16676 if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
16677 reg |= MDICNFG_COM_MDIO;
16678 CSR_WRITE(sc, WMREG_MDICNFG, reg);
16679 }
16680
16681 #define MII_INVALIDID(x) (((x) == 0x0000) || ((x) == 0xffff))
16682
16683 static bool
16684 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
16685 {
16686 uint32_t reg;
16687 uint16_t id1, id2;
16688 int i, rv;
16689
16690 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16691 device_xname(sc->sc_dev), __func__));
16692 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
16693
16694 id1 = id2 = 0xffff;
16695 for (i = 0; i < 2; i++) {
16696 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
16697 &id1);
16698 if ((rv != 0) || MII_INVALIDID(id1))
16699 continue;
16700 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
16701 &id2);
16702 if ((rv != 0) || MII_INVALIDID(id2))
16703 continue;
16704 break;
16705 }
16706 if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
16707 goto out;
16708
16709 /*
16710 * In case the PHY needs to be in mdio slow mode,
16711 * set slow mode and try to get the PHY id again.
16712 */
16713 rv = 0;
16714 if (sc->sc_type < WM_T_PCH_LPT) {
16715 sc->phy.release(sc);
16716 wm_set_mdio_slow_mode_hv(sc);
16717 rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
16718 rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
16719 sc->phy.acquire(sc);
16720 }
16721 if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
16722 device_printf(sc->sc_dev, "XXX return with false\n");
16723 return false;
16724 }
16725 out:
16726 if (sc->sc_type >= WM_T_PCH_LPT) {
16727 /* Only unforce SMBus if ME is not active */
16728 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
16729 uint16_t phyreg;
16730
16731 /* Unforce SMBus mode in PHY */
16732 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
16733 CV_SMB_CTRL, &phyreg);
16734 phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
16735 wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
16736 CV_SMB_CTRL, phyreg);
16737
16738 /* Unforce SMBus mode in MAC */
16739 reg = CSR_READ(sc, WMREG_CTRL_EXT);
16740 reg &= ~CTRL_EXT_FORCE_SMBUS;
16741 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16742 }
16743 }
16744 return true;
16745 }
16746
16747 static void
16748 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
16749 {
16750 uint32_t reg;
16751 int i;
16752
16753 /* Set PHY Config Counter to 50msec */
16754 reg = CSR_READ(sc, WMREG_FEXTNVM3);
16755 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
16756 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
16757 CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
16758
16759 /* Toggle LANPHYPC */
16760 reg = CSR_READ(sc, WMREG_CTRL);
16761 reg |= CTRL_LANPHYPC_OVERRIDE;
16762 reg &= ~CTRL_LANPHYPC_VALUE;
16763 CSR_WRITE(sc, WMREG_CTRL, reg);
16764 CSR_WRITE_FLUSH(sc);
16765 delay(1000);
16766 reg &= ~CTRL_LANPHYPC_OVERRIDE;
16767 CSR_WRITE(sc, WMREG_CTRL, reg);
16768 CSR_WRITE_FLUSH(sc);
16769
16770 if (sc->sc_type < WM_T_PCH_LPT)
16771 delay(50 * 1000);
16772 else {
16773 i = 20;
16774
16775 do {
16776 delay(5 * 1000);
16777 } while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
16778 && i--);
16779
16780 delay(30 * 1000);
16781 }
16782 }
16783
16784 static int
16785 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
16786 {
16787 uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
16788 | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
16789 uint32_t rxa;
16790 uint16_t scale = 0, lat_enc = 0;
16791 int32_t obff_hwm = 0;
16792 int64_t lat_ns, value;
16793
16794 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16795 device_xname(sc->sc_dev), __func__));
16796
16797 if (link) {
16798 uint16_t max_snoop, max_nosnoop, max_ltr_enc;
16799 uint32_t status;
16800 uint16_t speed;
16801 pcireg_t preg;
16802
16803 status = CSR_READ(sc, WMREG_STATUS);
16804 switch (__SHIFTOUT(status, STATUS_SPEED)) {
16805 case STATUS_SPEED_10:
16806 speed = 10;
16807 break;
16808 case STATUS_SPEED_100:
16809 speed = 100;
16810 break;
16811 case STATUS_SPEED_1000:
16812 speed = 1000;
16813 break;
16814 default:
16815 device_printf(sc->sc_dev, "Unknown speed "
16816 "(status = %08x)\n", status);
16817 return -1;
16818 }
16819
16820 /* Rx Packet Buffer Allocation size (KB) */
16821 rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
16822
16823 /*
16824 * Determine the maximum latency tolerated by the device.
16825 *
16826 * Per the PCIe spec, the tolerated latencies are encoded as
16827 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
16828 * a 10-bit value (0-1023) to provide a range from 1 ns to
16829 * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
16830 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
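		/*
		 * Illustrative example (assumed values): with rxa = 26 KB,
		 * a 1500 byte MTU and a 1000 Mb/s link, lat_ns works out to
		 * about 189000. The loop below then produces scale = 2 and
		 * value = 185, i.e. an encoded latency of 185 * 2^10 ns,
		 * the smallest representable value not below lat_ns.
		 */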
16832 lat_ns = ((int64_t)rxa * 1024 -
16833 (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
16834 + ETHER_HDR_LEN))) * 8 * 1000;
16835 if (lat_ns < 0)
16836 lat_ns = 0;
16837 else
16838 lat_ns /= speed;
16839 value = lat_ns;
16840
16841 while (value > LTRV_VALUE) {
			scale++;
16843 value = howmany(value, __BIT(5));
16844 }
16845 if (scale > LTRV_SCALE_MAX) {
16846 device_printf(sc->sc_dev,
16847 "Invalid LTR latency scale %d\n", scale);
16848 return -1;
16849 }
16850 lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
16851
16852 /* Determine the maximum latency tolerated by the platform */
16853 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
16854 WM_PCI_LTR_CAP_LPT);
16855 max_snoop = preg & 0xffff;
16856 max_nosnoop = preg >> 16;
16857
16858 max_ltr_enc = MAX(max_snoop, max_nosnoop);
16859
16860 if (lat_enc > max_ltr_enc) {
16861 lat_enc = max_ltr_enc;
16862 lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
16863 * PCI_LTR_SCALETONS(
16864 __SHIFTOUT(lat_enc,
16865 PCI_LTR_MAXSNOOPLAT_SCALE));
16866 }
16867
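		/*
		 * Convert the (possibly clamped) latency back into the
		 * amount of Rx buffer consumed at line rate (in KB) and use
		 * the remaining buffer space as the OBFF high water mark.
		 */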
16868 if (lat_ns) {
16869 lat_ns *= speed * 1000;
16870 lat_ns /= 8;
16871 lat_ns /= 1000000000;
16872 obff_hwm = (int32_t)(rxa - lat_ns);
16873 }
16874 if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d"
			    " (rxa = %d, lat_ns = %d)\n",
16877 obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
16878 return -1;
16879 }
16880 }
16881 /* Snoop and No-Snoop latencies the same */
16882 reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
16883 CSR_WRITE(sc, WMREG_LTRV, reg);
16884
16885 /* Set OBFF high water mark */
16886 reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
16887 reg |= obff_hwm;
16888 CSR_WRITE(sc, WMREG_SVT, reg);
16889
16890 /* Enable OBFF */
16891 reg = CSR_READ(sc, WMREG_SVCR);
16892 reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
16893 CSR_WRITE(sc, WMREG_SVCR, reg);
16894
16895 return 0;
16896 }
16897
16898 /*
16899 * I210 Errata 25 and I211 Errata 10
16900 * Slow System Clock.
16901 *
 * Note that this function is called in both the FLASH and iNVM cases on NetBSD.
16903 */
16904 static int
16905 wm_pll_workaround_i210(struct wm_softc *sc)
16906 {
16907 uint32_t mdicnfg, wuc;
16908 uint32_t reg;
16909 pcireg_t pcireg;
16910 uint32_t pmreg;
16911 uint16_t nvmword, tmp_nvmword;
16912 uint16_t phyval;
16913 bool wa_done = false;
16914 int i, rv = 0;
16915
16916 /* Get Power Management cap offset */
16917 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
16918 &pmreg, NULL) == 0)
16919 return -1;
16920
16921 /* Save WUC and MDICNFG registers */
16922 wuc = CSR_READ(sc, WMREG_WUC);
16923 mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
16924
16925 reg = mdicnfg & ~MDICNFG_DEST;
16926 CSR_WRITE(sc, WMREG_MDICNFG, reg);
16927
16928 if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
16929 /*
16930 * The default value of the Initialization Control Word 1
16931 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
16932 */
16933 nvmword = INVM_DEFAULT_AL;
16934 }
16935 tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
16936
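	/*
	 * Retry loop: if the internal PHY's PLL frequency register still
	 * reads as unconfigured, reset the PHY, write the workaround
	 * autoload word, bounce the device through D3/D0 and try again.
	 */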
16937 for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
16938 wm_gmii_gs40g_readreg(sc->sc_dev, 1,
16939 GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
16940
16941 if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
16942 rv = 0;
16943 break; /* OK */
16944 } else
16945 rv = -1;
16946
16947 wa_done = true;
16948 /* Directly reset the internal PHY */
16949 reg = CSR_READ(sc, WMREG_CTRL);
16950 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
16951
16952 reg = CSR_READ(sc, WMREG_CTRL_EXT);
16953 reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
16954 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16955
16956 CSR_WRITE(sc, WMREG_WUC, 0);
16957 reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
16958 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
16959
16960 pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
16961 pmreg + PCI_PMCSR);
16962 pcireg |= PCI_PMCSR_STATE_D3;
16963 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
16964 pmreg + PCI_PMCSR, pcireg);
16965 delay(1000);
16966 pcireg &= ~PCI_PMCSR_STATE_D3;
16967 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
16968 pmreg + PCI_PMCSR, pcireg);
16969
16970 reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
16971 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
16972
16973 /* Restore WUC register */
16974 CSR_WRITE(sc, WMREG_WUC, wuc);
16975 }
16976
16977 /* Restore MDICNFG setting */
16978 CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
16979 if (wa_done)
16980 aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
16981 return rv;
16982 }
16983
16984 static void
16985 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
16986 {
16987 uint32_t reg;
16988
16989 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16990 device_xname(sc->sc_dev), __func__));
16991 KASSERT((sc->sc_type == WM_T_PCH_SPT)
16992 || (sc->sc_type == WM_T_PCH_CNP));
16993
16994 reg = CSR_READ(sc, WMREG_FEXTNVM7);
16995 reg |= FEXTNVM7_SIDE_CLK_UNGATE;
16996 CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
16997
16998 reg = CSR_READ(sc, WMREG_FEXTNVM9);
16999 reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
17000 CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
17001 }
17002
17003 /* Sysctl function */
17004 #ifdef WM_DEBUG
17005 static int
17006 wm_sysctl_debug(SYSCTLFN_ARGS)
17007 {
17008 struct sysctlnode node = *rnode;
17009 struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
17010 uint32_t dflags;
17011 int error;
17012
17013 dflags = sc->sc_debug;
17014 node.sysctl_data = &dflags;
17015 error = sysctl_lookup(SYSCTLFN_CALL(&node));
17016
17017 if (error || newp == NULL)
17018 return error;
17019
17020 sc->sc_debug = dflags;
17021
17022 return 0;
17023 }
17024 #endif
17025