/*	$NetBSD: if_wm.c,v 1.677 2020/06/11 02:39:30 thorpej Exp $	*/
2
3 /*
4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*******************************************************************************
39
40 Copyright (c) 2001-2005, Intel Corporation
41 All rights reserved.
42
43 Redistribution and use in source and binary forms, with or without
44 modification, are permitted provided that the following conditions are met:
45
46 1. Redistributions of source code must retain the above copyright notice,
47 this list of conditions and the following disclaimer.
48
49 2. Redistributions in binary form must reproduce the above copyright
50 notice, this list of conditions and the following disclaimer in the
51 documentation and/or other materials provided with the distribution.
52
53 3. Neither the name of the Intel Corporation nor the names of its
54 contributors may be used to endorse or promote products derived from
55 this software without specific prior written permission.
56
57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67 POSSIBILITY OF SUCH DAMAGE.
68
69 *******************************************************************************/
70 /*
71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72 *
73 * TODO (in order of importance):
74 *
75 * - Check XXX'ed comments
 * - Tx multiqueue improvement (refine the queue selection logic)
 * - Split header buffer for newer descriptors
 * - EEE (Energy Efficient Ethernet) for I354
 * - Virtual Function support
 * - Set the LEDs correctly (based on the EEPROM contents)
81 * - Rework how parameters are loaded from the EEPROM.
82 */
83
84 #include <sys/cdefs.h>
85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.677 2020/06/11 02:39:30 thorpej Exp $");
86
87 #ifdef _KERNEL_OPT
88 #include "opt_net_mpsafe.h"
89 #include "opt_if_wm.h"
90 #endif
91
92 #include <sys/param.h>
93 #include <sys/systm.h>
94 #include <sys/callout.h>
95 #include <sys/mbuf.h>
96 #include <sys/malloc.h>
97 #include <sys/kmem.h>
98 #include <sys/kernel.h>
99 #include <sys/socket.h>
100 #include <sys/ioctl.h>
101 #include <sys/errno.h>
102 #include <sys/device.h>
103 #include <sys/queue.h>
104 #include <sys/syslog.h>
105 #include <sys/interrupt.h>
106 #include <sys/cpu.h>
107 #include <sys/pcq.h>
108 #include <sys/sysctl.h>
109 #include <sys/workqueue.h>
110
111 #include <sys/rndsource.h>
112
113 #include <net/if.h>
114 #include <net/if_dl.h>
115 #include <net/if_media.h>
116 #include <net/if_ether.h>
117
118 #include <net/bpf.h>
119
120 #include <net/rss_config.h>
121
122 #include <netinet/in.h> /* XXX for struct ip */
123 #include <netinet/in_systm.h> /* XXX for struct ip */
124 #include <netinet/ip.h> /* XXX for struct ip */
125 #include <netinet/ip6.h> /* XXX for struct ip6_hdr */
126 #include <netinet/tcp.h> /* XXX for struct tcphdr */
127
128 #include <sys/bus.h>
129 #include <sys/intr.h>
130 #include <machine/endian.h>
131
132 #include <dev/mii/mii.h>
133 #include <dev/mii/mdio.h>
134 #include <dev/mii/miivar.h>
135 #include <dev/mii/miidevs.h>
136 #include <dev/mii/mii_bitbang.h>
137 #include <dev/mii/ikphyreg.h>
138 #include <dev/mii/igphyreg.h>
139 #include <dev/mii/igphyvar.h>
140 #include <dev/mii/inbmphyreg.h>
141 #include <dev/mii/ihphyreg.h>
142
143 #include <dev/pci/pcireg.h>
144 #include <dev/pci/pcivar.h>
145 #include <dev/pci/pcidevs.h>
146
147 #include <dev/pci/if_wmreg.h>
148 #include <dev/pci/if_wmvar.h>
149
150 #ifdef WM_DEBUG
151 #define WM_DEBUG_LINK __BIT(0)
152 #define WM_DEBUG_TX __BIT(1)
153 #define WM_DEBUG_RX __BIT(2)
154 #define WM_DEBUG_GMII __BIT(3)
155 #define WM_DEBUG_MANAGE __BIT(4)
156 #define WM_DEBUG_NVM __BIT(5)
157 #define WM_DEBUG_INIT __BIT(6)
158 #define WM_DEBUG_LOCK __BIT(7)
159 int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
160 | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
161 #define DPRINTF(x, y) do { if (wm_debug & (x)) printf y; } while (0)
162 #else
163 #define DPRINTF(x, y) __nothing
164 #endif /* WM_DEBUG */
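/*
 * Illustrative use of DPRINTF() (not copied from a real call site): the
 * second argument is a fully parenthesized printf() argument list, so the
 * whole call compiles away when WM_DEBUG is not defined.
 */
#if 0
	DPRINTF(WM_DEBUG_LINK,
	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
#endif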
165
166 #ifdef NET_MPSAFE
167 #define WM_MPSAFE 1
168 #define CALLOUT_FLAGS CALLOUT_MPSAFE
169 #define WM_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
170 #else
171 #define CALLOUT_FLAGS 0
172 #define WM_WORKQUEUE_FLAGS WQ_PERCPU
173 #endif
174
175 #define WM_WORKQUEUE_PRI PRI_SOFTNET
176
177 /*
 * The maximum number of interrupts that this driver uses.
179 */
180 #define WM_MAX_NQUEUEINTR 16
181 #define WM_MAX_NINTR (WM_MAX_NQUEUEINTR + 1)
182
183 #ifndef WM_DISABLE_MSI
184 #define WM_DISABLE_MSI 0
185 #endif
186 #ifndef WM_DISABLE_MSIX
187 #define WM_DISABLE_MSIX 0
188 #endif
189
190 int wm_disable_msi = WM_DISABLE_MSI;
191 int wm_disable_msix = WM_DISABLE_MSIX;
192
193 #ifndef WM_WATCHDOG_TIMEOUT
194 #define WM_WATCHDOG_TIMEOUT 5
195 #endif
196 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
197
198 /*
199 * Transmit descriptor list size. Due to errata, we can only have
200 * 256 hardware descriptors in the ring on < 82544, but we use 4096
201 * on >= 82544. We tell the upper layers that they can queue a lot
202 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
203 * of them at a time.
204 *
205 * We allow up to 64 DMA segments per packet. Pathological packet
206 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
209 */
210 #define WM_NTXSEGS 64
211 #define WM_IFQUEUELEN 256
212 #define WM_TXQUEUELEN_MAX 64
213 #define WM_TXQUEUELEN_MAX_82547 16
214 #define WM_TXQUEUELEN(txq) ((txq)->txq_num)
215 #define WM_TXQUEUELEN_MASK(txq) (WM_TXQUEUELEN(txq) - 1)
216 #define WM_TXQUEUE_GC(txq) (WM_TXQUEUELEN(txq) / 8)
217 #define WM_NTXDESC_82542 256
218 #define WM_NTXDESC_82544 4096
219 #define WM_NTXDESC(txq) ((txq)->txq_ndesc)
220 #define WM_NTXDESC_MASK(txq) (WM_NTXDESC(txq) - 1)
221 #define WM_TXDESCS_SIZE(txq) (WM_NTXDESC(txq) * (txq)->txq_descsize)
222 #define WM_NEXTTX(txq, x) (((x) + 1) & WM_NTXDESC_MASK(txq))
223 #define WM_NEXTTXS(txq, x) (((x) + 1) & WM_TXQUEUELEN_MASK(txq))
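/*
 * WM_NEXTTX()/WM_NEXTTXS() rely on txq_ndesc and txq_num being powers of
 * two, so advancing a ring index is a mask instead of a modulo.  A small
 * worked example (hypothetical values, illustrative only):
 *
 *	txq_ndesc = 256			-> WM_NTXDESC_MASK(txq) == 0xff
 *	WM_NEXTTX(txq, 254) == 255
 *	WM_NEXTTX(txq, 255) == 0	(wraps back to the ring start)
 */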
224
225 #define WM_MAXTXDMA (2 * round_page(IP_MAXPACKET)) /* for TSO */
226
227 #define WM_TXINTERQSIZE 256
228
229 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
230 #define WM_TX_PROCESS_LIMIT_DEFAULT 100U
231 #endif
232 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
233 #define WM_TX_INTR_PROCESS_LIMIT_DEFAULT 0U
234 #endif
235
236 /*
237 * Receive descriptor list size. We have one Rx buffer for normal
238 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
239 * packet. We allocate 256 receive descriptors, each with a 2k
240 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
241 */
242 #define WM_NRXDESC 256U
243 #define WM_NRXDESC_MASK (WM_NRXDESC - 1)
244 #define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK)
245 #define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK)
246
247 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
248 #define WM_RX_PROCESS_LIMIT_DEFAULT 100U
249 #endif
250 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
251 #define WM_RX_INTR_PROCESS_LIMIT_DEFAULT 0U
252 #endif
253
254 typedef union txdescs {
255 wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
256 nq_txdesc_t sctxu_nq_txdescs[WM_NTXDESC_82544];
257 } txdescs_t;
258
259 typedef union rxdescs {
260 wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
261 ext_rxdesc_t sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
262 nq_rxdesc_t sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
263 } rxdescs_t;
264
265 #define WM_CDTXOFF(txq, x) ((txq)->txq_descsize * (x))
266 #define WM_CDRXOFF(rxq, x) ((rxq)->rxq_descsize * (x))
267
268 /*
269 * Software state for transmit jobs.
270 */
271 struct wm_txsoft {
272 struct mbuf *txs_mbuf; /* head of our mbuf chain */
273 bus_dmamap_t txs_dmamap; /* our DMA map */
274 int txs_firstdesc; /* first descriptor in packet */
275 int txs_lastdesc; /* last descriptor in packet */
276 int txs_ndesc; /* # of descriptors used */
277 };
278
279 /*
280 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
281 * buffer and a DMA map. For packets which fill more than one buffer, we chain
282 * them together.
283 */
284 struct wm_rxsoft {
285 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
286 bus_dmamap_t rxs_dmamap; /* our DMA map */
287 };
288
289 #define WM_LINKUP_TIMEOUT 50
290
291 static uint16_t swfwphysem[] = {
292 SWFW_PHY0_SM,
293 SWFW_PHY1_SM,
294 SWFW_PHY2_SM,
295 SWFW_PHY3_SM
296 };
297
298 static const uint32_t wm_82580_rxpbs_table[] = {
299 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
300 };
301
302 struct wm_softc;
303
304 #ifdef WM_EVENT_COUNTERS
305 #define WM_Q_EVCNT_DEFINE(qname, evname) \
306 char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
307 struct evcnt qname##_ev_##evname;
308
309 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype) \
310 do { \
311 snprintf((q)->qname##_##evname##_evcnt_name, \
312 sizeof((q)->qname##_##evname##_evcnt_name), \
313 "%s%02d%s", #qname, (qnum), #evname); \
314 evcnt_attach_dynamic(&(q)->qname##_ev_##evname, \
315 (evtype), NULL, (xname), \
316 (q)->qname##_##evname##_evcnt_name); \
317 } while (0)
318
319 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname) \
320 WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
321
322 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname) \
323 WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
324
325 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum) \
326 evcnt_detach(&(q)->qname##_ev_##evname);
327 #endif /* WM_EVENT_COUNTERS */
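/*
 * The WM_Q_EVCNT_* macros build per-queue event counters by token pasting:
 * WM_Q_EVCNT_DEFINE(txq, txdw) declares "char txq_txdw_evcnt_name[...]" and
 * "struct evcnt txq_ev_txdw", and WM_Q_EVCNT_ATTACH() formats a name such as
 * "txq00txdw" before calling evcnt_attach_dynamic().  A rough expansion
 * (illustrative only, not literal preprocessor output):
 */
#if 0
	snprintf(txq->txq_txdw_evcnt_name,
	    sizeof(txq->txq_txdw_evcnt_name), "%s%02d%s", "txq", 0, "txdw");
	evcnt_attach_dynamic(&txq->txq_ev_txdw, EVCNT_TYPE_INTR, NULL,
	    device_xname(sc->sc_dev), txq->txq_txdw_evcnt_name);
#endif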
328
329 struct wm_txqueue {
330 kmutex_t *txq_lock; /* lock for tx operations */
331
332 struct wm_softc *txq_sc; /* shortcut (skip struct wm_queue) */
333
334 /* Software state for the transmit descriptors. */
335 int txq_num; /* must be a power of two */
336 struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
337
338 /* TX control data structures. */
339 int txq_ndesc; /* must be a power of two */
340 size_t txq_descsize; /* a tx descriptor size */
341 txdescs_t *txq_descs_u;
342 bus_dmamap_t txq_desc_dmamap; /* control data DMA map */
343 bus_dma_segment_t txq_desc_seg; /* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
345 #define txq_desc_dma txq_desc_dmamap->dm_segs[0].ds_addr
346 #define txq_descs txq_descs_u->sctxu_txdescs
347 #define txq_nq_descs txq_descs_u->sctxu_nq_txdescs
348
349 bus_addr_t txq_tdt_reg; /* offset of TDT register */
350
351 int txq_free; /* number of free Tx descriptors */
352 int txq_next; /* next ready Tx descriptor */
353
354 int txq_sfree; /* number of free Tx jobs */
355 int txq_snext; /* next free Tx job */
356 int txq_sdirty; /* dirty Tx jobs */
357
358 /* These 4 variables are used only on the 82547. */
359 int txq_fifo_size; /* Tx FIFO size */
360 int txq_fifo_head; /* current head of FIFO */
361 uint32_t txq_fifo_addr; /* internal address of start of FIFO */
362 int txq_fifo_stall; /* Tx FIFO is stalled */
363
	/*
	 * When ncpu > the number of Tx queues, a Tx queue is shared by
	 * multiple CPUs.  This pcq mediates between them without blocking
	 * (see the sketch after this structure).
	 */
368 pcq_t *txq_interq;
369
	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags, to
	 * manage the Tx H/W queue's busy state.
	 */
374 int txq_flags; /* flags for H/W queue, see below */
375 #define WM_TXQ_NO_SPACE 0x1
376
377 bool txq_stopping;
378
379 bool txq_sending;
380 time_t txq_lastsent;
381
382 /* Checksum flags used for previous packet */
383 uint32_t txq_last_hw_cmd;
384 uint8_t txq_last_hw_fields;
385 uint16_t txq_last_hw_ipcs;
386 uint16_t txq_last_hw_tucs;
387
388 uint32_t txq_packets; /* for AIM */
389 uint32_t txq_bytes; /* for AIM */
390 #ifdef WM_EVENT_COUNTERS
391 /* TX event counters */
392 WM_Q_EVCNT_DEFINE(txq, txsstall) /* Stalled due to no txs */
393 WM_Q_EVCNT_DEFINE(txq, txdstall) /* Stalled due to no txd */
394 WM_Q_EVCNT_DEFINE(txq, fifo_stall) /* FIFO stalls (82547) */
395 WM_Q_EVCNT_DEFINE(txq, txdw) /* Tx descriptor interrupts */
396 WM_Q_EVCNT_DEFINE(txq, txqe) /* Tx queue empty interrupts */
397 /* XXX not used? */
398
399 WM_Q_EVCNT_DEFINE(txq, ipsum) /* IP checksums comp. */
400 WM_Q_EVCNT_DEFINE(txq, tusum) /* TCP/UDP cksums comp. */
401 WM_Q_EVCNT_DEFINE(txq, tusum6) /* TCP/UDP v6 cksums comp. */
402 WM_Q_EVCNT_DEFINE(txq, tso) /* TCP seg offload (IPv4) */
403 WM_Q_EVCNT_DEFINE(txq, tso6) /* TCP seg offload (IPv6) */
404 WM_Q_EVCNT_DEFINE(txq, tsopain) /* Painful header manip. for TSO */
405 WM_Q_EVCNT_DEFINE(txq, pcqdrop) /* Pkt dropped in pcq */
406 WM_Q_EVCNT_DEFINE(txq, descdrop) /* Pkt dropped in MAC desc ring */
407 /* other than toomanyseg */
408
	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
410 WM_Q_EVCNT_DEFINE(txq, defrag) /* m_defrag() */
411 WM_Q_EVCNT_DEFINE(txq, underrun) /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skipped writing cksum context */
413
414 char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
415 struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
416 #endif /* WM_EVENT_COUNTERS */
417 };
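/*
 * How txq_interq is meant to be used (a simplified sketch, not the driver's
 * actual transmit path; wm_example_enqueue() is a hypothetical helper): CPUs
 * that do not own txq_lock hand their mbufs to the per-queue pcq, and the
 * queue owner drains it later, under the lock, from the deferred start.
 */
#if 0
static int
wm_example_enqueue(struct wm_txqueue *txq, struct mbuf *m)
{

	if (pcq_put(txq->txq_interq, m))
		return 0;	/* will be sent by the deferred start */
	m_freem(m);		/* queue full; drop the packet */
	return ENOBUFS;
}
#endif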
418
419 struct wm_rxqueue {
420 kmutex_t *rxq_lock; /* lock for rx operations */
421
422 struct wm_softc *rxq_sc; /* shortcut (skip struct wm_queue) */
423
424 /* Software state for the receive descriptors. */
425 struct wm_rxsoft rxq_soft[WM_NRXDESC];
426
427 /* RX control data structures. */
428 int rxq_ndesc; /* must be a power of two */
429 size_t rxq_descsize; /* a rx descriptor size */
430 rxdescs_t *rxq_descs_u;
431 bus_dmamap_t rxq_desc_dmamap; /* control data DMA map */
432 bus_dma_segment_t rxq_desc_seg; /* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
434 #define rxq_desc_dma rxq_desc_dmamap->dm_segs[0].ds_addr
435 #define rxq_descs rxq_descs_u->sctxu_rxdescs
436 #define rxq_ext_descs rxq_descs_u->sctxu_ext_rxdescs
437 #define rxq_nq_descs rxq_descs_u->sctxu_nq_rxdescs
438
439 bus_addr_t rxq_rdt_reg; /* offset of RDT register */
440
441 int rxq_ptr; /* next ready Rx desc/queue ent */
442 int rxq_discard;
443 int rxq_len;
444 struct mbuf *rxq_head;
445 struct mbuf *rxq_tail;
446 struct mbuf **rxq_tailp;
447
448 bool rxq_stopping;
449
450 uint32_t rxq_packets; /* for AIM */
451 uint32_t rxq_bytes; /* for AIM */
452 #ifdef WM_EVENT_COUNTERS
453 /* RX event counters */
454 WM_Q_EVCNT_DEFINE(rxq, intr); /* Interrupts */
455 WM_Q_EVCNT_DEFINE(rxq, defer); /* Rx deferred processing */
456
457 WM_Q_EVCNT_DEFINE(rxq, ipsum); /* IP checksums checked */
458 WM_Q_EVCNT_DEFINE(rxq, tusum); /* TCP/UDP cksums checked */
459 #endif
460 };
461
462 struct wm_queue {
463 int wmq_id; /* index of TX/RX queues */
464 int wmq_intr_idx; /* index of MSI-X tables */
465
466 uint32_t wmq_itr; /* interrupt interval per queue. */
467 bool wmq_set_itr;
468
469 struct wm_txqueue wmq_txq;
470 struct wm_rxqueue wmq_rxq;
471
472 bool wmq_txrx_use_workqueue;
473 struct work wmq_cookie;
474 void *wmq_si;
475 };
476
477 struct wm_phyop {
478 int (*acquire)(struct wm_softc *);
479 void (*release)(struct wm_softc *);
480 int (*readreg_locked)(device_t, int, int, uint16_t *);
481 int (*writereg_locked)(device_t, int, int, uint16_t);
482 int reset_delay_us;
483 bool no_errprint;
484 };
485
486 struct wm_nvmop {
487 int (*acquire)(struct wm_softc *);
488 void (*release)(struct wm_softc *);
489 int (*read)(struct wm_softc *, int, int, uint16_t *);
490 };
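/*
 * Both op vectors above follow the same acquire/operate/release pattern.
 * A minimal sketch of a caller (wm_example_nvm_read_word() is hypothetical;
 * the real accessors add error reporting and chip-specific retries):
 */
#if 0
static int
wm_example_nvm_read_word(struct wm_softc *sc, int word, uint16_t *datap)
{
	int rv;

	if ((rv = sc->nvm.acquire(sc)) != 0)
		return rv;
	rv = sc->nvm.read(sc, word, 1, datap);
	sc->nvm.release(sc);
	return rv;
}
#endif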
491
492 /*
493 * Software state per device.
494 */
495 struct wm_softc {
496 device_t sc_dev; /* generic device information */
497 bus_space_tag_t sc_st; /* bus space tag */
498 bus_space_handle_t sc_sh; /* bus space handle */
499 bus_size_t sc_ss; /* bus space size */
500 bus_space_tag_t sc_iot; /* I/O space tag */
501 bus_space_handle_t sc_ioh; /* I/O space handle */
502 bus_size_t sc_ios; /* I/O space size */
503 bus_space_tag_t sc_flasht; /* flash registers space tag */
504 bus_space_handle_t sc_flashh; /* flash registers space handle */
505 bus_size_t sc_flashs; /* flash registers space size */
506 off_t sc_flashreg_offset; /*
507 * offset to flash registers from
508 * start of BAR
509 */
510 bus_dma_tag_t sc_dmat; /* bus DMA tag */
511
512 struct ethercom sc_ethercom; /* ethernet common data */
513 struct mii_data sc_mii; /* MII/media information */
514
515 pci_chipset_tag_t sc_pc;
516 pcitag_t sc_pcitag;
517 int sc_bus_speed; /* PCI/PCIX bus speed */
518 int sc_pcixe_capoff; /* PCI[Xe] capability reg offset */
519
520 uint16_t sc_pcidevid; /* PCI device ID */
521 wm_chip_type sc_type; /* MAC type */
522 int sc_rev; /* MAC revision */
523 wm_phy_type sc_phytype; /* PHY type */
524 uint8_t sc_sfptype; /* SFP type */
525 uint32_t sc_mediatype; /* Media type (Copper, Fiber, SERDES)*/
526 #define WM_MEDIATYPE_UNKNOWN 0x00
527 #define WM_MEDIATYPE_FIBER 0x01
528 #define WM_MEDIATYPE_COPPER 0x02
529 #define WM_MEDIATYPE_SERDES 0x03 /* Internal SERDES */
530 int sc_funcid; /* unit number of the chip (0 to 3) */
531 int sc_flags; /* flags; see below */
532 u_short sc_if_flags; /* last if_flags */
533 int sc_ec_capenable; /* last ec_capenable */
534 int sc_flowflags; /* 802.3x flow control flags */
535 uint16_t eee_lp_ability; /* EEE link partner's ability */
536 int sc_align_tweak;
537
538 void *sc_ihs[WM_MAX_NINTR]; /*
539 * interrupt cookie.
540 * - legacy and msi use sc_ihs[0] only
541 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
542 */
543 pci_intr_handle_t *sc_intrs; /*
544 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
546 */
547 int sc_nintrs; /* number of interrupts */
548
549 int sc_link_intr_idx; /* index of MSI-X tables */
550
551 callout_t sc_tick_ch; /* tick callout */
552 bool sc_core_stopping;
553
554 int sc_nvm_ver_major;
555 int sc_nvm_ver_minor;
556 int sc_nvm_ver_build;
557 int sc_nvm_addrbits; /* NVM address bits */
558 unsigned int sc_nvm_wordsize; /* NVM word size */
559 int sc_ich8_flash_base;
560 int sc_ich8_flash_bank_size;
561 int sc_nvm_k1_enabled;
562
563 int sc_nqueues;
564 struct wm_queue *sc_queue;
565 u_int sc_tx_process_limit; /* Tx proc. repeat limit in softint */
566 u_int sc_tx_intr_process_limit; /* Tx proc. repeat limit in H/W intr */
567 u_int sc_rx_process_limit; /* Rx proc. repeat limit in softint */
568 u_int sc_rx_intr_process_limit; /* Rx proc. repeat limit in H/W intr */
569 struct workqueue *sc_queue_wq;
570 bool sc_txrx_use_workqueue;
571
572 int sc_affinity_offset;
573
574 #ifdef WM_EVENT_COUNTERS
575 /* Event counters. */
576 struct evcnt sc_ev_linkintr; /* Link interrupts */
577
578 /* WM_T_82542_2_1 only */
579 struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */
580 struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */
581 struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */
582 struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */
583 struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */
584 #endif /* WM_EVENT_COUNTERS */
585
586 struct sysctllog *sc_sysctllog;
587
	/* This variable is used only on the 82547. */
589 callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */
590
591 uint32_t sc_ctrl; /* prototype CTRL register */
592 #if 0
593 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */
594 #endif
595 uint32_t sc_icr; /* prototype interrupt bits */
596 uint32_t sc_itr_init; /* prototype intr throttling reg */
597 uint32_t sc_tctl; /* prototype TCTL register */
598 uint32_t sc_rctl; /* prototype RCTL register */
599 uint32_t sc_txcw; /* prototype TXCW register */
600 uint32_t sc_tipg; /* prototype TIPG register */
601 uint32_t sc_fcrtl; /* prototype FCRTL register */
602 uint32_t sc_pba; /* prototype PBA register */
603
604 int sc_tbi_linkup; /* TBI link status */
605 int sc_tbi_serdes_anegticks; /* autonegotiation ticks */
606 int sc_tbi_serdes_ticks; /* tbi ticks */
607
608 int sc_mchash_type; /* multicast filter offset */
609
610 krndsource_t rnd_source; /* random source */
611
612 struct if_percpuq *sc_ipq; /* softint-based input queues */
613
614 kmutex_t *sc_core_lock; /* lock for softc operations */
615 kmutex_t *sc_ich_phymtx; /*
616 * 82574/82583/ICH/PCH specific PHY
617 * mutex. For 82574/82583, the mutex
618 * is used for both PHY and NVM.
619 */
620 kmutex_t *sc_ich_nvmmtx; /* ICH/PCH specific NVM mutex */
621
622 struct wm_phyop phy;
623 struct wm_nvmop nvm;
624 };
625
626 #define WM_CORE_LOCK(_sc) \
627 if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
628 #define WM_CORE_UNLOCK(_sc) \
629 if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
630 #define WM_CORE_LOCKED(_sc) \
631 (!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
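/*
 * WM_CORE_LOCKED() also evaluates to true when no core lock exists, which
 * lets callers assert the locking protocol uniformly.  Intended assertion
 * pattern (sketch):
 */
#if 0
	KASSERT(WM_CORE_LOCKED(sc));
#endif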
632
633 #define WM_RXCHAIN_RESET(rxq) \
634 do { \
635 (rxq)->rxq_tailp = &(rxq)->rxq_head; \
636 *(rxq)->rxq_tailp = NULL; \
637 (rxq)->rxq_len = 0; \
638 } while (/*CONSTCOND*/0)
639
640 #define WM_RXCHAIN_LINK(rxq, m) \
641 do { \
642 *(rxq)->rxq_tailp = (rxq)->rxq_tail = (m); \
643 (rxq)->rxq_tailp = &(m)->m_next; \
644 } while (/*CONSTCOND*/0)
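/*
 * WM_RXCHAIN_LINK() uses a "pointer to the tail pointer" so that an mbuf can
 * be appended to the per-queue receive chain in O(1) whether or not the chain
 * is empty.  Worked example (illustrative only; m0 and m1 are hypothetical
 * mbufs):
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_head == NULL, tailp == &rxq_head
 *	WM_RXCHAIN_LINK(rxq, m0);	rxq_head == m0,   tailp == &m0->m_next
 *	WM_RXCHAIN_LINK(rxq, m1);	m0->m_next == m1, tailp == &m1->m_next
 */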
645
646 #ifdef WM_EVENT_COUNTERS
647 #define WM_EVCNT_INCR(ev) (ev)->ev_count++
648 #define WM_EVCNT_ADD(ev, val) (ev)->ev_count += (val)
649
650 #define WM_Q_EVCNT_INCR(qname, evname) \
651 WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
652 #define WM_Q_EVCNT_ADD(qname, evname, val) \
653 WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
654 #else /* !WM_EVENT_COUNTERS */
655 #define WM_EVCNT_INCR(ev) /* nothing */
656 #define WM_EVCNT_ADD(ev, val) /* nothing */
657
658 #define WM_Q_EVCNT_INCR(qname, evname) /* nothing */
659 #define WM_Q_EVCNT_ADD(qname, evname, val) /* nothing */
660 #endif /* !WM_EVENT_COUNTERS */
661
662 #define CSR_READ(sc, reg) \
663 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
664 #define CSR_WRITE(sc, reg, val) \
665 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
666 #define CSR_WRITE_FLUSH(sc) \
667 (void)CSR_READ((sc), WMREG_STATUS)
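/*
 * CSR_WRITE_FLUSH() pushes posted PCI writes out to the device by reading an
 * innocuous register (STATUS).  Typical pattern (a sketch only; the delay
 * value is arbitrary here and the real reset paths use chip-specific waits):
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
	CSR_WRITE_FLUSH(sc);
	delay(5000);	/* let the reset complete before continuing */
#endif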
668
669 #define ICH8_FLASH_READ32(sc, reg) \
670 bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, \
671 (reg) + sc->sc_flashreg_offset)
672 #define ICH8_FLASH_WRITE32(sc, reg, data) \
673 bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, \
674 (reg) + sc->sc_flashreg_offset, (data))
675
676 #define ICH8_FLASH_READ16(sc, reg) \
677 bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, \
678 (reg) + sc->sc_flashreg_offset)
679 #define ICH8_FLASH_WRITE16(sc, reg, data) \
680 bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, \
681 (reg) + sc->sc_flashreg_offset, (data))
682
683 #define WM_CDTXADDR(txq, x) ((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
684 #define WM_CDRXADDR(rxq, x) ((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
685
686 #define WM_CDTXADDR_LO(txq, x) (WM_CDTXADDR((txq), (x)) & 0xffffffffU)
687 #define WM_CDTXADDR_HI(txq, x) \
688 (sizeof(bus_addr_t) == 8 ? \
689 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
690
691 #define WM_CDRXADDR_LO(rxq, x) (WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
692 #define WM_CDRXADDR_HI(rxq, x) \
693 (sizeof(bus_addr_t) == 8 ? \
694 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
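/*
 * Descriptor base addresses are programmed as two 32-bit halves; the _LO/_HI
 * macros above split the bus address accordingly, and _HI degenerates to 0
 * when bus_addr_t is only 32 bits wide.  Illustrative use (the exact
 * WMREG_TDBAL()/WMREG_TDBAH() spellings are assumed here, not quoted from
 * the register header):
 */
#if 0
	CSR_WRITE(sc, WMREG_TDBAL(0), WM_CDTXADDR_LO(txq, 0));
	CSR_WRITE(sc, WMREG_TDBAH(0), WM_CDTXADDR_HI(txq, 0));
#endif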
695
/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
700 #if 0
701 static inline uint32_t wm_io_read(struct wm_softc *, int);
702 #endif
703 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
704 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
705 uint32_t, uint32_t);
706 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
707
708 /*
709 * Descriptor sync/init functions.
710 */
711 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
712 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
713 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
714
715 /*
716 * Device driver interface functions and commonly used functions.
717 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
718 */
719 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
720 static int wm_match(device_t, cfdata_t, void *);
721 static void wm_attach(device_t, device_t, void *);
722 static int wm_detach(device_t, int);
723 static bool wm_suspend(device_t, const pmf_qual_t *);
724 static bool wm_resume(device_t, const pmf_qual_t *);
725 static void wm_watchdog(struct ifnet *);
726 static void wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
727 uint16_t *);
728 static void wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
729 uint16_t *);
730 static void wm_tick(void *);
731 static int wm_ifflags_cb(struct ethercom *);
732 static int wm_ioctl(struct ifnet *, u_long, void *);
733 /* MAC address related */
734 static uint16_t wm_check_alt_mac_addr(struct wm_softc *);
735 static int wm_read_mac_addr(struct wm_softc *, uint8_t *);
736 static void wm_set_ral(struct wm_softc *, const uint8_t *, int);
737 static uint32_t wm_mchash(struct wm_softc *, const uint8_t *);
738 static int wm_rar_count(struct wm_softc *);
739 static void wm_set_filter(struct wm_softc *);
740 /* Reset and init related */
741 static void wm_set_vlan(struct wm_softc *);
742 static void wm_set_pcie_completion_timeout(struct wm_softc *);
743 static void wm_get_auto_rd_done(struct wm_softc *);
744 static void wm_lan_init_done(struct wm_softc *);
745 static void wm_get_cfg_done(struct wm_softc *);
746 static int wm_phy_post_reset(struct wm_softc *);
747 static int wm_write_smbus_addr(struct wm_softc *);
748 static int wm_init_lcd_from_nvm(struct wm_softc *);
749 static int wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
750 static void wm_initialize_hardware_bits(struct wm_softc *);
751 static uint32_t wm_rxpbs_adjust_82580(uint32_t);
752 static int wm_reset_phy(struct wm_softc *);
753 static void wm_flush_desc_rings(struct wm_softc *);
754 static void wm_reset(struct wm_softc *);
755 static int wm_add_rxbuf(struct wm_rxqueue *, int);
756 static void wm_rxdrain(struct wm_rxqueue *);
757 static void wm_init_rss(struct wm_softc *);
758 static void wm_adjust_qnum(struct wm_softc *, int);
759 static inline bool wm_is_using_msix(struct wm_softc *);
760 static inline bool wm_is_using_multiqueue(struct wm_softc *);
761 static int wm_softint_establish(struct wm_softc *, int, int);
762 static int wm_setup_legacy(struct wm_softc *);
763 static int wm_setup_msix(struct wm_softc *);
764 static int wm_init(struct ifnet *);
765 static int wm_init_locked(struct ifnet *);
766 static void wm_init_sysctls(struct wm_softc *);
767 static void wm_unset_stopping_flags(struct wm_softc *);
768 static void wm_set_stopping_flags(struct wm_softc *);
769 static void wm_stop(struct ifnet *, int);
770 static void wm_stop_locked(struct ifnet *, bool, bool);
771 static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
772 static void wm_82547_txfifo_stall(void *);
773 static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
774 static void wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
775 /* DMA related */
776 static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
777 static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
778 static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
779 static void wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
780 struct wm_txqueue *);
781 static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
782 static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
783 static void wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
784 struct wm_rxqueue *);
785 static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
786 static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
787 static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
788 static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
789 static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
790 static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
791 static void wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
792 struct wm_txqueue *);
793 static int wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
794 struct wm_rxqueue *);
795 static int wm_alloc_txrx_queues(struct wm_softc *);
796 static void wm_free_txrx_queues(struct wm_softc *);
797 static int wm_init_txrx_queues(struct wm_softc *);
798 /* Start */
799 static void wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
800 struct wm_txsoft *, uint32_t *, uint8_t *);
801 static inline int wm_select_txqueue(struct ifnet *, struct mbuf *);
802 static void wm_start(struct ifnet *);
803 static void wm_start_locked(struct ifnet *);
804 static int wm_transmit(struct ifnet *, struct mbuf *);
805 static void wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
806 static void wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
807 bool);
808 static void wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
809 struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
810 static void wm_nq_start(struct ifnet *);
811 static void wm_nq_start_locked(struct ifnet *);
812 static int wm_nq_transmit(struct ifnet *, struct mbuf *);
813 static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
814 static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
815 bool);
816 static void wm_deferred_start_locked(struct wm_txqueue *);
817 static void wm_handle_queue(void *);
818 static void wm_handle_queue_work(struct work *, void *);
819 /* Interrupt */
820 static bool wm_txeof(struct wm_txqueue *, u_int);
821 static bool wm_rxeof(struct wm_rxqueue *, u_int);
822 static void wm_linkintr_gmii(struct wm_softc *, uint32_t);
823 static void wm_linkintr_tbi(struct wm_softc *, uint32_t);
824 static void wm_linkintr_serdes(struct wm_softc *, uint32_t);
825 static void wm_linkintr(struct wm_softc *, uint32_t);
826 static int wm_intr_legacy(void *);
827 static inline void wm_txrxintr_disable(struct wm_queue *);
828 static inline void wm_txrxintr_enable(struct wm_queue *);
829 static void wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
830 static int wm_txrxintr_msix(void *);
831 static int wm_linkintr_msix(void *);
832
833 /*
834 * Media related.
835 * GMII, SGMII, TBI, SERDES and SFP.
836 */
837 /* Common */
838 static void wm_tbi_serdes_set_linkled(struct wm_softc *);
839 /* GMII related */
840 static void wm_gmii_reset(struct wm_softc *);
841 static void wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
842 static int wm_get_phy_id_82575(struct wm_softc *);
843 static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
844 static int wm_gmii_mediachange(struct ifnet *);
845 static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
846 static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
847 static uint16_t wm_i82543_mii_recvbits(struct wm_softc *);
848 static int wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
849 static int wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
850 static int wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
851 static int wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
852 static int wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
853 static int wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
854 static int wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
855 static int wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
856 static int wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
857 static int wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
858 static int wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
859 static int wm_gmii_bm_writereg(device_t, int, int, uint16_t);
860 static int wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
861 static int wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
862 static int wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
863 bool);
864 static int wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
865 static int wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
866 static int wm_gmii_hv_writereg(device_t, int, int, uint16_t);
867 static int wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
868 static int wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
869 static int wm_gmii_82580_writereg(device_t, int, int, uint16_t);
870 static int wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
871 static int wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
872 static void wm_gmii_statchg(struct ifnet *);
873 /*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions access Kumeran-specific registers, not MII registers.
877 */
878 static int wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
879 static int wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
880 static int wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
881 static int wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
882 /* EMI register related */
883 static int wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
884 static int wm_read_emi_reg_locked(device_t, int, uint16_t *);
885 static int wm_write_emi_reg_locked(device_t, int, uint16_t);
886 /* SGMII */
887 static bool wm_sgmii_uses_mdio(struct wm_softc *);
888 static int wm_sgmii_readreg(device_t, int, int, uint16_t *);
889 static int wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
890 static int wm_sgmii_writereg(device_t, int, int, uint16_t);
891 static int wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
892 /* TBI related */
893 static bool wm_tbi_havesignal(struct wm_softc *, uint32_t);
894 static void wm_tbi_mediainit(struct wm_softc *);
895 static int wm_tbi_mediachange(struct ifnet *);
896 static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
897 static int wm_check_for_link(struct wm_softc *);
898 static void wm_tbi_tick(struct wm_softc *);
899 /* SERDES related */
900 static void wm_serdes_power_up_link_82575(struct wm_softc *);
901 static int wm_serdes_mediachange(struct ifnet *);
902 static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
903 static void wm_serdes_tick(struct wm_softc *);
904 /* SFP related */
905 static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
906 static uint32_t wm_sfp_get_media_type(struct wm_softc *);
907
908 /*
909 * NVM related.
910 * Microwire, SPI (w/wo EERD) and Flash.
911 */
912 /* Misc functions */
913 static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
914 static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
915 static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
916 /* Microwire */
917 static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
918 /* SPI */
919 static int wm_nvm_ready_spi(struct wm_softc *);
920 static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
922 static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
923 static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
924 /* Flash */
925 static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
926 unsigned int *);
927 static int32_t wm_ich8_cycle_init(struct wm_softc *);
928 static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
929 static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
930 uint32_t *);
931 static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
932 static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
933 static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
934 static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
935 static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
936 /* iNVM */
937 static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
938 static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
939 /* Lock, detecting NVM type, validate checksum and read */
940 static int wm_nvm_is_onboard_eeprom(struct wm_softc *);
941 static int wm_nvm_flash_presence_i210(struct wm_softc *);
942 static int wm_nvm_validate_checksum(struct wm_softc *);
943 static void wm_nvm_version_invm(struct wm_softc *);
944 static void wm_nvm_version(struct wm_softc *);
945 static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
946
947 /*
948 * Hardware semaphores.
 * Very complex...
950 */
951 static int wm_get_null(struct wm_softc *);
952 static void wm_put_null(struct wm_softc *);
953 static int wm_get_eecd(struct wm_softc *);
954 static void wm_put_eecd(struct wm_softc *);
955 static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
956 static void wm_put_swsm_semaphore(struct wm_softc *);
957 static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
958 static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
959 static int wm_get_nvm_80003(struct wm_softc *);
960 static void wm_put_nvm_80003(struct wm_softc *);
961 static int wm_get_nvm_82571(struct wm_softc *);
962 static void wm_put_nvm_82571(struct wm_softc *);
963 static int wm_get_phy_82575(struct wm_softc *);
964 static void wm_put_phy_82575(struct wm_softc *);
965 static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
966 static void wm_put_swfwhw_semaphore(struct wm_softc *);
967 static int wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */
968 static void wm_put_swflag_ich8lan(struct wm_softc *);
969 static int wm_get_nvm_ich8lan(struct wm_softc *);
970 static void wm_put_nvm_ich8lan(struct wm_softc *);
971 static int wm_get_hw_semaphore_82573(struct wm_softc *);
972 static void wm_put_hw_semaphore_82573(struct wm_softc *);
973
974 /*
975 * Management mode and power management related subroutines.
976 * BMC, AMT, suspend/resume and EEE.
977 */
978 #if 0
979 static int wm_check_mng_mode(struct wm_softc *);
980 static int wm_check_mng_mode_ich8lan(struct wm_softc *);
981 static int wm_check_mng_mode_82574(struct wm_softc *);
982 static int wm_check_mng_mode_generic(struct wm_softc *);
983 #endif
984 static int wm_enable_mng_pass_thru(struct wm_softc *);
985 static bool wm_phy_resetisblocked(struct wm_softc *);
986 static void wm_get_hw_control(struct wm_softc *);
987 static void wm_release_hw_control(struct wm_softc *);
988 static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
989 static int wm_init_phy_workarounds_pchlan(struct wm_softc *);
990 static void wm_init_manageability(struct wm_softc *);
991 static void wm_release_manageability(struct wm_softc *);
992 static void wm_get_wakeup(struct wm_softc *);
993 static int wm_ulp_disable(struct wm_softc *);
994 static int wm_enable_phy_wakeup(struct wm_softc *);
995 static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
996 static void wm_suspend_workarounds_ich8lan(struct wm_softc *);
997 static int wm_resume_workarounds_pchlan(struct wm_softc *);
998 static void wm_enable_wakeup(struct wm_softc *);
999 static void wm_disable_aspm(struct wm_softc *);
1000 /* LPLU (Low Power Link Up) */
1001 static void wm_lplu_d0_disable(struct wm_softc *);
1002 /* EEE */
1003 static int wm_set_eee_i350(struct wm_softc *);
1004 static int wm_set_eee_pchlan(struct wm_softc *);
1005 static int wm_set_eee(struct wm_softc *);
1006
1007 /*
1008 * Workarounds (mainly PHY related).
 * Most PHY workarounds live in the PHY drivers themselves.
1010 */
1011 static int wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
1012 static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
1013 static int wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
1014 static void wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
1015 static int wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
1016 static int wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
1017 static int wm_k1_gig_workaround_hv(struct wm_softc *, int);
1018 static int wm_k1_workaround_lv(struct wm_softc *);
1019 static int wm_link_stall_workaround_hv(struct wm_softc *);
1020 static int wm_set_mdio_slow_mode_hv(struct wm_softc *);
1021 static void wm_configure_k1_ich8lan(struct wm_softc *, int);
1022 static void wm_reset_init_script_82575(struct wm_softc *);
1023 static void wm_reset_mdicnfg_82580(struct wm_softc *);
1024 static bool wm_phy_is_accessible_pchlan(struct wm_softc *);
1025 static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
1026 static int wm_platform_pm_pch_lpt(struct wm_softc *, bool);
1027 static int wm_pll_workaround_i210(struct wm_softc *);
1028 static void wm_legacy_irq_quirk_spt(struct wm_softc *);
1029
1030 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
1031 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
1032
1033 /*
1034 * Devices supported by this driver.
1035 */
1036 static const struct wm_product {
1037 pci_vendor_id_t wmp_vendor;
1038 pci_product_id_t wmp_product;
1039 const char *wmp_name;
1040 wm_chip_type wmp_type;
1041 uint32_t wmp_flags;
1042 #define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN
1043 #define WMP_F_FIBER WM_MEDIATYPE_FIBER
1044 #define WMP_F_COPPER WM_MEDIATYPE_COPPER
1045 #define WMP_F_SERDES WM_MEDIATYPE_SERDES
1046 #define WMP_MEDIATYPE(x) ((x) & 0x03)
1047 } wm_products[] = {
1048 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
1049 "Intel i82542 1000BASE-X Ethernet",
1050 WM_T_82542_2_1, WMP_F_FIBER },
1051
1052 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
1053 "Intel i82543GC 1000BASE-X Ethernet",
1054 WM_T_82543, WMP_F_FIBER },
1055
1056 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
1057 "Intel i82543GC 1000BASE-T Ethernet",
1058 WM_T_82543, WMP_F_COPPER },
1059
1060 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
1061 "Intel i82544EI 1000BASE-T Ethernet",
1062 WM_T_82544, WMP_F_COPPER },
1063
1064 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
1065 "Intel i82544EI 1000BASE-X Ethernet",
1066 WM_T_82544, WMP_F_FIBER },
1067
1068 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
1069 "Intel i82544GC 1000BASE-T Ethernet",
1070 WM_T_82544, WMP_F_COPPER },
1071
1072 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
1073 "Intel i82544GC (LOM) 1000BASE-T Ethernet",
1074 WM_T_82544, WMP_F_COPPER },
1075
1076 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
1077 "Intel i82540EM 1000BASE-T Ethernet",
1078 WM_T_82540, WMP_F_COPPER },
1079
1080 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
1081 "Intel i82540EM (LOM) 1000BASE-T Ethernet",
1082 WM_T_82540, WMP_F_COPPER },
1083
1084 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
1085 "Intel i82540EP 1000BASE-T Ethernet",
1086 WM_T_82540, WMP_F_COPPER },
1087
1088 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
1089 "Intel i82540EP 1000BASE-T Ethernet",
1090 WM_T_82540, WMP_F_COPPER },
1091
1092 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
1093 "Intel i82540EP 1000BASE-T Ethernet",
1094 WM_T_82540, WMP_F_COPPER },
1095
1096 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
1097 "Intel i82545EM 1000BASE-T Ethernet",
1098 WM_T_82545, WMP_F_COPPER },
1099
1100 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
1101 "Intel i82545GM 1000BASE-T Ethernet",
1102 WM_T_82545_3, WMP_F_COPPER },
1103
1104 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
1105 "Intel i82545GM 1000BASE-X Ethernet",
1106 WM_T_82545_3, WMP_F_FIBER },
1107
1108 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
1109 "Intel i82545GM Gigabit Ethernet (SERDES)",
1110 WM_T_82545_3, WMP_F_SERDES },
1111
1112 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
1113 "Intel i82546EB 1000BASE-T Ethernet",
1114 WM_T_82546, WMP_F_COPPER },
1115
1116 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
1117 "Intel i82546EB 1000BASE-T Ethernet",
1118 WM_T_82546, WMP_F_COPPER },
1119
1120 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
1121 "Intel i82545EM 1000BASE-X Ethernet",
1122 WM_T_82545, WMP_F_FIBER },
1123
1124 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
1125 "Intel i82546EB 1000BASE-X Ethernet",
1126 WM_T_82546, WMP_F_FIBER },
1127
1128 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
1129 "Intel i82546GB 1000BASE-T Ethernet",
1130 WM_T_82546_3, WMP_F_COPPER },
1131
1132 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
1133 "Intel i82546GB 1000BASE-X Ethernet",
1134 WM_T_82546_3, WMP_F_FIBER },
1135
1136 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
1137 "Intel i82546GB Gigabit Ethernet (SERDES)",
1138 WM_T_82546_3, WMP_F_SERDES },
1139
1140 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
1141 "i82546GB quad-port Gigabit Ethernet",
1142 WM_T_82546_3, WMP_F_COPPER },
1143
1144 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
1145 "i82546GB quad-port Gigabit Ethernet (KSP3)",
1146 WM_T_82546_3, WMP_F_COPPER },
1147
1148 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
1149 "Intel PRO/1000MT (82546GB)",
1150 WM_T_82546_3, WMP_F_COPPER },
1151
1152 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
1153 "Intel i82541EI 1000BASE-T Ethernet",
1154 WM_T_82541, WMP_F_COPPER },
1155
1156 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
1157 "Intel i82541ER (LOM) 1000BASE-T Ethernet",
1158 WM_T_82541, WMP_F_COPPER },
1159
1160 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
1161 "Intel i82541EI Mobile 1000BASE-T Ethernet",
1162 WM_T_82541, WMP_F_COPPER },
1163
1164 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
1165 "Intel i82541ER 1000BASE-T Ethernet",
1166 WM_T_82541_2, WMP_F_COPPER },
1167
1168 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
1169 "Intel i82541GI 1000BASE-T Ethernet",
1170 WM_T_82541_2, WMP_F_COPPER },
1171
1172 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
1173 "Intel i82541GI Mobile 1000BASE-T Ethernet",
1174 WM_T_82541_2, WMP_F_COPPER },
1175
1176 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
1177 "Intel i82541PI 1000BASE-T Ethernet",
1178 WM_T_82541_2, WMP_F_COPPER },
1179
1180 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
1181 "Intel i82547EI 1000BASE-T Ethernet",
1182 WM_T_82547, WMP_F_COPPER },
1183
1184 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
1185 "Intel i82547EI Mobile 1000BASE-T Ethernet",
1186 WM_T_82547, WMP_F_COPPER },
1187
1188 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
1189 "Intel i82547GI 1000BASE-T Ethernet",
1190 WM_T_82547_2, WMP_F_COPPER },
1191
1192 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
1193 "Intel PRO/1000 PT (82571EB)",
1194 WM_T_82571, WMP_F_COPPER },
1195
1196 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
1197 "Intel PRO/1000 PF (82571EB)",
1198 WM_T_82571, WMP_F_FIBER },
1199
1200 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
1201 "Intel PRO/1000 PB (82571EB)",
1202 WM_T_82571, WMP_F_SERDES },
1203
1204 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
1205 "Intel PRO/1000 QT (82571EB)",
1206 WM_T_82571, WMP_F_COPPER },
1207
1208 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
1209 "Intel PRO/1000 PT Quad Port Server Adapter",
1210 WM_T_82571, WMP_F_COPPER },
1211
1212 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
1213 "Intel Gigabit PT Quad Port Server ExpressModule",
1214 WM_T_82571, WMP_F_COPPER },
1215
1216 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
1217 "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
1218 WM_T_82571, WMP_F_SERDES },
1219
1220 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
1221 "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
1222 WM_T_82571, WMP_F_SERDES },
1223
1224 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
1225 "Intel 82571EB Quad 1000baseX Ethernet",
1226 WM_T_82571, WMP_F_FIBER },
1227
1228 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
1229 "Intel i82572EI 1000baseT Ethernet",
1230 WM_T_82572, WMP_F_COPPER },
1231
1232 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
1233 "Intel i82572EI 1000baseX Ethernet",
1234 WM_T_82572, WMP_F_FIBER },
1235
1236 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
1237 "Intel i82572EI Gigabit Ethernet (SERDES)",
1238 WM_T_82572, WMP_F_SERDES },
1239
1240 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
1241 "Intel i82572EI 1000baseT Ethernet",
1242 WM_T_82572, WMP_F_COPPER },
1243
1244 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
1245 "Intel i82573E",
1246 WM_T_82573, WMP_F_COPPER },
1247
1248 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
1249 "Intel i82573E IAMT",
1250 WM_T_82573, WMP_F_COPPER },
1251
1252 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
1253 "Intel i82573L Gigabit Ethernet",
1254 WM_T_82573, WMP_F_COPPER },
1255
1256 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
1257 "Intel i82574L",
1258 WM_T_82574, WMP_F_COPPER },
1259
1260 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA,
1261 "Intel i82574L",
1262 WM_T_82574, WMP_F_COPPER },
1263
1264 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
1265 "Intel i82583V",
1266 WM_T_82583, WMP_F_COPPER },
1267
1268 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
1269 "i80003 dual 1000baseT Ethernet",
1270 WM_T_80003, WMP_F_COPPER },
1271
1272 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
1273 "i80003 dual 1000baseX Ethernet",
1274 WM_T_80003, WMP_F_COPPER },
1275
1276 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
1277 "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
1278 WM_T_80003, WMP_F_SERDES },
1279
1280 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
1281 "Intel i80003 1000baseT Ethernet",
1282 WM_T_80003, WMP_F_COPPER },
1283
1284 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
1285 "Intel i80003 Gigabit Ethernet (SERDES)",
1286 WM_T_80003, WMP_F_SERDES },
1287
1288 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
1289 "Intel i82801H (M_AMT) LAN Controller",
1290 WM_T_ICH8, WMP_F_COPPER },
1291 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
1292 "Intel i82801H (AMT) LAN Controller",
1293 WM_T_ICH8, WMP_F_COPPER },
1294 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
1295 "Intel i82801H LAN Controller",
1296 WM_T_ICH8, WMP_F_COPPER },
1297 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
1298 "Intel i82801H (IFE) 10/100 LAN Controller",
1299 WM_T_ICH8, WMP_F_COPPER },
1300 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
1301 "Intel i82801H (M) LAN Controller",
1302 WM_T_ICH8, WMP_F_COPPER },
1303 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
1304 "Intel i82801H IFE (GT) 10/100 LAN Controller",
1305 WM_T_ICH8, WMP_F_COPPER },
1306 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
1307 "Intel i82801H IFE (G) 10/100 LAN Controller",
1308 WM_T_ICH8, WMP_F_COPPER },
1309 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_82567V_3,
1310 "82567V-3 LAN Controller",
1311 WM_T_ICH8, WMP_F_COPPER },
1312 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
1313 "82801I (AMT) LAN Controller",
1314 WM_T_ICH9, WMP_F_COPPER },
1315 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
1316 "82801I 10/100 LAN Controller",
1317 WM_T_ICH9, WMP_F_COPPER },
1318 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
1319 "82801I (G) 10/100 LAN Controller",
1320 WM_T_ICH9, WMP_F_COPPER },
1321 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
1322 "82801I (GT) 10/100 LAN Controller",
1323 WM_T_ICH9, WMP_F_COPPER },
1324 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
1325 "82801I (C) LAN Controller",
1326 WM_T_ICH9, WMP_F_COPPER },
1327 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
1328 "82801I mobile LAN Controller",
1329 WM_T_ICH9, WMP_F_COPPER },
1330 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_V,
1331 "82801I mobile (V) LAN Controller",
1332 WM_T_ICH9, WMP_F_COPPER },
1333 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1334 "82801I mobile (AMT) LAN Controller",
1335 WM_T_ICH9, WMP_F_COPPER },
1336 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
1337 "82567LM-4 LAN Controller",
1338 WM_T_ICH9, WMP_F_COPPER },
1339 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1340 "82567LM-2 LAN Controller",
1341 WM_T_ICH10, WMP_F_COPPER },
1342 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1343 "82567LF-2 LAN Controller",
1344 WM_T_ICH10, WMP_F_COPPER },
1345 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1346 "82567LM-3 LAN Controller",
1347 WM_T_ICH10, WMP_F_COPPER },
1348 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1349 "82567LF-3 LAN Controller",
1350 WM_T_ICH10, WMP_F_COPPER },
1351 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
1352 "82567V-2 LAN Controller",
1353 WM_T_ICH10, WMP_F_COPPER },
1354 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V,
1355 "82567V-3? LAN Controller",
1356 WM_T_ICH10, WMP_F_COPPER },
1357 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE,
1358 "HANKSVILLE LAN Controller",
1359 WM_T_ICH10, WMP_F_COPPER },
1360 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
1361 "PCH LAN (82577LM) Controller",
1362 WM_T_PCH, WMP_F_COPPER },
1363 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
1364 "PCH LAN (82577LC) Controller",
1365 WM_T_PCH, WMP_F_COPPER },
1366 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
1367 "PCH LAN (82578DM) Controller",
1368 WM_T_PCH, WMP_F_COPPER },
1369 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
1370 "PCH LAN (82578DC) Controller",
1371 WM_T_PCH, WMP_F_COPPER },
1372 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
1373 "PCH2 LAN (82579LM) Controller",
1374 WM_T_PCH2, WMP_F_COPPER },
1375 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
1376 "PCH2 LAN (82579V) Controller",
1377 WM_T_PCH2, WMP_F_COPPER },
1378 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
1379 "82575EB dual-1000baseT Ethernet",
1380 WM_T_82575, WMP_F_COPPER },
1381 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1382 "82575EB dual-1000baseX Ethernet (SERDES)",
1383 WM_T_82575, WMP_F_SERDES },
1384 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1385 "82575GB quad-1000baseT Ethernet",
1386 WM_T_82575, WMP_F_COPPER },
1387 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1388 "82575GB quad-1000baseT Ethernet (PM)",
1389 WM_T_82575, WMP_F_COPPER },
1390 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
1391 "82576 1000BaseT Ethernet",
1392 WM_T_82576, WMP_F_COPPER },
1393 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
1394 "82576 1000BaseX Ethernet",
1395 WM_T_82576, WMP_F_FIBER },
1396
1397 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
1398 "82576 gigabit Ethernet (SERDES)",
1399 WM_T_82576, WMP_F_SERDES },
1400
1401 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1402 "82576 quad-1000BaseT Ethernet",
1403 WM_T_82576, WMP_F_COPPER },
1404
1405 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1406 "82576 Gigabit ET2 Quad Port Server Adapter",
1407 WM_T_82576, WMP_F_COPPER },
1408
1409 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
1410 "82576 gigabit Ethernet",
1411 WM_T_82576, WMP_F_COPPER },
1412
1413 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
1414 "82576 gigabit Ethernet (SERDES)",
1415 WM_T_82576, WMP_F_SERDES },
1416 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1417 "82576 quad-gigabit Ethernet (SERDES)",
1418 WM_T_82576, WMP_F_SERDES },
1419
1420 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
1421 "82580 1000BaseT Ethernet",
1422 WM_T_82580, WMP_F_COPPER },
1423 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
1424 "82580 1000BaseX Ethernet",
1425 WM_T_82580, WMP_F_FIBER },
1426
1427 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
1428 "82580 1000BaseT Ethernet (SERDES)",
1429 WM_T_82580, WMP_F_SERDES },
1430
1431 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
1432 "82580 gigabit Ethernet (SGMII)",
1433 WM_T_82580, WMP_F_COPPER },
1434 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1435 "82580 dual-1000BaseT Ethernet",
1436 WM_T_82580, WMP_F_COPPER },
1437
1438 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1439 "82580 quad-1000BaseX Ethernet",
1440 WM_T_82580, WMP_F_FIBER },
1441
1442 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1443 "DH89XXCC Gigabit Ethernet (SGMII)",
1444 WM_T_82580, WMP_F_COPPER },
1445
1446 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1447 "DH89XXCC Gigabit Ethernet (SERDES)",
1448 WM_T_82580, WMP_F_SERDES },
1449
1450 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1451 "DH89XXCC 1000BASE-KX Ethernet",
1452 WM_T_82580, WMP_F_SERDES },
1453
1454 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1455 "DH89XXCC Gigabit Ethernet (SFP)",
1456 WM_T_82580, WMP_F_SERDES },
1457
1458 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
1459 "I350 Gigabit Network Connection",
1460 WM_T_I350, WMP_F_COPPER },
1461
1462 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
1463 "I350 Gigabit Fiber Network Connection",
1464 WM_T_I350, WMP_F_FIBER },
1465
1466 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
1467 "I350 Gigabit Backplane Connection",
1468 WM_T_I350, WMP_F_SERDES },
1469
1470 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_DA4,
1471 "I350 Quad Port Gigabit Ethernet",
1472 WM_T_I350, WMP_F_SERDES },
1473
1474 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
1475 "I350 Gigabit Connection",
1476 WM_T_I350, WMP_F_COPPER },
1477
1478 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_1000KX,
1479 "I354 Gigabit Ethernet (KX)",
1480 WM_T_I354, WMP_F_SERDES },
1481
1482 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
1483 "I354 Gigabit Ethernet (SGMII)",
1484 WM_T_I354, WMP_F_COPPER },
1485
1486 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_25GBE,
1487 "I354 Gigabit Ethernet (2.5G)",
1488 WM_T_I354, WMP_F_COPPER },
1489
1490 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
1491 "I210-T1 Ethernet Server Adapter",
1492 WM_T_I210, WMP_F_COPPER },
1493
1494 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1495 "I210 Ethernet (Copper OEM)",
1496 WM_T_I210, WMP_F_COPPER },
1497
1498 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
1499 "I210 Ethernet (Copper IT)",
1500 WM_T_I210, WMP_F_COPPER },
1501
1502 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1503 "I210 Ethernet (Copper, FLASH less)",
1504 WM_T_I210, WMP_F_COPPER },
1505
1506 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
1507 "I210 Gigabit Ethernet (Fiber)",
1508 WM_T_I210, WMP_F_FIBER },
1509
1510 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
1511 "I210 Gigabit Ethernet (SERDES)",
1512 WM_T_I210, WMP_F_SERDES },
1513
1514 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1515 "I210 Gigabit Ethernet (SERDES, FLASH less)",
1516 WM_T_I210, WMP_F_SERDES },
1517
1518 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
1519 "I210 Gigabit Ethernet (SGMII)",
1520 WM_T_I210, WMP_F_COPPER },
1521
1522 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII_WOF,
1523 "I210 Gigabit Ethernet (SGMII, FLASH less)",
1524 WM_T_I210, WMP_F_COPPER },
1525
1526 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
1527 "I211 Ethernet (COPPER)",
1528 WM_T_I211, WMP_F_COPPER },
1529 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
1530 "I217 V Ethernet Connection",
1531 WM_T_PCH_LPT, WMP_F_COPPER },
1532 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
1533 "I217 LM Ethernet Connection",
1534 WM_T_PCH_LPT, WMP_F_COPPER },
1535 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
1536 "I218 V Ethernet Connection",
1537 WM_T_PCH_LPT, WMP_F_COPPER },
1538 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2,
1539 "I218 V Ethernet Connection",
1540 WM_T_PCH_LPT, WMP_F_COPPER },
1541 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3,
1542 "I218 V Ethernet Connection",
1543 WM_T_PCH_LPT, WMP_F_COPPER },
1544 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
1545 "I218 LM Ethernet Connection",
1546 WM_T_PCH_LPT, WMP_F_COPPER },
1547 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2,
1548 "I218 LM Ethernet Connection",
1549 WM_T_PCH_LPT, WMP_F_COPPER },
1550 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3,
1551 "I218 LM Ethernet Connection",
1552 WM_T_PCH_LPT, WMP_F_COPPER },
1553 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM,
1554 "I219 LM Ethernet Connection",
1555 WM_T_PCH_SPT, WMP_F_COPPER },
1556 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM2,
1557 "I219 LM Ethernet Connection",
1558 WM_T_PCH_SPT, WMP_F_COPPER },
1559 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM3,
1560 "I219 LM Ethernet Connection",
1561 WM_T_PCH_SPT, WMP_F_COPPER },
1562 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM4,
1563 "I219 LM Ethernet Connection",
1564 WM_T_PCH_SPT, WMP_F_COPPER },
1565 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM5,
1566 "I219 LM Ethernet Connection",
1567 WM_T_PCH_SPT, WMP_F_COPPER },
1568 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM6,
1569 "I219 LM Ethernet Connection",
1570 WM_T_PCH_CNP, WMP_F_COPPER },
1571 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM7,
1572 "I219 LM Ethernet Connection",
1573 WM_T_PCH_CNP, WMP_F_COPPER },
1574 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM8,
1575 "I219 LM Ethernet Connection",
1576 WM_T_PCH_CNP, WMP_F_COPPER },
1577 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM9,
1578 "I219 LM Ethernet Connection",
1579 WM_T_PCH_CNP, WMP_F_COPPER },
1580 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM10,
1581 "I219 LM Ethernet Connection",
1582 WM_T_PCH_CNP, WMP_F_COPPER },
1583 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM11,
1584 "I219 LM Ethernet Connection",
1585 WM_T_PCH_CNP, WMP_F_COPPER },
1586 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM12,
1587 "I219 LM Ethernet Connection",
1588 WM_T_PCH_SPT, WMP_F_COPPER },
1589 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM13,
1590 "I219 LM Ethernet Connection",
1591 WM_T_PCH_CNP, WMP_F_COPPER },
1592 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM14,
1593 "I219 LM Ethernet Connection",
1594 WM_T_PCH_CNP, WMP_F_COPPER },
1595 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM15,
1596 "I219 LM Ethernet Connection",
1597 WM_T_PCH_CNP, WMP_F_COPPER },
1598 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V,
1599 "I219 V Ethernet Connection",
1600 WM_T_PCH_SPT, WMP_F_COPPER },
1601 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2,
1602 "I219 V Ethernet Connection",
1603 WM_T_PCH_SPT, WMP_F_COPPER },
1604 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V4,
1605 "I219 V Ethernet Connection",
1606 WM_T_PCH_SPT, WMP_F_COPPER },
1607 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V5,
1608 "I219 V Ethernet Connection",
1609 WM_T_PCH_SPT, WMP_F_COPPER },
1610 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V6,
1611 "I219 V Ethernet Connection",
1612 WM_T_PCH_CNP, WMP_F_COPPER },
1613 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V7,
1614 "I219 V Ethernet Connection",
1615 WM_T_PCH_CNP, WMP_F_COPPER },
1616 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V8,
1617 "I219 V Ethernet Connection",
1618 WM_T_PCH_CNP, WMP_F_COPPER },
1619 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V9,
1620 "I219 V Ethernet Connection",
1621 WM_T_PCH_CNP, WMP_F_COPPER },
1622 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V10,
1623 "I219 V Ethernet Connection",
1624 WM_T_PCH_CNP, WMP_F_COPPER },
1625 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V11,
1626 "I219 V Ethernet Connection",
1627 WM_T_PCH_CNP, WMP_F_COPPER },
1628 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V12,
1629 "I219 V Ethernet Connection",
1630 WM_T_PCH_SPT, WMP_F_COPPER },
1631 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V13,
1632 "I219 V Ethernet Connection",
1633 WM_T_PCH_CNP, WMP_F_COPPER },
1634 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V14,
1635 "I219 V Ethernet Connection",
1636 WM_T_PCH_CNP, WMP_F_COPPER },
1637 { 0, 0,
1638 NULL,
1639 0, 0 },
1640 };
1641
1642 /*
1643 * Register read/write functions.
1644 * Other than CSR_{READ|WRITE}().
1645 */
1646
1647 #if 0 /* Not currently used */
1648 static inline uint32_t
1649 wm_io_read(struct wm_softc *sc, int reg)
1650 {
1651
1652 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1653 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1654 }
1655 #endif
1656
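/*
 * wm_io_write:
 *
 *	Write a device register through the I/O-mapped indirect access
 *	window: the register offset goes to the first dword of the I/O
 *	BAR and the value to the second.
 */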
1657 static inline void
1658 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1659 {
1660
1661 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1662 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1663 }
1664
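/*
 * wm_82575_write_8bit_ctlr_reg:
 *
 *	Write an 8-bit value to a register reached indirectly through an
 *	SCTL-style control register: encode the target offset and data in
 *	a single write, then poll for SCTL_CTL_READY, warning on timeout.
 */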
1665 static inline void
1666 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1667 uint32_t data)
1668 {
1669 uint32_t regval;
1670 int i;
1671
1672 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1673
1674 CSR_WRITE(sc, reg, regval);
1675
1676 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1677 delay(5);
1678 if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1679 break;
1680 }
1681 if (i == SCTL_CTL_POLL_TIMEOUT) {
1682 aprint_error("%s: WARNING:"
1683 " i82575 reg 0x%08x setup did not indicate ready\n",
1684 device_xname(sc->sc_dev), reg);
1685 }
1686 }
1687
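/*
 * wm_set_dma_addr:
 *
 *	Store a DMA address into the low/high 32-bit pair of a legacy
 *	(wiseman) address field, in little-endian byte order.  The high
 *	word is only non-zero when bus_addr_t is 64 bits wide.
 */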
1688 static inline void
1689 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1690 {
1691 wa->wa_low = htole32(v & 0xffffffffU);
1692 if (sizeof(bus_addr_t) == 8)
1693 wa->wa_high = htole32((uint64_t) v >> 32);
1694 else
1695 wa->wa_high = 0;
1696 }
1697
1698 /*
1699 * Descriptor sync/init functions.
1700 */
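/*
 * wm_cdtxsync:
 *
 *	Sync "num" transmit descriptors starting at "start", splitting the
 *	bus_dmamap_sync() into two calls when the range wraps around the
 *	end of the descriptor ring.
 */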
1701 static inline void
1702 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1703 {
1704 struct wm_softc *sc = txq->txq_sc;
1705
1706 /* If it will wrap around, sync to the end of the ring. */
1707 if ((start + num) > WM_NTXDESC(txq)) {
1708 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1709 WM_CDTXOFF(txq, start), txq->txq_descsize *
1710 (WM_NTXDESC(txq) - start), ops);
1711 num -= (WM_NTXDESC(txq) - start);
1712 start = 0;
1713 }
1714
1715 /* Now sync whatever is left. */
1716 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1717 WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1718 }
1719
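/*
 * wm_cdrxsync:
 *
 *	Sync a single receive descriptor.
 */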
1720 static inline void
1721 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1722 {
1723 struct wm_softc *sc = rxq->rxq_sc;
1724
1725 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1726 WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1727 }
1728
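/*
 * wm_init_rxdesc:
 *
 *	(Re)initialize the receive descriptor at "start" to point at its
 *	mbuf's data buffer and hand it back to the hardware by writing the
 *	receive descriptor tail (RDT) register.
 */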
1729 static inline void
1730 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1731 {
1732 struct wm_softc *sc = rxq->rxq_sc;
1733 struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1734 struct mbuf *m = rxs->rxs_mbuf;
1735
1736 /*
1737 * Note: We scoot the packet forward 2 bytes in the buffer
1738 * so that the payload after the Ethernet header is aligned
1739 * to a 4-byte boundary.
1740 	 *
1741 * XXX BRAINDAMAGE ALERT!
1742 * The stupid chip uses the same size for every buffer, which
1743 * is set in the Receive Control register. We are using the 2K
1744 * size option, but what we REALLY want is (2K - 2)! For this
1745 * reason, we can't "scoot" packets longer than the standard
1746 * Ethernet MTU. On strict-alignment platforms, if the total
1747 * size exceeds (2K - 2) we set align_tweak to 0 and let
1748 * the upper layer copy the headers.
1749 */
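	/*
	 * E.g. with sc_align_tweak == 2, the 14-byte Ethernet header ends
	 * at offset 16, so the payload that follows starts on a 4-byte
	 * boundary.
	 */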
1750 m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1751
1752 if (sc->sc_type == WM_T_82574) {
1753 ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1754 rxd->erx_data.erxd_addr =
1755 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1756 rxd->erx_data.erxd_dd = 0;
1757 } else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1758 nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1759
1760 rxd->nqrx_data.nrxd_paddr =
1761 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1762 /* Currently, split header is not supported. */
1763 rxd->nqrx_data.nrxd_haddr = 0;
1764 } else {
1765 wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1766
1767 wm_set_dma_addr(&rxd->wrx_addr,
1768 rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1769 rxd->wrx_len = 0;
1770 rxd->wrx_cksum = 0;
1771 rxd->wrx_status = 0;
1772 rxd->wrx_errors = 0;
1773 rxd->wrx_special = 0;
1774 }
1775 wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1776
1777 CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1778 }
1779
1780 /*
1781 * Device driver interface functions and commonly used functions.
1782 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1783 */
1784
1785 /* Look up the device in the supported device table */
1786 static const struct wm_product *
1787 wm_lookup(const struct pci_attach_args *pa)
1788 {
1789 const struct wm_product *wmp;
1790
1791 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1792 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1793 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1794 return wmp;
1795 }
1796 return NULL;
1797 }
1798
1799 /* The match function (ca_match) */
1800 static int
1801 wm_match(device_t parent, cfdata_t cf, void *aux)
1802 {
1803 struct pci_attach_args *pa = aux;
1804
1805 if (wm_lookup(pa) != NULL)
1806 return 1;
1807
1808 return 0;
1809 }
1810
1811 /* The attach function (ca_attach) */
1812 static void
1813 wm_attach(device_t parent, device_t self, void *aux)
1814 {
1815 struct wm_softc *sc = device_private(self);
1816 struct pci_attach_args *pa = aux;
1817 prop_dictionary_t dict;
1818 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1819 pci_chipset_tag_t pc = pa->pa_pc;
1820 int counts[PCI_INTR_TYPE_SIZE];
1821 pci_intr_type_t max_type;
1822 const char *eetype, *xname;
1823 bus_space_tag_t memt;
1824 bus_space_handle_t memh;
1825 bus_size_t memsize;
1826 int memh_valid;
1827 int i, error;
1828 const struct wm_product *wmp;
1829 prop_data_t ea;
1830 prop_number_t pn;
1831 uint8_t enaddr[ETHER_ADDR_LEN];
1832 char buf[256];
1833 char wqname[MAXCOMLEN];
1834 uint16_t cfg1, cfg2, swdpin, nvmword;
1835 pcireg_t preg, memtype;
1836 uint16_t eeprom_data, apme_mask;
1837 bool force_clear_smbi;
1838 uint32_t link_mode;
1839 uint32_t reg;
1840
1841 sc->sc_dev = self;
1842 callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1843 callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
1844 sc->sc_core_stopping = false;
1845
1846 wmp = wm_lookup(pa);
1847 #ifdef DIAGNOSTIC
1848 if (wmp == NULL) {
1849 printf("\n");
1850 panic("wm_attach: impossible");
1851 }
1852 #endif
1853 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1854
1855 sc->sc_pc = pa->pa_pc;
1856 sc->sc_pcitag = pa->pa_tag;
1857
1858 if (pci_dma64_available(pa))
1859 sc->sc_dmat = pa->pa_dmat64;
1860 else
1861 sc->sc_dmat = pa->pa_dmat;
1862
1863 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1864 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1865 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1866
1867 sc->sc_type = wmp->wmp_type;
1868
1869 /* Set default function pointers */
1870 sc->phy.acquire = sc->nvm.acquire = wm_get_null;
1871 sc->phy.release = sc->nvm.release = wm_put_null;
1872 sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
1873
1874 if (sc->sc_type < WM_T_82543) {
1875 if (sc->sc_rev < 2) {
1876 aprint_error_dev(sc->sc_dev,
1877 "i82542 must be at least rev. 2\n");
1878 return;
1879 }
1880 if (sc->sc_rev < 3)
1881 sc->sc_type = WM_T_82542_2_0;
1882 }
1883
1884 /*
1885 * Disable MSI for Errata:
1886 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1887 *
1888 * 82544: Errata 25
1889 * 82540: Errata 6 (easy to reproduce device timeout)
1890 * 82545: Errata 4 (easy to reproduce device timeout)
1891 * 82546: Errata 26 (easy to reproduce device timeout)
1892 * 82541: Errata 7 (easy to reproduce device timeout)
1893 *
1894 * "Byte Enables 2 and 3 are not set on MSI writes"
1895 *
1896 * 82571 & 82572: Errata 63
1897 */
1898 if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1899 || (sc->sc_type == WM_T_82572))
1900 pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1901
1902 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1903 || (sc->sc_type == WM_T_82580)
1904 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1905 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1906 sc->sc_flags |= WM_F_NEWQUEUE;
1907
1908 /* Set device properties (mactype) */
1909 dict = device_properties(sc->sc_dev);
1910 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1911
1912 /*
1913 	 * Map the device. All devices support memory-mapped access,
1914 * and it is really required for normal operation.
1915 */
1916 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1917 switch (memtype) {
1918 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1919 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1920 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1921 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1922 break;
1923 default:
1924 memh_valid = 0;
1925 break;
1926 }
1927
1928 if (memh_valid) {
1929 sc->sc_st = memt;
1930 sc->sc_sh = memh;
1931 sc->sc_ss = memsize;
1932 } else {
1933 aprint_error_dev(sc->sc_dev,
1934 "unable to map device registers\n");
1935 return;
1936 }
1937
1938 /*
1939 * In addition, i82544 and later support I/O mapped indirect
1940 * register access. It is not desirable (nor supported in
1941 * this driver) to use it for normal operation, though it is
1942 * required to work around bugs in some chip versions.
1943 */
1944 if (sc->sc_type >= WM_T_82544) {
1945 /* First we have to find the I/O BAR. */
1946 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1947 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1948 if (memtype == PCI_MAPREG_TYPE_IO)
1949 break;
1950 if (PCI_MAPREG_MEM_TYPE(memtype) ==
1951 PCI_MAPREG_MEM_TYPE_64BIT)
1952 i += 4; /* skip high bits, too */
1953 }
1954 if (i < PCI_MAPREG_END) {
1955 /*
1956 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1957 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1958 			 * That's not a problem because the newer chips
1959 			 * don't have this bug.
1960 			 *
1961 			 * The i8254x apparently doesn't respond when the
1962 			 * I/O BAR is 0, which looks somewhat like it
1963 			 * hasn't been configured.
1964 */
1965 preg = pci_conf_read(pc, pa->pa_tag, i);
1966 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1967 aprint_error_dev(sc->sc_dev,
1968 "WARNING: I/O BAR at zero.\n");
1969 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1970 0, &sc->sc_iot, &sc->sc_ioh,
1971 NULL, &sc->sc_ios) == 0) {
1972 sc->sc_flags |= WM_F_IOH_VALID;
1973 } else
1974 aprint_error_dev(sc->sc_dev,
1975 "WARNING: unable to map I/O space\n");
1976 }
1977
1978 }
1979
1980 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1981 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1982 preg |= PCI_COMMAND_MASTER_ENABLE;
1983 if (sc->sc_type < WM_T_82542_2_1)
1984 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1985 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1986
1987 /* Power up chip */
1988 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
1989 && error != EOPNOTSUPP) {
1990 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1991 return;
1992 }
1993
1994 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1995 /*
1996 	 * To save interrupt resources, don't use MSI-X if we can use only
1997 	 * one queue.
1998 */
1999 if (sc->sc_nqueues > 1) {
2000 max_type = PCI_INTR_TYPE_MSIX;
2001 /*
2002 		 * The 82583 has an MSI-X capability in the PCI configuration
2003 		 * space but doesn't actually support it. At least the
2004 		 * documentation doesn't say anything about MSI-X.
2005 */
2006 counts[PCI_INTR_TYPE_MSIX]
2007 = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
2008 } else {
2009 max_type = PCI_INTR_TYPE_MSI;
2010 counts[PCI_INTR_TYPE_MSIX] = 0;
2011 }
2012
2013 /* Allocation settings */
2014 counts[PCI_INTR_TYPE_MSI] = 1;
2015 counts[PCI_INTR_TYPE_INTX] = 1;
2016 /* overridden by disable flags */
2017 if (wm_disable_msi != 0) {
2018 counts[PCI_INTR_TYPE_MSI] = 0;
2019 if (wm_disable_msix != 0) {
2020 max_type = PCI_INTR_TYPE_INTX;
2021 counts[PCI_INTR_TYPE_MSIX] = 0;
2022 }
2023 } else if (wm_disable_msix != 0) {
2024 max_type = PCI_INTR_TYPE_MSI;
2025 counts[PCI_INTR_TYPE_MSIX] = 0;
2026 }
2027
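	/*
	 * Interrupt allocation fallback ladder: try the preferred type
	 * first (MSI-X when multiqueue is possible); if establishing it
	 * fails, release the allocation and retry with MSI, then INTx.
	 */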
2028 alloc_retry:
2029 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
2030 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
2031 return;
2032 }
2033
2034 if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
2035 error = wm_setup_msix(sc);
2036 if (error) {
2037 pci_intr_release(pc, sc->sc_intrs,
2038 counts[PCI_INTR_TYPE_MSIX]);
2039
2040 /* Setup for MSI: Disable MSI-X */
2041 max_type = PCI_INTR_TYPE_MSI;
2042 counts[PCI_INTR_TYPE_MSI] = 1;
2043 counts[PCI_INTR_TYPE_INTX] = 1;
2044 goto alloc_retry;
2045 }
2046 } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
2047 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */
2048 error = wm_setup_legacy(sc);
2049 if (error) {
2050 pci_intr_release(sc->sc_pc, sc->sc_intrs,
2051 counts[PCI_INTR_TYPE_MSI]);
2052
2053 /* The next try is for INTx: Disable MSI */
2054 max_type = PCI_INTR_TYPE_INTX;
2055 counts[PCI_INTR_TYPE_INTX] = 1;
2056 goto alloc_retry;
2057 }
2058 } else {
2059 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */
2060 error = wm_setup_legacy(sc);
2061 if (error) {
2062 pci_intr_release(sc->sc_pc, sc->sc_intrs,
2063 counts[PCI_INTR_TYPE_INTX]);
2064 return;
2065 }
2066 }
2067
2068 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
2069 error = workqueue_create(&sc->sc_queue_wq, wqname,
2070 wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
2071 WM_WORKQUEUE_FLAGS);
2072 if (error) {
2073 aprint_error_dev(sc->sc_dev,
2074 "unable to create workqueue\n");
2075 goto out;
2076 }
2077
2078 /*
2079 * Check the function ID (unit number of the chip).
2080 */
2081 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
2082 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
2083 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2084 || (sc->sc_type == WM_T_82580)
2085 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2086 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
2087 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
2088 else
2089 sc->sc_funcid = 0;
2090
2091 /*
2092 * Determine a few things about the bus we're connected to.
2093 */
2094 if (sc->sc_type < WM_T_82543) {
2095 /* We don't really know the bus characteristics here. */
2096 sc->sc_bus_speed = 33;
2097 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
2098 /*
2099 		 * CSA (Communication Streaming Architecture) is about as fast
2100 		 * as a 32-bit 66MHz PCI bus.
2101 */
2102 sc->sc_flags |= WM_F_CSA;
2103 sc->sc_bus_speed = 66;
2104 aprint_verbose_dev(sc->sc_dev,
2105 "Communication Streaming Architecture\n");
2106 if (sc->sc_type == WM_T_82547) {
2107 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
2108 callout_setfunc(&sc->sc_txfifo_ch,
2109 wm_82547_txfifo_stall, sc);
2110 aprint_verbose_dev(sc->sc_dev,
2111 "using 82547 Tx FIFO stall work-around\n");
2112 }
2113 } else if (sc->sc_type >= WM_T_82571) {
2114 sc->sc_flags |= WM_F_PCIE;
2115 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
2116 && (sc->sc_type != WM_T_ICH10)
2117 && (sc->sc_type != WM_T_PCH)
2118 && (sc->sc_type != WM_T_PCH2)
2119 && (sc->sc_type != WM_T_PCH_LPT)
2120 && (sc->sc_type != WM_T_PCH_SPT)
2121 && (sc->sc_type != WM_T_PCH_CNP)) {
2122 /* ICH* and PCH* have no PCIe capability registers */
2123 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2124 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
2125 NULL) == 0)
2126 aprint_error_dev(sc->sc_dev,
2127 "unable to find PCIe capability\n");
2128 }
2129 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
2130 } else {
2131 reg = CSR_READ(sc, WMREG_STATUS);
2132 if (reg & STATUS_BUS64)
2133 sc->sc_flags |= WM_F_BUS64;
2134 if ((reg & STATUS_PCIX_MODE) != 0) {
2135 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
2136
2137 sc->sc_flags |= WM_F_PCIX;
2138 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2139 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
2140 aprint_error_dev(sc->sc_dev,
2141 "unable to find PCIX capability\n");
2142 else if (sc->sc_type != WM_T_82545_3 &&
2143 sc->sc_type != WM_T_82546_3) {
2144 /*
2145 * Work around a problem caused by the BIOS
2146 * setting the max memory read byte count
2147 * incorrectly.
2148 */
2149 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
2150 sc->sc_pcixe_capoff + PCIX_CMD);
2151 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2152 sc->sc_pcixe_capoff + PCIX_STATUS);
2153
2154 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2155 PCIX_CMD_BYTECNT_SHIFT;
2156 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2157 PCIX_STATUS_MAXB_SHIFT;
2158 if (bytecnt > maxb) {
2159 aprint_verbose_dev(sc->sc_dev,
2160 "resetting PCI-X MMRBC: %d -> %d\n",
2161 512 << bytecnt, 512 << maxb);
2162 pcix_cmd = (pcix_cmd &
2163 ~PCIX_CMD_BYTECNT_MASK) |
2164 (maxb << PCIX_CMD_BYTECNT_SHIFT);
2165 pci_conf_write(pa->pa_pc, pa->pa_tag,
2166 sc->sc_pcixe_capoff + PCIX_CMD,
2167 pcix_cmd);
2168 }
2169 }
2170 }
2171 /*
2172 * The quad port adapter is special; it has a PCIX-PCIX
2173 * bridge on the board, and can run the secondary bus at
2174 * a higher speed.
2175 */
2176 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2177 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2178 : 66;
2179 } else if (sc->sc_flags & WM_F_PCIX) {
2180 switch (reg & STATUS_PCIXSPD_MASK) {
2181 case STATUS_PCIXSPD_50_66:
2182 sc->sc_bus_speed = 66;
2183 break;
2184 case STATUS_PCIXSPD_66_100:
2185 sc->sc_bus_speed = 100;
2186 break;
2187 case STATUS_PCIXSPD_100_133:
2188 sc->sc_bus_speed = 133;
2189 break;
2190 default:
2191 aprint_error_dev(sc->sc_dev,
2192 "unknown PCIXSPD %d; assuming 66MHz\n",
2193 reg & STATUS_PCIXSPD_MASK);
2194 sc->sc_bus_speed = 66;
2195 break;
2196 }
2197 } else
2198 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2199 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2200 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2201 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2202 }
2203
2204 /* clear interesting stat counters */
2205 CSR_READ(sc, WMREG_COLC);
2206 CSR_READ(sc, WMREG_RXERRC);
2207
2208 if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2209 || (sc->sc_type >= WM_T_ICH8))
2210 sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2211 if (sc->sc_type >= WM_T_ICH8)
2212 sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2213
2214 /* Set PHY, NVM mutex related stuff */
2215 switch (sc->sc_type) {
2216 case WM_T_82542_2_0:
2217 case WM_T_82542_2_1:
2218 case WM_T_82543:
2219 case WM_T_82544:
2220 /* Microwire */
2221 sc->nvm.read = wm_nvm_read_uwire;
2222 sc->sc_nvm_wordsize = 64;
2223 sc->sc_nvm_addrbits = 6;
2224 break;
2225 case WM_T_82540:
2226 case WM_T_82545:
2227 case WM_T_82545_3:
2228 case WM_T_82546:
2229 case WM_T_82546_3:
2230 /* Microwire */
2231 sc->nvm.read = wm_nvm_read_uwire;
2232 reg = CSR_READ(sc, WMREG_EECD);
2233 if (reg & EECD_EE_SIZE) {
2234 sc->sc_nvm_wordsize = 256;
2235 sc->sc_nvm_addrbits = 8;
2236 } else {
2237 sc->sc_nvm_wordsize = 64;
2238 sc->sc_nvm_addrbits = 6;
2239 }
2240 sc->sc_flags |= WM_F_LOCK_EECD;
2241 sc->nvm.acquire = wm_get_eecd;
2242 sc->nvm.release = wm_put_eecd;
2243 break;
2244 case WM_T_82541:
2245 case WM_T_82541_2:
2246 case WM_T_82547:
2247 case WM_T_82547_2:
2248 reg = CSR_READ(sc, WMREG_EECD);
2249 /*
2250 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI device only
2251 		 * on the 8254[17], so set flags and functions before calling it.
2252 */
2253 sc->sc_flags |= WM_F_LOCK_EECD;
2254 sc->nvm.acquire = wm_get_eecd;
2255 sc->nvm.release = wm_put_eecd;
2256 if (reg & EECD_EE_TYPE) {
2257 /* SPI */
2258 sc->nvm.read = wm_nvm_read_spi;
2259 sc->sc_flags |= WM_F_EEPROM_SPI;
2260 wm_nvm_set_addrbits_size_eecd(sc);
2261 } else {
2262 /* Microwire */
2263 sc->nvm.read = wm_nvm_read_uwire;
2264 if ((reg & EECD_EE_ABITS) != 0) {
2265 sc->sc_nvm_wordsize = 256;
2266 sc->sc_nvm_addrbits = 8;
2267 } else {
2268 sc->sc_nvm_wordsize = 64;
2269 sc->sc_nvm_addrbits = 6;
2270 }
2271 }
2272 break;
2273 case WM_T_82571:
2274 case WM_T_82572:
2275 /* SPI */
2276 sc->nvm.read = wm_nvm_read_eerd;
2277 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2278 sc->sc_flags |= WM_F_EEPROM_SPI;
2279 wm_nvm_set_addrbits_size_eecd(sc);
2280 sc->phy.acquire = wm_get_swsm_semaphore;
2281 sc->phy.release = wm_put_swsm_semaphore;
2282 sc->nvm.acquire = wm_get_nvm_82571;
2283 sc->nvm.release = wm_put_nvm_82571;
2284 break;
2285 case WM_T_82573:
2286 case WM_T_82574:
2287 case WM_T_82583:
2288 sc->nvm.read = wm_nvm_read_eerd;
2289 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2290 if (sc->sc_type == WM_T_82573) {
2291 sc->phy.acquire = wm_get_swsm_semaphore;
2292 sc->phy.release = wm_put_swsm_semaphore;
2293 sc->nvm.acquire = wm_get_nvm_82571;
2294 sc->nvm.release = wm_put_nvm_82571;
2295 } else {
2296 /* Both PHY and NVM use the same semaphore. */
2297 sc->phy.acquire = sc->nvm.acquire
2298 = wm_get_swfwhw_semaphore;
2299 sc->phy.release = sc->nvm.release
2300 = wm_put_swfwhw_semaphore;
2301 }
2302 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2303 sc->sc_flags |= WM_F_EEPROM_FLASH;
2304 sc->sc_nvm_wordsize = 2048;
2305 } else {
2306 /* SPI */
2307 sc->sc_flags |= WM_F_EEPROM_SPI;
2308 wm_nvm_set_addrbits_size_eecd(sc);
2309 }
2310 break;
2311 case WM_T_82575:
2312 case WM_T_82576:
2313 case WM_T_82580:
2314 case WM_T_I350:
2315 case WM_T_I354:
2316 case WM_T_80003:
2317 /* SPI */
2318 sc->sc_flags |= WM_F_EEPROM_SPI;
2319 wm_nvm_set_addrbits_size_eecd(sc);
2320 if ((sc->sc_type == WM_T_80003)
2321 || (sc->sc_nvm_wordsize < (1 << 15))) {
2322 sc->nvm.read = wm_nvm_read_eerd;
2323 /* Don't use WM_F_LOCK_EECD because we use EERD */
2324 } else {
2325 sc->nvm.read = wm_nvm_read_spi;
2326 sc->sc_flags |= WM_F_LOCK_EECD;
2327 }
2328 sc->phy.acquire = wm_get_phy_82575;
2329 sc->phy.release = wm_put_phy_82575;
2330 sc->nvm.acquire = wm_get_nvm_80003;
2331 sc->nvm.release = wm_put_nvm_80003;
2332 break;
2333 case WM_T_ICH8:
2334 case WM_T_ICH9:
2335 case WM_T_ICH10:
2336 case WM_T_PCH:
2337 case WM_T_PCH2:
2338 case WM_T_PCH_LPT:
2339 sc->nvm.read = wm_nvm_read_ich8;
2340 /* FLASH */
2341 sc->sc_flags |= WM_F_EEPROM_FLASH;
2342 sc->sc_nvm_wordsize = 2048;
2343 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2344 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2345 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2346 aprint_error_dev(sc->sc_dev,
2347 "can't map FLASH registers\n");
2348 goto out;
2349 }
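		/*
		 * GFPREG describes the LAN region of the flash in sector
		 * units; derive the region's byte offset and the per-bank
		 * size in 16-bit words, assuming two banks.
		 */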
2350 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2351 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2352 ICH_FLASH_SECTOR_SIZE;
2353 sc->sc_ich8_flash_bank_size =
2354 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2355 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2356 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2357 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2358 sc->sc_flashreg_offset = 0;
2359 sc->phy.acquire = wm_get_swflag_ich8lan;
2360 sc->phy.release = wm_put_swflag_ich8lan;
2361 sc->nvm.acquire = wm_get_nvm_ich8lan;
2362 sc->nvm.release = wm_put_nvm_ich8lan;
2363 break;
2364 case WM_T_PCH_SPT:
2365 case WM_T_PCH_CNP:
2366 sc->nvm.read = wm_nvm_read_spt;
2367 /* SPT has no GFPREG; flash registers mapped through BAR0 */
2368 sc->sc_flags |= WM_F_EEPROM_FLASH;
2369 sc->sc_flasht = sc->sc_st;
2370 sc->sc_flashh = sc->sc_sh;
2371 sc->sc_ich8_flash_base = 0;
2372 sc->sc_nvm_wordsize =
2373 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2374 * NVM_SIZE_MULTIPLIER;
2375 		/* It is the size in bytes; we want words */
2376 sc->sc_nvm_wordsize /= 2;
2377 /* Assume 2 banks */
2378 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2379 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2380 sc->phy.acquire = wm_get_swflag_ich8lan;
2381 sc->phy.release = wm_put_swflag_ich8lan;
2382 sc->nvm.acquire = wm_get_nvm_ich8lan;
2383 sc->nvm.release = wm_put_nvm_ich8lan;
2384 break;
2385 case WM_T_I210:
2386 case WM_T_I211:
2387 		/* Allow a single clear of the SW semaphore on I210 and newer */
2388 sc->sc_flags |= WM_F_WA_I210_CLSEM;
2389 if (wm_nvm_flash_presence_i210(sc)) {
2390 sc->nvm.read = wm_nvm_read_eerd;
2391 /* Don't use WM_F_LOCK_EECD because we use EERD */
2392 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2393 wm_nvm_set_addrbits_size_eecd(sc);
2394 } else {
2395 sc->nvm.read = wm_nvm_read_invm;
2396 sc->sc_flags |= WM_F_EEPROM_INVM;
2397 sc->sc_nvm_wordsize = INVM_SIZE;
2398 }
2399 sc->phy.acquire = wm_get_phy_82575;
2400 sc->phy.release = wm_put_phy_82575;
2401 sc->nvm.acquire = wm_get_nvm_80003;
2402 sc->nvm.release = wm_put_nvm_80003;
2403 break;
2404 default:
2405 break;
2406 }
2407
2408 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2409 switch (sc->sc_type) {
2410 case WM_T_82571:
2411 case WM_T_82572:
2412 reg = CSR_READ(sc, WMREG_SWSM2);
2413 if ((reg & SWSM2_LOCK) == 0) {
2414 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2415 force_clear_smbi = true;
2416 } else
2417 force_clear_smbi = false;
2418 break;
2419 case WM_T_82573:
2420 case WM_T_82574:
2421 case WM_T_82583:
2422 force_clear_smbi = true;
2423 break;
2424 default:
2425 force_clear_smbi = false;
2426 break;
2427 }
2428 if (force_clear_smbi) {
2429 reg = CSR_READ(sc, WMREG_SWSM);
2430 if ((reg & SWSM_SMBI) != 0)
2431 aprint_error_dev(sc->sc_dev,
2432 "Please update the Bootagent\n");
2433 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2434 }
2435
2436 /*
2437 	 * Defer printing the EEPROM type until after verifying the checksum.
2438 * This allows the EEPROM type to be printed correctly in the case
2439 * that no EEPROM is attached.
2440 */
2441 /*
2442 * Validate the EEPROM checksum. If the checksum fails, flag
2443 * this for later, so we can fail future reads from the EEPROM.
2444 */
2445 if (wm_nvm_validate_checksum(sc)) {
2446 /*
2447 		 * Check again because some PCI-e parts fail the
2448 		 * first check due to the link being in a sleep state.
2449 */
2450 if (wm_nvm_validate_checksum(sc))
2451 sc->sc_flags |= WM_F_EEPROM_INVALID;
2452 }
2453
2454 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2455 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2456 else {
2457 aprint_verbose_dev(sc->sc_dev, "%u words ",
2458 sc->sc_nvm_wordsize);
2459 if (sc->sc_flags & WM_F_EEPROM_INVM)
2460 aprint_verbose("iNVM");
2461 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2462 aprint_verbose("FLASH(HW)");
2463 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2464 aprint_verbose("FLASH");
2465 else {
2466 if (sc->sc_flags & WM_F_EEPROM_SPI)
2467 eetype = "SPI";
2468 else
2469 eetype = "MicroWire";
2470 aprint_verbose("(%d address bits) %s EEPROM",
2471 sc->sc_nvm_addrbits, eetype);
2472 }
2473 }
2474 wm_nvm_version(sc);
2475 aprint_verbose("\n");
2476
2477 /*
2478 	 * XXX This is the first call to wm_gmii_setup_phytype(); the result
2479 	 * might be incorrect.
2480 */
2481 wm_gmii_setup_phytype(sc, 0, 0);
2482
2483 /* Check for WM_F_WOL on some chips before wm_reset() */
2484 switch (sc->sc_type) {
2485 case WM_T_ICH8:
2486 case WM_T_ICH9:
2487 case WM_T_ICH10:
2488 case WM_T_PCH:
2489 case WM_T_PCH2:
2490 case WM_T_PCH_LPT:
2491 case WM_T_PCH_SPT:
2492 case WM_T_PCH_CNP:
2493 apme_mask = WUC_APME;
2494 eeprom_data = CSR_READ(sc, WMREG_WUC);
2495 if ((eeprom_data & apme_mask) != 0)
2496 sc->sc_flags |= WM_F_WOL;
2497 break;
2498 default:
2499 break;
2500 }
2501
2502 /* Reset the chip to a known state. */
2503 wm_reset(sc);
2504
2505 /*
2506 * Check for I21[01] PLL workaround.
2507 *
2508 * Three cases:
2509 * a) Chip is I211.
2510 * b) Chip is I210 and it uses INVM (not FLASH).
2511 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
2512 */
2513 if (sc->sc_type == WM_T_I211)
2514 sc->sc_flags |= WM_F_PLL_WA_I210;
2515 if (sc->sc_type == WM_T_I210) {
2516 if (!wm_nvm_flash_presence_i210(sc))
2517 sc->sc_flags |= WM_F_PLL_WA_I210;
2518 else if ((sc->sc_nvm_ver_major < 3)
2519 || ((sc->sc_nvm_ver_major == 3)
2520 && (sc->sc_nvm_ver_minor < 25))) {
2521 aprint_verbose_dev(sc->sc_dev,
2522 "ROM image version %d.%d is older than 3.25\n",
2523 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2524 sc->sc_flags |= WM_F_PLL_WA_I210;
2525 }
2526 }
2527 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2528 wm_pll_workaround_i210(sc);
2529
2530 wm_get_wakeup(sc);
2531
2532 /* Non-AMT based hardware can now take control from firmware */
2533 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2534 wm_get_hw_control(sc);
2535
2536 /*
2537 * Read the Ethernet address from the EEPROM, if not first found
2538 * in device properties.
2539 */
2540 ea = prop_dictionary_get(dict, "mac-address");
2541 if (ea != NULL) {
2542 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2543 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2544 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2545 } else {
2546 if (wm_read_mac_addr(sc, enaddr) != 0) {
2547 aprint_error_dev(sc->sc_dev,
2548 "unable to read Ethernet address\n");
2549 goto out;
2550 }
2551 }
2552
2553 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2554 ether_sprintf(enaddr));
2555
2556 /*
2557 * Read the config info from the EEPROM, and set up various
2558 * bits in the control registers based on their contents.
2559 */
2560 pn = prop_dictionary_get(dict, "i82543-cfg1");
2561 if (pn != NULL) {
2562 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2563 cfg1 = (uint16_t) prop_number_signed_value(pn);
2564 } else {
2565 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2566 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2567 goto out;
2568 }
2569 }
2570
2571 pn = prop_dictionary_get(dict, "i82543-cfg2");
2572 if (pn != NULL) {
2573 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2574 cfg2 = (uint16_t) prop_number_signed_value(pn);
2575 } else {
2576 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2577 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2578 goto out;
2579 }
2580 }
2581
2582 /* check for WM_F_WOL */
2583 switch (sc->sc_type) {
2584 case WM_T_82542_2_0:
2585 case WM_T_82542_2_1:
2586 case WM_T_82543:
2587 /* dummy? */
2588 eeprom_data = 0;
2589 apme_mask = NVM_CFG3_APME;
2590 break;
2591 case WM_T_82544:
2592 apme_mask = NVM_CFG2_82544_APM_EN;
2593 eeprom_data = cfg2;
2594 break;
2595 case WM_T_82546:
2596 case WM_T_82546_3:
2597 case WM_T_82571:
2598 case WM_T_82572:
2599 case WM_T_82573:
2600 case WM_T_82574:
2601 case WM_T_82583:
2602 case WM_T_80003:
2603 case WM_T_82575:
2604 case WM_T_82576:
2605 apme_mask = NVM_CFG3_APME;
2606 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2607 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2608 break;
2609 case WM_T_82580:
2610 case WM_T_I350:
2611 case WM_T_I354:
2612 case WM_T_I210:
2613 case WM_T_I211:
2614 apme_mask = NVM_CFG3_APME;
2615 wm_nvm_read(sc,
2616 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2617 1, &eeprom_data);
2618 break;
2619 case WM_T_ICH8:
2620 case WM_T_ICH9:
2621 case WM_T_ICH10:
2622 case WM_T_PCH:
2623 case WM_T_PCH2:
2624 case WM_T_PCH_LPT:
2625 case WM_T_PCH_SPT:
2626 case WM_T_PCH_CNP:
2627 		/* Already checked before wm_reset() */
2628 apme_mask = eeprom_data = 0;
2629 break;
2630 default: /* XXX 82540 */
2631 apme_mask = NVM_CFG3_APME;
2632 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2633 break;
2634 }
2635 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2636 if ((eeprom_data & apme_mask) != 0)
2637 sc->sc_flags |= WM_F_WOL;
2638
2639 /*
2640 	 * We have the EEPROM settings, now apply the special cases
2641 	 * where the EEPROM may be wrong or the board won't support
2642 	 * wake-on-LAN on a particular port.
2643 */
2644 switch (sc->sc_pcidevid) {
2645 case PCI_PRODUCT_INTEL_82546GB_PCIE:
2646 sc->sc_flags &= ~WM_F_WOL;
2647 break;
2648 case PCI_PRODUCT_INTEL_82546EB_FIBER:
2649 case PCI_PRODUCT_INTEL_82546GB_FIBER:
2650 /* Wake events only supported on port A for dual fiber
2651 * regardless of eeprom setting */
2652 if (sc->sc_funcid == 1)
2653 sc->sc_flags &= ~WM_F_WOL;
2654 break;
2655 case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
2656 /* If quad port adapter, disable WoL on all but port A */
2657 if (sc->sc_funcid != 0)
2658 sc->sc_flags &= ~WM_F_WOL;
2659 break;
2660 case PCI_PRODUCT_INTEL_82571EB_FIBER:
2661 /* Wake events only supported on port A for dual fiber
2662 * regardless of eeprom setting */
2663 if (sc->sc_funcid == 1)
2664 sc->sc_flags &= ~WM_F_WOL;
2665 break;
2666 case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
2667 case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
2668 case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
2669 /* If quad port adapter, disable WoL on all but port A */
2670 if (sc->sc_funcid != 0)
2671 sc->sc_flags &= ~WM_F_WOL;
2672 break;
2673 }
2674
2675 if (sc->sc_type >= WM_T_82575) {
2676 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2677 aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
2678 nvmword);
2679 if ((sc->sc_type == WM_T_82575) ||
2680 (sc->sc_type == WM_T_82576)) {
2681 /* Check NVM for autonegotiation */
2682 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
2683 != 0)
2684 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2685 }
2686 if ((sc->sc_type == WM_T_82575) ||
2687 (sc->sc_type == WM_T_I350)) {
2688 if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
2689 sc->sc_flags |= WM_F_MAS;
2690 }
2691 }
2692 }
2693
2694 /*
2695 * XXX need special handling for some multiple port cards
2696 	 * to disable a particular port.
2697 */
2698
2699 if (sc->sc_type >= WM_T_82544) {
2700 pn = prop_dictionary_get(dict, "i82543-swdpin");
2701 if (pn != NULL) {
2702 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2703 swdpin = (uint16_t) prop_number_signed_value(pn);
2704 } else {
2705 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2706 aprint_error_dev(sc->sc_dev,
2707 "unable to read SWDPIN\n");
2708 goto out;
2709 }
2710 }
2711 }
2712
2713 if (cfg1 & NVM_CFG1_ILOS)
2714 sc->sc_ctrl |= CTRL_ILOS;
2715
2716 /*
2717 * XXX
2718 	 * This code isn't correct because pins 2 and 3 are located
2719 	 * at different positions on newer chips. Check all the datasheets.
2720 	 *
2721 	 * Until this problem is resolved, do this only for chip types <= 82580.
2722 */
2723 if (sc->sc_type <= WM_T_82580) {
2724 if (sc->sc_type >= WM_T_82544) {
2725 sc->sc_ctrl |=
2726 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2727 CTRL_SWDPIO_SHIFT;
2728 sc->sc_ctrl |=
2729 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2730 CTRL_SWDPINS_SHIFT;
2731 } else {
2732 sc->sc_ctrl |=
2733 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2734 CTRL_SWDPIO_SHIFT;
2735 }
2736 }
2737
2738 if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
2739 wm_nvm_read(sc,
2740 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2741 1, &nvmword);
2742 if (nvmword & NVM_CFG3_ILOS)
2743 sc->sc_ctrl |= CTRL_ILOS;
2744 }
2745
2746 #if 0
2747 if (sc->sc_type >= WM_T_82544) {
2748 if (cfg1 & NVM_CFG1_IPS0)
2749 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2750 if (cfg1 & NVM_CFG1_IPS1)
2751 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2752 sc->sc_ctrl_ext |=
2753 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2754 CTRL_EXT_SWDPIO_SHIFT;
2755 sc->sc_ctrl_ext |=
2756 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2757 CTRL_EXT_SWDPINS_SHIFT;
2758 } else {
2759 sc->sc_ctrl_ext |=
2760 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2761 CTRL_EXT_SWDPIO_SHIFT;
2762 }
2763 #endif
2764
2765 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2766 #if 0
2767 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2768 #endif
2769
2770 if (sc->sc_type == WM_T_PCH) {
2771 uint16_t val;
2772
2773 /* Save the NVM K1 bit setting */
2774 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2775
2776 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2777 sc->sc_nvm_k1_enabled = 1;
2778 else
2779 sc->sc_nvm_k1_enabled = 0;
2780 }
2781
2782 /* Determine if we're GMII, TBI, SERDES or SGMII mode */
2783 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2784 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2785 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2786 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
2787 || sc->sc_type == WM_T_82573
2788 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2789 /* Copper only */
2790 } else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2791 || (sc->sc_type ==WM_T_82580) || (sc->sc_type ==WM_T_I350)
2792 || (sc->sc_type ==WM_T_I354) || (sc->sc_type ==WM_T_I210)
2793 || (sc->sc_type ==WM_T_I211)) {
2794 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2795 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2796 switch (link_mode) {
2797 case CTRL_EXT_LINK_MODE_1000KX:
2798 aprint_normal_dev(sc->sc_dev, "1000KX\n");
2799 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2800 break;
2801 case CTRL_EXT_LINK_MODE_SGMII:
2802 if (wm_sgmii_uses_mdio(sc)) {
2803 aprint_normal_dev(sc->sc_dev,
2804 "SGMII(MDIO)\n");
2805 sc->sc_flags |= WM_F_SGMII;
2806 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2807 break;
2808 }
2809 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2810 /*FALLTHROUGH*/
2811 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2812 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2813 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2814 if (link_mode
2815 == CTRL_EXT_LINK_MODE_SGMII) {
2816 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2817 sc->sc_flags |= WM_F_SGMII;
2818 aprint_verbose_dev(sc->sc_dev,
2819 "SGMII\n");
2820 } else {
2821 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2822 aprint_verbose_dev(sc->sc_dev,
2823 "SERDES\n");
2824 }
2825 break;
2826 }
2827 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2828 aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
2829 else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2830 aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
2831 sc->sc_flags |= WM_F_SGMII;
2832 }
2833 /* Do not change link mode for 100BaseFX */
2834 if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
2835 break;
2836
2837 /* Change current link mode setting */
2838 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2839 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2840 reg |= CTRL_EXT_LINK_MODE_SGMII;
2841 else
2842 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2843 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2844 break;
2845 case CTRL_EXT_LINK_MODE_GMII:
2846 default:
2847 aprint_normal_dev(sc->sc_dev, "Copper\n");
2848 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2849 break;
2850 }
2851
2852 reg &= ~CTRL_EXT_I2C_ENA;
2853 if ((sc->sc_flags & WM_F_SGMII) != 0)
2854 reg |= CTRL_EXT_I2C_ENA;
2855 else
2856 reg &= ~CTRL_EXT_I2C_ENA;
2857 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2858 if ((sc->sc_flags & WM_F_SGMII) != 0) {
2859 wm_gmii_setup_phytype(sc, 0, 0);
2860 wm_reset_mdicnfg_82580(sc);
2861 }
2862 } else if (sc->sc_type < WM_T_82543 ||
2863 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2864 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2865 aprint_error_dev(sc->sc_dev,
2866 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2867 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2868 }
2869 } else {
2870 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
2871 aprint_error_dev(sc->sc_dev,
2872 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2873 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2874 }
2875 }
2876
2877 if (sc->sc_type >= WM_T_PCH2)
2878 sc->sc_flags |= WM_F_EEE;
2879 else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
2880 && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
2881 /* XXX: Need special handling for I354. (not yet) */
2882 if (sc->sc_type != WM_T_I354)
2883 sc->sc_flags |= WM_F_EEE;
2884 }
2885
2886 /* Set device properties (macflags) */
2887 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2888
2889 snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
2890 aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
2891
2892 #ifdef WM_MPSAFE
2893 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2894 #else
2895 sc->sc_core_lock = NULL;
2896 #endif
2897
2898 /* Initialize the media structures accordingly. */
2899 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2900 wm_gmii_mediainit(sc, wmp->wmp_product);
2901 else
2902 wm_tbi_mediainit(sc); /* All others */
2903
2904 ifp = &sc->sc_ethercom.ec_if;
2905 xname = device_xname(sc->sc_dev);
2906 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2907 ifp->if_softc = sc;
2908 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2909 #ifdef WM_MPSAFE
2910 ifp->if_extflags = IFEF_MPSAFE;
2911 #endif
2912 ifp->if_ioctl = wm_ioctl;
2913 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2914 ifp->if_start = wm_nq_start;
2915 /*
2916 		 * When the number of CPUs is one and the controller can use
2917 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
2918 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
2919 		 * other for link status changes.
2920 		 * In this situation, wm_nq_transmit() is disadvantageous
2921 		 * because of the wm_select_txqueue() and pcq(9) overhead.
2922 */
2923 if (wm_is_using_multiqueue(sc))
2924 ifp->if_transmit = wm_nq_transmit;
2925 } else {
2926 ifp->if_start = wm_start;
2927 /*
2928 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
2929 */
2930 if (wm_is_using_multiqueue(sc))
2931 ifp->if_transmit = wm_transmit;
2932 }
2933 	/* wm(4) doesn't use ifp->if_watchdog; wm_tick serves as the watchdog. */
2934 ifp->if_init = wm_init;
2935 ifp->if_stop = wm_stop;
2936 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
2937 IFQ_SET_READY(&ifp->if_snd);
2938
2939 /* Check for jumbo frame */
2940 switch (sc->sc_type) {
2941 case WM_T_82573:
2942 /* XXX limited to 9234 if ASPM is disabled */
2943 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2944 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2945 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2946 break;
2947 case WM_T_82571:
2948 case WM_T_82572:
2949 case WM_T_82574:
2950 case WM_T_82583:
2951 case WM_T_82575:
2952 case WM_T_82576:
2953 case WM_T_82580:
2954 case WM_T_I350:
2955 case WM_T_I354:
2956 case WM_T_I210:
2957 case WM_T_I211:
2958 case WM_T_80003:
2959 case WM_T_ICH9:
2960 case WM_T_ICH10:
2961 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2962 case WM_T_PCH_LPT:
2963 case WM_T_PCH_SPT:
2964 case WM_T_PCH_CNP:
2965 /* XXX limited to 9234 */
2966 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2967 break;
2968 case WM_T_PCH:
2969 /* XXX limited to 4096 */
2970 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2971 break;
2972 case WM_T_82542_2_0:
2973 case WM_T_82542_2_1:
2974 case WM_T_ICH8:
2975 /* No support for jumbo frame */
2976 break;
2977 default:
2978 /* ETHER_MAX_LEN_JUMBO */
2979 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2980 break;
2981 }
2982
2983 	/* If we're an i82543 or greater, we can support VLANs. */
2984 if (sc->sc_type >= WM_T_82543) {
2985 sc->sc_ethercom.ec_capabilities |=
2986 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2987 sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
2988 }
2989
2990 if ((sc->sc_flags & WM_F_EEE) != 0)
2991 sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
2992
2993 /*
2994 	 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
2995 * on i82543 and later.
2996 */
2997 if (sc->sc_type >= WM_T_82543) {
2998 ifp->if_capabilities |=
2999 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
3000 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
3001 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
3002 IFCAP_CSUM_TCPv6_Tx |
3003 IFCAP_CSUM_UDPv6_Tx;
3004 }
3005
3006 /*
3007 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
3008 *
3009 * 82541GI (8086:1076) ... no
3010 * 82572EI (8086:10b9) ... yes
3011 */
3012 if (sc->sc_type >= WM_T_82571) {
3013 ifp->if_capabilities |=
3014 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3015 }
3016
3017 /*
3018 	 * If we're an i82544 or greater (except i82547), we can do
3019 * TCP segmentation offload.
3020 */
3021 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
3022 ifp->if_capabilities |= IFCAP_TSOv4;
3023 }
3024
3025 if (sc->sc_type >= WM_T_82571) {
3026 ifp->if_capabilities |= IFCAP_TSOv6;
3027 }
3028
3029 sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
3030 sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
3031 sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
3032 sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
3033
3034 /* Attach the interface. */
3035 error = if_initialize(ifp);
3036 if (error != 0) {
3037 aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
3038 error);
3039 return; /* Error */
3040 }
3041 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
3042 ether_ifattach(ifp, enaddr);
3043 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
3044 if_register(ifp);
3045 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
3046 RND_FLAG_DEFAULT);
3047
3048 #ifdef WM_EVENT_COUNTERS
3049 /* Attach event counters. */
3050 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
3051 NULL, xname, "linkintr");
3052
3053 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
3054 NULL, xname, "tx_xoff");
3055 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
3056 NULL, xname, "tx_xon");
3057 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
3058 NULL, xname, "rx_xoff");
3059 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
3060 NULL, xname, "rx_xon");
3061 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
3062 NULL, xname, "rx_macctl");
3063 #endif /* WM_EVENT_COUNTERS */
3064
3065 sc->sc_txrx_use_workqueue = false;
3066
3067 wm_init_sysctls(sc);
3068
3069 if (pmf_device_register(self, wm_suspend, wm_resume))
3070 pmf_class_network_register(self, ifp);
3071 else
3072 aprint_error_dev(self, "couldn't establish power handler\n");
3073
3074 sc->sc_flags |= WM_F_ATTACHED;
3075 out:
3076 return;
3077 }
3078
3079 /* The detach function (ca_detach) */
3080 static int
3081 wm_detach(device_t self, int flags __unused)
3082 {
3083 struct wm_softc *sc = device_private(self);
3084 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3085 int i;
3086
3087 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
3088 return 0;
3089
3090 /* Stop the interface. Callouts are stopped in it. */
3091 wm_stop(ifp, 1);
3092
3093 pmf_device_deregister(self);
3094
3095 sysctl_teardown(&sc->sc_sysctllog);
3096
3097 #ifdef WM_EVENT_COUNTERS
3098 evcnt_detach(&sc->sc_ev_linkintr);
3099
3100 evcnt_detach(&sc->sc_ev_tx_xoff);
3101 evcnt_detach(&sc->sc_ev_tx_xon);
3102 evcnt_detach(&sc->sc_ev_rx_xoff);
3103 evcnt_detach(&sc->sc_ev_rx_xon);
3104 evcnt_detach(&sc->sc_ev_rx_macctl);
3105 #endif /* WM_EVENT_COUNTERS */
3106
3107 rnd_detach_source(&sc->rnd_source);
3108
3109 /* Tell the firmware about the release */
3110 WM_CORE_LOCK(sc);
3111 wm_release_manageability(sc);
3112 wm_release_hw_control(sc);
3113 wm_enable_wakeup(sc);
3114 WM_CORE_UNLOCK(sc);
3115
3116 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3117
3118 ether_ifdetach(ifp);
3119 if_detach(ifp);
3120 if_percpuq_destroy(sc->sc_ipq);
3121
3122 /* Delete all remaining media. */
3123 ifmedia_fini(&sc->sc_mii.mii_media);
3124
3125 /* Unload RX dmamaps and free mbufs */
3126 for (i = 0; i < sc->sc_nqueues; i++) {
3127 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
3128 mutex_enter(rxq->rxq_lock);
3129 wm_rxdrain(rxq);
3130 mutex_exit(rxq->rxq_lock);
3131 }
3132 /* Must unlock here */
3133
3134 /* Disestablish the interrupt handler */
3135 for (i = 0; i < sc->sc_nintrs; i++) {
3136 if (sc->sc_ihs[i] != NULL) {
3137 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
3138 sc->sc_ihs[i] = NULL;
3139 }
3140 }
3141 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
3142
3143 	/* wm_stop() ensures the workqueue is stopped. */
3144 workqueue_destroy(sc->sc_queue_wq);
3145
3146 for (i = 0; i < sc->sc_nqueues; i++)
3147 softint_disestablish(sc->sc_queue[i].wmq_si);
3148
3149 wm_free_txrx_queues(sc);
3150
3151 /* Unmap the registers */
3152 if (sc->sc_ss) {
3153 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
3154 sc->sc_ss = 0;
3155 }
3156 if (sc->sc_ios) {
3157 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
3158 sc->sc_ios = 0;
3159 }
3160 if (sc->sc_flashs) {
3161 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
3162 sc->sc_flashs = 0;
3163 }
3164
3165 if (sc->sc_core_lock)
3166 mutex_obj_free(sc->sc_core_lock);
3167 if (sc->sc_ich_phymtx)
3168 mutex_obj_free(sc->sc_ich_phymtx);
3169 if (sc->sc_ich_nvmmtx)
3170 mutex_obj_free(sc->sc_ich_nvmmtx);
3171
3172 return 0;
3173 }
3174
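/*
 * wm_suspend:
 *
 *	PMF suspend handler: hand control back to the firmware and arm
 *	the wakeup logic.
 */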
3175 static bool
3176 wm_suspend(device_t self, const pmf_qual_t *qual)
3177 {
3178 struct wm_softc *sc = device_private(self);
3179
3180 wm_release_manageability(sc);
3181 wm_release_hw_control(sc);
3182 wm_enable_wakeup(sc);
3183
3184 return true;
3185 }
3186
3187 static bool
3188 wm_resume(device_t self, const pmf_qual_t *qual)
3189 {
3190 struct wm_softc *sc = device_private(self);
3191 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3192 pcireg_t reg;
3193 char buf[256];
3194
3195 reg = CSR_READ(sc, WMREG_WUS);
3196 if (reg != 0) {
3197 snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
3198 device_printf(sc->sc_dev, "wakeup status %s\n", buf);
3199 CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
3200 }
3201
3202 if (sc->sc_type >= WM_T_PCH2)
3203 wm_resume_workarounds_pchlan(sc);
3204 if ((ifp->if_flags & IFF_UP) == 0) {
3205 wm_reset(sc);
3206 /* Non-AMT based hardware can now take control from firmware */
3207 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
3208 wm_get_hw_control(sc);
3209 wm_init_manageability(sc);
3210 } else {
3211 /*
3212 * We called pmf_class_network_register(), so if_init() is
3213 * automatically called when IFF_UP. wm_reset(),
3214 * wm_get_hw_control() and wm_init_manageability() are called
3215 * via wm_init().
3216 */
3217 }
3218
3219 return true;
3220 }
3221
3222 /*
3223 * wm_watchdog: [ifnet interface function]
3224 *
3225 * Watchdog timer handler.
3226 */
3227 static void
3228 wm_watchdog(struct ifnet *ifp)
3229 {
3230 int qid;
3231 struct wm_softc *sc = ifp->if_softc;
3232 	uint16_t hang_queue = 0; /* Bitmap of hung queues; max is 82576's 16. */
3233
3234 for (qid = 0; qid < sc->sc_nqueues; qid++) {
3235 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
3236
3237 wm_watchdog_txq(ifp, txq, &hang_queue);
3238 }
3239
3240 	/* If any of the queues hung up, reset the interface. */
3241 if (hang_queue != 0) {
3242 (void)wm_init(ifp);
3243
3244 /*
3245 		 * There is still some upper layer processing which calls
3246 		 * ifp->if_start(), e.g. ALTQ or single CPU systems.
3247 */
3248 /* Try to get more packets going. */
3249 ifp->if_start(ifp);
3250 }
3251 }
3252
3253
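/*
 * wm_watchdog_txq:
 *
 *	Per-queue watchdog check: if the queue still has packets in flight
 *	and the last transmission is older than wm_watchdog_timeout, inspect
 *	the queue under its lock (wm_watchdog_txq_locked()).
 */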
3254 static void
3255 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3256 {
3257
3258 mutex_enter(txq->txq_lock);
3259 if (txq->txq_sending &&
3260 time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
3261 wm_watchdog_txq_locked(ifp, txq, hang);
3262
3263 mutex_exit(txq->txq_lock);
3264 }
3265
3266 static void
3267 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
3268 uint16_t *hang)
3269 {
3270 struct wm_softc *sc = ifp->if_softc;
3271 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
3272
3273 KASSERT(mutex_owned(txq->txq_lock));
3274
3275 /*
3276 * Since we're using delayed interrupts, sweep up
3277 * before we report an error.
3278 */
3279 wm_txeof(txq, UINT_MAX);
3280
3281 if (txq->txq_sending)
3282 *hang |= __BIT(wmq->wmq_id);
3283
3284 if (txq->txq_free == WM_NTXDESC(txq)) {
3285 log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
3286 device_xname(sc->sc_dev));
3287 } else {
3288 #ifdef WM_DEBUG
3289 int i, j;
3290 struct wm_txsoft *txs;
3291 #endif
3292 log(LOG_ERR,
3293 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3294 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
3295 txq->txq_next);
3296 if_statinc(ifp, if_oerrors);
3297 #ifdef WM_DEBUG
3298 for (i = txq->txq_sdirty; i != txq->txq_snext;
3299 i = WM_NEXTTXS(txq, i)) {
3300 txs = &txq->txq_soft[i];
3301 printf("txs %d tx %d -> %d\n",
3302 i, txs->txs_firstdesc, txs->txs_lastdesc);
3303 for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
3304 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3305 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3306 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
3307 printf("\t %#08x%08x\n",
3308 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
3309 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
3310 } else {
3311 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3312 (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
3313 txq->txq_descs[j].wtx_addr.wa_low);
3314 printf("\t %#04x%02x%02x%08x\n",
3315 txq->txq_descs[j].wtx_fields.wtxu_vlan,
3316 txq->txq_descs[j].wtx_fields.wtxu_options,
3317 txq->txq_descs[j].wtx_fields.wtxu_status,
3318 txq->txq_descs[j].wtx_cmdlen);
3319 }
3320 if (j == txs->txs_lastdesc)
3321 break;
3322 }
3323 }
3324 #endif
3325 }
3326 }
3327
3328 /*
3329 * wm_tick:
3330 *
3331 * One second timer, used to check link status, sweep up
3332 * completed transmit jobs, etc.
3333 */
3334 static void
3335 wm_tick(void *arg)
3336 {
3337 struct wm_softc *sc = arg;
3338 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3339 #ifndef WM_MPSAFE
3340 int s = splnet();
3341 #endif
3342
3343 WM_CORE_LOCK(sc);
3344
3345 if (sc->sc_core_stopping) {
3346 WM_CORE_UNLOCK(sc);
3347 #ifndef WM_MPSAFE
3348 splx(s);
3349 #endif
3350 return;
3351 }
3352
3353 if (sc->sc_type >= WM_T_82542_2_1) {
3354 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3355 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3356 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3357 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3358 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3359 }
3360
3361 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
3362 if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
3363 if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
3364 + CSR_READ(sc, WMREG_CRCERRS)
3365 + CSR_READ(sc, WMREG_ALGNERRC)
3366 + CSR_READ(sc, WMREG_SYMERRC)
3367 + CSR_READ(sc, WMREG_RXERRC)
3368 + CSR_READ(sc, WMREG_SEC)
3369 + CSR_READ(sc, WMREG_CEXTERR)
3370 + CSR_READ(sc, WMREG_RLEC));
3371 /*
3372 	 * WMREG_RNBC is incremented when no receive buffers are available
3373 	 * in host memory. It is not the number of dropped packets, because
3374 	 * the ethernet controller can still receive packets in that case as
3375 	 * long as there is space in the PHY's FIFO.
3376 	 *
3377 	 * If you want to track WMREG_RNBC, use your own EVCNT instead of
3378 	 * if_iqdrops.
3379 */
3380 if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
3381 IF_STAT_PUTREF(ifp);
3382
3383 if (sc->sc_flags & WM_F_HAS_MII)
3384 mii_tick(&sc->sc_mii);
3385 else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
3386 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3387 wm_serdes_tick(sc);
3388 else
3389 wm_tbi_tick(sc);
3390
3391 WM_CORE_UNLOCK(sc);
3392
3393 wm_watchdog(ifp);
3394
3395 callout_schedule(&sc->sc_tick_ch, hz);
3396 }
3397
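/*
 * wm_ifflags_cb:
 *
 *	Callback for if_flags and ec_capenable changes. Returns ENETRESET
 *	when the interface must be reinitialized; otherwise the changes are
 *	applied directly (receive filter, VLAN and EEE settings).
 */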
3398 static int
3399 wm_ifflags_cb(struct ethercom *ec)
3400 {
3401 struct ifnet *ifp = &ec->ec_if;
3402 struct wm_softc *sc = ifp->if_softc;
3403 u_short iffchange;
3404 int ecchange;
3405 bool needreset = false;
3406 int rc = 0;
3407
3408 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3409 device_xname(sc->sc_dev), __func__));
3410
3411 WM_CORE_LOCK(sc);
3412
3413 /*
3414 * Check for if_flags.
3415 	 * The main purpose is to prevent link down when opening bpf.
3416 */
3417 iffchange = ifp->if_flags ^ sc->sc_if_flags;
3418 sc->sc_if_flags = ifp->if_flags;
3419 if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3420 needreset = true;
3421 goto ec;
3422 }
3423
3424 /* iff related updates */
3425 if ((iffchange & IFF_PROMISC) != 0)
3426 wm_set_filter(sc);
3427
3428 wm_set_vlan(sc);
3429
3430 ec:
3431 /* Check for ec_capenable. */
3432 ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
3433 sc->sc_ec_capenable = ec->ec_capenable;
3434 if ((ecchange & ~ETHERCAP_EEE) != 0) {
3435 needreset = true;
3436 goto out;
3437 }
3438
3439 /* ec related updates */
3440 wm_set_eee(sc);
3441
3442 out:
3443 if (needreset)
3444 rc = ENETRESET;
3445 WM_CORE_UNLOCK(sc);
3446
3447 return rc;
3448 }
3449
3450 /*
3451 * wm_ioctl: [ifnet interface function]
3452 *
3453 * Handle control requests from the operator.
3454 */
3455 static int
3456 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3457 {
3458 struct wm_softc *sc = ifp->if_softc;
3459 struct ifreq *ifr = (struct ifreq *)data;
3460 struct ifaddr *ifa = (struct ifaddr *)data;
3461 struct sockaddr_dl *sdl;
3462 int s, error;
3463
3464 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3465 device_xname(sc->sc_dev), __func__));
3466
3467 #ifndef WM_MPSAFE
3468 s = splnet();
3469 #endif
3470 switch (cmd) {
3471 case SIOCSIFMEDIA:
3472 WM_CORE_LOCK(sc);
3473 /* Flow control requires full-duplex mode. */
3474 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3475 (ifr->ifr_media & IFM_FDX) == 0)
3476 ifr->ifr_media &= ~IFM_ETH_FMASK;
3477 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3478 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3479 /* We can do both TXPAUSE and RXPAUSE. */
3480 ifr->ifr_media |=
3481 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3482 }
3483 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3484 }
3485 WM_CORE_UNLOCK(sc);
3486 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3487 break;
3488 case SIOCINITIFADDR:
3489 WM_CORE_LOCK(sc);
3490 if (ifa->ifa_addr->sa_family == AF_LINK) {
3491 sdl = satosdl(ifp->if_dl->ifa_addr);
3492 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3493 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3494 /* Unicast address is the first multicast entry */
3495 wm_set_filter(sc);
3496 error = 0;
3497 WM_CORE_UNLOCK(sc);
3498 break;
3499 }
3500 WM_CORE_UNLOCK(sc);
3501 /*FALLTHROUGH*/
3502 default:
3503 #ifdef WM_MPSAFE
3504 s = splnet();
3505 #endif
3506 /* It may call wm_start, so unlock here */
3507 error = ether_ioctl(ifp, cmd, data);
3508 #ifdef WM_MPSAFE
3509 splx(s);
3510 #endif
3511 if (error != ENETRESET)
3512 break;
3513
3514 error = 0;
3515
3516 if (cmd == SIOCSIFCAP)
3517 error = (*ifp->if_init)(ifp);
3518 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3519 ;
3520 else if (ifp->if_flags & IFF_RUNNING) {
3521 /*
3522 * Multicast list has changed; set the hardware filter
3523 * accordingly.
3524 */
3525 WM_CORE_LOCK(sc);
3526 wm_set_filter(sc);
3527 WM_CORE_UNLOCK(sc);
3528 }
3529 break;
3530 }
3531
3532 #ifndef WM_MPSAFE
3533 splx(s);
3534 #endif
3535 return error;
3536 }
3537
3538 /* MAC address related */
3539
3540 /*
3541  * Get the offset of the MAC address and return it.
3542  * If an error occurs, return offset 0.
3543 */
3544 static uint16_t
3545 wm_check_alt_mac_addr(struct wm_softc *sc)
3546 {
3547 uint16_t myea[ETHER_ADDR_LEN / 2];
3548 uint16_t offset = NVM_OFF_MACADDR;
3549
3550 /* Try to read alternative MAC address pointer */
3551 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3552 return 0;
3553
3554 	/* Check whether the pointer is valid. */
3555 if ((offset == 0x0000) || (offset == 0xffff))
3556 return 0;
3557
3558 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3559 /*
3560 	 * Check whether the alternative MAC address is valid.
3561 	 * Some cards have a non-0xffff pointer but don't actually use an
3562 	 * alternative MAC address.
3563 	 *
3564 	 * Check that the multicast bit (LSB of the first octet) is clear.
3565 */
3566 if (wm_nvm_read(sc, offset, 1, myea) == 0)
3567 if (((myea[0] & 0xff) & 0x01) == 0)
3568 return offset; /* Found */
3569
3570 /* Not found */
3571 return 0;
3572 }
3573
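/*
 * wm_read_mac_addr:
 *
 *	Read the station address from NVM, taking the per-function offset
 *	and the alternative MAC address into account. Returns 0 on success
 *	and -1 if the NVM read failed.
 */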
3574 static int
3575 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3576 {
3577 uint16_t myea[ETHER_ADDR_LEN / 2];
3578 uint16_t offset = NVM_OFF_MACADDR;
3579 int do_invert = 0;
3580
3581 switch (sc->sc_type) {
3582 case WM_T_82580:
3583 case WM_T_I350:
3584 case WM_T_I354:
3585 /* EEPROM Top Level Partitioning */
3586 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3587 break;
3588 case WM_T_82571:
3589 case WM_T_82575:
3590 case WM_T_82576:
3591 case WM_T_80003:
3592 case WM_T_I210:
3593 case WM_T_I211:
3594 offset = wm_check_alt_mac_addr(sc);
3595 if (offset == 0)
3596 if ((sc->sc_funcid & 0x01) == 1)
3597 do_invert = 1;
3598 break;
3599 default:
3600 if ((sc->sc_funcid & 0x01) == 1)
3601 do_invert = 1;
3602 break;
3603 }
3604
3605 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
3606 goto bad;
3607
3608 enaddr[0] = myea[0] & 0xff;
3609 enaddr[1] = myea[0] >> 8;
3610 enaddr[2] = myea[1] & 0xff;
3611 enaddr[3] = myea[1] >> 8;
3612 enaddr[4] = myea[2] & 0xff;
3613 enaddr[5] = myea[2] >> 8;
3614
3615 /*
3616 * Toggle the LSB of the MAC address on the second port
3617 * of some dual port cards.
3618 */
3619 if (do_invert != 0)
3620 enaddr[5] ^= 1;
3621
3622 return 0;
3623
3624 bad:
3625 return -1;
3626 }
3627
3628 /*
3629 * wm_set_ral:
3630 *
3631  *	Set an entry in the receive address list.
3632 */
3633 static void
3634 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3635 {
3636 uint32_t ral_lo, ral_hi, addrl, addrh;
3637 uint32_t wlock_mac;
3638 int rv;
3639
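	/*
	 * Build the RAL/RAH register pair: RAL holds the first four octets
	 * of the address, RAH holds the last two plus the RAL_AV (address
	 * valid) bit. A NULL enaddr clears the entry.
	 */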
3640 if (enaddr != NULL) {
3641 ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
3642 ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
3643 ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
3644 ral_hi |= RAL_AV;
3645 } else {
3646 ral_lo = 0;
3647 ral_hi = 0;
3648 }
3649
3650 switch (sc->sc_type) {
3651 case WM_T_82542_2_0:
3652 case WM_T_82542_2_1:
3653 case WM_T_82543:
3654 CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
3655 CSR_WRITE_FLUSH(sc);
3656 CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
3657 CSR_WRITE_FLUSH(sc);
3658 break;
3659 case WM_T_PCH2:
3660 case WM_T_PCH_LPT:
3661 case WM_T_PCH_SPT:
3662 case WM_T_PCH_CNP:
3663 if (idx == 0) {
3664 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3665 CSR_WRITE_FLUSH(sc);
3666 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3667 CSR_WRITE_FLUSH(sc);
3668 return;
3669 }
3670 if (sc->sc_type != WM_T_PCH2) {
3671 wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
3672 FWSM_WLOCK_MAC);
3673 addrl = WMREG_SHRAL(idx - 1);
3674 addrh = WMREG_SHRAH(idx - 1);
3675 } else {
3676 wlock_mac = 0;
3677 addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
3678 addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
3679 }
3680
3681 if ((wlock_mac == 0) || (idx <= wlock_mac)) {
3682 rv = wm_get_swflag_ich8lan(sc);
3683 if (rv != 0)
3684 return;
3685 CSR_WRITE(sc, addrl, ral_lo);
3686 CSR_WRITE_FLUSH(sc);
3687 CSR_WRITE(sc, addrh, ral_hi);
3688 CSR_WRITE_FLUSH(sc);
3689 wm_put_swflag_ich8lan(sc);
3690 }
3691
3692 break;
3693 default:
3694 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3695 CSR_WRITE_FLUSH(sc);
3696 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3697 CSR_WRITE_FLUSH(sc);
3698 break;
3699 }
3700 }
3701
3702 /*
3703 * wm_mchash:
3704 *
3705 * Compute the hash of the multicast address for the 4096-bit
3706 * multicast filter.
3707 */
3708 static uint32_t
3709 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3710 {
3711 static const int lo_shift[4] = { 4, 3, 2, 0 };
3712 static const int hi_shift[4] = { 4, 5, 6, 8 };
3713 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3714 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3715 uint32_t hash;
3716
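	/*
	 * sc_mchash_type selects which bits of the upper two address octets
	 * form the hash; the result is 12 bits wide on most chips and 10
	 * bits wide on the ICH/PCH variants.
	 */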
3717 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3718 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3719 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3720 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
3721 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3722 (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3723 return (hash & 0x3ff);
3724 }
3725 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3726 (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3727
3728 return (hash & 0xfff);
3729 }
3730
3731 /*
3732  * wm_rar_count:
3733  *	Return the number of receive address register (RAR) entries.
3734  */
3735 static int
3736 wm_rar_count(struct wm_softc *sc)
3737 {
3738 int size;
3739
3740 switch (sc->sc_type) {
3741 case WM_T_ICH8:
3742 		size = WM_RAL_TABSIZE_ICH8 - 1;
3743 break;
3744 case WM_T_ICH9:
3745 case WM_T_ICH10:
3746 case WM_T_PCH:
3747 size = WM_RAL_TABSIZE_ICH8;
3748 break;
3749 case WM_T_PCH2:
3750 size = WM_RAL_TABSIZE_PCH2;
3751 break;
3752 case WM_T_PCH_LPT:
3753 case WM_T_PCH_SPT:
3754 case WM_T_PCH_CNP:
3755 size = WM_RAL_TABSIZE_PCH_LPT;
3756 break;
3757 case WM_T_82575:
3758 case WM_T_I210:
3759 case WM_T_I211:
3760 size = WM_RAL_TABSIZE_82575;
3761 break;
3762 case WM_T_82576:
3763 case WM_T_82580:
3764 size = WM_RAL_TABSIZE_82576;
3765 break;
3766 case WM_T_I350:
3767 case WM_T_I354:
3768 size = WM_RAL_TABSIZE_I350;
3769 break;
3770 default:
3771 size = WM_RAL_TABSIZE;
3772 }
3773
3774 return size;
3775 }
3776
3777 /*
3778 * wm_set_filter:
3779 *
3780 * Set up the receive filter.
3781 */
3782 static void
3783 wm_set_filter(struct wm_softc *sc)
3784 {
3785 struct ethercom *ec = &sc->sc_ethercom;
3786 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3787 struct ether_multi *enm;
3788 struct ether_multistep step;
3789 bus_addr_t mta_reg;
3790 uint32_t hash, reg, bit;
3791 int i, size, ralmax;
3792
3793 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3794 device_xname(sc->sc_dev), __func__));
3795
3796 if (sc->sc_type >= WM_T_82544)
3797 mta_reg = WMREG_CORDOVA_MTA;
3798 else
3799 mta_reg = WMREG_MTA;
3800
3801 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3802
3803 if (ifp->if_flags & IFF_BROADCAST)
3804 sc->sc_rctl |= RCTL_BAM;
3805 if (ifp->if_flags & IFF_PROMISC) {
3806 sc->sc_rctl |= RCTL_UPE;
3807 ETHER_LOCK(ec);
3808 ec->ec_flags |= ETHER_F_ALLMULTI;
3809 ETHER_UNLOCK(ec);
3810 goto allmulti;
3811 }
3812
3813 /*
3814 * Set the station address in the first RAL slot, and
3815 * clear the remaining slots.
3816 */
3817 size = wm_rar_count(sc);
3818 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3819
3820 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
3821 || (sc->sc_type == WM_T_PCH_CNP)) {
3822 i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3823 switch (i) {
3824 case 0:
3825 /* We can use all entries */
3826 ralmax = size;
3827 break;
3828 case 1:
3829 /* Only RAR[0] */
3830 ralmax = 1;
3831 break;
3832 default:
3833 /* Available SHRA + RAR[0] */
3834 ralmax = i + 1;
3835 }
3836 } else
3837 ralmax = size;
3838 for (i = 1; i < size; i++) {
3839 if (i < ralmax)
3840 wm_set_ral(sc, NULL, i);
3841 }
3842
3843 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3844 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3845 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3846 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
3847 size = WM_ICH8_MC_TABSIZE;
3848 else
3849 size = WM_MC_TABSIZE;
3850 /* Clear out the multicast table. */
3851 for (i = 0; i < size; i++) {
3852 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3853 CSR_WRITE_FLUSH(sc);
3854 }
3855
3856 ETHER_LOCK(ec);
3857 ETHER_FIRST_MULTI(step, ec, enm);
3858 while (enm != NULL) {
3859 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3860 ec->ec_flags |= ETHER_F_ALLMULTI;
3861 ETHER_UNLOCK(ec);
3862 /*
3863 * We must listen to a range of multicast addresses.
3864 * For now, just accept all multicasts, rather than
3865 * trying to set only those filter bits needed to match
3866 * the range. (At this time, the only use of address
3867 * ranges is for IP multicast routing, for which the
3868 * range is big enough to require all bits set.)
3869 */
3870 goto allmulti;
3871 }
3872
3873 hash = wm_mchash(sc, enm->enm_addrlo);
3874
3875 reg = (hash >> 5);
3876 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3877 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3878 || (sc->sc_type == WM_T_PCH2)
3879 || (sc->sc_type == WM_T_PCH_LPT)
3880 || (sc->sc_type == WM_T_PCH_SPT)
3881 || (sc->sc_type == WM_T_PCH_CNP))
3882 reg &= 0x1f;
3883 else
3884 reg &= 0x7f;
3885 bit = hash & 0x1f;
3886
3887 hash = CSR_READ(sc, mta_reg + (reg << 2));
3888 hash |= 1U << bit;
3889
3890 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3891 /*
3892 			 * 82544 Errata 9: Certain registers cannot be written
3893 * with particular alignments in PCI-X bus operation
3894 * (FCAH, MTA and VFTA).
3895 */
3896 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3897 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3898 CSR_WRITE_FLUSH(sc);
3899 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3900 CSR_WRITE_FLUSH(sc);
3901 } else {
3902 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3903 CSR_WRITE_FLUSH(sc);
3904 }
3905
3906 ETHER_NEXT_MULTI(step, enm);
3907 }
3908 ec->ec_flags &= ~ETHER_F_ALLMULTI;
3909 ETHER_UNLOCK(ec);
3910
3911 goto setit;
3912
3913 allmulti:
3914 sc->sc_rctl |= RCTL_MPE;
3915
3916 setit:
3917 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3918 }
3919
3920 /* Reset and init related */
3921
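/*
 * wm_set_vlan:
 *
 *	Enable or disable VLAN tag processing (CTRL_VME) depending on
 *	whether any VLANs are attached, and write the CTRL register.
 */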
3922 static void
3923 wm_set_vlan(struct wm_softc *sc)
3924 {
3925
3926 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3927 device_xname(sc->sc_dev), __func__));
3928
3929 /* Deal with VLAN enables. */
3930 if (VLAN_ATTACHED(&sc->sc_ethercom))
3931 sc->sc_ctrl |= CTRL_VME;
3932 else
3933 sc->sc_ctrl &= ~CTRL_VME;
3934
3935 /* Write the control registers. */
3936 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3937 }
3938
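/*
 * wm_set_pcie_completion_timeout:
 *
 *	If the PCIe completion timeout is still at its default (zero) value,
 *	program it: 16ms via the PCIe Device Control 2 register when the
 *	device reports capability version 2, otherwise 10ms via GCR.
 *	Completion timeout resend is disabled in either case.
 */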
3939 static void
3940 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3941 {
3942 uint32_t gcr;
3943 pcireg_t ctrl2;
3944
3945 gcr = CSR_READ(sc, WMREG_GCR);
3946
3947 /* Only take action if timeout value is defaulted to 0 */
3948 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3949 goto out;
3950
3951 if ((gcr & GCR_CAP_VER2) == 0) {
3952 gcr |= GCR_CMPL_TMOUT_10MS;
3953 goto out;
3954 }
3955
3956 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3957 sc->sc_pcixe_capoff + PCIE_DCSR2);
3958 ctrl2 |= WM_PCIE_DCSR2_16MS;
3959 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3960 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3961
3962 out:
3963 /* Disable completion timeout resend */
3964 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3965
3966 CSR_WRITE(sc, WMREG_GCR, gcr);
3967 }
3968
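/*
 * wm_get_auto_rd_done:
 *
 *	Wait up to 10ms for the EEPROM auto read (EECD_EE_AUTORD) to
 *	complete on devices that support it.
 */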
3969 void
3970 wm_get_auto_rd_done(struct wm_softc *sc)
3971 {
3972 int i;
3973
3974 /* wait for eeprom to reload */
3975 switch (sc->sc_type) {
3976 case WM_T_82571:
3977 case WM_T_82572:
3978 case WM_T_82573:
3979 case WM_T_82574:
3980 case WM_T_82583:
3981 case WM_T_82575:
3982 case WM_T_82576:
3983 case WM_T_82580:
3984 case WM_T_I350:
3985 case WM_T_I354:
3986 case WM_T_I210:
3987 case WM_T_I211:
3988 case WM_T_80003:
3989 case WM_T_ICH8:
3990 case WM_T_ICH9:
3991 for (i = 0; i < 10; i++) {
3992 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3993 break;
3994 delay(1000);
3995 }
3996 if (i == 10) {
3997 log(LOG_ERR, "%s: auto read from eeprom failed to "
3998 "complete\n", device_xname(sc->sc_dev));
3999 }
4000 break;
4001 default:
4002 break;
4003 }
4004 }
4005
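/*
 * wm_lan_init_done:
 *
 *	Wait for the STATUS_LAN_INIT_DONE bit after an NVM reload (ICH10
 *	and PCH variants only), then clear it.
 */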
4006 void
4007 wm_lan_init_done(struct wm_softc *sc)
4008 {
4009 uint32_t reg = 0;
4010 int i;
4011
4012 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4013 device_xname(sc->sc_dev), __func__));
4014
4015 /* Wait for eeprom to reload */
4016 switch (sc->sc_type) {
4017 case WM_T_ICH10:
4018 case WM_T_PCH:
4019 case WM_T_PCH2:
4020 case WM_T_PCH_LPT:
4021 case WM_T_PCH_SPT:
4022 case WM_T_PCH_CNP:
4023 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4024 reg = CSR_READ(sc, WMREG_STATUS);
4025 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4026 break;
4027 delay(100);
4028 }
4029 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4030 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4031 "complete\n", device_xname(sc->sc_dev), __func__);
4032 }
4033 break;
4034 default:
4035 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4036 __func__);
4037 break;
4038 }
4039
4040 reg &= ~STATUS_LAN_INIT_DONE;
4041 CSR_WRITE(sc, WMREG_STATUS, reg);
4042 }
4043
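/*
 * wm_get_cfg_done:
 *
 *	Wait until the hardware has finished loading its configuration
 *	after reset. Depending on the MAC type this is a fixed delay,
 *	polling EEMNGCTL, or waiting for lan_init_done/auto_rd_done.
 */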
4044 void
4045 wm_get_cfg_done(struct wm_softc *sc)
4046 {
4047 int mask;
4048 uint32_t reg;
4049 int i;
4050
4051 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4052 device_xname(sc->sc_dev), __func__));
4053
4054 /* Wait for eeprom to reload */
4055 switch (sc->sc_type) {
4056 case WM_T_82542_2_0:
4057 case WM_T_82542_2_1:
4058 /* null */
4059 break;
4060 case WM_T_82543:
4061 case WM_T_82544:
4062 case WM_T_82540:
4063 case WM_T_82545:
4064 case WM_T_82545_3:
4065 case WM_T_82546:
4066 case WM_T_82546_3:
4067 case WM_T_82541:
4068 case WM_T_82541_2:
4069 case WM_T_82547:
4070 case WM_T_82547_2:
4071 case WM_T_82573:
4072 case WM_T_82574:
4073 case WM_T_82583:
4074 /* generic */
4075 delay(10*1000);
4076 break;
4077 case WM_T_80003:
4078 case WM_T_82571:
4079 case WM_T_82572:
4080 case WM_T_82575:
4081 case WM_T_82576:
4082 case WM_T_82580:
4083 case WM_T_I350:
4084 case WM_T_I354:
4085 case WM_T_I210:
4086 case WM_T_I211:
4087 if (sc->sc_type == WM_T_82571) {
4088 /* Only 82571 shares port 0 */
4089 mask = EEMNGCTL_CFGDONE_0;
4090 } else
4091 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4092 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4093 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4094 break;
4095 delay(1000);
4096 }
4097 if (i >= WM_PHY_CFG_TIMEOUT)
4098 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
4099 device_xname(sc->sc_dev), __func__));
4100 break;
4101 case WM_T_ICH8:
4102 case WM_T_ICH9:
4103 case WM_T_ICH10:
4104 case WM_T_PCH:
4105 case WM_T_PCH2:
4106 case WM_T_PCH_LPT:
4107 case WM_T_PCH_SPT:
4108 case WM_T_PCH_CNP:
4109 delay(10*1000);
4110 if (sc->sc_type >= WM_T_ICH10)
4111 wm_lan_init_done(sc);
4112 else
4113 wm_get_auto_rd_done(sc);
4114
4115 /* Clear PHY Reset Asserted bit */
4116 reg = CSR_READ(sc, WMREG_STATUS);
4117 if ((reg & STATUS_PHYRA) != 0)
4118 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
4119 break;
4120 default:
4121 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4122 __func__);
4123 break;
4124 }
4125 }
4126
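/*
 * wm_phy_post_reset:
 *
 *	Perform the work required after a PHY reset on ICH8 and newer
 *	devices: apply the PCH/PCH2 workarounds, clear the host wakeup
 *	bit and configure the LCD from the NVM (extended configuration
 *	region and OEM bits).
 */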
4127 int
4128 wm_phy_post_reset(struct wm_softc *sc)
4129 {
4130 device_t dev = sc->sc_dev;
4131 uint16_t reg;
4132 int rv = 0;
4133
4134 /* This function is only for ICH8 and newer. */
4135 if (sc->sc_type < WM_T_ICH8)
4136 return 0;
4137
4138 if (wm_phy_resetisblocked(sc)) {
4139 /* XXX */
4140 device_printf(dev, "PHY is blocked\n");
4141 return -1;
4142 }
4143
4144 /* Allow time for h/w to get to quiescent state after reset */
4145 delay(10*1000);
4146
4147 /* Perform any necessary post-reset workarounds */
4148 if (sc->sc_type == WM_T_PCH)
4149 rv = wm_hv_phy_workarounds_ich8lan(sc);
4150 else if (sc->sc_type == WM_T_PCH2)
4151 rv = wm_lv_phy_workarounds_ich8lan(sc);
4152 if (rv != 0)
4153 return rv;
4154
4155 /* Clear the host wakeup bit after lcd reset */
4156 if (sc->sc_type >= WM_T_PCH) {
4157 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
4158 reg &= ~BM_WUC_HOST_WU_BIT;
4159 wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
4160 }
4161
4162 /* Configure the LCD with the extended configuration region in NVM */
4163 if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
4164 return rv;
4165
4166 /* Configure the LCD with the OEM bits in NVM */
4167 rv = wm_oem_bits_config_ich8lan(sc, true);
4168
4169 if (sc->sc_type == WM_T_PCH2) {
4170 /* Ungate automatic PHY configuration on non-managed 82579 */
4171 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
4172 delay(10 * 1000);
4173 wm_gate_hw_phy_config_ich8lan(sc, false);
4174 }
4175 /* Set EEE LPI Update Timer to 200usec */
4176 rv = sc->phy.acquire(sc);
4177 if (rv)
4178 return rv;
4179 rv = wm_write_emi_reg_locked(dev,
4180 I82579_LPI_UPDATE_TIMER, 0x1387);
4181 sc->phy.release(sc);
4182 }
4183
4184 return rv;
4185 }
4186
4187 /* Only for PCH and newer */
4188 static int
4189 wm_write_smbus_addr(struct wm_softc *sc)
4190 {
4191 uint32_t strap, freq;
4192 uint16_t phy_data;
4193 int rv;
4194
4195 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4196 device_xname(sc->sc_dev), __func__));
4197 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
4198
4199 strap = CSR_READ(sc, WMREG_STRAP);
4200 freq = __SHIFTOUT(strap, STRAP_FREQ);
4201
4202 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
4203 if (rv != 0)
4204 return -1;
4205
4206 phy_data &= ~HV_SMB_ADDR_ADDR;
4207 phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
4208 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
4209
4210 if (sc->sc_phytype == WMPHY_I217) {
4211 /* Restore SMBus frequency */
4212 		if (freq--) {
4213 phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
4214 | HV_SMB_ADDR_FREQ_HIGH);
4215 phy_data |= __SHIFTIN((freq & 0x01) != 0,
4216 HV_SMB_ADDR_FREQ_LOW);
4217 phy_data |= __SHIFTIN((freq & 0x02) != 0,
4218 HV_SMB_ADDR_FREQ_HIGH);
4219 } else
4220 DPRINTF(WM_DEBUG_INIT,
4221 ("%s: %s Unsupported SMB frequency in PHY\n",
4222 device_xname(sc->sc_dev), __func__));
4223 }
4224
4225 return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
4226 phy_data);
4227 }
4228
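/*
 * wm_init_lcd_from_nvm:
 *
 *	Configure the LCD (internal PHY) from the extended configuration
 *	region in NVM, including the SMBus address and LED setup, when
 *	software configuration is selected.
 */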
4229 static int
4230 wm_init_lcd_from_nvm(struct wm_softc *sc)
4231 {
4232 uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
4233 uint16_t phy_page = 0;
4234 int rv = 0;
4235
4236 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4237 device_xname(sc->sc_dev), __func__));
4238
4239 switch (sc->sc_type) {
4240 case WM_T_ICH8:
4241 if ((sc->sc_phytype == WMPHY_UNKNOWN)
4242 || (sc->sc_phytype != WMPHY_IGP_3))
4243 return 0;
4244
4245 if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
4246 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
4247 sw_cfg_mask = FEXTNVM_SW_CONFIG;
4248 break;
4249 }
4250 /* FALLTHROUGH */
4251 case WM_T_PCH:
4252 case WM_T_PCH2:
4253 case WM_T_PCH_LPT:
4254 case WM_T_PCH_SPT:
4255 case WM_T_PCH_CNP:
4256 sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
4257 break;
4258 default:
4259 return 0;
4260 }
4261
4262 if ((rv = sc->phy.acquire(sc)) != 0)
4263 return rv;
4264
4265 reg = CSR_READ(sc, WMREG_FEXTNVM);
4266 if ((reg & sw_cfg_mask) == 0)
4267 goto release;
4268
4269 /*
4270 * Make sure HW does not configure LCD from PHY extended configuration
4271 * before SW configuration
4272 */
4273 extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
4274 if ((sc->sc_type < WM_T_PCH2)
4275 && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
4276 goto release;
4277
4278 DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
4279 device_xname(sc->sc_dev), __func__));
4280 /* word_addr is in DWORD */
4281 word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
4282
4283 reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
4284 cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
4285 if (cnf_size == 0)
4286 goto release;
4287
4288 if (((sc->sc_type == WM_T_PCH)
4289 && ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
4290 || (sc->sc_type > WM_T_PCH)) {
4291 /*
4292 * HW configures the SMBus address and LEDs when the OEM and
4293 * LCD Write Enable bits are set in the NVM. When both NVM bits
4294 * are cleared, SW will configure them instead.
4295 */
4296 DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
4297 device_xname(sc->sc_dev), __func__));
4298 if ((rv = wm_write_smbus_addr(sc)) != 0)
4299 goto release;
4300
4301 reg = CSR_READ(sc, WMREG_LEDCTL);
4302 rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
4303 (uint16_t)reg);
4304 if (rv != 0)
4305 goto release;
4306 }
4307
4308 /* Configure LCD from extended configuration region. */
4309 for (i = 0; i < cnf_size; i++) {
4310 uint16_t reg_data, reg_addr;
4311
4312 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
4313 goto release;
4314
4315 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
4316 goto release;
4317
4318 if (reg_addr == MII_IGPHY_PAGE_SELECT)
4319 phy_page = reg_data;
4320
4321 reg_addr &= IGPHY_MAXREGADDR;
4322 reg_addr |= phy_page;
4323
4324 KASSERT(sc->phy.writereg_locked != NULL);
4325 rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
4326 reg_data);
4327 }
4328
4329 release:
4330 sc->phy.release(sc);
4331 return rv;
4332 }
4333
4334 /*
4335 * wm_oem_bits_config_ich8lan - SW-based LCD Configuration
4336 * @sc: pointer to the HW structure
4337 * @d0_state: boolean if entering d0 or d3 device state
4338 *
4339 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
4340 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
4341 * in NVM determines whether HW should configure LPLU and Gbe Disable.
4342 */
4343 int
4344 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
4345 {
4346 uint32_t mac_reg;
4347 uint16_t oem_reg;
4348 int rv;
4349
4350 if (sc->sc_type < WM_T_PCH)
4351 return 0;
4352
4353 rv = sc->phy.acquire(sc);
4354 if (rv != 0)
4355 return rv;
4356
4357 if (sc->sc_type == WM_T_PCH) {
4358 mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4359 if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
4360 goto release;
4361 }
4362
4363 mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
4364 if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
4365 goto release;
4366
4367 mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
4368
4369 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
4370 if (rv != 0)
4371 goto release;
4372 oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
4373
4374 if (d0_state) {
4375 if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
4376 oem_reg |= HV_OEM_BITS_A1KDIS;
4377 if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
4378 oem_reg |= HV_OEM_BITS_LPLU;
4379 } else {
4380 if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
4381 != 0)
4382 oem_reg |= HV_OEM_BITS_A1KDIS;
4383 if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
4384 != 0)
4385 oem_reg |= HV_OEM_BITS_LPLU;
4386 }
4387
4388 /* Set Restart auto-neg to activate the bits */
4389 if ((d0_state || (sc->sc_type != WM_T_PCH))
4390 && (wm_phy_resetisblocked(sc) == false))
4391 oem_reg |= HV_OEM_BITS_ANEGNOW;
4392
4393 rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
4394
4395 release:
4396 sc->phy.release(sc);
4397
4398 return rv;
4399 }
4400
4401 /* Init hardware bits */
4402 void
4403 wm_initialize_hardware_bits(struct wm_softc *sc)
4404 {
4405 uint32_t tarc0, tarc1, reg;
4406
4407 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4408 device_xname(sc->sc_dev), __func__));
4409
4410 /* For 82571 variant, 80003 and ICHs */
4411 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
4412 || (sc->sc_type >= WM_T_80003)) {
4413
4414 /* Transmit Descriptor Control 0 */
4415 reg = CSR_READ(sc, WMREG_TXDCTL(0));
4416 reg |= TXDCTL_COUNT_DESC;
4417 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
4418
4419 /* Transmit Descriptor Control 1 */
4420 reg = CSR_READ(sc, WMREG_TXDCTL(1));
4421 reg |= TXDCTL_COUNT_DESC;
4422 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
4423
4424 /* TARC0 */
4425 tarc0 = CSR_READ(sc, WMREG_TARC0);
4426 switch (sc->sc_type) {
4427 case WM_T_82571:
4428 case WM_T_82572:
4429 case WM_T_82573:
4430 case WM_T_82574:
4431 case WM_T_82583:
4432 case WM_T_80003:
4433 /* Clear bits 30..27 */
4434 tarc0 &= ~__BITS(30, 27);
4435 break;
4436 default:
4437 break;
4438 }
4439
4440 switch (sc->sc_type) {
4441 case WM_T_82571:
4442 case WM_T_82572:
4443 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
4444
4445 tarc1 = CSR_READ(sc, WMREG_TARC1);
4446 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
4447 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
4448 /* 8257[12] Errata No.7 */
4449 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
4450
4451 /* TARC1 bit 28 */
4452 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4453 tarc1 &= ~__BIT(28);
4454 else
4455 tarc1 |= __BIT(28);
4456 CSR_WRITE(sc, WMREG_TARC1, tarc1);
4457
4458 /*
4459 * 8257[12] Errata No.13
4460 			 * Disable Dynamic Clock Gating.
4461 */
4462 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4463 reg &= ~CTRL_EXT_DMA_DYN_CLK;
4464 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4465 break;
4466 case WM_T_82573:
4467 case WM_T_82574:
4468 case WM_T_82583:
4469 if ((sc->sc_type == WM_T_82574)
4470 || (sc->sc_type == WM_T_82583))
4471 tarc0 |= __BIT(26); /* TARC0 bit 26 */
4472
4473 /* Extended Device Control */
4474 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4475 reg &= ~__BIT(23); /* Clear bit 23 */
4476 reg |= __BIT(22); /* Set bit 22 */
4477 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4478
4479 /* Device Control */
4480 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
4481 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4482
4483 /* PCIe Control Register */
4484 /*
4485 * 82573 Errata (unknown).
4486 *
4487 * 82574 Errata 25 and 82583 Errata 12
4488 * "Dropped Rx Packets":
4489 * NVM Image Version 2.1.4 and newer has no this bug.
4490 			 * NVM Image Version 2.1.4 and newer does not have this bug.
4491 reg = CSR_READ(sc, WMREG_GCR);
4492 reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
4493 CSR_WRITE(sc, WMREG_GCR, reg);
4494
4495 if ((sc->sc_type == WM_T_82574)
4496 || (sc->sc_type == WM_T_82583)) {
4497 /*
4498 * Document says this bit must be set for
4499 * proper operation.
4500 */
4501 reg = CSR_READ(sc, WMREG_GCR);
4502 reg |= __BIT(22);
4503 CSR_WRITE(sc, WMREG_GCR, reg);
4504
4505 /*
4506 				 * Apply a workaround for a hardware erratum
4507 				 * documented in the errata docs. It fixes an
4508 				 * issue where some error-prone or unreliable
4509 				 * PCIe completions occur, particularly with
4510 				 * ASPM enabled. Without the fix, the issue
4511 				 * can cause Tx timeouts.
4512 */
4513 reg = CSR_READ(sc, WMREG_GCR2);
4514 reg |= __BIT(0);
4515 CSR_WRITE(sc, WMREG_GCR2, reg);
4516 }
4517 break;
4518 case WM_T_80003:
4519 /* TARC0 */
4520 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
4521 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
4522 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
4523
4524 /* TARC1 bit 28 */
4525 tarc1 = CSR_READ(sc, WMREG_TARC1);
4526 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4527 tarc1 &= ~__BIT(28);
4528 else
4529 tarc1 |= __BIT(28);
4530 CSR_WRITE(sc, WMREG_TARC1, tarc1);
4531 break;
4532 case WM_T_ICH8:
4533 case WM_T_ICH9:
4534 case WM_T_ICH10:
4535 case WM_T_PCH:
4536 case WM_T_PCH2:
4537 case WM_T_PCH_LPT:
4538 case WM_T_PCH_SPT:
4539 case WM_T_PCH_CNP:
4540 /* TARC0 */
4541 if (sc->sc_type == WM_T_ICH8) {
4542 /* Set TARC0 bits 29 and 28 */
4543 tarc0 |= __BITS(29, 28);
4544 } else if (sc->sc_type == WM_T_PCH_SPT) {
4545 tarc0 |= __BIT(29);
4546 /*
4547 * Drop bit 28. From Linux.
4548 * See I218/I219 spec update
4549 * "5. Buffer Overrun While the I219 is
4550 * Processing DMA Transactions"
4551 */
4552 tarc0 &= ~__BIT(28);
4553 }
4554 /* Set TARC0 bits 23,24,26,27 */
4555 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
4556
4557 /* CTRL_EXT */
4558 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4559 reg |= __BIT(22); /* Set bit 22 */
4560 /*
4561 * Enable PHY low-power state when MAC is at D3
4562 * w/o WoL
4563 */
4564 if (sc->sc_type >= WM_T_PCH)
4565 reg |= CTRL_EXT_PHYPDEN;
4566 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4567
4568 /* TARC1 */
4569 tarc1 = CSR_READ(sc, WMREG_TARC1);
4570 /* bit 28 */
4571 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4572 tarc1 &= ~__BIT(28);
4573 else
4574 tarc1 |= __BIT(28);
4575 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
4576 CSR_WRITE(sc, WMREG_TARC1, tarc1);
4577
4578 /* Device Status */
4579 if (sc->sc_type == WM_T_ICH8) {
4580 reg = CSR_READ(sc, WMREG_STATUS);
4581 reg &= ~__BIT(31);
4582 CSR_WRITE(sc, WMREG_STATUS, reg);
4583
4584 }
4585
4586 /* IOSFPC */
4587 if (sc->sc_type == WM_T_PCH_SPT) {
4588 reg = CSR_READ(sc, WMREG_IOSFPC);
4589 reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
4590 CSR_WRITE(sc, WMREG_IOSFPC, reg);
4591 }
4592 /*
4593 			 * To work around a descriptor data corruption issue
4594 			 * seen with NFS v2 UDP traffic, just disable the NFS
4595 			 * filtering capability.
4596 */
4597 reg = CSR_READ(sc, WMREG_RFCTL);
4598 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
4599 CSR_WRITE(sc, WMREG_RFCTL, reg);
4600 break;
4601 default:
4602 break;
4603 }
4604 CSR_WRITE(sc, WMREG_TARC0, tarc0);
4605
4606 switch (sc->sc_type) {
4607 /*
4608 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
4609 * Avoid RSS Hash Value bug.
4610 */
4611 case WM_T_82571:
4612 case WM_T_82572:
4613 case WM_T_82573:
4614 case WM_T_80003:
4615 case WM_T_ICH8:
4616 reg = CSR_READ(sc, WMREG_RFCTL);
4617 reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
4618 CSR_WRITE(sc, WMREG_RFCTL, reg);
4619 break;
4620 case WM_T_82574:
4621 		/* Use the extended Rx descriptor. */
4622 reg = CSR_READ(sc, WMREG_RFCTL);
4623 reg |= WMREG_RFCTL_EXSTEN;
4624 CSR_WRITE(sc, WMREG_RFCTL, reg);
4625 break;
4626 default:
4627 break;
4628 }
4629 } else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
4630 /*
4631 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
4632 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
4633 * "Certain Malformed IPv6 Extension Headers are Not Processed
4634 * Correctly by the Device"
4635 *
4636 * I354(C2000) Errata AVR53:
4637 * "Malformed IPv6 Extension Headers May Result in LAN Device
4638 * Hang"
4639 */
4640 reg = CSR_READ(sc, WMREG_RFCTL);
4641 reg |= WMREG_RFCTL_IPV6EXDIS;
4642 CSR_WRITE(sc, WMREG_RFCTL, reg);
4643 }
4644 }
4645
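/*
 * wm_rxpbs_adjust_82580:
 *
 *	Map the raw RXPBS register value on the 82580 to the adjusted
 *	packet buffer size using wm_82580_rxpbs_table; out-of-range
 *	values yield 0.
 */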
4646 static uint32_t
4647 wm_rxpbs_adjust_82580(uint32_t val)
4648 {
4649 uint32_t rv = 0;
4650
4651 if (val < __arraycount(wm_82580_rxpbs_table))
4652 rv = wm_82580_rxpbs_table[val];
4653
4654 return rv;
4655 }
4656
4657 /*
4658 * wm_reset_phy:
4659 *
4660 * generic PHY reset function.
4661 * Same as e1000_phy_hw_reset_generic()
4662 */
4663 static int
4664 wm_reset_phy(struct wm_softc *sc)
4665 {
4666 uint32_t reg;
4667
4668 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4669 device_xname(sc->sc_dev), __func__));
4670 if (wm_phy_resetisblocked(sc))
4671 return -1;
4672
4673 sc->phy.acquire(sc);
4674
4675 reg = CSR_READ(sc, WMREG_CTRL);
4676 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
4677 CSR_WRITE_FLUSH(sc);
4678
4679 delay(sc->phy.reset_delay_us);
4680
4681 CSR_WRITE(sc, WMREG_CTRL, reg);
4682 CSR_WRITE_FLUSH(sc);
4683
4684 delay(150);
4685
4686 sc->phy.release(sc);
4687
4688 wm_get_cfg_done(sc);
4689 wm_phy_post_reset(sc);
4690
4691 return 0;
4692 }
4693
4694 /*
4695 * Only used by WM_T_PCH_SPT which does not use multiqueue,
4696 * so it is enough to check sc->sc_queue[0] only.
4697 */
4698 static void
4699 wm_flush_desc_rings(struct wm_softc *sc)
4700 {
4701 pcireg_t preg;
4702 uint32_t reg;
4703 struct wm_txqueue *txq;
4704 wiseman_txdesc_t *txd;
4705 int nexttx;
4706 uint32_t rctl;
4707
4708 /* First, disable MULR fix in FEXTNVM11 */
4709 reg = CSR_READ(sc, WMREG_FEXTNVM11);
4710 reg |= FEXTNVM11_DIS_MULRFIX;
4711 CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
4712
4713 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
4714 reg = CSR_READ(sc, WMREG_TDLEN(0));
4715 if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
4716 return;
4717
4718 /* TX */
4719 device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
4720 preg, reg);
4721 reg = CSR_READ(sc, WMREG_TCTL);
4722 CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
4723
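	/*
	 * Post a single dummy 512-byte descriptor and advance the tail
	 * pointer so the hardware processes the pending TX descriptors;
	 * the descriptor ring flush request should then clear.
	 */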
4724 txq = &sc->sc_queue[0].wmq_txq;
4725 nexttx = txq->txq_next;
4726 txd = &txq->txq_descs[nexttx];
4727 wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
4728 txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
4729 txd->wtx_fields.wtxu_status = 0;
4730 txd->wtx_fields.wtxu_options = 0;
4731 txd->wtx_fields.wtxu_vlan = 0;
4732
4733 bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
4734 BUS_SPACE_BARRIER_WRITE);
4735
4736 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
4737 CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
4738 bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
4739 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
4740 delay(250);
4741
4742 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
4743 if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
4744 return;
4745
4746 /* RX */
4747 device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
4748 rctl = CSR_READ(sc, WMREG_RCTL);
4749 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
4750 CSR_WRITE_FLUSH(sc);
4751 delay(150);
4752
4753 reg = CSR_READ(sc, WMREG_RXDCTL(0));
4754 /* Zero the lower 14 bits (prefetch and host thresholds) */
4755 reg &= 0xffffc000;
4756 /*
4757 * Update thresholds: prefetch threshold to 31, host threshold
4758 * to 1 and make sure the granularity is "descriptors" and not
4759 * "cache lines"
4760 */
4761 reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
4762 CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
4763
4764 /* Momentarily enable the RX ring for the changes to take effect */
4765 CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
4766 CSR_WRITE_FLUSH(sc);
4767 delay(150);
4768 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
4769 }
4770
4771 /*
4772 * wm_reset:
4773 *
4774 * Reset the i82542 chip.
4775 */
4776 static void
4777 wm_reset(struct wm_softc *sc)
4778 {
4779 int phy_reset = 0;
4780 int i, error = 0;
4781 uint32_t reg;
4782 uint16_t kmreg;
4783 int rv;
4784
4785 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4786 device_xname(sc->sc_dev), __func__));
4787 KASSERT(sc->sc_type != 0);
4788
4789 /*
4790 * Allocate on-chip memory according to the MTU size.
4791 * The Packet Buffer Allocation register must be written
4792 * before the chip is reset.
4793 */
4794 switch (sc->sc_type) {
4795 case WM_T_82547:
4796 case WM_T_82547_2:
4797 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4798 PBA_22K : PBA_30K;
4799 for (i = 0; i < sc->sc_nqueues; i++) {
4800 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4801 txq->txq_fifo_head = 0;
4802 txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4803 txq->txq_fifo_size =
4804 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4805 txq->txq_fifo_stall = 0;
4806 }
4807 break;
4808 case WM_T_82571:
4809 case WM_T_82572:
4810 	case WM_T_82575: /* XXX need special handling for jumbo frames */
4811 case WM_T_80003:
4812 sc->sc_pba = PBA_32K;
4813 break;
4814 case WM_T_82573:
4815 sc->sc_pba = PBA_12K;
4816 break;
4817 case WM_T_82574:
4818 case WM_T_82583:
4819 sc->sc_pba = PBA_20K;
4820 break;
4821 case WM_T_82576:
4822 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
4823 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
4824 break;
4825 case WM_T_82580:
4826 case WM_T_I350:
4827 case WM_T_I354:
4828 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
4829 break;
4830 case WM_T_I210:
4831 case WM_T_I211:
4832 sc->sc_pba = PBA_34K;
4833 break;
4834 case WM_T_ICH8:
4835 /* Workaround for a bit corruption issue in FIFO memory */
4836 sc->sc_pba = PBA_8K;
4837 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4838 break;
4839 case WM_T_ICH9:
4840 case WM_T_ICH10:
4841 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
4842 PBA_14K : PBA_10K;
4843 break;
4844 case WM_T_PCH:
4845 case WM_T_PCH2: /* XXX 14K? */
4846 case WM_T_PCH_LPT:
4847 case WM_T_PCH_SPT:
4848 case WM_T_PCH_CNP:
4849 sc->sc_pba = PBA_26K;
4850 break;
4851 default:
4852 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4853 PBA_40K : PBA_48K;
4854 break;
4855 }
4856 /*
4857 * Only old or non-multiqueue devices have the PBA register
4858 * XXX Need special handling for 82575.
4859 */
4860 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4861 || (sc->sc_type == WM_T_82575))
4862 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4863
4864 /* Prevent the PCI-E bus from sticking */
4865 if (sc->sc_flags & WM_F_PCIE) {
4866 int timeout = 800;
4867
4868 sc->sc_ctrl |= CTRL_GIO_M_DIS;
4869 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4870
4871 while (timeout--) {
4872 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4873 == 0)
4874 break;
4875 delay(100);
4876 }
4877 if (timeout == 0)
4878 device_printf(sc->sc_dev,
4879 "failed to disable busmastering\n");
4880 }
4881
4882 /* Set the completion timeout for interface */
4883 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4884 || (sc->sc_type == WM_T_82580)
4885 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4886 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
4887 wm_set_pcie_completion_timeout(sc);
4888
4889 /* Clear interrupt */
4890 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4891 if (wm_is_using_msix(sc)) {
4892 if (sc->sc_type != WM_T_82574) {
4893 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4894 CSR_WRITE(sc, WMREG_EIAC, 0);
4895 } else
4896 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4897 }
4898
4899 /* Stop the transmit and receive processes. */
4900 CSR_WRITE(sc, WMREG_RCTL, 0);
4901 sc->sc_rctl &= ~RCTL_EN;
4902 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4903 CSR_WRITE_FLUSH(sc);
4904
4905 /* XXX set_tbi_sbp_82543() */
4906
4907 delay(10*1000);
4908
4909 /* Must acquire the MDIO ownership before MAC reset */
4910 switch (sc->sc_type) {
4911 case WM_T_82573:
4912 case WM_T_82574:
4913 case WM_T_82583:
4914 error = wm_get_hw_semaphore_82573(sc);
4915 break;
4916 default:
4917 break;
4918 }
4919
4920 /*
4921 * 82541 Errata 29? & 82547 Errata 28?
4922 * See also the description about PHY_RST bit in CTRL register
4923 * in 8254x_GBe_SDM.pdf.
4924 */
4925 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4926 CSR_WRITE(sc, WMREG_CTRL,
4927 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4928 CSR_WRITE_FLUSH(sc);
4929 delay(5000);
4930 }
4931
4932 switch (sc->sc_type) {
4933 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4934 case WM_T_82541:
4935 case WM_T_82541_2:
4936 case WM_T_82547:
4937 case WM_T_82547_2:
4938 /*
4939 * On some chipsets, a reset through a memory-mapped write
4940 * cycle can cause the chip to reset before completing the
4941 		 * write cycle. This causes a major headache that can be avoided
4942 * by issuing the reset via indirect register writes through
4943 * I/O space.
4944 *
4945 * So, if we successfully mapped the I/O BAR at attach time,
4946 * use that. Otherwise, try our luck with a memory-mapped
4947 * reset.
4948 */
4949 if (sc->sc_flags & WM_F_IOH_VALID)
4950 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4951 else
4952 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4953 break;
4954 case WM_T_82545_3:
4955 case WM_T_82546_3:
4956 /* Use the shadow control register on these chips. */
4957 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4958 break;
4959 case WM_T_80003:
4960 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4961 sc->phy.acquire(sc);
4962 CSR_WRITE(sc, WMREG_CTRL, reg);
4963 sc->phy.release(sc);
4964 break;
4965 case WM_T_ICH8:
4966 case WM_T_ICH9:
4967 case WM_T_ICH10:
4968 case WM_T_PCH:
4969 case WM_T_PCH2:
4970 case WM_T_PCH_LPT:
4971 case WM_T_PCH_SPT:
4972 case WM_T_PCH_CNP:
4973 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4974 if (wm_phy_resetisblocked(sc) == false) {
4975 /*
4976 * Gate automatic PHY configuration by hardware on
4977 * non-managed 82579
4978 */
4979 if ((sc->sc_type == WM_T_PCH2)
4980 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4981 == 0))
4982 wm_gate_hw_phy_config_ich8lan(sc, true);
4983
4984 reg |= CTRL_PHY_RESET;
4985 phy_reset = 1;
4986 } else
4987 device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
4988 sc->phy.acquire(sc);
4989 CSR_WRITE(sc, WMREG_CTRL, reg);
4990 		/* Don't insert a completion barrier when resetting */
4991 delay(20*1000);
4992 mutex_exit(sc->sc_ich_phymtx);
4993 break;
4994 case WM_T_82580:
4995 case WM_T_I350:
4996 case WM_T_I354:
4997 case WM_T_I210:
4998 case WM_T_I211:
4999 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5000 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
5001 CSR_WRITE_FLUSH(sc);
5002 delay(5000);
5003 break;
5004 case WM_T_82542_2_0:
5005 case WM_T_82542_2_1:
5006 case WM_T_82543:
5007 case WM_T_82540:
5008 case WM_T_82545:
5009 case WM_T_82546:
5010 case WM_T_82571:
5011 case WM_T_82572:
5012 case WM_T_82573:
5013 case WM_T_82574:
5014 case WM_T_82575:
5015 case WM_T_82576:
5016 case WM_T_82583:
5017 default:
5018 /* Everything else can safely use the documented method. */
5019 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5020 break;
5021 }
5022
5023 /* Must release the MDIO ownership after MAC reset */
5024 switch (sc->sc_type) {
5025 case WM_T_82573:
5026 case WM_T_82574:
5027 case WM_T_82583:
5028 if (error == 0)
5029 wm_put_hw_semaphore_82573(sc);
5030 break;
5031 default:
5032 break;
5033 }
5034
5035 /* Set Phy Config Counter to 50msec */
5036 if (sc->sc_type == WM_T_PCH2) {
5037 reg = CSR_READ(sc, WMREG_FEXTNVM3);
5038 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
5039 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
5040 CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
5041 }
5042
5043 if (phy_reset != 0)
5044 wm_get_cfg_done(sc);
5045
5046 /* Reload EEPROM */
5047 switch (sc->sc_type) {
5048 case WM_T_82542_2_0:
5049 case WM_T_82542_2_1:
5050 case WM_T_82543:
5051 case WM_T_82544:
5052 delay(10);
5053 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5054 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5055 CSR_WRITE_FLUSH(sc);
5056 delay(2000);
5057 break;
5058 case WM_T_82540:
5059 case WM_T_82545:
5060 case WM_T_82545_3:
5061 case WM_T_82546:
5062 case WM_T_82546_3:
5063 delay(5*1000);
5064 /* XXX Disable HW ARPs on ASF enabled adapters */
5065 break;
5066 case WM_T_82541:
5067 case WM_T_82541_2:
5068 case WM_T_82547:
5069 case WM_T_82547_2:
5070 delay(20000);
5071 /* XXX Disable HW ARPs on ASF enabled adapters */
5072 break;
5073 case WM_T_82571:
5074 case WM_T_82572:
5075 case WM_T_82573:
5076 case WM_T_82574:
5077 case WM_T_82583:
5078 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
5079 delay(10);
5080 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5081 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5082 CSR_WRITE_FLUSH(sc);
5083 }
5084 /* check EECD_EE_AUTORD */
5085 wm_get_auto_rd_done(sc);
5086 /*
5087 * Phy configuration from NVM just starts after EECD_AUTO_RD
5088 * is set.
5089 */
5090 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
5091 || (sc->sc_type == WM_T_82583))
5092 delay(25*1000);
5093 break;
5094 case WM_T_82575:
5095 case WM_T_82576:
5096 case WM_T_82580:
5097 case WM_T_I350:
5098 case WM_T_I354:
5099 case WM_T_I210:
5100 case WM_T_I211:
5101 case WM_T_80003:
5102 /* check EECD_EE_AUTORD */
5103 wm_get_auto_rd_done(sc);
5104 break;
5105 case WM_T_ICH8:
5106 case WM_T_ICH9:
5107 case WM_T_ICH10:
5108 case WM_T_PCH:
5109 case WM_T_PCH2:
5110 case WM_T_PCH_LPT:
5111 case WM_T_PCH_SPT:
5112 case WM_T_PCH_CNP:
5113 break;
5114 default:
5115 panic("%s: unknown type\n", __func__);
5116 }
5117
5118 /* Check whether EEPROM is present or not */
5119 switch (sc->sc_type) {
5120 case WM_T_82575:
5121 case WM_T_82576:
5122 case WM_T_82580:
5123 case WM_T_I350:
5124 case WM_T_I354:
5125 case WM_T_ICH8:
5126 case WM_T_ICH9:
5127 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
5128 /* Not found */
5129 sc->sc_flags |= WM_F_EEPROM_INVALID;
5130 if (sc->sc_type == WM_T_82575)
5131 wm_reset_init_script_82575(sc);
5132 }
5133 break;
5134 default:
5135 break;
5136 }
5137
5138 if (phy_reset != 0)
5139 wm_phy_post_reset(sc);
5140
5141 if ((sc->sc_type == WM_T_82580)
5142 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
5143 /* Clear global device reset status bit */
5144 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
5145 }
5146
5147 /* Clear any pending interrupt events. */
5148 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5149 reg = CSR_READ(sc, WMREG_ICR);
5150 if (wm_is_using_msix(sc)) {
5151 if (sc->sc_type != WM_T_82574) {
5152 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5153 CSR_WRITE(sc, WMREG_EIAC, 0);
5154 } else
5155 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5156 }
5157
5158 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5159 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5160 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5161 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
5162 reg = CSR_READ(sc, WMREG_KABGTXD);
5163 reg |= KABGTXD_BGSQLBIAS;
5164 CSR_WRITE(sc, WMREG_KABGTXD, reg);
5165 }
5166
5167 /* Reload sc_ctrl */
5168 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5169
5170 wm_set_eee(sc);
5171
5172 /*
5173 * For PCH, this write will make sure that any noise will be detected
5174 * as a CRC error and be dropped rather than show up as a bad packet
5175 * to the DMA engine
5176 */
5177 if (sc->sc_type == WM_T_PCH)
5178 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
5179
5180 if (sc->sc_type >= WM_T_82544)
5181 CSR_WRITE(sc, WMREG_WUC, 0);
5182
5183 if (sc->sc_type < WM_T_82575)
5184 wm_disable_aspm(sc); /* Workaround for some chips */
5185
5186 wm_reset_mdicnfg_82580(sc);
5187
5188 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
5189 wm_pll_workaround_i210(sc);
5190
5191 if (sc->sc_type == WM_T_80003) {
5192 /* Default to TRUE to enable the MDIC W/A */
5193 sc->sc_flags |= WM_F_80003_MDIC_WA;
5194
5195 rv = wm_kmrn_readreg(sc,
5196 KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
5197 if (rv == 0) {
5198 if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
5199 == KUMCTRLSTA_OPMODE_INBAND_MDIO)
5200 sc->sc_flags &= ~WM_F_80003_MDIC_WA;
5201 else
5202 sc->sc_flags |= WM_F_80003_MDIC_WA;
5203 }
5204 }
5205 }
5206
5207 /*
5208 * wm_add_rxbuf:
5209 *
5210  *	Add a receive buffer to the indicated descriptor.
5211 */
5212 static int
5213 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
5214 {
5215 struct wm_softc *sc = rxq->rxq_sc;
5216 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
5217 struct mbuf *m;
5218 int error;
5219
5220 KASSERT(mutex_owned(rxq->rxq_lock));
5221
5222 MGETHDR(m, M_DONTWAIT, MT_DATA);
5223 if (m == NULL)
5224 return ENOBUFS;
5225
5226 MCLGET(m, M_DONTWAIT);
5227 if ((m->m_flags & M_EXT) == 0) {
5228 m_freem(m);
5229 return ENOBUFS;
5230 }
5231
5232 if (rxs->rxs_mbuf != NULL)
5233 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5234
5235 rxs->rxs_mbuf = m;
5236
5237 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5238 /*
5239 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
5240 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
5241 */
5242 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
5243 m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
5244 if (error) {
5245 /* XXX XXX XXX */
5246 aprint_error_dev(sc->sc_dev,
5247 "unable to load rx DMA map %d, error = %d\n", idx, error);
5248 panic("wm_add_rxbuf");
5249 }
5250
5251 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5252 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5253
5254 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5255 if ((sc->sc_rctl & RCTL_EN) != 0)
5256 wm_init_rxdesc(rxq, idx);
5257 } else
5258 wm_init_rxdesc(rxq, idx);
5259
5260 return 0;
5261 }
5262
5263 /*
5264 * wm_rxdrain:
5265 *
5266 * Drain the receive queue.
5267 */
5268 static void
5269 wm_rxdrain(struct wm_rxqueue *rxq)
5270 {
5271 struct wm_softc *sc = rxq->rxq_sc;
5272 struct wm_rxsoft *rxs;
5273 int i;
5274
5275 KASSERT(mutex_owned(rxq->rxq_lock));
5276
5277 for (i = 0; i < WM_NRXDESC; i++) {
5278 rxs = &rxq->rxq_soft[i];
5279 if (rxs->rxs_mbuf != NULL) {
5280 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5281 m_freem(rxs->rxs_mbuf);
5282 rxs->rxs_mbuf = NULL;
5283 }
5284 }
5285 }
5286
5287 /*
5288 * Setup registers for RSS.
5289 *
5290  * XXX VMDq is not supported yet.
5291 */
5292 static void
5293 wm_init_rss(struct wm_softc *sc)
5294 {
5295 uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
5296 int i;
5297
5298 CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
5299
5300 for (i = 0; i < RETA_NUM_ENTRIES; i++) {
5301 unsigned int qid, reta_ent;
5302
5303 qid = i % sc->sc_nqueues;
5304 switch (sc->sc_type) {
5305 case WM_T_82574:
5306 reta_ent = __SHIFTIN(qid,
5307 RETA_ENT_QINDEX_MASK_82574);
5308 break;
5309 case WM_T_82575:
5310 reta_ent = __SHIFTIN(qid,
5311 RETA_ENT_QINDEX1_MASK_82575);
5312 break;
5313 default:
5314 reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
5315 break;
5316 }
5317
5318 reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
5319 reta_reg &= ~RETA_ENTRY_MASK_Q(i);
5320 reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
5321 CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
5322 }
5323
5324 rss_getkey((uint8_t *)rss_key);
5325 for (i = 0; i < RSSRK_NUM_REGS; i++)
5326 CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
5327
5328 if (sc->sc_type == WM_T_82574)
5329 mrqc = MRQC_ENABLE_RSS_MQ_82574;
5330 else
5331 mrqc = MRQC_ENABLE_RSS_MQ;
5332
5333 /*
5334 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
5335 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
5336 */
5337 mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
5338 mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
5339 #if 0
5340 mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
5341 mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
5342 #endif
5343 mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
5344
5345 CSR_WRITE(sc, WMREG_MRQC, mrqc);
5346 }
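/*
 * Illustrative note (not driver code): because qid is computed as
 * i % sc->sc_nqueues above, a configuration with sc_nqueues == 4
 * programs the redirection table entries to cycle through queue
 * indices 0, 1, 2, 3, 0, 1, ... so that hashed flows are spread evenly
 * across the configured queues.
 */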
5347
5348 /*
5349  * Adjust the TX and RX queue numbers which the system actually uses.
5350  *
5351  * The numbers are affected by the following parameters:
5352  *  - The number of hardware queues
5353 * - The number of MSI-X vectors (= "nvectors" argument)
5354 * - ncpu
5355 */
5356 static void
5357 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
5358 {
5359 int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
5360
5361 if (nvectors < 2) {
5362 sc->sc_nqueues = 1;
5363 return;
5364 }
5365
5366 switch (sc->sc_type) {
5367 case WM_T_82572:
5368 hw_ntxqueues = 2;
5369 hw_nrxqueues = 2;
5370 break;
5371 case WM_T_82574:
5372 hw_ntxqueues = 2;
5373 hw_nrxqueues = 2;
5374 break;
5375 case WM_T_82575:
5376 hw_ntxqueues = 4;
5377 hw_nrxqueues = 4;
5378 break;
5379 case WM_T_82576:
5380 hw_ntxqueues = 16;
5381 hw_nrxqueues = 16;
5382 break;
5383 case WM_T_82580:
5384 case WM_T_I350:
5385 case WM_T_I354:
5386 hw_ntxqueues = 8;
5387 hw_nrxqueues = 8;
5388 break;
5389 case WM_T_I210:
5390 hw_ntxqueues = 4;
5391 hw_nrxqueues = 4;
5392 break;
5393 case WM_T_I211:
5394 hw_ntxqueues = 2;
5395 hw_nrxqueues = 2;
5396 break;
5397 /*
5398 	 * As the ethernet controllers below do not support MSI-X,
5399 	 * this driver does not use multiqueue on them.
5400 * - WM_T_80003
5401 * - WM_T_ICH8
5402 * - WM_T_ICH9
5403 * - WM_T_ICH10
5404 * - WM_T_PCH
5405 * - WM_T_PCH2
5406 * - WM_T_PCH_LPT
5407 */
5408 default:
5409 hw_ntxqueues = 1;
5410 hw_nrxqueues = 1;
5411 break;
5412 }
5413
5414 hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
5415
5416 /*
5417 	 * As using more queues than MSI-X vectors cannot improve scaling,
5418 	 * we limit the number of queues actually used.
5419 */
5420 if (nvectors < hw_nqueues + 1)
5421 sc->sc_nqueues = nvectors - 1;
5422 else
5423 sc->sc_nqueues = hw_nqueues;
5424
5425 /*
5426 	 * As using more queues than CPUs cannot improve scaling,
5427 	 * we limit the number of queues actually used.
5428 */
5429 if (ncpu < sc->sc_nqueues)
5430 sc->sc_nqueues = ncpu;
5431 }
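/*
 * Worked example (illustrative only): on an 82576 (16 hardware queues)
 * with nvectors == 5 and ncpu == 8, hw_nqueues is 16, the MSI-X limit
 * gives sc_nqueues = nvectors - 1 = 4 (one vector is reserved for the
 * link interrupt), and the ncpu clamp leaves it at 4.
 */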
5432
5433 static inline bool
5434 wm_is_using_msix(struct wm_softc *sc)
5435 {
5436
5437 return (sc->sc_nintrs > 1);
5438 }
5439
5440 static inline bool
5441 wm_is_using_multiqueue(struct wm_softc *sc)
5442 {
5443
5444 return (sc->sc_nqueues > 1);
5445 }
5446
5447 static int
5448 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
5449 {
5450 struct wm_queue *wmq = &sc->sc_queue[qidx];
5451
5452 wmq->wmq_id = qidx;
5453 wmq->wmq_intr_idx = intr_idx;
5454 wmq->wmq_si = softint_establish(SOFTINT_NET
5455 #ifdef WM_MPSAFE
5456 | SOFTINT_MPSAFE
5457 #endif
5458 , wm_handle_queue, wmq);
5459 if (wmq->wmq_si != NULL)
5460 return 0;
5461
5462 aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
5463 wmq->wmq_id);
5464 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
5465 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5466 return ENOMEM;
5467 }
5468
5469 /*
5470 * Both single interrupt MSI and INTx can use this function.
5471 */
5472 static int
5473 wm_setup_legacy(struct wm_softc *sc)
5474 {
5475 pci_chipset_tag_t pc = sc->sc_pc;
5476 const char *intrstr = NULL;
5477 char intrbuf[PCI_INTRSTR_LEN];
5478 int error;
5479
5480 error = wm_alloc_txrx_queues(sc);
5481 if (error) {
5482 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5483 error);
5484 return ENOMEM;
5485 }
5486 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
5487 sizeof(intrbuf));
5488 #ifdef WM_MPSAFE
5489 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
5490 #endif
5491 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
5492 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
5493 if (sc->sc_ihs[0] == NULL) {
5494 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
5495 (pci_intr_type(pc, sc->sc_intrs[0])
5496 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
5497 return ENOMEM;
5498 }
5499
5500 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5501 sc->sc_nintrs = 1;
5502
5503 return wm_softint_establish(sc, 0, 0);
5504 }
5505
5506 static int
5507 wm_setup_msix(struct wm_softc *sc)
5508 {
5509 void *vih;
5510 kcpuset_t *affinity;
5511 int qidx, error, intr_idx, txrx_established;
5512 pci_chipset_tag_t pc = sc->sc_pc;
5513 const char *intrstr = NULL;
5514 char intrbuf[PCI_INTRSTR_LEN];
5515 char intr_xname[INTRDEVNAMEBUF];
5516
5517 if (sc->sc_nqueues < ncpu) {
5518 /*
5519 * To avoid other devices' interrupts, the affinity of Tx/Rx
5520 		 * interrupts starts from CPU#1.
5521 */
5522 sc->sc_affinity_offset = 1;
5523 } else {
5524 /*
5525 		 * In this case, this device uses all CPUs, so we make the
5526 		 * affinity cpu_index match the MSI-X vector number for readability.
5527 */
5528 sc->sc_affinity_offset = 0;
5529 }
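/*
 * Illustrative example: with sc_nqueues == 4 and ncpu == 8, the offset
 * is 1, so the TXRX0..TXRX3 vectors established below are bound to
 * CPU#1..CPU#4 via (sc_affinity_offset + intr_idx) % ncpu, while the
 * LINK vector keeps the default affinity.
 */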
5530
5531 error = wm_alloc_txrx_queues(sc);
5532 if (error) {
5533 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5534 error);
5535 return ENOMEM;
5536 }
5537
5538 kcpuset_create(&affinity, false);
5539 intr_idx = 0;
5540
5541 /*
5542 * TX and RX
5543 */
5544 txrx_established = 0;
5545 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5546 struct wm_queue *wmq = &sc->sc_queue[qidx];
5547 int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
5548
5549 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5550 sizeof(intrbuf));
5551 #ifdef WM_MPSAFE
5552 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
5553 PCI_INTR_MPSAFE, true);
5554 #endif
5555 memset(intr_xname, 0, sizeof(intr_xname));
5556 snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
5557 device_xname(sc->sc_dev), qidx);
5558 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5559 IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
5560 if (vih == NULL) {
5561 aprint_error_dev(sc->sc_dev,
5562 "unable to establish MSI-X(for TX and RX)%s%s\n",
5563 intrstr ? " at " : "",
5564 intrstr ? intrstr : "");
5565
5566 goto fail;
5567 }
5568 kcpuset_zero(affinity);
5569 /* Round-robin affinity */
5570 kcpuset_set(affinity, affinity_to);
5571 error = interrupt_distribute(vih, affinity, NULL);
5572 if (error == 0) {
5573 aprint_normal_dev(sc->sc_dev,
5574 "for TX and RX interrupting at %s affinity to %u\n",
5575 intrstr, affinity_to);
5576 } else {
5577 aprint_normal_dev(sc->sc_dev,
5578 "for TX and RX interrupting at %s\n", intrstr);
5579 }
5580 sc->sc_ihs[intr_idx] = vih;
5581 if (wm_softint_establish(sc, qidx, intr_idx) != 0)
5582 goto fail;
5583 txrx_established++;
5584 intr_idx++;
5585 }
5586
5587 /* LINK */
5588 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5589 sizeof(intrbuf));
5590 #ifdef WM_MPSAFE
5591 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
5592 #endif
5593 memset(intr_xname, 0, sizeof(intr_xname));
5594 snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
5595 device_xname(sc->sc_dev));
5596 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5597 IPL_NET, wm_linkintr_msix, sc, intr_xname);
5598 if (vih == NULL) {
5599 aprint_error_dev(sc->sc_dev,
5600 "unable to establish MSI-X(for LINK)%s%s\n",
5601 intrstr ? " at " : "",
5602 intrstr ? intrstr : "");
5603
5604 goto fail;
5605 }
5606 /* Keep default affinity to LINK interrupt */
5607 aprint_normal_dev(sc->sc_dev,
5608 "for LINK interrupting at %s\n", intrstr);
5609 sc->sc_ihs[intr_idx] = vih;
5610 sc->sc_link_intr_idx = intr_idx;
5611
5612 sc->sc_nintrs = sc->sc_nqueues + 1;
5613 kcpuset_destroy(affinity);
5614 return 0;
5615
5616 fail:
5617 for (qidx = 0; qidx < txrx_established; qidx++) {
5618 struct wm_queue *wmq = &sc->sc_queue[qidx];
5619 pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
5620 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5621 }
5622
5623 kcpuset_destroy(affinity);
5624 return ENOMEM;
5625 }
5626
5627 static void
5628 wm_unset_stopping_flags(struct wm_softc *sc)
5629 {
5630 int i;
5631
5632 KASSERT(WM_CORE_LOCKED(sc));
5633
5634 /* Must unset stopping flags in ascending order. */
5635 for (i = 0; i < sc->sc_nqueues; i++) {
5636 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5637 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5638
5639 mutex_enter(txq->txq_lock);
5640 txq->txq_stopping = false;
5641 mutex_exit(txq->txq_lock);
5642
5643 mutex_enter(rxq->rxq_lock);
5644 rxq->rxq_stopping = false;
5645 mutex_exit(rxq->rxq_lock);
5646 }
5647
5648 sc->sc_core_stopping = false;
5649 }
5650
5651 static void
5652 wm_set_stopping_flags(struct wm_softc *sc)
5653 {
5654 int i;
5655
5656 KASSERT(WM_CORE_LOCKED(sc));
5657
5658 sc->sc_core_stopping = true;
5659
5660 /* Must set stopping flags in ascending order. */
5661 for (i = 0; i < sc->sc_nqueues; i++) {
5662 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5663 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5664
5665 mutex_enter(rxq->rxq_lock);
5666 rxq->rxq_stopping = true;
5667 mutex_exit(rxq->rxq_lock);
5668
5669 mutex_enter(txq->txq_lock);
5670 txq->txq_stopping = true;
5671 mutex_exit(txq->txq_lock);
5672 }
5673 }
5674
5675 /*
5676 * Write interrupt interval value to ITR or EITR
5677 */
5678 static void
5679 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
5680 {
5681
5682 if (!wmq->wmq_set_itr)
5683 return;
5684
5685 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5686 uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
5687
5688 /*
5689 		 * The 82575 doesn't have the CNT_INGR field,
5690 		 * so overwrite the counter field in software.
5691 */
5692 if (sc->sc_type == WM_T_82575)
5693 eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
5694 else
5695 eitr |= EITR_CNT_INGR;
5696
5697 CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
5698 } else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
5699 /*
5700 		 * The 82574 has both ITR and EITR. Set EITR when we use
5701 		 * the multiqueue function with MSI-X.
5702 */
5703 CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
5704 wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
5705 } else {
5706 KASSERT(wmq->wmq_id == 0);
5707 CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
5708 }
5709
5710 wmq->wmq_set_itr = false;
5711 }
5712
5713 /*
5714 * TODO
5715  * The dynamic ITR calculation below is almost the same as Linux igb's,
5716  * but it does not fit wm(4) well, so AIM is kept disabled
5717  * until we find an appropriate ITR calculation.
5718 */
5719 /*
5720  * Calculate the interrupt interval value to be written to the register
5721  * in wm_itrs_writereg(). This function does not write the ITR/EITR register.
5722 */
5723 static void
5724 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
5725 {
5726 #ifdef NOTYET
5727 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
5728 struct wm_txqueue *txq = &wmq->wmq_txq;
5729 uint32_t avg_size = 0;
5730 uint32_t new_itr;
5731
5732 if (rxq->rxq_packets)
5733 avg_size = rxq->rxq_bytes / rxq->rxq_packets;
5734 if (txq->txq_packets)
5735 avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
5736
5737 if (avg_size == 0) {
5738 new_itr = 450; /* restore default value */
5739 goto out;
5740 }
5741
5742 /* Add 24 bytes to size to account for CRC, preamble, and gap */
5743 avg_size += 24;
5744
5745 /* Don't starve jumbo frames */
5746 avg_size = uimin(avg_size, 3000);
5747
5748 /* Give a little boost to mid-size frames */
5749 if ((avg_size > 300) && (avg_size < 1200))
5750 new_itr = avg_size / 3;
5751 else
5752 new_itr = avg_size / 2;
5753
5754 out:
5755 /*
5756 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
5757 * controllers. See sc->sc_itr_init setting in wm_init_locked().
5758 */
5759 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
5760 new_itr *= 4;
5761
5762 if (new_itr != wmq->wmq_itr) {
5763 wmq->wmq_itr = new_itr;
5764 wmq->wmq_set_itr = true;
5765 } else
5766 wmq->wmq_set_itr = false;
5767
5768 rxq->rxq_packets = 0;
5769 rxq->rxq_bytes = 0;
5770 txq->txq_packets = 0;
5771 txq->txq_bytes = 0;
5772 #endif
5773 }
5774
5775 static void
5776 wm_init_sysctls(struct wm_softc *sc)
5777 {
5778 struct sysctllog **log;
5779 const struct sysctlnode *rnode, *cnode;
5780 int rv;
5781 const char *dvname;
5782
5783 log = &sc->sc_sysctllog;
5784 dvname = device_xname(sc->sc_dev);
5785
5786 rv = sysctl_createv(log, 0, NULL, &rnode,
5787 0, CTLTYPE_NODE, dvname,
5788 SYSCTL_DESCR("wm information and settings"),
5789 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
5790 if (rv != 0)
5791 goto err;
5792
5793 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
5794 CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
5795 NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
5796 if (rv != 0)
5797 goto teardown;
5798
5799 return;
5800
5801 teardown:
5802 sysctl_teardown(log);
5803 err:
5804 sc->sc_sysctllog = NULL;
5805 device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
5806 __func__, rv);
5807 }
5808
5809 /*
5810 * wm_init: [ifnet interface function]
5811 *
5812 * Initialize the interface.
5813 */
5814 static int
5815 wm_init(struct ifnet *ifp)
5816 {
5817 struct wm_softc *sc = ifp->if_softc;
5818 int ret;
5819
5820 WM_CORE_LOCK(sc);
5821 ret = wm_init_locked(ifp);
5822 WM_CORE_UNLOCK(sc);
5823
5824 return ret;
5825 }
5826
5827 static int
5828 wm_init_locked(struct ifnet *ifp)
5829 {
5830 struct wm_softc *sc = ifp->if_softc;
5831 struct ethercom *ec = &sc->sc_ethercom;
5832 int i, j, trynum, error = 0;
5833 uint32_t reg, sfp_mask = 0;
5834
5835 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5836 device_xname(sc->sc_dev), __func__));
5837 KASSERT(WM_CORE_LOCKED(sc));
5838
5839 /*
5840 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
5841 	 * There is a small but measurable benefit to avoiding the adjustment
5842 * of the descriptor so that the headers are aligned, for normal mtu,
5843 * on such platforms. One possibility is that the DMA itself is
5844 * slightly more efficient if the front of the entire packet (instead
5845 * of the front of the headers) is aligned.
5846 *
5847 * Note we must always set align_tweak to 0 if we are using
5848 * jumbo frames.
5849 */
5850 #ifdef __NO_STRICT_ALIGNMENT
5851 sc->sc_align_tweak = 0;
5852 #else
5853 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
5854 sc->sc_align_tweak = 0;
5855 else
5856 sc->sc_align_tweak = 2;
5857 #endif /* __NO_STRICT_ALIGNMENT */
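/*
 * For example (illustrative, assuming MCLBYTES == 2048): with the
 * default MTU of 1500, 1500 + ETHER_HDR_LEN + ETHER_CRC_LEN = 1518 is
 * not greater than MCLBYTES - 2, so sc_align_tweak is 2 on
 * strict-alignment platforms; with a jumbo MTU the sum exceeds
 * MCLBYTES - 2 and the tweak must be 0.
 */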
5858
5859 /* Cancel any pending I/O. */
5860 wm_stop_locked(ifp, false, false);
5861
5862 /* Update statistics before reset */
5863 if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
5864 if_ierrors, CSR_READ(sc, WMREG_RXERRC));
5865
5866 /* PCH_SPT hardware workaround */
5867 if (sc->sc_type == WM_T_PCH_SPT)
5868 wm_flush_desc_rings(sc);
5869
5870 /* Reset the chip to a known state. */
5871 wm_reset(sc);
5872
5873 /*
5874 * AMT based hardware can now take control from firmware
5875 * Do this after reset.
5876 */
5877 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
5878 wm_get_hw_control(sc);
5879
5880 if ((sc->sc_type >= WM_T_PCH_SPT) &&
5881 pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
5882 wm_legacy_irq_quirk_spt(sc);
5883
5884 /* Init hardware bits */
5885 wm_initialize_hardware_bits(sc);
5886
5887 /* Reset the PHY. */
5888 if (sc->sc_flags & WM_F_HAS_MII)
5889 wm_gmii_reset(sc);
5890
5891 if (sc->sc_type >= WM_T_ICH8) {
5892 reg = CSR_READ(sc, WMREG_GCR);
5893 /*
5894 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
5895 * default after reset.
5896 */
5897 if (sc->sc_type == WM_T_ICH8)
5898 reg |= GCR_NO_SNOOP_ALL;
5899 else
5900 reg &= ~GCR_NO_SNOOP_ALL;
5901 CSR_WRITE(sc, WMREG_GCR, reg);
5902 }
5903 if ((sc->sc_type >= WM_T_ICH8)
5904 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
5905 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
5906
5907 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5908 reg |= CTRL_EXT_RO_DIS;
5909 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5910 }
5911
5912 /* Calculate (E)ITR value */
5913 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
5914 /*
5915 		 * For NEWQUEUE's EITR (except for the 82575).
5916 		 * The 82575's EITR should be set to the same throttling value as
5917 		 * other old controllers' ITR because the interrupts/sec calculation
5918 		 * is the same, that is, 1,000,000,000 / (N * 256).
5919 		 *
5920 		 * The 82574's EITR should be set to the same throttling value as ITR.
5921 		 *
5922 		 * For N interrupts/sec, set this value to:
5923 		 * 1,000,000 / N, in contrast to the ITR throttling value.
5924 */
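/*
 * For example (illustrative only): the value 450 used below
 * corresponds to roughly 1,000,000 / 450 ~= 2222 interrupts/sec by the
 * formula above.
 */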
5925 sc->sc_itr_init = 450;
5926 } else if (sc->sc_type >= WM_T_82543) {
5927 /*
5928 * Set up the interrupt throttling register (units of 256ns)
5929 * Note that a footnote in Intel's documentation says this
5930 * ticker runs at 1/4 the rate when the chip is in 100Mbit
5931 		 * or 10Mbit mode. Empirically, this also appears to be true
5932 		 * for the 1024ns units of the other
5933 * interrupt-related timer registers -- so, really, we ought
5934 * to divide this value by 4 when the link speed is low.
5935 *
5936 * XXX implement this division at link speed change!
5937 */
5938
5939 /*
5940 * For N interrupts/sec, set this value to:
5941 * 1,000,000,000 / (N * 256). Note that we set the
5942 * absolute and packet timer values to this value
5943 * divided by 4 to get "simple timer" behavior.
5944 */
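/*
 * For example (illustrative only): the value 1500 used below
 * corresponds to 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec,
 * which is the figure noted on the assignment.
 */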
5945 sc->sc_itr_init = 1500; /* 2604 ints/sec */
5946 }
5947
5948 error = wm_init_txrx_queues(sc);
5949 if (error)
5950 goto out;
5951
5952 if (((sc->sc_flags & WM_F_SGMII) == 0) &&
5953 (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
5954 (sc->sc_type >= WM_T_82575))
5955 wm_serdes_power_up_link_82575(sc);
5956
5957 /* Clear out the VLAN table -- we don't use it (yet). */
5958 CSR_WRITE(sc, WMREG_VET, 0);
5959 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
5960 trynum = 10; /* Due to hw errata */
5961 else
5962 trynum = 1;
5963 for (i = 0; i < WM_VLAN_TABSIZE; i++)
5964 for (j = 0; j < trynum; j++)
5965 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
5966
5967 /*
5968 * Set up flow-control parameters.
5969 *
5970 * XXX Values could probably stand some tuning.
5971 */
5972 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
5973 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
5974 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
5975 && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
5976 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
5977 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
5978 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
5979 }
5980
5981 sc->sc_fcrtl = FCRTL_DFLT;
5982 if (sc->sc_type < WM_T_82543) {
5983 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
5984 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
5985 } else {
5986 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
5987 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
5988 }
5989
5990 if (sc->sc_type == WM_T_80003)
5991 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
5992 else
5993 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
5994
5995 /* Writes the control register. */
5996 wm_set_vlan(sc);
5997
5998 if (sc->sc_flags & WM_F_HAS_MII) {
5999 uint16_t kmreg;
6000
6001 switch (sc->sc_type) {
6002 case WM_T_80003:
6003 case WM_T_ICH8:
6004 case WM_T_ICH9:
6005 case WM_T_ICH10:
6006 case WM_T_PCH:
6007 case WM_T_PCH2:
6008 case WM_T_PCH_LPT:
6009 case WM_T_PCH_SPT:
6010 case WM_T_PCH_CNP:
6011 /*
6012 * Set the mac to wait the maximum time between each
6013 * iteration and increase the max iterations when
6014 * polling the phy; this fixes erroneous timeouts at
6015 * 10Mbps.
6016 */
6017 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
6018 0xFFFF);
6019 wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6020 &kmreg);
6021 kmreg |= 0x3F;
6022 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6023 kmreg);
6024 break;
6025 default:
6026 break;
6027 }
6028
6029 if (sc->sc_type == WM_T_80003) {
6030 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6031 reg &= ~CTRL_EXT_LINK_MODE_MASK;
6032 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6033
6034 /* Bypass RX and TX FIFO's */
6035 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
6036 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
6037 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
6038 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
6039 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
6040 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
6041 }
6042 }
6043 #if 0
6044 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
6045 #endif
6046
6047 /* Set up checksum offload parameters. */
6048 reg = CSR_READ(sc, WMREG_RXCSUM);
6049 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
6050 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
6051 reg |= RXCSUM_IPOFL;
6052 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
6053 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
6054 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
6055 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
6056 CSR_WRITE(sc, WMREG_RXCSUM, reg);
6057
6058 /* Set registers about MSI-X */
6059 if (wm_is_using_msix(sc)) {
6060 uint32_t ivar, qintr_idx;
6061 struct wm_queue *wmq;
6062 unsigned int qid;
6063
6064 if (sc->sc_type == WM_T_82575) {
6065 /* Interrupt control */
6066 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6067 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
6068 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6069
6070 /* TX and RX */
6071 for (i = 0; i < sc->sc_nqueues; i++) {
6072 wmq = &sc->sc_queue[i];
6073 CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
6074 EITR_TX_QUEUE(wmq->wmq_id)
6075 | EITR_RX_QUEUE(wmq->wmq_id));
6076 }
6077 /* Link status */
6078 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
6079 EITR_OTHER);
6080 } else if (sc->sc_type == WM_T_82574) {
6081 /* Interrupt control */
6082 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6083 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
6084 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6085
6086 /*
6087 			 * Work around an issue with spurious interrupts
6088 			 * in MSI-X mode.
6089 			 * At wm_initialize_hardware_bits() time, sc_nintrs has not
6090 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
6091 */
6092 reg = CSR_READ(sc, WMREG_RFCTL);
6093 reg |= WMREG_RFCTL_ACKDIS;
6094 CSR_WRITE(sc, WMREG_RFCTL, reg);
6095
6096 ivar = 0;
6097 /* TX and RX */
6098 for (i = 0; i < sc->sc_nqueues; i++) {
6099 wmq = &sc->sc_queue[i];
6100 qid = wmq->wmq_id;
6101 qintr_idx = wmq->wmq_intr_idx;
6102
6103 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6104 IVAR_TX_MASK_Q_82574(qid));
6105 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6106 IVAR_RX_MASK_Q_82574(qid));
6107 }
6108 /* Link status */
6109 ivar |= __SHIFTIN((IVAR_VALID_82574
6110 | sc->sc_link_intr_idx), IVAR_OTHER_MASK);
6111 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
6112 } else {
6113 /* Interrupt control */
6114 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
6115 | GPIE_EIAME | GPIE_PBA);
6116
6117 switch (sc->sc_type) {
6118 case WM_T_82580:
6119 case WM_T_I350:
6120 case WM_T_I354:
6121 case WM_T_I210:
6122 case WM_T_I211:
6123 /* TX and RX */
6124 for (i = 0; i < sc->sc_nqueues; i++) {
6125 wmq = &sc->sc_queue[i];
6126 qid = wmq->wmq_id;
6127 qintr_idx = wmq->wmq_intr_idx;
6128
6129 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
6130 ivar &= ~IVAR_TX_MASK_Q(qid);
6131 ivar |= __SHIFTIN((qintr_idx
6132 | IVAR_VALID),
6133 IVAR_TX_MASK_Q(qid));
6134 ivar &= ~IVAR_RX_MASK_Q(qid);
6135 ivar |= __SHIFTIN((qintr_idx
6136 | IVAR_VALID),
6137 IVAR_RX_MASK_Q(qid));
6138 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
6139 }
6140 break;
6141 case WM_T_82576:
6142 /* TX and RX */
6143 for (i = 0; i < sc->sc_nqueues; i++) {
6144 wmq = &sc->sc_queue[i];
6145 qid = wmq->wmq_id;
6146 qintr_idx = wmq->wmq_intr_idx;
6147
6148 ivar = CSR_READ(sc,
6149 WMREG_IVAR_Q_82576(qid));
6150 ivar &= ~IVAR_TX_MASK_Q_82576(qid);
6151 ivar |= __SHIFTIN((qintr_idx
6152 | IVAR_VALID),
6153 IVAR_TX_MASK_Q_82576(qid));
6154 ivar &= ~IVAR_RX_MASK_Q_82576(qid);
6155 ivar |= __SHIFTIN((qintr_idx
6156 | IVAR_VALID),
6157 IVAR_RX_MASK_Q_82576(qid));
6158 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
6159 ivar);
6160 }
6161 break;
6162 default:
6163 break;
6164 }
6165
6166 /* Link status */
6167 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
6168 IVAR_MISC_OTHER);
6169 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
6170 }
6171
6172 if (wm_is_using_multiqueue(sc)) {
6173 wm_init_rss(sc);
6174
6175 /*
6176 			 * NOTE: Receive Full-Packet Checksum Offload
6177 			 * is mutually exclusive with Multiqueue. However,
6178 			 * this is not the same as TCP/IP checksums, which
6179 			 * still work.
6180 */
6181 reg = CSR_READ(sc, WMREG_RXCSUM);
6182 reg |= RXCSUM_PCSD;
6183 CSR_WRITE(sc, WMREG_RXCSUM, reg);
6184 }
6185 }
6186
6187 /* Set up the interrupt registers. */
6188 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6189
6190 /* Enable SFP module insertion interrupt if it's required */
6191 if ((sc->sc_flags & WM_F_SFP) != 0) {
6192 sc->sc_ctrl |= CTRL_EXTLINK_EN;
6193 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6194 sfp_mask = ICR_GPI(0);
6195 }
6196
6197 if (wm_is_using_msix(sc)) {
6198 uint32_t mask;
6199 struct wm_queue *wmq;
6200
6201 switch (sc->sc_type) {
6202 case WM_T_82574:
6203 mask = 0;
6204 for (i = 0; i < sc->sc_nqueues; i++) {
6205 wmq = &sc->sc_queue[i];
6206 mask |= ICR_TXQ(wmq->wmq_id);
6207 mask |= ICR_RXQ(wmq->wmq_id);
6208 }
6209 mask |= ICR_OTHER;
6210 CSR_WRITE(sc, WMREG_EIAC_82574, mask);
6211 CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
6212 break;
6213 default:
6214 if (sc->sc_type == WM_T_82575) {
6215 mask = 0;
6216 for (i = 0; i < sc->sc_nqueues; i++) {
6217 wmq = &sc->sc_queue[i];
6218 mask |= EITR_TX_QUEUE(wmq->wmq_id);
6219 mask |= EITR_RX_QUEUE(wmq->wmq_id);
6220 }
6221 mask |= EITR_OTHER;
6222 } else {
6223 mask = 0;
6224 for (i = 0; i < sc->sc_nqueues; i++) {
6225 wmq = &sc->sc_queue[i];
6226 mask |= 1 << wmq->wmq_intr_idx;
6227 }
6228 mask |= 1 << sc->sc_link_intr_idx;
6229 }
6230 CSR_WRITE(sc, WMREG_EIAC, mask);
6231 CSR_WRITE(sc, WMREG_EIAM, mask);
6232 CSR_WRITE(sc, WMREG_EIMS, mask);
6233
6234 /* For other interrupts */
6235 CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
6236 break;
6237 }
6238 } else {
6239 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
6240 ICR_RXO | ICR_RXT0 | sfp_mask;
6241 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
6242 }
6243
6244 /* Set up the inter-packet gap. */
6245 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6246
6247 if (sc->sc_type >= WM_T_82543) {
6248 for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6249 struct wm_queue *wmq = &sc->sc_queue[qidx];
6250 wm_itrs_writereg(sc, wmq);
6251 }
6252 /*
6253 * Link interrupts occur much less than TX
6254 * interrupts and RX interrupts. So, we don't
6255 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
6256 * FreeBSD's if_igb.
6257 */
6258 }
6259
6260 /* Set the VLAN ethernetype. */
6261 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
6262
6263 /*
6264 * Set up the transmit control register; we start out with
6265 	 * a collision distance suitable for FDX, but update it when
6266 * we resolve the media type.
6267 */
6268 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
6269 | TCTL_CT(TX_COLLISION_THRESHOLD)
6270 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6271 if (sc->sc_type >= WM_T_82571)
6272 sc->sc_tctl |= TCTL_MULR;
6273 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6274
6275 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6276 /* Write TDT after TCTL.EN is set. See the document. */
6277 CSR_WRITE(sc, WMREG_TDT(0), 0);
6278 }
6279
6280 if (sc->sc_type == WM_T_80003) {
6281 reg = CSR_READ(sc, WMREG_TCTL_EXT);
6282 reg &= ~TCTL_EXT_GCEX_MASK;
6283 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
6284 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
6285 }
6286
6287 /* Set the media. */
6288 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
6289 goto out;
6290
6291 /* Configure for OS presence */
6292 wm_init_manageability(sc);
6293
6294 /*
6295 * Set up the receive control register; we actually program the
6296 * register when we set the receive filter. Use multicast address
6297 * offset type 0.
6298 *
6299 * Only the i82544 has the ability to strip the incoming CRC, so we
6300 * don't enable that feature.
6301 */
6302 sc->sc_mchash_type = 0;
6303 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
6304 | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
6305
6306 /* 82574 use one buffer extended Rx descriptor. */
6307 if (sc->sc_type == WM_T_82574)
6308 sc->sc_rctl |= RCTL_DTYP_ONEBUF;
6309
6310 /*
6311 * The I350 has a bug where it always strips the CRC whether
6312 	 * asked to or not, so ask for stripped CRC here and cope with it in rxeof.
6313 */
6314 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6315 || (sc->sc_type == WM_T_I210))
6316 sc->sc_rctl |= RCTL_SECRC;
6317
6318 if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
6319 && (ifp->if_mtu > ETHERMTU)) {
6320 sc->sc_rctl |= RCTL_LPE;
6321 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6322 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
6323 }
6324
6325 if (MCLBYTES == 2048)
6326 sc->sc_rctl |= RCTL_2k;
6327 else {
6328 if (sc->sc_type >= WM_T_82543) {
6329 switch (MCLBYTES) {
6330 case 4096:
6331 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
6332 break;
6333 case 8192:
6334 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
6335 break;
6336 case 16384:
6337 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
6338 break;
6339 default:
6340 panic("wm_init: MCLBYTES %d unsupported",
6341 MCLBYTES);
6342 break;
6343 }
6344 } else
6345 panic("wm_init: i82542 requires MCLBYTES = 2048");
6346 }
6347
6348 /* Enable ECC */
6349 switch (sc->sc_type) {
6350 case WM_T_82571:
6351 reg = CSR_READ(sc, WMREG_PBA_ECC);
6352 reg |= PBA_ECC_CORR_EN;
6353 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
6354 break;
6355 case WM_T_PCH_LPT:
6356 case WM_T_PCH_SPT:
6357 case WM_T_PCH_CNP:
6358 reg = CSR_READ(sc, WMREG_PBECCSTS);
6359 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
6360 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
6361
6362 sc->sc_ctrl |= CTRL_MEHE;
6363 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6364 break;
6365 default:
6366 break;
6367 }
6368
6369 /*
6370 * Set the receive filter.
6371 *
6372 * For 82575 and 82576, the RX descriptors must be initialized after
6373 * the setting of RCTL.EN in wm_set_filter()
6374 */
6375 wm_set_filter(sc);
6376
6377 /* On 575 and later set RDT only if RX enabled */
6378 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6379 int qidx;
6380 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6381 struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
6382 for (i = 0; i < WM_NRXDESC; i++) {
6383 mutex_enter(rxq->rxq_lock);
6384 wm_init_rxdesc(rxq, i);
6385 mutex_exit(rxq->rxq_lock);
6386
6387 }
6388 }
6389 }
6390
6391 wm_unset_stopping_flags(sc);
6392
6393 /* Start the one second link check clock. */
6394 callout_schedule(&sc->sc_tick_ch, hz);
6395
6396 /* ...all done! */
6397 ifp->if_flags |= IFF_RUNNING;
6398
6399 out:
6400 /* Save last flags for the callback */
6401 sc->sc_if_flags = ifp->if_flags;
6402 sc->sc_ec_capenable = ec->ec_capenable;
6403 if (error)
6404 log(LOG_ERR, "%s: interface not running\n",
6405 device_xname(sc->sc_dev));
6406 return error;
6407 }
6408
6409 /*
6410 * wm_stop: [ifnet interface function]
6411 *
6412 * Stop transmission on the interface.
6413 */
6414 static void
6415 wm_stop(struct ifnet *ifp, int disable)
6416 {
6417 struct wm_softc *sc = ifp->if_softc;
6418
6419 ASSERT_SLEEPABLE();
6420
6421 WM_CORE_LOCK(sc);
6422 wm_stop_locked(ifp, disable ? true : false, true);
6423 WM_CORE_UNLOCK(sc);
6424
6425 /*
6426 	 * After wm_set_stopping_flags(), it is guaranteed that
6427 	 * wm_handle_queue_work() does not call workqueue_enqueue().
6428 	 * However, workqueue_wait() cannot be called in wm_stop_locked()
6429 	 * because it can sleep,
6430 	 * so call workqueue_wait() here.
6431 */
6432 for (int i = 0; i < sc->sc_nqueues; i++)
6433 workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
6434 }
6435
6436 static void
6437 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
6438 {
6439 struct wm_softc *sc = ifp->if_softc;
6440 struct wm_txsoft *txs;
6441 int i, qidx;
6442
6443 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6444 device_xname(sc->sc_dev), __func__));
6445 KASSERT(WM_CORE_LOCKED(sc));
6446
6447 wm_set_stopping_flags(sc);
6448
6449 if (sc->sc_flags & WM_F_HAS_MII) {
6450 /* Down the MII. */
6451 mii_down(&sc->sc_mii);
6452 } else {
6453 #if 0
6454 /* Should we clear PHY's status properly? */
6455 wm_reset(sc);
6456 #endif
6457 }
6458
6459 /* Stop the transmit and receive processes. */
6460 CSR_WRITE(sc, WMREG_TCTL, 0);
6461 CSR_WRITE(sc, WMREG_RCTL, 0);
6462 sc->sc_rctl &= ~RCTL_EN;
6463
6464 /*
6465 * Clear the interrupt mask to ensure the device cannot assert its
6466 * interrupt line.
6467 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
6468 * service any currently pending or shared interrupt.
6469 */
6470 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6471 sc->sc_icr = 0;
6472 if (wm_is_using_msix(sc)) {
6473 if (sc->sc_type != WM_T_82574) {
6474 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
6475 CSR_WRITE(sc, WMREG_EIAC, 0);
6476 } else
6477 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
6478 }
6479
6480 /*
6481 * Stop callouts after interrupts are disabled; if we have
6482 * to wait for them, we will be releasing the CORE_LOCK
6483 * briefly, which will unblock interrupts on the current CPU.
6484 */
6485
6486 /* Stop the one second clock. */
6487 if (wait)
6488 callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
6489 else
6490 callout_stop(&sc->sc_tick_ch);
6491
6492 /* Stop the 82547 Tx FIFO stall check timer. */
6493 if (sc->sc_type == WM_T_82547) {
6494 if (wait)
6495 callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
6496 else
6497 callout_stop(&sc->sc_txfifo_ch);
6498 }
6499
6500 /* Release any queued transmit buffers. */
6501 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6502 struct wm_queue *wmq = &sc->sc_queue[qidx];
6503 struct wm_txqueue *txq = &wmq->wmq_txq;
6504 mutex_enter(txq->txq_lock);
6505 txq->txq_sending = false; /* Ensure watchdog disabled */
6506 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6507 txs = &txq->txq_soft[i];
6508 if (txs->txs_mbuf != NULL) {
6509 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
6510 m_freem(txs->txs_mbuf);
6511 txs->txs_mbuf = NULL;
6512 }
6513 }
6514 mutex_exit(txq->txq_lock);
6515 }
6516
6517 /* Mark the interface as down and cancel the watchdog timer. */
6518 ifp->if_flags &= ~IFF_RUNNING;
6519
6520 if (disable) {
6521 for (i = 0; i < sc->sc_nqueues; i++) {
6522 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6523 mutex_enter(rxq->rxq_lock);
6524 wm_rxdrain(rxq);
6525 mutex_exit(rxq->rxq_lock);
6526 }
6527 }
6528
6529 #if 0 /* notyet */
6530 if (sc->sc_type >= WM_T_82544)
6531 CSR_WRITE(sc, WMREG_WUC, 0);
6532 #endif
6533 }
6534
6535 static void
6536 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
6537 {
6538 struct mbuf *m;
6539 int i;
6540
6541 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
6542 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
6543 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
6544 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
6545 m->m_data, m->m_len, m->m_flags);
6546 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
6547 i, i == 1 ? "" : "s");
6548 }
6549
6550 /*
6551 * wm_82547_txfifo_stall:
6552 *
6553 * Callout used to wait for the 82547 Tx FIFO to drain,
6554 * reset the FIFO pointers, and restart packet transmission.
6555 */
6556 static void
6557 wm_82547_txfifo_stall(void *arg)
6558 {
6559 struct wm_softc *sc = arg;
6560 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6561
6562 mutex_enter(txq->txq_lock);
6563
6564 if (txq->txq_stopping)
6565 goto out;
6566
6567 if (txq->txq_fifo_stall) {
6568 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
6569 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
6570 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
6571 /*
6572 * Packets have drained. Stop transmitter, reset
6573 * FIFO pointers, restart transmitter, and kick
6574 * the packet queue.
6575 */
6576 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
6577 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
6578 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
6579 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
6580 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
6581 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
6582 CSR_WRITE(sc, WMREG_TCTL, tctl);
6583 CSR_WRITE_FLUSH(sc);
6584
6585 txq->txq_fifo_head = 0;
6586 txq->txq_fifo_stall = 0;
6587 wm_start_locked(&sc->sc_ethercom.ec_if);
6588 } else {
6589 /*
6590 * Still waiting for packets to drain; try again in
6591 * another tick.
6592 */
6593 callout_schedule(&sc->sc_txfifo_ch, 1);
6594 }
6595 }
6596
6597 out:
6598 mutex_exit(txq->txq_lock);
6599 }
6600
6601 /*
6602 * wm_82547_txfifo_bugchk:
6603 *
6604  *	Check for a bug condition in the 82547 Tx FIFO. We need to
6605  *	prevent enqueueing a packet that would wrap around the end
6606  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
6607 *
6608 * We do this by checking the amount of space before the end
6609 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
6610 * the Tx FIFO, wait for all remaining packets to drain, reset
6611 * the internal FIFO pointers to the beginning, and restart
6612 * transmission on the interface.
6613 */
6614 #define WM_FIFO_HDR 0x10
6615 #define WM_82547_PAD_LEN 0x3e0
6616 static int
6617 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
6618 {
6619 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6620 int space = txq->txq_fifo_size - txq->txq_fifo_head;
6621 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
6622
6623 /* Just return if already stalled. */
6624 if (txq->txq_fifo_stall)
6625 return 1;
6626
6627 if (sc->sc_mii.mii_media_active & IFM_FDX) {
6628 /* Stall only occurs in half-duplex mode. */
6629 goto send_packet;
6630 }
6631
6632 if (len >= WM_82547_PAD_LEN + space) {
6633 txq->txq_fifo_stall = 1;
6634 callout_schedule(&sc->sc_txfifo_ch, 1);
6635 return 1;
6636 }
6637
6638 send_packet:
6639 txq->txq_fifo_head += len;
6640 if (txq->txq_fifo_head >= txq->txq_fifo_size)
6641 txq->txq_fifo_head -= txq->txq_fifo_size;
6642
6643 return 0;
6644 }
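/*
 * Worked example (illustrative only): for a 1514-byte packet,
 * len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 0x600 bytes. If only
 * space = 0x180 bytes remain before the end of the FIFO, then
 * len >= WM_82547_PAD_LEN + space (0x600 >= 0x3e0 + 0x180 = 0x560), so
 * the packet is held back and the FIFO is stalled until
 * wm_82547_txfifo_stall() resets the FIFO pointers.
 */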
6645
6646 static int
6647 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6648 {
6649 int error;
6650
6651 /*
6652 * Allocate the control data structures, and create and load the
6653 * DMA map for it.
6654 *
6655 * NOTE: All Tx descriptors must be in the same 4G segment of
6656 * memory. So must Rx descriptors. We simplify by allocating
6657 * both sets within the same 4G segment.
6658 */
6659 if (sc->sc_type < WM_T_82544)
6660 WM_NTXDESC(txq) = WM_NTXDESC_82542;
6661 else
6662 WM_NTXDESC(txq) = WM_NTXDESC_82544;
6663 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6664 txq->txq_descsize = sizeof(nq_txdesc_t);
6665 else
6666 txq->txq_descsize = sizeof(wiseman_txdesc_t);
6667
6668 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
6669 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
6670 1, &txq->txq_desc_rseg, 0)) != 0) {
6671 aprint_error_dev(sc->sc_dev,
6672 "unable to allocate TX control data, error = %d\n",
6673 error);
6674 goto fail_0;
6675 }
6676
6677 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
6678 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
6679 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
6680 aprint_error_dev(sc->sc_dev,
6681 "unable to map TX control data, error = %d\n", error);
6682 goto fail_1;
6683 }
6684
6685 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
6686 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
6687 aprint_error_dev(sc->sc_dev,
6688 "unable to create TX control data DMA map, error = %d\n",
6689 error);
6690 goto fail_2;
6691 }
6692
6693 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
6694 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
6695 aprint_error_dev(sc->sc_dev,
6696 "unable to load TX control data DMA map, error = %d\n",
6697 error);
6698 goto fail_3;
6699 }
6700
6701 return 0;
6702
6703 fail_3:
6704 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6705 fail_2:
6706 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6707 WM_TXDESCS_SIZE(txq));
6708 fail_1:
6709 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
6710 fail_0:
6711 return error;
6712 }
6713
6714 static void
6715 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6716 {
6717
6718 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
6719 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6720 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6721 WM_TXDESCS_SIZE(txq));
6722 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
6723 }
6724
6725 static int
6726 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
6727 {
6728 int error;
6729 size_t rxq_descs_size;
6730
6731 /*
6732 * Allocate the control data structures, and create and load the
6733 * DMA map for it.
6734 *
6735 * NOTE: All Tx descriptors must be in the same 4G segment of
6736 * memory. So must Rx descriptors. We simplify by allocating
6737 * both sets within the same 4G segment.
6738 */
6739 rxq->rxq_ndesc = WM_NRXDESC;
6740 if (sc->sc_type == WM_T_82574)
6741 rxq->rxq_descsize = sizeof(ext_rxdesc_t);
6742 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6743 rxq->rxq_descsize = sizeof(nq_rxdesc_t);
6744 else
6745 rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
6746 rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
6747
6748 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
6749 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
6750 1, &rxq->rxq_desc_rseg, 0)) != 0) {
6751 aprint_error_dev(sc->sc_dev,
6752 "unable to allocate RX control data, error = %d\n",
6753 error);
6754 goto fail_0;
6755 }
6756
6757 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
6758 rxq->rxq_desc_rseg, rxq_descs_size,
6759 (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
6760 aprint_error_dev(sc->sc_dev,
6761 "unable to map RX control data, error = %d\n", error);
6762 goto fail_1;
6763 }
6764
6765 if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
6766 rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
6767 aprint_error_dev(sc->sc_dev,
6768 "unable to create RX control data DMA map, error = %d\n",
6769 error);
6770 goto fail_2;
6771 }
6772
6773 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
6774 rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
6775 aprint_error_dev(sc->sc_dev,
6776 "unable to load RX control data DMA map, error = %d\n",
6777 error);
6778 goto fail_3;
6779 }
6780
6781 return 0;
6782
6783 fail_3:
6784 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
6785 fail_2:
6786 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
6787 rxq_descs_size);
6788 fail_1:
6789 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
6790 fail_0:
6791 return error;
6792 }
6793
6794 static void
6795 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
6796 {
6797
6798 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
6799 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
6800 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
6801 rxq->rxq_descsize * rxq->rxq_ndesc);
6802 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
6803 }
6804
6805
6806 static int
6807 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
6808 {
6809 int i, error;
6810
6811 /* Create the transmit buffer DMA maps. */
6812 WM_TXQUEUELEN(txq) =
6813 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
6814 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
6815 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6816 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
6817 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
6818 &txq->txq_soft[i].txs_dmamap)) != 0) {
6819 aprint_error_dev(sc->sc_dev,
6820 "unable to create Tx DMA map %d, error = %d\n",
6821 i, error);
6822 goto fail;
6823 }
6824 }
6825
6826 return 0;
6827
6828 fail:
6829 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6830 if (txq->txq_soft[i].txs_dmamap != NULL)
6831 bus_dmamap_destroy(sc->sc_dmat,
6832 txq->txq_soft[i].txs_dmamap);
6833 }
6834 return error;
6835 }
6836
6837 static void
6838 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
6839 {
6840 int i;
6841
6842 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6843 if (txq->txq_soft[i].txs_dmamap != NULL)
6844 bus_dmamap_destroy(sc->sc_dmat,
6845 txq->txq_soft[i].txs_dmamap);
6846 }
6847 }
6848
6849 static int
6850 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6851 {
6852 int i, error;
6853
6854 /* Create the receive buffer DMA maps. */
6855 for (i = 0; i < rxq->rxq_ndesc; i++) {
6856 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
6857 MCLBYTES, 0, 0,
6858 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
6859 aprint_error_dev(sc->sc_dev,
6860 			    "unable to create Rx DMA map %d, error = %d\n",
6861 i, error);
6862 goto fail;
6863 }
6864 rxq->rxq_soft[i].rxs_mbuf = NULL;
6865 }
6866
6867 return 0;
6868
6869 fail:
6870 for (i = 0; i < rxq->rxq_ndesc; i++) {
6871 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
6872 bus_dmamap_destroy(sc->sc_dmat,
6873 rxq->rxq_soft[i].rxs_dmamap);
6874 }
6875 return error;
6876 }
6877
6878 static void
6879 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6880 {
6881 int i;
6882
6883 for (i = 0; i < rxq->rxq_ndesc; i++) {
6884 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
6885 bus_dmamap_destroy(sc->sc_dmat,
6886 rxq->rxq_soft[i].rxs_dmamap);
6887 }
6888 }
6889
6890 /*
6891  * wm_alloc_txrx_queues:
6892  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
6893 */
6894 static int
6895 wm_alloc_txrx_queues(struct wm_softc *sc)
6896 {
6897 int i, error, tx_done, rx_done;
6898
6899 sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
6900 KM_SLEEP);
6901 if (sc->sc_queue == NULL) {
6902 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
6903 error = ENOMEM;
6904 goto fail_0;
6905 }
6906
6907 /* For transmission */
6908 error = 0;
6909 tx_done = 0;
6910 for (i = 0; i < sc->sc_nqueues; i++) {
6911 #ifdef WM_EVENT_COUNTERS
6912 int j;
6913 const char *xname;
6914 #endif
6915 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6916 txq->txq_sc = sc;
6917 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
6918
6919 error = wm_alloc_tx_descs(sc, txq);
6920 if (error)
6921 break;
6922 error = wm_alloc_tx_buffer(sc, txq);
6923 if (error) {
6924 wm_free_tx_descs(sc, txq);
6925 break;
6926 }
6927 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
6928 if (txq->txq_interq == NULL) {
6929 wm_free_tx_descs(sc, txq);
6930 wm_free_tx_buffer(sc, txq);
6931 error = ENOMEM;
6932 break;
6933 }
6934
6935 #ifdef WM_EVENT_COUNTERS
6936 xname = device_xname(sc->sc_dev);
6937
6938 WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
6939 WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
6940 WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
6941 WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
6942 WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
6943 WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
6944 WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
6945 WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
6946 WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
6947 WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
6948 WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
6949
6950 for (j = 0; j < WM_NTXSEGS; j++) {
6951 snprintf(txq->txq_txseg_evcnt_names[j],
6952 sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
6953 evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
6954 NULL, xname, txq->txq_txseg_evcnt_names[j]);
6955 }
6956
6957 WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
6958 WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
6959 WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
6960 WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
6961 WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
6962 WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
6963 #endif /* WM_EVENT_COUNTERS */
6964
6965 tx_done++;
6966 }
6967 if (error)
6968 goto fail_1;
6969
6970 /* For receive */
6971 error = 0;
6972 rx_done = 0;
6973 for (i = 0; i < sc->sc_nqueues; i++) {
6974 #ifdef WM_EVENT_COUNTERS
6975 const char *xname;
6976 #endif
6977 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6978 rxq->rxq_sc = sc;
6979 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
6980
6981 error = wm_alloc_rx_descs(sc, rxq);
6982 if (error)
6983 break;
6984
6985 error = wm_alloc_rx_buffer(sc, rxq);
6986 if (error) {
6987 wm_free_rx_descs(sc, rxq);
6988 break;
6989 }
6990
6991 #ifdef WM_EVENT_COUNTERS
6992 xname = device_xname(sc->sc_dev);
6993
6994 WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
6995 WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
6996
6997 WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
6998 WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
6999 #endif /* WM_EVENT_COUNTERS */
7000
7001 rx_done++;
7002 }
7003 if (error)
7004 goto fail_2;
7005
7006 return 0;
7007
7008 fail_2:
7009 for (i = 0; i < rx_done; i++) {
7010 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7011 wm_free_rx_buffer(sc, rxq);
7012 wm_free_rx_descs(sc, rxq);
7013 if (rxq->rxq_lock)
7014 mutex_obj_free(rxq->rxq_lock);
7015 }
7016 fail_1:
7017 for (i = 0; i < tx_done; i++) {
7018 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7019 pcq_destroy(txq->txq_interq);
7020 wm_free_tx_buffer(sc, txq);
7021 wm_free_tx_descs(sc, txq);
7022 if (txq->txq_lock)
7023 mutex_obj_free(txq->txq_lock);
7024 }
7025
7026 kmem_free(sc->sc_queue,
7027 sizeof(struct wm_queue) * sc->sc_nqueues);
7028 fail_0:
7029 return error;
7030 }
7031
7032 /*
7033  * wm_free_txrx_queues:
7034  *	Free {tx,rx} descriptors and {tx,rx} buffers.
7035 */
7036 static void
7037 wm_free_txrx_queues(struct wm_softc *sc)
7038 {
7039 int i;
7040
7041 for (i = 0; i < sc->sc_nqueues; i++) {
7042 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7043
7044 #ifdef WM_EVENT_COUNTERS
7045 WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
7046 WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
7047 WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
7048 WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
7049 #endif /* WM_EVENT_COUNTERS */
7050
7051 wm_free_rx_buffer(sc, rxq);
7052 wm_free_rx_descs(sc, rxq);
7053 if (rxq->rxq_lock)
7054 mutex_obj_free(rxq->rxq_lock);
7055 }
7056
7057 for (i = 0; i < sc->sc_nqueues; i++) {
7058 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7059 struct mbuf *m;
7060 #ifdef WM_EVENT_COUNTERS
7061 int j;
7062
7063 WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
7064 WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
7065 WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
7066 WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
7067 WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
7068 WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
7069 WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
7070 WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
7071 WM_Q_EVCNT_DETACH(txq, tso, txq, i);
7072 WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
7073 WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
7074
7075 for (j = 0; j < WM_NTXSEGS; j++)
7076 evcnt_detach(&txq->txq_ev_txseg[j]);
7077
7078 WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
7079 WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
7080 WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
7081 WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
7082 WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
7083 WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
7084 #endif /* WM_EVENT_COUNTERS */
7085
7086 /* Drain txq_interq */
7087 while ((m = pcq_get(txq->txq_interq)) != NULL)
7088 m_freem(m);
7089 pcq_destroy(txq->txq_interq);
7090
7091 wm_free_tx_buffer(sc, txq);
7092 wm_free_tx_descs(sc, txq);
7093 if (txq->txq_lock)
7094 mutex_obj_free(txq->txq_lock);
7095 }
7096
7097 kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
7098 }
7099
7100 static void
7101 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7102 {
7103
7104 KASSERT(mutex_owned(txq->txq_lock));
7105
7106 /* Initialize the transmit descriptor ring. */
7107 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
7108 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
7109 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7110 txq->txq_free = WM_NTXDESC(txq);
7111 txq->txq_next = 0;
7112 }
7113
7114 static void
7115 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7116 struct wm_txqueue *txq)
7117 {
7118
7119 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
7120 device_xname(sc->sc_dev), __func__));
7121 KASSERT(mutex_owned(txq->txq_lock));
7122
7123 if (sc->sc_type < WM_T_82543) {
7124 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
7125 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
7126 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
7127 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
7128 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
7129 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
7130 } else {
7131 int qid = wmq->wmq_id;
7132
7133 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
7134 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
7135 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
7136 CSR_WRITE(sc, WMREG_TDH(qid), 0);
7137
7138 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7139 /*
7140 * Don't write TDT before TCTL.EN is set.
7141 * See the document.
7142 */
7143 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
7144 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
7145 | TXDCTL_WTHRESH(0));
7146 else {
7147 /* XXX should update with AIM? */
7148 CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
7149 if (sc->sc_type >= WM_T_82540) {
7150 /* Should be the same */
7151 CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
7152 }
7153
7154 CSR_WRITE(sc, WMREG_TDT(qid), 0);
7155 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
7156 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
7157 }
7158 }
7159 }
7160
7161 static void
7162 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7163 {
7164 int i;
7165
7166 KASSERT(mutex_owned(txq->txq_lock));
7167
7168 /* Initialize the transmit job descriptors. */
7169 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
7170 txq->txq_soft[i].txs_mbuf = NULL;
7171 txq->txq_sfree = WM_TXQUEUELEN(txq);
7172 txq->txq_snext = 0;
7173 txq->txq_sdirty = 0;
7174 }
7175
7176 static void
7177 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7178 struct wm_txqueue *txq)
7179 {
7180
7181 KASSERT(mutex_owned(txq->txq_lock));
7182
7183 /*
7184 * Set up some register offsets that are different between
7185 * the i82542 and the i82543 and later chips.
7186 */
7187 if (sc->sc_type < WM_T_82543)
7188 txq->txq_tdt_reg = WMREG_OLD_TDT;
7189 else
7190 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
7191
7192 wm_init_tx_descs(sc, txq);
7193 wm_init_tx_regs(sc, wmq, txq);
7194 wm_init_tx_buffer(sc, txq);
7195
7196 txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
7197 txq->txq_sending = false;
7198 }
7199
7200 static void
7201 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7202 struct wm_rxqueue *rxq)
7203 {
7204
7205 KASSERT(mutex_owned(rxq->rxq_lock));
7206
7207 /*
7208 * Initialize the receive descriptor and receive job
7209 * descriptor rings.
7210 */
7211 if (sc->sc_type < WM_T_82543) {
7212 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
7213 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
7214 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
7215 rxq->rxq_descsize * rxq->rxq_ndesc);
7216 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
7217 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
7218 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
7219
7220 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
7221 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
7222 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
7223 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
7224 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
7225 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
7226 } else {
7227 int qid = wmq->wmq_id;
7228
7229 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
7230 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
7231 CSR_WRITE(sc, WMREG_RDLEN(qid),
7232 rxq->rxq_descsize * rxq->rxq_ndesc);
7233
7234 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7235 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
7236 				panic("%s: MCLBYTES %d unsupported for 82575 "
				    "or higher\n", __func__, MCLBYTES);
7237
7238 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
7239 CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
7240 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
7241 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
7242 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
7243 | RXDCTL_WTHRESH(1));
7244 CSR_WRITE(sc, WMREG_RDH(qid), 0);
7245 CSR_WRITE(sc, WMREG_RDT(qid), 0);
7246 } else {
7247 CSR_WRITE(sc, WMREG_RDH(qid), 0);
7248 CSR_WRITE(sc, WMREG_RDT(qid), 0);
7249 /* XXX should update with AIM? */
7250 CSR_WRITE(sc, WMREG_RDTR,
7251 (wmq->wmq_itr / 4) | RDTR_FPD);
7252 			/* MUST be the same */
7253 CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
7254 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
7255 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
7256 }
7257 }
7258 }
7259
7260 static int
7261 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7262 {
7263 struct wm_rxsoft *rxs;
7264 int error, i;
7265
7266 KASSERT(mutex_owned(rxq->rxq_lock));
7267
7268 for (i = 0; i < rxq->rxq_ndesc; i++) {
7269 rxs = &rxq->rxq_soft[i];
7270 if (rxs->rxs_mbuf == NULL) {
7271 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
7272 log(LOG_ERR, "%s: unable to allocate or map "
7273 "rx buffer %d, error = %d\n",
7274 device_xname(sc->sc_dev), i, error);
7275 /*
7276 * XXX Should attempt to run with fewer receive
7277 * XXX buffers instead of just failing.
7278 */
7279 wm_rxdrain(rxq);
7280 return ENOMEM;
7281 }
7282 } else {
7283 /*
7284 * For 82575 and 82576, the RX descriptors must be
7285 * initialized after the setting of RCTL.EN in
7286 * wm_set_filter()
7287 */
7288 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
7289 wm_init_rxdesc(rxq, i);
7290 }
7291 }
7292 rxq->rxq_ptr = 0;
7293 rxq->rxq_discard = 0;
7294 WM_RXCHAIN_RESET(rxq);
7295
7296 return 0;
7297 }
7298
7299 static int
7300 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7301 struct wm_rxqueue *rxq)
7302 {
7303
7304 KASSERT(mutex_owned(rxq->rxq_lock));
7305
7306 /*
7307 * Set up some register offsets that are different between
7308 * the i82542 and the i82543 and later chips.
7309 */
7310 if (sc->sc_type < WM_T_82543)
7311 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
7312 else
7313 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
7314
7315 wm_init_rx_regs(sc, wmq, rxq);
7316 return wm_init_rx_buffer(sc, rxq);
7317 }
7318
7319 /*
7320  * wm_init_txrx_queues:
7321  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
7322 */
7323 static int
7324 wm_init_txrx_queues(struct wm_softc *sc)
7325 {
7326 int i, error = 0;
7327
7328 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
7329 device_xname(sc->sc_dev), __func__));
7330
7331 for (i = 0; i < sc->sc_nqueues; i++) {
7332 struct wm_queue *wmq = &sc->sc_queue[i];
7333 struct wm_txqueue *txq = &wmq->wmq_txq;
7334 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
7335
7336 /*
7337 * TODO
7338 		 * Currently, a constant value is used instead of AIM.
7339 		 * Furthermore, the interrupt interval of a multiqueue
7340 		 * configuration using polling mode is lower than the default.
7341 		 * More tuning and AIM are required.
7342 */
7343 if (wm_is_using_multiqueue(sc))
7344 wmq->wmq_itr = 50;
7345 else
7346 wmq->wmq_itr = sc->sc_itr_init;
7347 wmq->wmq_set_itr = true;
7348
7349 mutex_enter(txq->txq_lock);
7350 wm_init_tx_queue(sc, wmq, txq);
7351 mutex_exit(txq->txq_lock);
7352
7353 mutex_enter(rxq->rxq_lock);
7354 error = wm_init_rx_queue(sc, wmq, rxq);
7355 mutex_exit(rxq->rxq_lock);
7356 if (error)
7357 break;
7358 }
7359
7360 return error;
7361 }
7362
7363 /*
7364 * wm_tx_offload:
7365 *
7366 * Set up TCP/IP checksumming parameters for the
7367 * specified packet.
7368 */
7369 static void
7370 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
7371 struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
7372 {
7373 struct mbuf *m0 = txs->txs_mbuf;
7374 struct livengood_tcpip_ctxdesc *t;
7375 uint32_t ipcs, tucs, cmd, cmdlen, seg;
7376 uint32_t ipcse;
7377 struct ether_header *eh;
7378 int offset, iphl;
7379 uint8_t fields;
7380
7381 /*
7382 * XXX It would be nice if the mbuf pkthdr had offset
7383 * fields for the protocol headers.
7384 */
7385
7386 eh = mtod(m0, struct ether_header *);
7387 switch (htons(eh->ether_type)) {
7388 case ETHERTYPE_IP:
7389 case ETHERTYPE_IPV6:
7390 offset = ETHER_HDR_LEN;
7391 break;
7392
7393 case ETHERTYPE_VLAN:
7394 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
7395 break;
7396
7397 default:
7398 /* Don't support this protocol or encapsulation. */
7399 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
7400 txq->txq_last_hw_ipcs = 0;
7401 txq->txq_last_hw_tucs = 0;
7402 *fieldsp = 0;
7403 *cmdp = 0;
7404 return;
7405 }
7406
7407 if ((m0->m_pkthdr.csum_flags &
7408 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
7409 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
7410 } else
7411 iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
7412
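	/* ipcse is the offset of the last byte of the IP header. */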
7413 ipcse = offset + iphl - 1;
7414
7415 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
7416 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
7417 seg = 0;
7418 fields = 0;
7419
7420 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
7421 int hlen = offset + iphl;
7422 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
7423
7424 if (__predict_false(m0->m_len <
7425 (hlen + sizeof(struct tcphdr)))) {
7426 /*
7427 * TCP/IP headers are not in the first mbuf; we need
7428 * to do this the slow and painful way. Let's just
7429 * hope this doesn't happen very often.
7430 */
7431 struct tcphdr th;
7432
7433 WM_Q_EVCNT_INCR(txq, tsopain);
7434
7435 m_copydata(m0, hlen, sizeof(th), &th);
7436 if (v4) {
7437 struct ip ip;
7438
7439 m_copydata(m0, offset, sizeof(ip), &ip);
7440 ip.ip_len = 0;
7441 m_copyback(m0,
7442 offset + offsetof(struct ip, ip_len),
7443 sizeof(ip.ip_len), &ip.ip_len);
7444 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
7445 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
7446 } else {
7447 struct ip6_hdr ip6;
7448
7449 m_copydata(m0, offset, sizeof(ip6), &ip6);
7450 ip6.ip6_plen = 0;
7451 m_copyback(m0,
7452 offset + offsetof(struct ip6_hdr, ip6_plen),
7453 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
7454 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
7455 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
7456 }
7457 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
7458 sizeof(th.th_sum), &th.th_sum);
7459
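			/* th_off is the TCP header length in 32-bit words. */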
7460 hlen += th.th_off << 2;
7461 } else {
7462 /*
7463 * TCP/IP headers are in the first mbuf; we can do
7464 * this the easy way.
7465 */
7466 struct tcphdr *th;
7467
7468 if (v4) {
7469 struct ip *ip =
7470 (void *)(mtod(m0, char *) + offset);
7471 th = (void *)(mtod(m0, char *) + hlen);
7472
7473 ip->ip_len = 0;
7474 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
7475 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
7476 } else {
7477 struct ip6_hdr *ip6 =
7478 (void *)(mtod(m0, char *) + offset);
7479 th = (void *)(mtod(m0, char *) + hlen);
7480
7481 ip6->ip6_plen = 0;
7482 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
7483 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
7484 }
7485 hlen += th->th_off << 2;
7486 }
7487
7488 if (v4) {
7489 WM_Q_EVCNT_INCR(txq, tso);
7490 cmdlen |= WTX_TCPIP_CMD_IP;
7491 } else {
7492 WM_Q_EVCNT_INCR(txq, tso6);
7493 ipcse = 0;
7494 }
7495 cmd |= WTX_TCPIP_CMD_TSE;
7496 cmdlen |= WTX_TCPIP_CMD_TSE |
7497 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
7498 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
7499 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
7500 }
7501
7502 /*
7503 * NOTE: Even if we're not using the IP or TCP/UDP checksum
7504 * offload feature, if we load the context descriptor, we
7505 * MUST provide valid values for IPCSS and TUCSS fields.
7506 */
7507
7508 ipcs = WTX_TCPIP_IPCSS(offset) |
7509 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
7510 WTX_TCPIP_IPCSE(ipcse);
7511 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
7512 WM_Q_EVCNT_INCR(txq, ipsum);
7513 fields |= WTX_IXSM;
7514 }
7515
7516 offset += iphl;
7517
7518 if (m0->m_pkthdr.csum_flags &
7519 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
7520 WM_Q_EVCNT_INCR(txq, tusum);
7521 fields |= WTX_TXSM;
7522 tucs = WTX_TCPIP_TUCSS(offset) |
7523 WTX_TCPIP_TUCSO(offset +
7524 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
7525 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7526 } else if ((m0->m_pkthdr.csum_flags &
7527 (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
7528 WM_Q_EVCNT_INCR(txq, tusum6);
7529 fields |= WTX_TXSM;
7530 tucs = WTX_TCPIP_TUCSS(offset) |
7531 WTX_TCPIP_TUCSO(offset +
7532 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
7533 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7534 } else {
7535 /* Just initialize it to a valid TCP context. */
7536 tucs = WTX_TCPIP_TUCSS(offset) |
7537 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
7538 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7539 }
7540
7541 *cmdp = cmd;
7542 *fieldsp = fields;
7543
7544 /*
7545 	 * We don't have to write a context descriptor for every packet,
7546 	 * except on the 82574. For the 82574, we must write a context
7547 	 * descriptor for every packet when we use two descriptor queues.
7548 	 *
7549 	 * The 82574L can only remember the *last* context used
7550 	 * regardless of the queue it was used for.  We cannot reuse
7551 	 * contexts on this hardware platform and must generate a new
7552 	 * context every time.  82574L hardware spec, section 7.2.6,
7553 	 * second note.
7554 */
7555 if (sc->sc_nqueues < 2) {
7556 /*
7557 		 * Setting up a new checksum offload context for every
7558 		 * frame takes a lot of processing time for the hardware.
7559 		 * This also reduces performance a lot for small sized
7560 		 * frames, so avoid it if the driver can use a previously
7561 		 * configured checksum offload context.
7562 		 * For TSO, in theory we could reuse the same TSO context
7563 		 * only if the frame is the same type (IP/TCP) and has the
7564 		 * same MSS. However, checking whether a frame has the same
7565 		 * IP/TCP structure is hard, so just ignore that and always
7566 		 * establish a new TSO context.
7568 */
7569 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
7570 == 0) {
7571 if (txq->txq_last_hw_cmd == cmd &&
7572 txq->txq_last_hw_fields == fields &&
7573 txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
7574 txq->txq_last_hw_tucs == (tucs & 0xffff)) {
7575 WM_Q_EVCNT_INCR(txq, skipcontext);
7576 return;
7577 }
7578 }
7579
7580 txq->txq_last_hw_cmd = cmd;
7581 txq->txq_last_hw_fields = fields;
7582 txq->txq_last_hw_ipcs = (ipcs & 0xffff);
7583 txq->txq_last_hw_tucs = (tucs & 0xffff);
7584 }
7585
7586 /* Fill in the context descriptor. */
7587 t = (struct livengood_tcpip_ctxdesc *)
7588 &txq->txq_descs[txq->txq_next];
7589 t->tcpip_ipcs = htole32(ipcs);
7590 t->tcpip_tucs = htole32(tucs);
7591 t->tcpip_cmdlen = htole32(cmdlen);
7592 t->tcpip_seg = htole32(seg);
7593 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
7594
7595 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
7596 txs->txs_ndesc++;
7597 }
7598
7599 static inline int
7600 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
7601 {
7602 struct wm_softc *sc = ifp->if_softc;
7603 u_int cpuid = cpu_index(curcpu());
7604
7605 /*
7606 	 * Currently, a simple distribution strategy.
7607 	 * TODO:
7608 	 * distribute by flowid (RSS hash value).
7609 */
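	/* Map the CPU index, shifted by the affinity offset, onto the queues. */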
7610 return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
7611 }
7612
7613 /*
7614 * wm_start: [ifnet interface function]
7615 *
7616 * Start packet transmission on the interface.
7617 */
7618 static void
7619 wm_start(struct ifnet *ifp)
7620 {
7621 struct wm_softc *sc = ifp->if_softc;
7622 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7623
7624 #ifdef WM_MPSAFE
7625 KASSERT(if_is_mpsafe(ifp));
7626 #endif
7627 /*
7628 * if_obytes and if_omcasts are added in if_transmit()@if.c.
7629 */
7630
7631 mutex_enter(txq->txq_lock);
7632 if (!txq->txq_stopping)
7633 wm_start_locked(ifp);
7634 mutex_exit(txq->txq_lock);
7635 }
7636
7637 static void
7638 wm_start_locked(struct ifnet *ifp)
7639 {
7640 struct wm_softc *sc = ifp->if_softc;
7641 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7642
7643 wm_send_common_locked(ifp, txq, false);
7644 }
7645
7646 static int
7647 wm_transmit(struct ifnet *ifp, struct mbuf *m)
7648 {
7649 int qid;
7650 struct wm_softc *sc = ifp->if_softc;
7651 struct wm_txqueue *txq;
7652
7653 qid = wm_select_txqueue(ifp, m);
7654 txq = &sc->sc_queue[qid].wmq_txq;
7655
7656 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
7657 m_freem(m);
7658 WM_Q_EVCNT_INCR(txq, pcqdrop);
7659 return ENOBUFS;
7660 }
7661
7662 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
7663 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
7664 if (m->m_flags & M_MCAST)
7665 if_statinc_ref(nsr, if_omcasts);
7666 IF_STAT_PUTREF(ifp);
7667
7668 if (mutex_tryenter(txq->txq_lock)) {
7669 if (!txq->txq_stopping)
7670 wm_transmit_locked(ifp, txq);
7671 mutex_exit(txq->txq_lock);
7672 }
7673
7674 return 0;
7675 }
7676
7677 static void
7678 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
7679 {
7680
7681 wm_send_common_locked(ifp, txq, true);
7682 }
7683
7684 static void
7685 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
7686 bool is_transmit)
7687 {
7688 struct wm_softc *sc = ifp->if_softc;
7689 struct mbuf *m0;
7690 struct wm_txsoft *txs;
7691 bus_dmamap_t dmamap;
7692 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
7693 bus_addr_t curaddr;
7694 bus_size_t seglen, curlen;
7695 uint32_t cksumcmd;
7696 uint8_t cksumfields;
7697 bool remap = true;
7698
7699 KASSERT(mutex_owned(txq->txq_lock));
7700
7701 if ((ifp->if_flags & IFF_RUNNING) == 0)
7702 return;
7703 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
7704 return;
7705
7706 /* Remember the previous number of free descriptors. */
7707 ofree = txq->txq_free;
7708
7709 /*
7710 * Loop through the send queue, setting up transmit descriptors
7711 * until we drain the queue, or use up all available transmit
7712 * descriptors.
7713 */
7714 for (;;) {
7715 m0 = NULL;
7716
7717 /* Get a work queue entry. */
7718 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
7719 wm_txeof(txq, UINT_MAX);
7720 if (txq->txq_sfree == 0) {
7721 DPRINTF(WM_DEBUG_TX,
7722 ("%s: TX: no free job descriptors\n",
7723 device_xname(sc->sc_dev)));
7724 WM_Q_EVCNT_INCR(txq, txsstall);
7725 break;
7726 }
7727 }
7728
7729 /* Grab a packet off the queue. */
7730 if (is_transmit)
7731 m0 = pcq_get(txq->txq_interq);
7732 else
7733 IFQ_DEQUEUE(&ifp->if_snd, m0);
7734 if (m0 == NULL)
7735 break;
7736
7737 DPRINTF(WM_DEBUG_TX,
7738 ("%s: TX: have packet to transmit: %p\n",
7739 device_xname(sc->sc_dev), m0));
7740
7741 txs = &txq->txq_soft[txq->txq_snext];
7742 dmamap = txs->txs_dmamap;
7743
7744 use_tso = (m0->m_pkthdr.csum_flags &
7745 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
7746
7747 /*
7748 * So says the Linux driver:
7749 * The controller does a simple calculation to make sure
7750 * there is enough room in the FIFO before initiating the
7751 * DMA for each buffer. The calc is:
7752 * 4 = ceil(buffer len / MSS)
7753 * To make sure we don't overrun the FIFO, adjust the max
7754 * buffer len if the MSS drops.
7755 */
7756 dmamap->dm_maxsegsz =
7757 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
7758 ? m0->m_pkthdr.segsz << 2
7759 : WTX_MAX_LEN;
7760
7761 /*
7762 * Load the DMA map. If this fails, the packet either
7763 * didn't fit in the allotted number of segments, or we
7764 * were short on resources. For the too-many-segments
7765 * case, we simply report an error and drop the packet,
7766 * since we can't sanely copy a jumbo packet to a single
7767 * buffer.
7768 */
7769 retry:
7770 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
7771 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
7772 if (__predict_false(error)) {
7773 if (error == EFBIG) {
7774 if (remap == true) {
7775 struct mbuf *m;
7776
7777 remap = false;
7778 m = m_defrag(m0, M_NOWAIT);
7779 if (m != NULL) {
7780 WM_Q_EVCNT_INCR(txq, defrag);
7781 m0 = m;
7782 goto retry;
7783 }
7784 }
7785 WM_Q_EVCNT_INCR(txq, toomanyseg);
7786 log(LOG_ERR, "%s: Tx packet consumes too many "
7787 "DMA segments, dropping...\n",
7788 device_xname(sc->sc_dev));
7789 wm_dump_mbuf_chain(sc, m0);
7790 m_freem(m0);
7791 continue;
7792 }
7793 /* Short on resources, just stop for now. */
7794 DPRINTF(WM_DEBUG_TX,
7795 ("%s: TX: dmamap load failed: %d\n",
7796 device_xname(sc->sc_dev), error));
7797 break;
7798 }
7799
7800 segs_needed = dmamap->dm_nsegs;
7801 if (use_tso) {
7802 /* For sentinel descriptor; see below. */
7803 segs_needed++;
7804 }
7805
7806 /*
7807 * Ensure we have enough descriptors free to describe
7808 * the packet. Note, we always reserve one descriptor
7809 * at the end of the ring due to the semantics of the
7810 * TDT register, plus one more in the event we need
7811 * to load offload context.
7812 */
7813 if (segs_needed > txq->txq_free - 2) {
7814 /*
7815 * Not enough free descriptors to transmit this
7816 * packet. We haven't committed anything yet,
7817 			 * so just unload the DMA map, put the packet
7818 			 * back on the queue, and punt. Notify the upper
7819 * layer that there are no more slots left.
7820 */
7821 DPRINTF(WM_DEBUG_TX,
7822 ("%s: TX: need %d (%d) descriptors, have %d\n",
7823 device_xname(sc->sc_dev), dmamap->dm_nsegs,
7824 segs_needed, txq->txq_free - 1));
7825 txq->txq_flags |= WM_TXQ_NO_SPACE;
7826 bus_dmamap_unload(sc->sc_dmat, dmamap);
7827 WM_Q_EVCNT_INCR(txq, txdstall);
7828 break;
7829 }
7830
7831 /*
7832 * Check for 82547 Tx FIFO bug. We need to do this
7833 * once we know we can transmit the packet, since we
7834 * do some internal FIFO space accounting here.
7835 */
7836 if (sc->sc_type == WM_T_82547 &&
7837 wm_82547_txfifo_bugchk(sc, m0)) {
7838 DPRINTF(WM_DEBUG_TX,
7839 ("%s: TX: 82547 Tx FIFO bug detected\n",
7840 device_xname(sc->sc_dev)));
7841 txq->txq_flags |= WM_TXQ_NO_SPACE;
7842 bus_dmamap_unload(sc->sc_dmat, dmamap);
7843 WM_Q_EVCNT_INCR(txq, fifo_stall);
7844 break;
7845 }
7846
7847 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
7848
7849 DPRINTF(WM_DEBUG_TX,
7850 ("%s: TX: packet has %d (%d) DMA segments\n",
7851 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
7852
7853 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
7854
7855 /*
7856 * Store a pointer to the packet so that we can free it
7857 * later.
7858 *
7859 		 * Initially, we consider the number of descriptors the
7860 		 * packet uses to be the number of DMA segments. This may be
7861 * incremented by 1 if we do checksum offload (a descriptor
7862 * is used to set the checksum context).
7863 */
7864 txs->txs_mbuf = m0;
7865 txs->txs_firstdesc = txq->txq_next;
7866 txs->txs_ndesc = segs_needed;
7867
7868 /* Set up offload parameters for this packet. */
7869 if (m0->m_pkthdr.csum_flags &
7870 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
7871 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7872 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
7873 wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
7874 } else {
7875 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
7876 txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
7877 cksumcmd = 0;
7878 cksumfields = 0;
7879 }
7880
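		/* Request FCS insertion and interrupt delay on all descriptors. */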
7881 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
7882
7883 /* Sync the DMA map. */
7884 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
7885 BUS_DMASYNC_PREWRITE);
7886
7887 /* Initialize the transmit descriptor. */
7888 for (nexttx = txq->txq_next, seg = 0;
7889 seg < dmamap->dm_nsegs; seg++) {
7890 for (seglen = dmamap->dm_segs[seg].ds_len,
7891 curaddr = dmamap->dm_segs[seg].ds_addr;
7892 seglen != 0;
7893 curaddr += curlen, seglen -= curlen,
7894 nexttx = WM_NEXTTX(txq, nexttx)) {
7895 curlen = seglen;
7896
7897 /*
7898 * So says the Linux driver:
7899 * Work around for premature descriptor
7900 * write-backs in TSO mode. Append a
7901 * 4-byte sentinel descriptor.
7902 */
7903 if (use_tso && seg == dmamap->dm_nsegs - 1 &&
7904 curlen > 8)
7905 curlen -= 4;
7906
7907 wm_set_dma_addr(
7908 &txq->txq_descs[nexttx].wtx_addr, curaddr);
7909 txq->txq_descs[nexttx].wtx_cmdlen
7910 = htole32(cksumcmd | curlen);
7911 txq->txq_descs[nexttx].wtx_fields.wtxu_status
7912 = 0;
7913 txq->txq_descs[nexttx].wtx_fields.wtxu_options
7914 = cksumfields;
7915 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
7916 lasttx = nexttx;
7917
7918 DPRINTF(WM_DEBUG_TX,
7919 ("%s: TX: desc %d: low %#" PRIx64 ", "
7920 "len %#04zx\n",
7921 device_xname(sc->sc_dev), nexttx,
7922 (uint64_t)curaddr, curlen));
7923 }
7924 }
7925
7926 KASSERT(lasttx != -1);
7927
7928 /*
7929 * Set up the command byte on the last descriptor of
7930 * the packet. If we're in the interrupt delay window,
7931 * delay the interrupt.
7932 */
7933 txq->txq_descs[lasttx].wtx_cmdlen |=
7934 htole32(WTX_CMD_EOP | WTX_CMD_RS);
7935
7936 /*
7937 * If VLANs are enabled and the packet has a VLAN tag, set
7938 * up the descriptor to encapsulate the packet for us.
7939 *
7940 * This is only valid on the last descriptor of the packet.
7941 */
7942 if (vlan_has_tag(m0)) {
7943 txq->txq_descs[lasttx].wtx_cmdlen |=
7944 htole32(WTX_CMD_VLE);
7945 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
7946 = htole16(vlan_get_tag(m0));
7947 }
7948
7949 txs->txs_lastdesc = lasttx;
7950
7951 DPRINTF(WM_DEBUG_TX,
7952 ("%s: TX: desc %d: cmdlen 0x%08x\n",
7953 device_xname(sc->sc_dev),
7954 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
7955
7956 /* Sync the descriptors we're using. */
7957 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
7958 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7959
7960 /* Give the packet to the chip. */
7961 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
7962
7963 DPRINTF(WM_DEBUG_TX,
7964 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
7965
7966 DPRINTF(WM_DEBUG_TX,
7967 ("%s: TX: finished transmitting packet, job %d\n",
7968 device_xname(sc->sc_dev), txq->txq_snext));
7969
7970 /* Advance the tx pointer. */
7971 txq->txq_free -= txs->txs_ndesc;
7972 txq->txq_next = nexttx;
7973
7974 txq->txq_sfree--;
7975 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
7976
7977 /* Pass the packet to any BPF listeners. */
7978 bpf_mtap(ifp, m0, BPF_D_OUT);
7979 }
7980
7981 if (m0 != NULL) {
7982 txq->txq_flags |= WM_TXQ_NO_SPACE;
7983 WM_Q_EVCNT_INCR(txq, descdrop);
7984 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
7985 __func__));
7986 m_freem(m0);
7987 }
7988
7989 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
7990 /* No more slots; notify upper layer. */
7991 txq->txq_flags |= WM_TXQ_NO_SPACE;
7992 }
7993
7994 if (txq->txq_free != ofree) {
7995 /* Set a watchdog timer in case the chip flakes out. */
7996 txq->txq_lastsent = time_uptime;
7997 txq->txq_sending = true;
7998 }
7999 }
8000
8001 /*
8002 * wm_nq_tx_offload:
8003 *
8004 * Set up TCP/IP checksumming parameters for the
8005 * specified packet, for NEWQUEUE devices
8006 */
8007 static void
8008 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8009 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
8010 {
8011 struct mbuf *m0 = txs->txs_mbuf;
8012 uint32_t vl_len, mssidx, cmdc;
8013 struct ether_header *eh;
8014 int offset, iphl;
8015
8016 /*
8017 * XXX It would be nice if the mbuf pkthdr had offset
8018 * fields for the protocol headers.
8019 */
8020 *cmdlenp = 0;
8021 *fieldsp = 0;
8022
8023 eh = mtod(m0, struct ether_header *);
8024 switch (htons(eh->ether_type)) {
8025 case ETHERTYPE_IP:
8026 case ETHERTYPE_IPV6:
8027 offset = ETHER_HDR_LEN;
8028 break;
8029
8030 case ETHERTYPE_VLAN:
8031 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8032 break;
8033
8034 default:
8035 /* Don't support this protocol or encapsulation. */
8036 *do_csum = false;
8037 return;
8038 }
8039 *do_csum = true;
8040 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
8041 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
8042
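	/* Record the MAC header length in the context descriptor. */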
8043 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
8044 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
8045
8046 if ((m0->m_pkthdr.csum_flags &
8047 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8048 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8049 } else {
8050 iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
8051 }
8052 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
8053 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
8054
8055 if (vlan_has_tag(m0)) {
8056 vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
8057 << NQTXC_VLLEN_VLAN_SHIFT);
8058 *cmdlenp |= NQTX_CMD_VLE;
8059 }
8060
8061 mssidx = 0;
8062
8063 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8064 int hlen = offset + iphl;
8065 int tcp_hlen;
8066 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
8067
8068 if (__predict_false(m0->m_len <
8069 (hlen + sizeof(struct tcphdr)))) {
8070 /*
8071 * TCP/IP headers are not in the first mbuf; we need
8072 * to do this the slow and painful way. Let's just
8073 * hope this doesn't happen very often.
8074 */
8075 struct tcphdr th;
8076
8077 WM_Q_EVCNT_INCR(txq, tsopain);
8078
8079 m_copydata(m0, hlen, sizeof(th), &th);
8080 if (v4) {
8081 struct ip ip;
8082
8083 m_copydata(m0, offset, sizeof(ip), &ip);
8084 ip.ip_len = 0;
8085 m_copyback(m0,
8086 offset + offsetof(struct ip, ip_len),
8087 sizeof(ip.ip_len), &ip.ip_len);
8088 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8089 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8090 } else {
8091 struct ip6_hdr ip6;
8092
8093 m_copydata(m0, offset, sizeof(ip6), &ip6);
8094 ip6.ip6_plen = 0;
8095 m_copyback(m0,
8096 offset + offsetof(struct ip6_hdr, ip6_plen),
8097 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8098 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8099 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8100 }
8101 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8102 sizeof(th.th_sum), &th.th_sum);
8103
8104 tcp_hlen = th.th_off << 2;
8105 } else {
8106 /*
8107 * TCP/IP headers are in the first mbuf; we can do
8108 * this the easy way.
8109 */
8110 struct tcphdr *th;
8111
8112 if (v4) {
8113 struct ip *ip =
8114 (void *)(mtod(m0, char *) + offset);
8115 th = (void *)(mtod(m0, char *) + hlen);
8116
8117 ip->ip_len = 0;
8118 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8119 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8120 } else {
8121 struct ip6_hdr *ip6 =
8122 (void *)(mtod(m0, char *) + offset);
8123 th = (void *)(mtod(m0, char *) + hlen);
8124
8125 ip6->ip6_plen = 0;
8126 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8127 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8128 }
8129 tcp_hlen = th->th_off << 2;
8130 }
8131 hlen += tcp_hlen;
8132 *cmdlenp |= NQTX_CMD_TSE;
8133
8134 if (v4) {
8135 WM_Q_EVCNT_INCR(txq, tso);
8136 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
8137 } else {
8138 WM_Q_EVCNT_INCR(txq, tso6);
8139 *fieldsp |= NQTXD_FIELDS_TUXSM;
8140 }
8141 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
8142 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8143 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
8144 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
8145 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
8146 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
8147 } else {
8148 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
8149 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8150 }
8151
8152 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
8153 *fieldsp |= NQTXD_FIELDS_IXSM;
8154 cmdc |= NQTXC_CMD_IP4;
8155 }
8156
8157 if (m0->m_pkthdr.csum_flags &
8158 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
8159 WM_Q_EVCNT_INCR(txq, tusum);
8160 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
8161 cmdc |= NQTXC_CMD_TCP;
8162 else
8163 cmdc |= NQTXC_CMD_UDP;
8164
8165 cmdc |= NQTXC_CMD_IP4;
8166 *fieldsp |= NQTXD_FIELDS_TUXSM;
8167 }
8168 if (m0->m_pkthdr.csum_flags &
8169 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
8170 WM_Q_EVCNT_INCR(txq, tusum6);
8171 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
8172 cmdc |= NQTXC_CMD_TCP;
8173 else
8174 cmdc |= NQTXC_CMD_UDP;
8175
8176 cmdc |= NQTXC_CMD_IP6;
8177 *fieldsp |= NQTXD_FIELDS_TUXSM;
8178 }
8179
8180 /*
8181 	 * We don't have to write a context descriptor for every packet on
8182 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
8183 	 * I354, I210 and I211. Writing once per Tx queue is enough for
8184 	 * these controllers.
8185 	 * Writing a context descriptor for every packet adds overhead,
8186 	 * but it does not cause problems.
8187 */
8188 /* Fill in the context descriptor. */
8189 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
8190 htole32(vl_len);
8191 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
8192 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
8193 htole32(cmdc);
8194 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
8195 htole32(mssidx);
8196 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8197 DPRINTF(WM_DEBUG_TX,
8198 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
8199 txq->txq_next, 0, vl_len));
8200 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
8201 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8202 txs->txs_ndesc++;
8203 }
8204
8205 /*
8206 * wm_nq_start: [ifnet interface function]
8207 *
8208 * Start packet transmission on the interface for NEWQUEUE devices
8209 */
8210 static void
8211 wm_nq_start(struct ifnet *ifp)
8212 {
8213 struct wm_softc *sc = ifp->if_softc;
8214 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8215
8216 #ifdef WM_MPSAFE
8217 KASSERT(if_is_mpsafe(ifp));
8218 #endif
8219 /*
8220 * if_obytes and if_omcasts are added in if_transmit()@if.c.
8221 */
8222
8223 mutex_enter(txq->txq_lock);
8224 if (!txq->txq_stopping)
8225 wm_nq_start_locked(ifp);
8226 mutex_exit(txq->txq_lock);
8227 }
8228
8229 static void
8230 wm_nq_start_locked(struct ifnet *ifp)
8231 {
8232 struct wm_softc *sc = ifp->if_softc;
8233 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8234
8235 wm_nq_send_common_locked(ifp, txq, false);
8236 }
8237
8238 static int
8239 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
8240 {
8241 int qid;
8242 struct wm_softc *sc = ifp->if_softc;
8243 struct wm_txqueue *txq;
8244
8245 qid = wm_select_txqueue(ifp, m);
8246 txq = &sc->sc_queue[qid].wmq_txq;
8247
8248 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8249 m_freem(m);
8250 WM_Q_EVCNT_INCR(txq, pcqdrop);
8251 return ENOBUFS;
8252 }
8253
8254 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
8255 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
8256 if (m->m_flags & M_MCAST)
8257 if_statinc_ref(nsr, if_omcasts);
8258 IF_STAT_PUTREF(ifp);
8259
8260 /*
8261 	 * This mutex_tryenter() can fail at run time in two situations:
8262 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
8263 	 *     (2) contention with the deferred if_start softint
8264 	 *         (wm_handle_queue())
8265 	 * In case (1), the last packet enqueued to txq->txq_interq is
8266 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
8267 	 * In case (2), the last packet enqueued to txq->txq_interq is
8268 	 * also dequeued by wm_deferred_start_locked(), so it does not get
8269 	 * stuck either.
8270 */
8271 if (mutex_tryenter(txq->txq_lock)) {
8272 if (!txq->txq_stopping)
8273 wm_nq_transmit_locked(ifp, txq);
8274 mutex_exit(txq->txq_lock);
8275 }
8276
8277 return 0;
8278 }
8279
8280 static void
8281 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
8282 {
8283
8284 wm_nq_send_common_locked(ifp, txq, true);
8285 }
8286
8287 static void
8288 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
8289 bool is_transmit)
8290 {
8291 struct wm_softc *sc = ifp->if_softc;
8292 struct mbuf *m0;
8293 struct wm_txsoft *txs;
8294 bus_dmamap_t dmamap;
8295 int error, nexttx, lasttx = -1, seg, segs_needed;
8296 bool do_csum, sent;
8297 bool remap = true;
8298
8299 KASSERT(mutex_owned(txq->txq_lock));
8300
8301 if ((ifp->if_flags & IFF_RUNNING) == 0)
8302 return;
8303 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
8304 return;
8305
8306 sent = false;
8307
8308 /*
8309 * Loop through the send queue, setting up transmit descriptors
8310 * until we drain the queue, or use up all available transmit
8311 * descriptors.
8312 */
8313 for (;;) {
8314 m0 = NULL;
8315
8316 /* Get a work queue entry. */
8317 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
8318 wm_txeof(txq, UINT_MAX);
8319 if (txq->txq_sfree == 0) {
8320 DPRINTF(WM_DEBUG_TX,
8321 ("%s: TX: no free job descriptors\n",
8322 device_xname(sc->sc_dev)));
8323 WM_Q_EVCNT_INCR(txq, txsstall);
8324 break;
8325 }
8326 }
8327
8328 /* Grab a packet off the queue. */
8329 if (is_transmit)
8330 m0 = pcq_get(txq->txq_interq);
8331 else
8332 IFQ_DEQUEUE(&ifp->if_snd, m0);
8333 if (m0 == NULL)
8334 break;
8335
8336 DPRINTF(WM_DEBUG_TX,
8337 ("%s: TX: have packet to transmit: %p\n",
8338 device_xname(sc->sc_dev), m0));
8339
8340 txs = &txq->txq_soft[txq->txq_snext];
8341 dmamap = txs->txs_dmamap;
8342
8343 /*
8344 * Load the DMA map. If this fails, the packet either
8345 * didn't fit in the allotted number of segments, or we
8346 * were short on resources. For the too-many-segments
8347 * case, we simply report an error and drop the packet,
8348 * since we can't sanely copy a jumbo packet to a single
8349 * buffer.
8350 */
8351 retry:
8352 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
8353 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
8354 if (__predict_false(error)) {
8355 if (error == EFBIG) {
8356 if (remap == true) {
8357 struct mbuf *m;
8358
8359 remap = false;
8360 m = m_defrag(m0, M_NOWAIT);
8361 if (m != NULL) {
8362 WM_Q_EVCNT_INCR(txq, defrag);
8363 m0 = m;
8364 goto retry;
8365 }
8366 }
8367 WM_Q_EVCNT_INCR(txq, toomanyseg);
8368 log(LOG_ERR, "%s: Tx packet consumes too many "
8369 "DMA segments, dropping...\n",
8370 device_xname(sc->sc_dev));
8371 wm_dump_mbuf_chain(sc, m0);
8372 m_freem(m0);
8373 continue;
8374 }
8375 /* Short on resources, just stop for now. */
8376 DPRINTF(WM_DEBUG_TX,
8377 ("%s: TX: dmamap load failed: %d\n",
8378 device_xname(sc->sc_dev), error));
8379 break;
8380 }
8381
8382 segs_needed = dmamap->dm_nsegs;
8383
8384 /*
8385 * Ensure we have enough descriptors free to describe
8386 * the packet. Note, we always reserve one descriptor
8387 * at the end of the ring due to the semantics of the
8388 * TDT register, plus one more in the event we need
8389 * to load offload context.
8390 */
8391 if (segs_needed > txq->txq_free - 2) {
8392 /*
8393 * Not enough free descriptors to transmit this
8394 * packet. We haven't committed anything yet,
8395 			 * so just unload the DMA map, put the packet
8396 			 * back on the queue, and punt. Notify the upper
8397 * layer that there are no more slots left.
8398 */
8399 DPRINTF(WM_DEBUG_TX,
8400 ("%s: TX: need %d (%d) descriptors, have %d\n",
8401 device_xname(sc->sc_dev), dmamap->dm_nsegs,
8402 segs_needed, txq->txq_free - 1));
8403 txq->txq_flags |= WM_TXQ_NO_SPACE;
8404 bus_dmamap_unload(sc->sc_dmat, dmamap);
8405 WM_Q_EVCNT_INCR(txq, txdstall);
8406 break;
8407 }
8408
8409 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
8410
8411 DPRINTF(WM_DEBUG_TX,
8412 ("%s: TX: packet has %d (%d) DMA segments\n",
8413 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
8414
8415 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
8416
8417 /*
8418 * Store a pointer to the packet so that we can free it
8419 * later.
8420 *
8421 		 * Initially, we consider the number of descriptors the
8422 		 * packet uses to be the number of DMA segments. This may be
8423 * incremented by 1 if we do checksum offload (a descriptor
8424 * is used to set the checksum context).
8425 */
8426 txs->txs_mbuf = m0;
8427 txs->txs_firstdesc = txq->txq_next;
8428 txs->txs_ndesc = segs_needed;
8429
8430 /* Set up offload parameters for this packet. */
8431 uint32_t cmdlen, fields, dcmdlen;
8432 if (m0->m_pkthdr.csum_flags &
8433 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
8434 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8435 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
8436 wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
8437 &do_csum);
8438 } else {
8439 do_csum = false;
8440 cmdlen = 0;
8441 fields = 0;
8442 }
8443
8444 /* Sync the DMA map. */
8445 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
8446 BUS_DMASYNC_PREWRITE);
8447
8448 /* Initialize the first transmit descriptor. */
8449 nexttx = txq->txq_next;
8450 if (!do_csum) {
8451 /* Setup a legacy descriptor */
8452 wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
8453 dmamap->dm_segs[0].ds_addr);
8454 txq->txq_descs[nexttx].wtx_cmdlen =
8455 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
8456 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
8457 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
8458 if (vlan_has_tag(m0)) {
8459 txq->txq_descs[nexttx].wtx_cmdlen |=
8460 htole32(WTX_CMD_VLE);
8461 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
8462 htole16(vlan_get_tag(m0));
8463 } else
8464 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
8465
8466 dcmdlen = 0;
8467 } else {
8468 /* Setup an advanced data descriptor */
8469 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
8470 htole64(dmamap->dm_segs[0].ds_addr);
8471 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
8472 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
8473 htole32(dmamap->dm_segs[0].ds_len | cmdlen);
8474 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
8475 htole32(fields);
8476 DPRINTF(WM_DEBUG_TX,
8477 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
8478 device_xname(sc->sc_dev), nexttx,
8479 (uint64_t)dmamap->dm_segs[0].ds_addr));
8480 DPRINTF(WM_DEBUG_TX,
8481 ("\t 0x%08x%08x\n", fields,
8482 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
8483 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
8484 }
8485
8486 lasttx = nexttx;
8487 nexttx = WM_NEXTTX(txq, nexttx);
8488 /*
8489 		 * Fill in the next descriptors. The legacy and advanced
8490 		 * formats are the same here.
8491 */
8492 for (seg = 1; seg < dmamap->dm_nsegs;
8493 seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
8494 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
8495 htole64(dmamap->dm_segs[seg].ds_addr);
8496 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
8497 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
8498 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
8499 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
8500 lasttx = nexttx;
8501
8502 DPRINTF(WM_DEBUG_TX,
8503 ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
8504 device_xname(sc->sc_dev), nexttx,
8505 (uint64_t)dmamap->dm_segs[seg].ds_addr,
8506 dmamap->dm_segs[seg].ds_len));
8507 }
8508
8509 KASSERT(lasttx != -1);
8510
8511 /*
8512 * Set up the command byte on the last descriptor of
8513 * the packet. If we're in the interrupt delay window,
8514 * delay the interrupt.
8515 */
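		/*
		 * EOP and RS occupy the same bit positions in the legacy and
		 * NQ descriptor formats, so the legacy view is used here.
		 */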
8516 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
8517 (NQTX_CMD_EOP | NQTX_CMD_RS));
8518 txq->txq_descs[lasttx].wtx_cmdlen |=
8519 htole32(WTX_CMD_EOP | WTX_CMD_RS);
8520
8521 txs->txs_lastdesc = lasttx;
8522
8523 DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
8524 device_xname(sc->sc_dev),
8525 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
8526
8527 /* Sync the descriptors we're using. */
8528 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
8529 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8530
8531 /* Give the packet to the chip. */
8532 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
8533 sent = true;
8534
8535 DPRINTF(WM_DEBUG_TX,
8536 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
8537
8538 DPRINTF(WM_DEBUG_TX,
8539 ("%s: TX: finished transmitting packet, job %d\n",
8540 device_xname(sc->sc_dev), txq->txq_snext));
8541
8542 /* Advance the tx pointer. */
8543 txq->txq_free -= txs->txs_ndesc;
8544 txq->txq_next = nexttx;
8545
8546 txq->txq_sfree--;
8547 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
8548
8549 /* Pass the packet to any BPF listeners. */
8550 bpf_mtap(ifp, m0, BPF_D_OUT);
8551 }
8552
8553 if (m0 != NULL) {
8554 txq->txq_flags |= WM_TXQ_NO_SPACE;
8555 WM_Q_EVCNT_INCR(txq, descdrop);
8556 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
8557 __func__));
8558 m_freem(m0);
8559 }
8560
8561 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
8562 /* No more slots; notify upper layer. */
8563 txq->txq_flags |= WM_TXQ_NO_SPACE;
8564 }
8565
8566 if (sent) {
8567 /* Set a watchdog timer in case the chip flakes out. */
8568 txq->txq_lastsent = time_uptime;
8569 txq->txq_sending = true;
8570 }
8571 }
8572
8573 static void
8574 wm_deferred_start_locked(struct wm_txqueue *txq)
8575 {
8576 struct wm_softc *sc = txq->txq_sc;
8577 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8578 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
8579 int qid = wmq->wmq_id;
8580
8581 KASSERT(mutex_owned(txq->txq_lock));
8582
8583 if (txq->txq_stopping) {
8584 mutex_exit(txq->txq_lock);
8585 return;
8586 }
8587
8588 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
8589 		/* XXX needed for ALTQ or a single-CPU system */
8590 if (qid == 0)
8591 wm_nq_start_locked(ifp);
8592 wm_nq_transmit_locked(ifp, txq);
8593 } else {
8594 		/* XXX needed for ALTQ or a single-CPU system */
8595 if (qid == 0)
8596 wm_start_locked(ifp);
8597 wm_transmit_locked(ifp, txq);
8598 }
8599 }
8600
8601 /* Interrupt */
8602
8603 /*
8604 * wm_txeof:
8605 *
8606 * Helper; handle transmit interrupts.
8607 */
8608 static bool
8609 wm_txeof(struct wm_txqueue *txq, u_int limit)
8610 {
8611 struct wm_softc *sc = txq->txq_sc;
8612 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8613 struct wm_txsoft *txs;
8614 int count = 0;
8615 int i;
8616 uint8_t status;
8617 bool more = false;
8618
8619 KASSERT(mutex_owned(txq->txq_lock));
8620
8621 if (txq->txq_stopping)
8622 return false;
8623
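	/*
	 * Optimistically clear the no-space flag; the transmit path will
	 * set it again if the ring is still full.
	 */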
8624 txq->txq_flags &= ~WM_TXQ_NO_SPACE;
8625
8626 /*
8627 * Go through the Tx list and free mbufs for those
8628 * frames which have been transmitted.
8629 */
8630 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
8631 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
8632 if (limit-- == 0) {
8633 more = true;
8634 DPRINTF(WM_DEBUG_TX,
8635 ("%s: TX: loop limited, job %d is not processed\n",
8636 device_xname(sc->sc_dev), i));
8637 break;
8638 }
8639
8640 txs = &txq->txq_soft[i];
8641
8642 DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
8643 device_xname(sc->sc_dev), i));
8644
8645 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
8646 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
8647
8648 status =
8649 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
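		/* The hardware hasn't finished with this job yet; stop here. */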
8650 if ((status & WTX_ST_DD) == 0) {
8651 wm_cdtxsync(txq, txs->txs_lastdesc, 1,
8652 BUS_DMASYNC_PREREAD);
8653 break;
8654 }
8655
8656 count++;
8657 DPRINTF(WM_DEBUG_TX,
8658 ("%s: TX: job %d done: descs %d..%d\n",
8659 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
8660 txs->txs_lastdesc));
8661
8662 /*
8663 * XXX We should probably be using the statistics
8664 * XXX registers, but I don't know if they exist
8665 * XXX on chips before the i82544.
8666 */
8667
8668 #ifdef WM_EVENT_COUNTERS
8669 if (status & WTX_ST_TU)
8670 WM_Q_EVCNT_INCR(txq, underrun);
8671 #endif /* WM_EVENT_COUNTERS */
8672
8673 /*
8674 		 * The documents for the 82574 and newer say the status field
8675 		 * has neither an EC (Excessive Collision) bit nor an LC (Late
8676 		 * Collision) bit; both are reserved. Refer to the "PCIe GbE
8677 		 * Controller Open Source Software Developer's Manual", the
8678 		 * 82574 datasheet and newer.
8679 		 *
8680 		 * XXX The LC bit was seen set on an I218 even though the media was
8681 		 * full duplex, so the bit might have some other, undocumented meaning.
8682 */
8683
8684 if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
8685 && ((sc->sc_type < WM_T_82574)
8686 || (sc->sc_type == WM_T_80003))) {
8687 if_statinc(ifp, if_oerrors);
8688 if (status & WTX_ST_LC)
8689 log(LOG_WARNING, "%s: late collision\n",
8690 device_xname(sc->sc_dev));
8691 else if (status & WTX_ST_EC) {
8692 if_statadd(ifp, if_collisions,
8693 TX_COLLISION_THRESHOLD + 1);
8694 log(LOG_WARNING, "%s: excessive collisions\n",
8695 device_xname(sc->sc_dev));
8696 }
8697 } else
8698 if_statinc(ifp, if_opackets);
8699
8700 txq->txq_packets++;
8701 txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
8702
8703 txq->txq_free += txs->txs_ndesc;
8704 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
8705 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
8706 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
8707 m_freem(txs->txs_mbuf);
8708 txs->txs_mbuf = NULL;
8709 }
8710
8711 /* Update the dirty transmit buffer pointer. */
8712 txq->txq_sdirty = i;
8713 DPRINTF(WM_DEBUG_TX,
8714 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
8715
8716 if (count != 0)
8717 rnd_add_uint32(&sc->rnd_source, count);
8718
8719 /*
8720 * If there are no more pending transmissions, cancel the watchdog
8721 * timer.
8722 */
8723 if (txq->txq_sfree == WM_TXQUEUELEN(txq))
8724 txq->txq_sending = false;
8725
8726 return more;
8727 }
8728
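/*
 * Rx descriptor accessors.  The 82574 uses extended descriptors, NEWQUEUE
 * (82575 and later) devices use advanced descriptors, and older chips use
 * legacy descriptors; these helpers hide the layout differences.
 */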
8729 static inline uint32_t
8730 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
8731 {
8732 struct wm_softc *sc = rxq->rxq_sc;
8733
8734 if (sc->sc_type == WM_T_82574)
8735 return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
8736 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8737 return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
8738 else
8739 return rxq->rxq_descs[idx].wrx_status;
8740 }
8741
8742 static inline uint32_t
8743 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
8744 {
8745 struct wm_softc *sc = rxq->rxq_sc;
8746
8747 if (sc->sc_type == WM_T_82574)
8748 return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
8749 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8750 return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
8751 else
8752 return rxq->rxq_descs[idx].wrx_errors;
8753 }
8754
8755 static inline uint16_t
8756 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
8757 {
8758 struct wm_softc *sc = rxq->rxq_sc;
8759
8760 if (sc->sc_type == WM_T_82574)
8761 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
8762 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8763 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
8764 else
8765 return rxq->rxq_descs[idx].wrx_special;
8766 }
8767
8768 static inline int
8769 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
8770 {
8771 struct wm_softc *sc = rxq->rxq_sc;
8772
8773 if (sc->sc_type == WM_T_82574)
8774 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
8775 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8776 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
8777 else
8778 return rxq->rxq_descs[idx].wrx_len;
8779 }
8780
8781 #ifdef WM_DEBUG
8782 static inline uint32_t
8783 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
8784 {
8785 struct wm_softc *sc = rxq->rxq_sc;
8786
8787 if (sc->sc_type == WM_T_82574)
8788 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
8789 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8790 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
8791 else
8792 return 0;
8793 }
8794
8795 static inline uint8_t
8796 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
8797 {
8798 struct wm_softc *sc = rxq->rxq_sc;
8799
8800 if (sc->sc_type == WM_T_82574)
8801 return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
8802 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8803 return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
8804 else
8805 return 0;
8806 }
8807 #endif /* WM_DEBUG */
8808
8809 static inline bool
8810 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
8811 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
8812 {
8813
8814 if (sc->sc_type == WM_T_82574)
8815 return (status & ext_bit) != 0;
8816 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8817 return (status & nq_bit) != 0;
8818 else
8819 return (status & legacy_bit) != 0;
8820 }
8821
8822 static inline bool
8823 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
8824 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
8825 {
8826
8827 if (sc->sc_type == WM_T_82574)
8828 return (error & ext_bit) != 0;
8829 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8830 return (error & nq_bit) != 0;
8831 else
8832 return (error & legacy_bit) != 0;
8833 }
8834
8835 static inline bool
8836 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
8837 {
8838
8839 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
8840 WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
8841 return true;
8842 else
8843 return false;
8844 }
8845
8846 static inline bool
8847 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
8848 {
8849 struct wm_softc *sc = rxq->rxq_sc;
8850
8851 /* XXX missing error bit for newqueue? */
8852 if (wm_rxdesc_is_set_error(sc, errors,
8853 WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
8854 EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
8855 | EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
8856 NQRXC_ERROR_RXE)) {
8857 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
8858 EXTRXC_ERROR_SE, 0))
8859 log(LOG_WARNING, "%s: symbol error\n",
8860 device_xname(sc->sc_dev));
8861 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
8862 EXTRXC_ERROR_SEQ, 0))
8863 log(LOG_WARNING, "%s: receive sequence error\n",
8864 device_xname(sc->sc_dev));
8865 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
8866 EXTRXC_ERROR_CE, 0))
8867 log(LOG_WARNING, "%s: CRC error\n",
8868 device_xname(sc->sc_dev));
8869 return true;
8870 }
8871
8872 return false;
8873 }
8874
8875 static inline bool
8876 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
8877 {
8878 struct wm_softc *sc = rxq->rxq_sc;
8879
8880 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
8881 NQRXC_STATUS_DD)) {
8882 /* We have processed all of the receive descriptors. */
8883 wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
8884 return false;
8885 }
8886
8887 return true;
8888 }
8889
8890 static inline bool
8891 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
8892 uint16_t vlantag, struct mbuf *m)
8893 {
8894
8895 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
8896 WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
8897 vlan_set_tag(m, le16toh(vlantag));
8898 }
8899
8900 return true;
8901 }
8902
8903 static inline void
8904 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
8905 uint32_t errors, struct mbuf *m)
8906 {
8907 struct wm_softc *sc = rxq->rxq_sc;
8908
8909 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
8910 if (wm_rxdesc_is_set_status(sc, status,
8911 WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
8912 WM_Q_EVCNT_INCR(rxq, ipsum);
8913 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
8914 if (wm_rxdesc_is_set_error(sc, errors,
8915 WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
8916 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
8917 }
8918 if (wm_rxdesc_is_set_status(sc, status,
8919 WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
8920 /*
8921 * Note: we don't know if this was TCP or UDP,
8922 * so we just set both bits, and expect the
8923 * upper layers to deal.
8924 */
8925 WM_Q_EVCNT_INCR(rxq, tusum);
8926 m->m_pkthdr.csum_flags |=
8927 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8928 M_CSUM_TCPv6 | M_CSUM_UDPv6;
8929 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
8930 EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
8931 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
8932 }
8933 }
8934 }
8935
8936 /*
8937 * wm_rxeof:
8938 *
8939 * Helper; handle receive interrupts.
8940 */
8941 static bool
8942 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
8943 {
8944 struct wm_softc *sc = rxq->rxq_sc;
8945 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8946 struct wm_rxsoft *rxs;
8947 struct mbuf *m;
8948 int i, len;
8949 int count = 0;
8950 uint32_t status, errors;
8951 uint16_t vlantag;
8952 bool more = false;
8953
8954 KASSERT(mutex_owned(rxq->rxq_lock));
8955
8956 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
8957 if (limit-- == 0) {
8958 rxq->rxq_ptr = i;
8959 more = true;
8960 DPRINTF(WM_DEBUG_RX,
8961 ("%s: RX: loop limited, descriptor %d is not processed\n",
8962 device_xname(sc->sc_dev), i));
8963 break;
8964 }
8965
8966 rxs = &rxq->rxq_soft[i];
8967
8968 DPRINTF(WM_DEBUG_RX,
8969 ("%s: RX: checking descriptor %d\n",
8970 device_xname(sc->sc_dev), i));
8971 wm_cdrxsync(rxq, i,
8972 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
8973
8974 status = wm_rxdesc_get_status(rxq, i);
8975 errors = wm_rxdesc_get_errors(rxq, i);
8976 len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
8977 vlantag = wm_rxdesc_get_vlantag(rxq, i);
8978 #ifdef WM_DEBUG
8979 uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
8980 uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
8981 #endif
8982
8983 if (!wm_rxdesc_dd(rxq, i, status)) {
8984 /*
8985 			 * Update the receive pointer while holding rxq_lock so
8986 			 * that it stays consistent with the updated counters.
8987 */
8988 rxq->rxq_ptr = i;
8989 break;
8990 }
8991
8992 count++;
8993 if (__predict_false(rxq->rxq_discard)) {
8994 DPRINTF(WM_DEBUG_RX,
8995 ("%s: RX: discarding contents of descriptor %d\n",
8996 device_xname(sc->sc_dev), i));
8997 wm_init_rxdesc(rxq, i);
8998 if (wm_rxdesc_is_eop(rxq, status)) {
8999 /* Reset our state. */
9000 DPRINTF(WM_DEBUG_RX,
9001 ("%s: RX: resetting rxdiscard -> 0\n",
9002 device_xname(sc->sc_dev)));
9003 rxq->rxq_discard = 0;
9004 }
9005 continue;
9006 }
9007
9008 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9009 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
9010
9011 m = rxs->rxs_mbuf;
9012
9013 /*
9014 * Add a new receive buffer to the ring, unless of
9015 * course the length is zero. Treat the latter as a
9016 * failed mapping.
9017 */
9018 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
9019 /*
9020 * Failed, throw away what we've done so
9021 * far, and discard the rest of the packet.
9022 */
9023 if_statinc(ifp, if_ierrors);
9024 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9025 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
9026 wm_init_rxdesc(rxq, i);
9027 if (!wm_rxdesc_is_eop(rxq, status))
9028 rxq->rxq_discard = 1;
9029 if (rxq->rxq_head != NULL)
9030 m_freem(rxq->rxq_head);
9031 WM_RXCHAIN_RESET(rxq);
9032 DPRINTF(WM_DEBUG_RX,
9033 ("%s: RX: Rx buffer allocation failed, "
9034 "dropping packet%s\n", device_xname(sc->sc_dev),
9035 rxq->rxq_discard ? " (discard)" : ""));
9036 continue;
9037 }
9038
9039 m->m_len = len;
9040 rxq->rxq_len += len;
9041 DPRINTF(WM_DEBUG_RX,
9042 ("%s: RX: buffer at %p len %d\n",
9043 device_xname(sc->sc_dev), m->m_data, len));
9044
9045 /* If this is not the end of the packet, keep looking. */
9046 if (!wm_rxdesc_is_eop(rxq, status)) {
9047 WM_RXCHAIN_LINK(rxq, m);
9048 DPRINTF(WM_DEBUG_RX,
9049 ("%s: RX: not yet EOP, rxlen -> %d\n",
9050 device_xname(sc->sc_dev), rxq->rxq_len));
9051 continue;
9052 }
9053
9054 /*
9055 		 * Okay, we have the entire packet now. The chip is
9056 		 * configured to include the FCS except on the I350 and I21[01]
9057 		 * (not all chips can be configured to strip it),
9058 		 * so we need to trim it.
9059 		 * We may need to adjust the length of the previous mbuf in the
9060 		 * chain if the current mbuf is too short.
9061 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL register
9062 		 * is always set on the I350, so we don't trim there.
9063 */
9064 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
9065 && (sc->sc_type != WM_T_I210)
9066 && (sc->sc_type != WM_T_I211)) {
9067 if (m->m_len < ETHER_CRC_LEN) {
9068 rxq->rxq_tail->m_len
9069 -= (ETHER_CRC_LEN - m->m_len);
9070 m->m_len = 0;
9071 } else
9072 m->m_len -= ETHER_CRC_LEN;
9073 len = rxq->rxq_len - ETHER_CRC_LEN;
9074 } else
9075 len = rxq->rxq_len;
9076
9077 WM_RXCHAIN_LINK(rxq, m);
9078
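		/* Terminate the mbuf chain and take the completed packet. */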
9079 *rxq->rxq_tailp = NULL;
9080 m = rxq->rxq_head;
9081
9082 WM_RXCHAIN_RESET(rxq);
9083
9084 DPRINTF(WM_DEBUG_RX,
9085 ("%s: RX: have entire packet, len -> %d\n",
9086 device_xname(sc->sc_dev), len));
9087
9088 /* If an error occurred, update stats and drop the packet. */
9089 if (wm_rxdesc_has_errors(rxq, errors)) {
9090 m_freem(m);
9091 continue;
9092 }
9093
9094 /* No errors. Receive the packet. */
9095 m_set_rcvif(m, ifp);
9096 m->m_pkthdr.len = len;
9097 /*
9098 		 * TODO
9099 		 * The rsshash and rsstype should be saved in this mbuf.
9100 */
9101 DPRINTF(WM_DEBUG_RX,
9102 ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
9103 device_xname(sc->sc_dev), rsstype, rsshash));
9104
9105 /*
9106 * If VLANs are enabled, VLAN packets have been unwrapped
9107 * for us. Associate the tag with the packet.
9108 */
9109 if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
9110 continue;
9111
9112 /* Set up checksum info for this packet. */
9113 wm_rxdesc_ensure_checksum(rxq, status, errors, m);
9114 /*
9115 * Update the receive pointer while holding rxq_lock, keeping it
9116 * consistent with the packet and byte counters.
9117 */
9118 rxq->rxq_ptr = i;
9119 rxq->rxq_packets++;
9120 rxq->rxq_bytes += len;
9121 mutex_exit(rxq->rxq_lock);
9122
9123 /* Pass it on. */
9124 if_percpuq_enqueue(sc->sc_ipq, m);
9125
9126 mutex_enter(rxq->rxq_lock);
9127
9128 if (rxq->rxq_stopping)
9129 break;
9130 }
9131
9132 if (count != 0)
9133 rnd_add_uint32(&sc->rnd_source, count);
9134
9135 DPRINTF(WM_DEBUG_RX,
9136 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
9137
9138 return more;
9139 }
9140
9141 /*
9142 * wm_linkintr_gmii:
9143 *
9144 * Helper; handle link interrupts for GMII.
9145 */
9146 static void
9147 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
9148 {
9149 device_t dev = sc->sc_dev;
9150 uint32_t status, reg;
9151 bool link;
9152 int rv;
9153
9154 KASSERT(WM_CORE_LOCKED(sc));
9155
9156 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
9157 __func__));
9158
9159 if ((icr & ICR_LSC) == 0) {
9160 if (icr & ICR_RXSEQ)
9161 DPRINTF(WM_DEBUG_LINK,
9162 ("%s: LINK Receive sequence error\n",
9163 device_xname(dev)));
9164 return;
9165 }
9166
9167 /* Link status changed */
9168 status = CSR_READ(sc, WMREG_STATUS);
9169 link = status & STATUS_LU;
9170 if (link) {
9171 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
9172 device_xname(dev),
9173 (status & STATUS_FD) ? "FDX" : "HDX"));
9174 } else {
9175 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9176 device_xname(dev)));
9177 }
9178 if ((sc->sc_type == WM_T_ICH8) && (link == false))
9179 wm_gig_downshift_workaround_ich8lan(sc);
9180
9181 if ((sc->sc_type == WM_T_ICH8)
9182 && (sc->sc_phytype == WMPHY_IGP_3)) {
9183 wm_kmrn_lock_loss_workaround_ich8lan(sc);
9184 }
9185 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
9186 device_xname(dev)));
9187 mii_pollstat(&sc->sc_mii);
9188 if (sc->sc_type == WM_T_82543) {
9189 int miistatus, active;
9190
9191 /*
9192 * With 82543, we need to force speed and
9193 * duplex on the MAC equal to what the PHY
9194 * speed and duplex configuration is.
9195 */
9196 miistatus = sc->sc_mii.mii_media_status;
9197
9198 if (miistatus & IFM_ACTIVE) {
9199 active = sc->sc_mii.mii_media_active;
9200 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
9201 switch (IFM_SUBTYPE(active)) {
9202 case IFM_10_T:
9203 sc->sc_ctrl |= CTRL_SPEED_10;
9204 break;
9205 case IFM_100_TX:
9206 sc->sc_ctrl |= CTRL_SPEED_100;
9207 break;
9208 case IFM_1000_T:
9209 sc->sc_ctrl |= CTRL_SPEED_1000;
9210 break;
9211 default:
9212 /*
9213 * Fiber?
9214 * Should not get here.
9215 */
9216 device_printf(dev, "unknown media (%x)\n",
9217 active);
9218 break;
9219 }
9220 if (active & IFM_FDX)
9221 sc->sc_ctrl |= CTRL_FD;
9222 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9223 }
9224 } else if (sc->sc_type == WM_T_PCH) {
9225 wm_k1_gig_workaround_hv(sc,
9226 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
9227 }
9228
9229 /*
9230 * When connected at 10Mbps half-duplex, some parts are excessively
9231 * aggressive resulting in many collisions. To avoid this, increase
9232 * the IPG and reduce Rx latency in the PHY.
9233 */
9234 if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
9235 && link) {
9236 uint32_t tipg_reg;
9237 uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
9238 bool fdx;
9239 uint16_t emi_addr, emi_val;
9240
9241 tipg_reg = CSR_READ(sc, WMREG_TIPG);
9242 tipg_reg &= ~TIPG_IPGT_MASK;
9243 fdx = status & STATUS_FD;
9244
9245 if (!fdx && (speed == STATUS_SPEED_10)) {
9246 tipg_reg |= 0xff;
9247 /* Reduce Rx latency in analog PHY */
9248 emi_val = 0;
9249 } else if ((sc->sc_type >= WM_T_PCH_SPT) &&
9250 fdx && speed != STATUS_SPEED_1000) {
9251 tipg_reg |= 0xc;
9252 emi_val = 1;
9253 } else {
9254 /* Roll back the default values */
9255 tipg_reg |= 0x08;
9256 emi_val = 1;
9257 }
9258
9259 CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
9260
9261 rv = sc->phy.acquire(sc);
9262 if (rv)
9263 return;
9264
9265 if (sc->sc_type == WM_T_PCH2)
9266 emi_addr = I82579_RX_CONFIG;
9267 else
9268 emi_addr = I217_RX_CONFIG;
9269 rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
9270
9271 if (sc->sc_type >= WM_T_PCH_LPT) {
9272 uint16_t phy_reg;
9273
9274 sc->phy.readreg_locked(dev, 2,
9275 I217_PLL_CLOCK_GATE_REG, &phy_reg);
9276 phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
9277 if (speed == STATUS_SPEED_100
9278 || speed == STATUS_SPEED_10)
9279 phy_reg |= 0x3e8;
9280 else
9281 phy_reg |= 0xfa;
9282 sc->phy.writereg_locked(dev, 2,
9283 I217_PLL_CLOCK_GATE_REG, phy_reg);
9284
9285 if (speed == STATUS_SPEED_1000) {
9286 sc->phy.readreg_locked(dev, 2,
9287 HV_PM_CTRL, &phy_reg);
9288
9289 phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
9290
9291 sc->phy.writereg_locked(dev, 2,
9292 HV_PM_CTRL, phy_reg);
9293 }
9294 }
9295 sc->phy.release(sc);
9296
9297 if (rv)
9298 return;
9299
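/*
 * On PCH_SPT and newer, tune the undocumented PHY register
 * I219_UNKNOWN1 according to the link speed: at 1000Mbps make
 * sure the field in bits [11:2] is at least 0x18, otherwise
 * write a fixed value.
 */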
9300 if (sc->sc_type >= WM_T_PCH_SPT) {
9301 uint16_t data, ptr_gap;
9302
9303 if (speed == STATUS_SPEED_1000) {
9304 rv = sc->phy.acquire(sc);
9305 if (rv)
9306 return;
9307
9308 rv = sc->phy.readreg_locked(dev, 2,
9309 I219_UNKNOWN1, &data);
9310 if (rv) {
9311 sc->phy.release(sc);
9312 return;
9313 }
9314
9315 ptr_gap = (data & (0x3ff << 2)) >> 2;
9316 if (ptr_gap < 0x18) {
9317 data &= ~(0x3ff << 2);
9318 data |= (0x18 << 2);
9319 rv = sc->phy.writereg_locked(dev,
9320 2, I219_UNKNOWN1, data);
9321 }
9322 sc->phy.release(sc);
9323 if (rv)
9324 return;
9325 } else {
9326 rv = sc->phy.acquire(sc);
9327 if (rv)
9328 return;
9329
9330 rv = sc->phy.writereg_locked(dev, 2,
9331 I219_UNKNOWN1, 0xc023);
9332 sc->phy.release(sc);
9333 if (rv)
9334 return;
9335
9336 }
9337 }
9338 }
9339
9340 /*
9341 * Work around the I217 packet loss issue:
9342 * ensure that the FEXTNVM4 Beacon Duration is set correctly
9343 * on power up.
9344 * Set the Beacon Duration for I217 to 8 usec.
9345 */
9346 if (sc->sc_type >= WM_T_PCH_LPT) {
9347 reg = CSR_READ(sc, WMREG_FEXTNVM4);
9348 reg &= ~FEXTNVM4_BEACON_DURATION;
9349 reg |= FEXTNVM4_BEACON_DURATION_8US;
9350 CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
9351 }
9352
9353 /* Work-around I218 hang issue */
9354 if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
9355 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
9356 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
9357 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
9358 wm_k1_workaround_lpt_lp(sc, link);
9359
9360 if (sc->sc_type >= WM_T_PCH_LPT) {
9361 /*
9362 * Set platform power management values for Latency
9363 * Tolerance Reporting (LTR)
9364 */
9365 wm_platform_pm_pch_lpt(sc,
9366 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
9367 }
9368
9369 /* Clear link partner's EEE ability */
9370 sc->eee_lp_ability = 0;
9371
9372 /* FEXTNVM6 K1-off workaround */
9373 if (sc->sc_type == WM_T_PCH_SPT) {
9374 reg = CSR_READ(sc, WMREG_FEXTNVM6);
9375 if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
9376 reg |= FEXTNVM6_K1_OFF_ENABLE;
9377 else
9378 reg &= ~FEXTNVM6_K1_OFF_ENABLE;
9379 CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
9380 }
9381
9382 if (!link)
9383 return;
9384
9385 switch (sc->sc_type) {
9386 case WM_T_PCH2:
9387 wm_k1_workaround_lv(sc);
9388 /* FALLTHROUGH */
9389 case WM_T_PCH:
9390 if (sc->sc_phytype == WMPHY_82578)
9391 wm_link_stall_workaround_hv(sc);
9392 break;
9393 default:
9394 break;
9395 }
9396
9397 /* Enable/Disable EEE after link up */
9398 if (sc->sc_phytype > WMPHY_82579)
9399 wm_set_eee_pchlan(sc);
9400 }
9401
9402 /*
9403 * wm_linkintr_tbi:
9404 *
9405 * Helper; handle link interrupts for TBI mode.
9406 */
9407 static void
9408 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
9409 {
9410 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9411 uint32_t status;
9412
9413 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
9414 __func__));
9415
9416 status = CSR_READ(sc, WMREG_STATUS);
9417 if (icr & ICR_LSC) {
9418 wm_check_for_link(sc);
9419 if (status & STATUS_LU) {
9420 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
9421 device_xname(sc->sc_dev),
9422 (status & STATUS_FD) ? "FDX" : "HDX"));
9423 /*
9424 * NOTE: CTRL will update TFCE and RFCE automatically,
9425 * so we should update sc->sc_ctrl
9426 */
9427
9428 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9429 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9430 sc->sc_fcrtl &= ~FCRTL_XONE;
9431 if (status & STATUS_FD)
9432 sc->sc_tctl |=
9433 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9434 else
9435 sc->sc_tctl |=
9436 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9437 if (sc->sc_ctrl & CTRL_TFCE)
9438 sc->sc_fcrtl |= FCRTL_XONE;
9439 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9440 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9441 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
9442 sc->sc_tbi_linkup = 1;
9443 if_link_state_change(ifp, LINK_STATE_UP);
9444 } else {
9445 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9446 device_xname(sc->sc_dev)));
9447 sc->sc_tbi_linkup = 0;
9448 if_link_state_change(ifp, LINK_STATE_DOWN);
9449 }
9450 /* Update LED */
9451 wm_tbi_serdes_set_linkled(sc);
9452 } else if (icr & ICR_RXSEQ)
9453 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
9454 device_xname(sc->sc_dev)));
9455 }
9456
9457 /*
9458 * wm_linkintr_serdes:
9459 *
9460 * Helper; handle link interrupts for SERDES mode.
9461 */
9462 static void
9463 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
9464 {
9465 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9466 struct mii_data *mii = &sc->sc_mii;
9467 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9468 uint32_t pcs_adv, pcs_lpab, reg;
9469
9470 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
9471 __func__));
9472
9473 if (icr & ICR_LSC) {
9474 /* Check PCS */
9475 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9476 if ((reg & PCS_LSTS_LINKOK) != 0) {
9477 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
9478 device_xname(sc->sc_dev)));
9479 mii->mii_media_status |= IFM_ACTIVE;
9480 sc->sc_tbi_linkup = 1;
9481 if_link_state_change(ifp, LINK_STATE_UP);
9482 } else {
9483 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9484 device_xname(sc->sc_dev)));
9485 mii->mii_media_status |= IFM_NONE;
9486 sc->sc_tbi_linkup = 0;
9487 if_link_state_change(ifp, LINK_STATE_DOWN);
9488 wm_tbi_serdes_set_linkled(sc);
9489 return;
9490 }
9491 mii->mii_media_active |= IFM_1000_SX;
9492 if ((reg & PCS_LSTS_FDX) != 0)
9493 mii->mii_media_active |= IFM_FDX;
9494 else
9495 mii->mii_media_active |= IFM_HDX;
9496 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
9497 /* Check flow */
9498 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9499 if ((reg & PCS_LSTS_AN_COMP) == 0) {
9500 DPRINTF(WM_DEBUG_LINK,
9501 ("XXX LINKOK but not ACOMP\n"));
9502 return;
9503 }
9504 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
9505 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
9506 DPRINTF(WM_DEBUG_LINK,
9507 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
9508 if ((pcs_adv & TXCW_SYM_PAUSE)
9509 && (pcs_lpab & TXCW_SYM_PAUSE)) {
9510 mii->mii_media_active |= IFM_FLOW
9511 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
9512 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
9513 && (pcs_adv & TXCW_ASYM_PAUSE)
9514 && (pcs_lpab & TXCW_SYM_PAUSE)
9515 && (pcs_lpab & TXCW_ASYM_PAUSE))
9516 mii->mii_media_active |= IFM_FLOW
9517 | IFM_ETH_TXPAUSE;
9518 else if ((pcs_adv & TXCW_SYM_PAUSE)
9519 && (pcs_adv & TXCW_ASYM_PAUSE)
9520 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
9521 && (pcs_lpab & TXCW_ASYM_PAUSE))
9522 mii->mii_media_active |= IFM_FLOW
9523 | IFM_ETH_RXPAUSE;
9524 }
9525 /* Update LED */
9526 wm_tbi_serdes_set_linkled(sc);
9527 } else
9528 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
9529 device_xname(sc->sc_dev)));
9530 }
9531
9532 /*
9533 * wm_linkintr:
9534 *
9535 * Helper; handle link interrupts.
9536 */
9537 static void
9538 wm_linkintr(struct wm_softc *sc, uint32_t icr)
9539 {
9540
9541 KASSERT(WM_CORE_LOCKED(sc));
9542
9543 if (sc->sc_flags & WM_F_HAS_MII)
9544 wm_linkintr_gmii(sc, icr);
9545 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9546 && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
9547 wm_linkintr_serdes(sc, icr);
9548 else
9549 wm_linkintr_tbi(sc, icr);
9550 }
9551
9552
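/*
 * wm_sched_handle_queue:
 *
 * Defer further Tx/Rx processing for the queue to either the workqueue
 * or the per-queue softint, depending on the wmq_txrx_use_workqueue
 * setting.
 */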
9553 static inline void
9554 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
9555 {
9556
9557 if (wmq->wmq_txrx_use_workqueue)
9558 workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
9559 else
9560 softint_schedule(wmq->wmq_si);
9561 }
9562
9563 /*
9564 * wm_intr_legacy:
9565 *
9566 * Interrupt service routine for INTx and MSI.
9567 */
9568 static int
9569 wm_intr_legacy(void *arg)
9570 {
9571 struct wm_softc *sc = arg;
9572 struct wm_queue *wmq = &sc->sc_queue[0];
9573 struct wm_txqueue *txq = &wmq->wmq_txq;
9574 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9575 uint32_t icr, rndval = 0;
9576 int handled = 0;
9577
9578 while (1 /* CONSTCOND */) {
9579 icr = CSR_READ(sc, WMREG_ICR);
9580 if ((icr & sc->sc_icr) == 0)
9581 break;
9582 if (handled == 0)
9583 DPRINTF(WM_DEBUG_TX,
9584 ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
9585 if (rndval == 0)
9586 rndval = icr;
9587
9588 mutex_enter(rxq->rxq_lock);
9589
9590 if (rxq->rxq_stopping) {
9591 mutex_exit(rxq->rxq_lock);
9592 break;
9593 }
9594
9595 handled = 1;
9596
9597 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
9598 if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
9599 DPRINTF(WM_DEBUG_RX,
9600 ("%s: RX: got Rx intr 0x%08x\n",
9601 device_xname(sc->sc_dev),
9602 icr & (ICR_RXDMT0 | ICR_RXT0)));
9603 WM_Q_EVCNT_INCR(rxq, intr);
9604 }
9605 #endif
9606 /*
9607 * wm_rxeof() does *not* call upper layer functions directly,
9608 * as if_percpuq_enqueue() just calls softint_schedule().
9609 * So, we can call wm_rxeof() in interrupt context.
9610 */
9611 wm_rxeof(rxq, UINT_MAX);
9612
9613 mutex_exit(rxq->rxq_lock);
9614 mutex_enter(txq->txq_lock);
9615
9616 if (txq->txq_stopping) {
9617 mutex_exit(txq->txq_lock);
9618 break;
9619 }
9620
9621 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
9622 if (icr & ICR_TXDW) {
9623 DPRINTF(WM_DEBUG_TX,
9624 ("%s: TX: got TXDW interrupt\n",
9625 device_xname(sc->sc_dev)));
9626 WM_Q_EVCNT_INCR(txq, txdw);
9627 }
9628 #endif
9629 wm_txeof(txq, UINT_MAX);
9630
9631 mutex_exit(txq->txq_lock);
9632 WM_CORE_LOCK(sc);
9633
9634 if (sc->sc_core_stopping) {
9635 WM_CORE_UNLOCK(sc);
9636 break;
9637 }
9638
9639 if (icr & (ICR_LSC | ICR_RXSEQ)) {
9640 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
9641 wm_linkintr(sc, icr);
9642 }
9643 if ((icr & ICR_GPI(0)) != 0)
9644 device_printf(sc->sc_dev, "got module interrupt\n");
9645
9646 WM_CORE_UNLOCK(sc);
9647
9648 if (icr & ICR_RXO) {
9649 #if defined(WM_DEBUG)
9650 log(LOG_WARNING, "%s: Receive overrun\n",
9651 device_xname(sc->sc_dev));
9652 #endif /* defined(WM_DEBUG) */
9653 }
9654 }
9655
9656 rnd_add_uint32(&sc->rnd_source, rndval);
9657
9658 if (handled) {
9659 /* Try to get more packets going. */
9660 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
9661 wm_sched_handle_queue(sc, wmq);
9662 }
9663
9664 return handled;
9665 }
9666
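/*
 * wm_txrxintr_disable:
 *
 * Disable the Tx/Rx interrupts of the given queue. The 82574 and 82575
 * use per-queue bits in IMC/EIMC; other chips use the queue's MSI-X
 * vector index in EIMC.
 */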
9667 static inline void
9668 wm_txrxintr_disable(struct wm_queue *wmq)
9669 {
9670 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
9671
9672 if (sc->sc_type == WM_T_82574)
9673 CSR_WRITE(sc, WMREG_IMC,
9674 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
9675 else if (sc->sc_type == WM_T_82575)
9676 CSR_WRITE(sc, WMREG_EIMC,
9677 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
9678 else
9679 CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
9680 }
9681
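/*
 * wm_txrxintr_enable:
 *
 * Recalculate the interrupt throttling rate for the queue, then
 * re-enable its Tx/Rx interrupts (and ICR_OTHER on the 82574).
 */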
9682 static inline void
9683 wm_txrxintr_enable(struct wm_queue *wmq)
9684 {
9685 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
9686
9687 wm_itrs_calculate(sc, wmq);
9688
9689 /*
9690 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled here.
9691 * There is no need to care whether RXQ(0) or RXQ(1) enables ICR_OTHER
9692 * first, because each RXQ/TXQ interrupt is disabled while
9693 * wm_handle_queue(wmq) is running.
9694 */
9695 if (sc->sc_type == WM_T_82574)
9696 CSR_WRITE(sc, WMREG_IMS,
9697 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
9698 else if (sc->sc_type == WM_T_82575)
9699 CSR_WRITE(sc, WMREG_EIMS,
9700 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
9701 else
9702 CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
9703 }
9704
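/*
 * wm_txrxintr_msix:
 *
 * Interrupt service routine for the Tx/Rx MSI-X vector of a queue.
 * Process the Tx and Rx rings up to the interrupt limits and defer
 * any remaining work to wm_handle_queue().
 */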
9705 static int
9706 wm_txrxintr_msix(void *arg)
9707 {
9708 struct wm_queue *wmq = arg;
9709 struct wm_txqueue *txq = &wmq->wmq_txq;
9710 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9711 struct wm_softc *sc = txq->txq_sc;
9712 u_int txlimit = sc->sc_tx_intr_process_limit;
9713 u_int rxlimit = sc->sc_rx_intr_process_limit;
9714 bool txmore;
9715 bool rxmore;
9716
9717 KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
9718
9719 DPRINTF(WM_DEBUG_TX,
9720 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
9721
9722 wm_txrxintr_disable(wmq);
9723
9724 mutex_enter(txq->txq_lock);
9725
9726 if (txq->txq_stopping) {
9727 mutex_exit(txq->txq_lock);
9728 return 0;
9729 }
9730
9731 WM_Q_EVCNT_INCR(txq, txdw);
9732 txmore = wm_txeof(txq, txlimit);
9733 /* wm_deferred start() is done in wm_handle_queue(). */
9734 mutex_exit(txq->txq_lock);
9735
9736 DPRINTF(WM_DEBUG_RX,
9737 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
9738 mutex_enter(rxq->rxq_lock);
9739
9740 if (rxq->rxq_stopping) {
9741 mutex_exit(rxq->rxq_lock);
9742 return 0;
9743 }
9744
9745 WM_Q_EVCNT_INCR(rxq, intr);
9746 rxmore = wm_rxeof(rxq, rxlimit);
9747 mutex_exit(rxq->rxq_lock);
9748
9749 wm_itrs_writereg(sc, wmq);
9750
9751 if (txmore || rxmore) {
9752 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
9753 wm_sched_handle_queue(sc, wmq);
9754 } else
9755 wm_txrxintr_enable(wmq);
9756
9757 return 1;
9758 }
9759
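/*
 * wm_handle_queue:
 *
 * Softint/workqueue handler; continue the Tx/Rx processing deferred
 * from the interrupt handlers and re-enable the queue interrupts when
 * no more work remains.
 */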
9760 static void
9761 wm_handle_queue(void *arg)
9762 {
9763 struct wm_queue *wmq = arg;
9764 struct wm_txqueue *txq = &wmq->wmq_txq;
9765 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9766 struct wm_softc *sc = txq->txq_sc;
9767 u_int txlimit = sc->sc_tx_process_limit;
9768 u_int rxlimit = sc->sc_rx_process_limit;
9769 bool txmore;
9770 bool rxmore;
9771
9772 mutex_enter(txq->txq_lock);
9773 if (txq->txq_stopping) {
9774 mutex_exit(txq->txq_lock);
9775 return;
9776 }
9777 txmore = wm_txeof(txq, txlimit);
9778 wm_deferred_start_locked(txq);
9779 mutex_exit(txq->txq_lock);
9780
9781 mutex_enter(rxq->rxq_lock);
9782 if (rxq->rxq_stopping) {
9783 mutex_exit(rxq->rxq_lock);
9784 return;
9785 }
9786 WM_Q_EVCNT_INCR(rxq, defer);
9787 rxmore = wm_rxeof(rxq, rxlimit);
9788 mutex_exit(rxq->rxq_lock);
9789
9790 if (txmore || rxmore) {
9791 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
9792 wm_sched_handle_queue(sc, wmq);
9793 } else
9794 wm_txrxintr_enable(wmq);
9795 }
9796
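/*
 * wm_handle_queue_work:
 *
 * Workqueue wrapper around wm_handle_queue().
 */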
9797 static void
9798 wm_handle_queue_work(struct work *wk, void *context)
9799 {
9800 struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
9801
9802 /*
9803 * "enqueued flag" is not required here.
9804 */
9805 wm_handle_queue(wmq);
9806 }
9807
9808 /*
9809 * wm_linkintr_msix:
9810 *
9811 * Interrupt service routine for link status change for MSI-X.
9812 */
9813 static int
9814 wm_linkintr_msix(void *arg)
9815 {
9816 struct wm_softc *sc = arg;
9817 uint32_t reg;
9818 bool has_rxo;
9819
9820 reg = CSR_READ(sc, WMREG_ICR);
9821 WM_CORE_LOCK(sc);
9822 DPRINTF(WM_DEBUG_LINK,
9823 ("%s: LINK: got link intr. ICR = %08x\n",
9824 device_xname(sc->sc_dev), reg));
9825
9826 if (sc->sc_core_stopping)
9827 goto out;
9828
9829 if ((reg & ICR_LSC) != 0) {
9830 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
9831 wm_linkintr(sc, ICR_LSC);
9832 }
9833 if ((reg & ICR_GPI(0)) != 0)
9834 device_printf(sc->sc_dev, "got module interrupt\n");
9835
9836 /*
9837 * XXX 82574 MSI-X mode workaround
9838 *
9839 * In 82574 MSI-X mode, a receive overrun (RXO) raises the ICR_OTHER
9840 * MSI-X vector, and furthermore it raises neither the ICR_RXQ(0) nor
9841 * the ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
9842 * interrupts by writing WMREG_ICS to process the received packets.
9843 */
9844 if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
9845 #if defined(WM_DEBUG)
9846 log(LOG_WARNING, "%s: Receive overrun\n",
9847 device_xname(sc->sc_dev));
9848 #endif /* defined(WM_DEBUG) */
9849
9850 has_rxo = true;
9851 /*
9852 * The RXO interrupt rate is very high when receive traffic is
9853 * heavy. We use polling mode for ICR_OTHER, just as for the
9854 * Tx/Rx interrupts. ICR_OTHER will be re-enabled at the end of
9855 * wm_txrxintr_msix(), which is kicked by both the ICR_RXQ(0)
9856 * and ICR_RXQ(1) interrupts.
9857 */
9858 CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
9859
9860 CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
9861 }
9862
9865 out:
9866 WM_CORE_UNLOCK(sc);
9867
9868 if (sc->sc_type == WM_T_82574) {
9869 if (!has_rxo)
9870 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
9871 else
9872 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
9873 } else if (sc->sc_type == WM_T_82575)
9874 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
9875 else
9876 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
9877
9878 return 1;
9879 }
9880
9881 /*
9882 * Media related.
9883 * GMII, SGMII, TBI (and SERDES)
9884 */
9885
9886 /* Common */
9887
9888 /*
9889 * wm_tbi_serdes_set_linkled:
9890 *
9891 * Update the link LED on TBI and SERDES devices.
9892 */
9893 static void
9894 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
9895 {
9896
9897 if (sc->sc_tbi_linkup)
9898 sc->sc_ctrl |= CTRL_SWDPIN(0);
9899 else
9900 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
9901
9902 /* 82540 or newer devices are active low */
9903 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
9904
9905 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9906 }
9907
9908 /* GMII related */
9909
9910 /*
9911 * wm_gmii_reset:
9912 *
9913 * Reset the PHY.
9914 */
9915 static void
9916 wm_gmii_reset(struct wm_softc *sc)
9917 {
9918 uint32_t reg;
9919 int rv;
9920
9921 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
9922 device_xname(sc->sc_dev), __func__));
9923
9924 rv = sc->phy.acquire(sc);
9925 if (rv != 0) {
9926 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9927 __func__);
9928 return;
9929 }
9930
9931 switch (sc->sc_type) {
9932 case WM_T_82542_2_0:
9933 case WM_T_82542_2_1:
9934 /* null */
9935 break;
9936 case WM_T_82543:
9937 /*
9938 * With 82543, we need to force speed and duplex on the MAC
9939 * equal to what the PHY speed and duplex configuration is.
9940 * In addition, we need to perform a hardware reset on the PHY
9941 * to take it out of reset.
9942 */
9943 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
9944 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9945
9946 /* The PHY reset pin is active-low. */
9947 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9948 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
9949 CTRL_EXT_SWDPIN(4));
9950 reg |= CTRL_EXT_SWDPIO(4);
9951
9952 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9953 CSR_WRITE_FLUSH(sc);
9954 delay(10*1000);
9955
9956 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
9957 CSR_WRITE_FLUSH(sc);
9958 delay(150);
9959 #if 0
9960 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
9961 #endif
9962 delay(20*1000); /* XXX extra delay to get PHY ID? */
9963 break;
9964 case WM_T_82544: /* Reset 10000us */
9965 case WM_T_82540:
9966 case WM_T_82545:
9967 case WM_T_82545_3:
9968 case WM_T_82546:
9969 case WM_T_82546_3:
9970 case WM_T_82541:
9971 case WM_T_82541_2:
9972 case WM_T_82547:
9973 case WM_T_82547_2:
9974 case WM_T_82571: /* Reset 100us */
9975 case WM_T_82572:
9976 case WM_T_82573:
9977 case WM_T_82574:
9978 case WM_T_82575:
9979 case WM_T_82576:
9980 case WM_T_82580:
9981 case WM_T_I350:
9982 case WM_T_I354:
9983 case WM_T_I210:
9984 case WM_T_I211:
9985 case WM_T_82583:
9986 case WM_T_80003:
9987 /* Generic reset */
9988 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9989 CSR_WRITE_FLUSH(sc);
9990 delay(20000);
9991 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9992 CSR_WRITE_FLUSH(sc);
9993 delay(20000);
9994
9995 if ((sc->sc_type == WM_T_82541)
9996 || (sc->sc_type == WM_T_82541_2)
9997 || (sc->sc_type == WM_T_82547)
9998 || (sc->sc_type == WM_T_82547_2)) {
9999 /* Workarounds for IGP are done in igp_reset() */
10000 /* XXX add code to set LED after phy reset */
10001 }
10002 break;
10003 case WM_T_ICH8:
10004 case WM_T_ICH9:
10005 case WM_T_ICH10:
10006 case WM_T_PCH:
10007 case WM_T_PCH2:
10008 case WM_T_PCH_LPT:
10009 case WM_T_PCH_SPT:
10010 case WM_T_PCH_CNP:
10011 /* Generic reset */
10012 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10013 CSR_WRITE_FLUSH(sc);
10014 delay(100);
10015 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10016 CSR_WRITE_FLUSH(sc);
10017 delay(150);
10018 break;
10019 default:
10020 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
10021 __func__);
10022 break;
10023 }
10024
10025 sc->phy.release(sc);
10026
10027 /* get_cfg_done */
10028 wm_get_cfg_done(sc);
10029
10030 /* Extra setup */
10031 switch (sc->sc_type) {
10032 case WM_T_82542_2_0:
10033 case WM_T_82542_2_1:
10034 case WM_T_82543:
10035 case WM_T_82544:
10036 case WM_T_82540:
10037 case WM_T_82545:
10038 case WM_T_82545_3:
10039 case WM_T_82546:
10040 case WM_T_82546_3:
10041 case WM_T_82541_2:
10042 case WM_T_82547_2:
10043 case WM_T_82571:
10044 case WM_T_82572:
10045 case WM_T_82573:
10046 case WM_T_82574:
10047 case WM_T_82583:
10048 case WM_T_82575:
10049 case WM_T_82576:
10050 case WM_T_82580:
10051 case WM_T_I350:
10052 case WM_T_I354:
10053 case WM_T_I210:
10054 case WM_T_I211:
10055 case WM_T_80003:
10056 /* Null */
10057 break;
10058 case WM_T_82541:
10059 case WM_T_82547:
10060 /* XXX Actively configure the LED after PHY reset */
10061 break;
10062 case WM_T_ICH8:
10063 case WM_T_ICH9:
10064 case WM_T_ICH10:
10065 case WM_T_PCH:
10066 case WM_T_PCH2:
10067 case WM_T_PCH_LPT:
10068 case WM_T_PCH_SPT:
10069 case WM_T_PCH_CNP:
10070 wm_phy_post_reset(sc);
10071 break;
10072 default:
10073 panic("%s: unknown type\n", __func__);
10074 break;
10075 }
10076 }
10077
10078 /*
10079 * Setup sc_phytype and mii_{read|write}reg.
10080 *
10081 * To identify the PHY type, the correct read/write functions must be
10082 * selected. To select the correct read/write functions, the PCI ID or
10083 * MAC type is required, without accessing any PHY registers.
10084 *
10085 * On the first call of this function, the PHY ID is not known yet, so
10086 * check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
10087 * so the result might be incorrect.
10088 *
10089 * On the second call, the PHY OUI and model are used to identify the
10090 * PHY type. It might still not be perfect because of missing table
10091 * entries, but it should be better than the first call.
10092 *
10093 * If the newly detected result differs from the previous assumption,
10094 * a diagnostic message is printed.
10095 */
10096 static void
10097 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
10098 uint16_t phy_model)
10099 {
10100 device_t dev = sc->sc_dev;
10101 struct mii_data *mii = &sc->sc_mii;
10102 uint16_t new_phytype = WMPHY_UNKNOWN;
10103 uint16_t doubt_phytype = WMPHY_UNKNOWN;
10104 mii_readreg_t new_readreg;
10105 mii_writereg_t new_writereg;
10106 bool dodiag = true;
10107
10108 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
10109 device_xname(sc->sc_dev), __func__));
10110
10111 /*
10112 * 1000BASE-T SFP uses SGMII and the PHY type assumed first is always
10113 * incorrect, so don't print diagnostic output on the second call.
10114 */
10115 if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
10116 dodiag = false;
10117
10118 if (mii->mii_readreg == NULL) {
10119 /*
10120 * This is the first call of this function. For ICH and PCH
10121 * variants, it's difficult to determine the PHY access method
10122 * by sc_type, so use the PCI product ID for some devices.
10123 */
10124
10125 switch (sc->sc_pcidevid) {
10126 case PCI_PRODUCT_INTEL_PCH_M_LM:
10127 case PCI_PRODUCT_INTEL_PCH_M_LC:
10128 /* 82577 */
10129 new_phytype = WMPHY_82577;
10130 break;
10131 case PCI_PRODUCT_INTEL_PCH_D_DM:
10132 case PCI_PRODUCT_INTEL_PCH_D_DC:
10133 /* 82578 */
10134 new_phytype = WMPHY_82578;
10135 break;
10136 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
10137 case PCI_PRODUCT_INTEL_PCH2_LV_V:
10138 /* 82579 */
10139 new_phytype = WMPHY_82579;
10140 break;
10141 case PCI_PRODUCT_INTEL_82801H_82567V_3:
10142 case PCI_PRODUCT_INTEL_82801I_BM:
10143 case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
10144 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
10145 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
10146 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
10147 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
10148 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
10149 /* ICH8, 9, 10 with 82567 */
10150 new_phytype = WMPHY_BM;
10151 break;
10152 default:
10153 break;
10154 }
10155 } else {
10156 /* It's not the first call. Use PHY OUI and model */
10157 switch (phy_oui) {
10158 case MII_OUI_ATTANSIC: /* XXX ??? */
10159 switch (phy_model) {
10160 case 0x0004: /* XXX */
10161 new_phytype = WMPHY_82578;
10162 break;
10163 default:
10164 break;
10165 }
10166 break;
10167 case MII_OUI_xxMARVELL:
10168 switch (phy_model) {
10169 case MII_MODEL_xxMARVELL_I210:
10170 new_phytype = WMPHY_I210;
10171 break;
10172 case MII_MODEL_xxMARVELL_E1011:
10173 case MII_MODEL_xxMARVELL_E1000_3:
10174 case MII_MODEL_xxMARVELL_E1000_5:
10175 case MII_MODEL_xxMARVELL_E1112:
10176 new_phytype = WMPHY_M88;
10177 break;
10178 case MII_MODEL_xxMARVELL_E1149:
10179 new_phytype = WMPHY_BM;
10180 break;
10181 case MII_MODEL_xxMARVELL_E1111:
10182 case MII_MODEL_xxMARVELL_I347:
10183 case MII_MODEL_xxMARVELL_E1512:
10184 case MII_MODEL_xxMARVELL_E1340M:
10185 case MII_MODEL_xxMARVELL_E1543:
10186 new_phytype = WMPHY_M88;
10187 break;
10188 case MII_MODEL_xxMARVELL_I82563:
10189 new_phytype = WMPHY_GG82563;
10190 break;
10191 default:
10192 break;
10193 }
10194 break;
10195 case MII_OUI_INTEL:
10196 switch (phy_model) {
10197 case MII_MODEL_INTEL_I82577:
10198 new_phytype = WMPHY_82577;
10199 break;
10200 case MII_MODEL_INTEL_I82579:
10201 new_phytype = WMPHY_82579;
10202 break;
10203 case MII_MODEL_INTEL_I217:
10204 new_phytype = WMPHY_I217;
10205 break;
10206 case MII_MODEL_INTEL_I82580:
10207 case MII_MODEL_INTEL_I350:
10208 new_phytype = WMPHY_82580;
10209 break;
10210 default:
10211 break;
10212 }
10213 break;
10214 case MII_OUI_yyINTEL:
10215 switch (phy_model) {
10216 case MII_MODEL_yyINTEL_I82562G:
10217 case MII_MODEL_yyINTEL_I82562EM:
10218 case MII_MODEL_yyINTEL_I82562ET:
10219 new_phytype = WMPHY_IFE;
10220 break;
10221 case MII_MODEL_yyINTEL_IGP01E1000:
10222 new_phytype = WMPHY_IGP;
10223 break;
10224 case MII_MODEL_yyINTEL_I82566:
10225 new_phytype = WMPHY_IGP_3;
10226 break;
10227 default:
10228 break;
10229 }
10230 break;
10231 default:
10232 break;
10233 }
10234
10235 if (dodiag) {
10236 if (new_phytype == WMPHY_UNKNOWN)
10237 aprint_verbose_dev(dev,
10238 "%s: Unknown PHY model. OUI=%06x, "
10239 "model=%04x\n", __func__, phy_oui,
10240 phy_model);
10241
10242 if ((sc->sc_phytype != WMPHY_UNKNOWN)
10243 && (sc->sc_phytype != new_phytype)) {
10244 aprint_error_dev(dev, "Previously assumed PHY "
10245 "type(%u) was incorrect. PHY type from PHY "
10246 "ID = %u\n", sc->sc_phytype, new_phytype);
10247 }
10248 }
10249 }
10250
10251 /* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
10252 if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
10253 /* SGMII */
10254 new_readreg = wm_sgmii_readreg;
10255 new_writereg = wm_sgmii_writereg;
10256 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
10257 /* BM2 (phyaddr == 1) */
10258 if ((sc->sc_phytype != WMPHY_UNKNOWN)
10259 && (new_phytype != WMPHY_BM)
10260 && (new_phytype != WMPHY_UNKNOWN))
10261 doubt_phytype = new_phytype;
10262 new_phytype = WMPHY_BM;
10263 new_readreg = wm_gmii_bm_readreg;
10264 new_writereg = wm_gmii_bm_writereg;
10265 } else if (sc->sc_type >= WM_T_PCH) {
10266 /* All PCH* use _hv_ */
10267 new_readreg = wm_gmii_hv_readreg;
10268 new_writereg = wm_gmii_hv_writereg;
10269 } else if (sc->sc_type >= WM_T_ICH8) {
10270 /* non-82567 ICH8, 9 and 10 */
10271 new_readreg = wm_gmii_i82544_readreg;
10272 new_writereg = wm_gmii_i82544_writereg;
10273 } else if (sc->sc_type >= WM_T_80003) {
10274 /* 80003 */
10275 if ((sc->sc_phytype != WMPHY_UNKNOWN)
10276 && (new_phytype != WMPHY_GG82563)
10277 && (new_phytype != WMPHY_UNKNOWN))
10278 doubt_phytype = new_phytype;
10279 new_phytype = WMPHY_GG82563;
10280 new_readreg = wm_gmii_i80003_readreg;
10281 new_writereg = wm_gmii_i80003_writereg;
10282 } else if (sc->sc_type >= WM_T_I210) {
10283 /* I210 and I211 */
10284 if ((sc->sc_phytype != WMPHY_UNKNOWN)
10285 && (new_phytype != WMPHY_I210)
10286 && (new_phytype != WMPHY_UNKNOWN))
10287 doubt_phytype = new_phytype;
10288 new_phytype = WMPHY_I210;
10289 new_readreg = wm_gmii_gs40g_readreg;
10290 new_writereg = wm_gmii_gs40g_writereg;
10291 } else if (sc->sc_type >= WM_T_82580) {
10292 /* 82580, I350 and I354 */
10293 new_readreg = wm_gmii_82580_readreg;
10294 new_writereg = wm_gmii_82580_writereg;
10295 } else if (sc->sc_type >= WM_T_82544) {
10296 /* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
10297 new_readreg = wm_gmii_i82544_readreg;
10298 new_writereg = wm_gmii_i82544_writereg;
10299 } else {
10300 new_readreg = wm_gmii_i82543_readreg;
10301 new_writereg = wm_gmii_i82543_writereg;
10302 }
10303
10304 if (new_phytype == WMPHY_BM) {
10305 /* All BM use _bm_ */
10306 new_readreg = wm_gmii_bm_readreg;
10307 new_writereg = wm_gmii_bm_writereg;
10308 }
10309 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
10310 /* All PCH* use _hv_ */
10311 new_readreg = wm_gmii_hv_readreg;
10312 new_writereg = wm_gmii_hv_writereg;
10313 }
10314
10315 /* Diag output */
10316 if (dodiag) {
10317 if (doubt_phytype != WMPHY_UNKNOWN)
10318 aprint_error_dev(dev, "Assumed new PHY type was "
10319 "incorrect. old = %u, new = %u\n", sc->sc_phytype,
10320 new_phytype);
10321 else if ((sc->sc_phytype != WMPHY_UNKNOWN)
10322 && (sc->sc_phytype != new_phytype))
10323 aprint_error_dev(dev, "Previously assumed PHY type(%u) "
10324 "was incorrect. New PHY type = %u\n",
10325 sc->sc_phytype, new_phytype);
10326
10327 if ((mii->mii_readreg != NULL) &&
10328 (new_phytype == WMPHY_UNKNOWN))
10329 aprint_error_dev(dev, "PHY type is still unknown.\n");
10330
10331 if ((mii->mii_readreg != NULL) &&
10332 (mii->mii_readreg != new_readreg))
10333 aprint_error_dev(dev, "Previously assumed PHY "
10334 "read/write function was incorrect.\n");
10335 }
10336
10337 /* Update now */
10338 sc->sc_phytype = new_phytype;
10339 mii->mii_readreg = new_readreg;
10340 mii->mii_writereg = new_writereg;
10341 if (new_readreg == wm_gmii_hv_readreg) {
10342 sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
10343 sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
10344 } else if (new_readreg == wm_sgmii_readreg) {
10345 sc->phy.readreg_locked = wm_sgmii_readreg_locked;
10346 sc->phy.writereg_locked = wm_sgmii_writereg_locked;
10347 } else if (new_readreg == wm_gmii_i82544_readreg) {
10348 sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
10349 sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
10350 }
10351 }
10352
10353 /*
10354 * wm_get_phy_id_82575:
10355 *
10356 * Return PHY ID. Return -1 if it failed.
10357 */
10358 static int
10359 wm_get_phy_id_82575(struct wm_softc *sc)
10360 {
10361 uint32_t reg;
10362 int phyid = -1;
10363
10364 /* XXX */
10365 if ((sc->sc_flags & WM_F_SGMII) == 0)
10366 return -1;
10367
10368 if (wm_sgmii_uses_mdio(sc)) {
10369 switch (sc->sc_type) {
10370 case WM_T_82575:
10371 case WM_T_82576:
10372 reg = CSR_READ(sc, WMREG_MDIC);
10373 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
10374 break;
10375 case WM_T_82580:
10376 case WM_T_I350:
10377 case WM_T_I354:
10378 case WM_T_I210:
10379 case WM_T_I211:
10380 reg = CSR_READ(sc, WMREG_MDICNFG);
10381 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
10382 break;
10383 default:
10384 return -1;
10385 }
10386 }
10387
10388 return phyid;
10389 }
10390
10391
10392 /*
10393 * wm_gmii_mediainit:
10394 *
10395 * Initialize media for use on 1000BASE-T devices.
10396 */
10397 static void
10398 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
10399 {
10400 device_t dev = sc->sc_dev;
10401 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10402 struct mii_data *mii = &sc->sc_mii;
10403
10404 DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
10405 device_xname(sc->sc_dev), __func__));
10406
10407 /* We have GMII. */
10408 sc->sc_flags |= WM_F_HAS_MII;
10409
10410 if (sc->sc_type == WM_T_80003)
10411 sc->sc_tipg = TIPG_1000T_80003_DFLT;
10412 else
10413 sc->sc_tipg = TIPG_1000T_DFLT;
10414
10415 /*
10416 * Let the chip set speed/duplex on its own based on
10417 * signals from the PHY.
10418 * XXXbouyer - I'm not sure this is right for the 80003,
10419 * the em driver only sets CTRL_SLU here - but it seems to work.
10420 */
10421 sc->sc_ctrl |= CTRL_SLU;
10422 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10423
10424 /* Initialize our media structures and probe the GMII. */
10425 mii->mii_ifp = ifp;
10426
10427 mii->mii_statchg = wm_gmii_statchg;
10428
10429 /* get PHY control from SMBus to PCIe */
10430 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
10431 || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
10432 || (sc->sc_type == WM_T_PCH_CNP))
10433 wm_init_phy_workarounds_pchlan(sc);
10434
10435 wm_gmii_reset(sc);
10436
10437 sc->sc_ethercom.ec_mii = &sc->sc_mii;
10438 ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
10439 wm_gmii_mediastatus, sc->sc_core_lock);
10440
10441 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
10442 || (sc->sc_type == WM_T_82580)
10443 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
10444 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
10445 if ((sc->sc_flags & WM_F_SGMII) == 0) {
10446 /* Attach only one port */
10447 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
10448 MII_OFFSET_ANY, MIIF_DOPAUSE);
10449 } else {
10450 int i, id;
10451 uint32_t ctrl_ext;
10452
10453 id = wm_get_phy_id_82575(sc);
10454 if (id != -1) {
10455 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
10456 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
10457 }
10458 if ((id == -1)
10459 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
10460 /* Power on sgmii phy if it is disabled */
10461 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10462 CSR_WRITE(sc, WMREG_CTRL_EXT,
10463 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
10464 CSR_WRITE_FLUSH(sc);
10465 delay(300*1000); /* XXX too long */
10466
10467 /*
10468 * Scan PHY addresses starting from 1.
10469 *
10470 * I2C access may fail with the I2C register's
10471 * ERROR bit set, so suppress error messages
10472 * while scanning.
10473 */
10474 sc->phy.no_errprint = true;
10475 for (i = 1; i < 8; i++)
10476 mii_attach(sc->sc_dev, &sc->sc_mii,
10477 0xffffffff, i, MII_OFFSET_ANY,
10478 MIIF_DOPAUSE);
10479 sc->phy.no_errprint = false;
10480
10481 /* Restore previous sfp cage power state */
10482 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
10483 }
10484 }
10485 } else
10486 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10487 MII_OFFSET_ANY, MIIF_DOPAUSE);
10488
10489 /*
10490 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
10491 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
10492 */
10493 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
10494 || (sc->sc_type == WM_T_PCH_SPT)
10495 || (sc->sc_type == WM_T_PCH_CNP))
10496 && (LIST_FIRST(&mii->mii_phys) == NULL)) {
10497 wm_set_mdio_slow_mode_hv(sc);
10498 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10499 MII_OFFSET_ANY, MIIF_DOPAUSE);
10500 }
10501
10502 /*
10503 * (For ICH8 variants)
10504 * If PHY detection failed, use BM's r/w function and retry.
10505 */
10506 if (LIST_FIRST(&mii->mii_phys) == NULL) {
10507 /* if failed, retry with *_bm_* */
10508 aprint_verbose_dev(dev, "Assumed PHY access function "
10509 "(type = %d) might be incorrect. Use BM and retry.\n",
10510 sc->sc_phytype);
10511 sc->sc_phytype = WMPHY_BM;
10512 mii->mii_readreg = wm_gmii_bm_readreg;
10513 mii->mii_writereg = wm_gmii_bm_writereg;
10514
10515 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10516 MII_OFFSET_ANY, MIIF_DOPAUSE);
10517 }
10518
10519 if (LIST_FIRST(&mii->mii_phys) == NULL) {
10520 /* No PHY was found */
10521 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
10522 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
10523 sc->sc_phytype = WMPHY_NONE;
10524 } else {
10525 struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
10526
10527 /*
10528 * A PHY was found. Check the PHY type again with a second
10529 * call to wm_gmii_setup_phytype().
10530 */
10531 wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
10532 child->mii_mpd_model);
10533
10534 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
10535 }
10536 }
10537
10538 /*
10539 * wm_gmii_mediachange: [ifmedia interface function]
10540 *
10541 * Set hardware to newly-selected media on a 1000BASE-T device.
10542 */
10543 static int
10544 wm_gmii_mediachange(struct ifnet *ifp)
10545 {
10546 struct wm_softc *sc = ifp->if_softc;
10547 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
10548 uint32_t reg;
10549 int rc;
10550
10551 DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
10552 device_xname(sc->sc_dev), __func__));
10553 if ((ifp->if_flags & IFF_UP) == 0)
10554 return 0;
10555
10556 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
10557 if ((sc->sc_type == WM_T_82580)
10558 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
10559 || (sc->sc_type == WM_T_I211)) {
10560 reg = CSR_READ(sc, WMREG_PHPM);
10561 reg &= ~PHPM_GO_LINK_D;
10562 CSR_WRITE(sc, WMREG_PHPM, reg);
10563 }
10564
10565 /* Disable D0 LPLU. */
10566 wm_lplu_d0_disable(sc);
10567
10568 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
10569 sc->sc_ctrl |= CTRL_SLU;
10570 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
10571 || (sc->sc_type > WM_T_82543)) {
10572 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
10573 } else {
10574 sc->sc_ctrl &= ~CTRL_ASDE;
10575 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
10576 if (ife->ifm_media & IFM_FDX)
10577 sc->sc_ctrl |= CTRL_FD;
10578 switch (IFM_SUBTYPE(ife->ifm_media)) {
10579 case IFM_10_T:
10580 sc->sc_ctrl |= CTRL_SPEED_10;
10581 break;
10582 case IFM_100_TX:
10583 sc->sc_ctrl |= CTRL_SPEED_100;
10584 break;
10585 case IFM_1000_T:
10586 sc->sc_ctrl |= CTRL_SPEED_1000;
10587 break;
10588 case IFM_NONE:
10589 /* There is no specific setting for IFM_NONE */
10590 break;
10591 default:
10592 panic("wm_gmii_mediachange: bad media 0x%x",
10593 ife->ifm_media);
10594 }
10595 }
10596 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10597 CSR_WRITE_FLUSH(sc);
10598
10599 if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
10600 wm_serdes_mediachange(ifp);
10601
10602 if (sc->sc_type <= WM_T_82543)
10603 wm_gmii_reset(sc);
10604 else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
10605 && ((sc->sc_flags & WM_F_SGMII) != 0)) {
10606 /* Allow time for the SFP cage to power up the PHY */
10607 delay(300 * 1000);
10608 wm_gmii_reset(sc);
10609 }
10610
10611 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
10612 return 0;
10613 return rc;
10614 }
10615
10616 /*
10617 * wm_gmii_mediastatus: [ifmedia interface function]
10618 *
10619 * Get the current interface media status on a 1000BASE-T device.
10620 */
10621 static void
10622 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
10623 {
10624 struct wm_softc *sc = ifp->if_softc;
10625
10626 ether_mediastatus(ifp, ifmr);
10627 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
10628 | sc->sc_flowflags;
10629 }
10630
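/*
 * Software-definable pins used to bit-bang MII management frames on the
 * 82543; see wm_i82543_mii_sendbits() and wm_i82543_mii_recvbits().
 */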
10631 #define MDI_IO CTRL_SWDPIN(2)
10632 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
10633 #define MDI_CLK CTRL_SWDPIN(3)
10634
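/*
 * wm_i82543_mii_sendbits:
 *
 * Clock out the given bits on the 82543 MDIO pins, MSB first.
 */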
10635 static void
10636 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
10637 {
10638 uint32_t i, v;
10639
10640 v = CSR_READ(sc, WMREG_CTRL);
10641 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
10642 v |= MDI_DIR | CTRL_SWDPIO(3);
10643
10644 for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
10645 if (data & i)
10646 v |= MDI_IO;
10647 else
10648 v &= ~MDI_IO;
10649 CSR_WRITE(sc, WMREG_CTRL, v);
10650 CSR_WRITE_FLUSH(sc);
10651 delay(10);
10652 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10653 CSR_WRITE_FLUSH(sc);
10654 delay(10);
10655 CSR_WRITE(sc, WMREG_CTRL, v);
10656 CSR_WRITE_FLUSH(sc);
10657 delay(10);
10658 }
10659 }
10660
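/*
 * wm_i82543_mii_recvbits:
 *
 * Clock in 16 bits of data from the 82543 MDIO pins.
 */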
10661 static uint16_t
10662 wm_i82543_mii_recvbits(struct wm_softc *sc)
10663 {
10664 uint32_t v, i;
10665 uint16_t data = 0;
10666
10667 v = CSR_READ(sc, WMREG_CTRL);
10668 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
10669 v |= CTRL_SWDPIO(3);
10670
10671 CSR_WRITE(sc, WMREG_CTRL, v);
10672 CSR_WRITE_FLUSH(sc);
10673 delay(10);
10674 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10675 CSR_WRITE_FLUSH(sc);
10676 delay(10);
10677 CSR_WRITE(sc, WMREG_CTRL, v);
10678 CSR_WRITE_FLUSH(sc);
10679 delay(10);
10680
10681 for (i = 0; i < 16; i++) {
10682 data <<= 1;
10683 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10684 CSR_WRITE_FLUSH(sc);
10685 delay(10);
10686 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
10687 data |= 1;
10688 CSR_WRITE(sc, WMREG_CTRL, v);
10689 CSR_WRITE_FLUSH(sc);
10690 delay(10);
10691 }
10692
10693 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10694 CSR_WRITE_FLUSH(sc);
10695 delay(10);
10696 CSR_WRITE(sc, WMREG_CTRL, v);
10697 CSR_WRITE_FLUSH(sc);
10698 delay(10);
10699
10700 return data;
10701 }
10702
10703 #undef MDI_IO
10704 #undef MDI_DIR
10705 #undef MDI_CLK
10706
10707 /*
10708 * wm_gmii_i82543_readreg: [mii interface function]
10709 *
10710 * Read a PHY register on the GMII (i82543 version).
10711 */
10712 static int
10713 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
10714 {
10715 struct wm_softc *sc = device_private(dev);
10716
10717 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
10718 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
10719 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
10720 *val = wm_i82543_mii_recvbits(sc) & 0xffff;
10721
10722 DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
10723 device_xname(dev), phy, reg, *val));
10724
10725 return 0;
10726 }
10727
10728 /*
10729 * wm_gmii_i82543_writereg: [mii interface function]
10730 *
10731 * Write a PHY register on the GMII (i82543 version).
10732 */
10733 static int
10734 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
10735 {
10736 struct wm_softc *sc = device_private(dev);
10737
10738 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
10739 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
10740 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
10741 (MII_COMMAND_START << 30), 32);
10742
10743 return 0;
10744 }
10745
10746 /*
10747 * wm_gmii_mdic_readreg: [mii interface function]
10748 *
10749 * Read a PHY register on the GMII.
10750 */
10751 static int
10752 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
10753 {
10754 struct wm_softc *sc = device_private(dev);
10755 uint32_t mdic = 0;
10756 int i;
10757
10758 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
10759 && (reg > MII_ADDRMASK)) {
10760 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
10761 __func__, sc->sc_phytype, reg);
10762 reg &= MII_ADDRMASK;
10763 }
10764
10765 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
10766 MDIC_REGADD(reg));
10767
10768 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
10769 delay(50);
10770 mdic = CSR_READ(sc, WMREG_MDIC);
10771 if (mdic & MDIC_READY)
10772 break;
10773 }
10774
10775 if ((mdic & MDIC_READY) == 0) {
10776 DPRINTF(WM_DEBUG_GMII,
10777 ("%s: MDIC read timed out: phy %d reg %d\n",
10778 device_xname(dev), phy, reg));
10779 return ETIMEDOUT;
10780 } else if (mdic & MDIC_E) {
10781 /* This is normal if no PHY is present. */
10782 DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
10783 device_xname(sc->sc_dev), phy, reg));
10784 return -1;
10785 } else
10786 *val = MDIC_DATA(mdic);
10787
10788 /*
10789 * Allow some time after each MDIC transaction to avoid
10790 * reading duplicate data in the next MDIC transaction.
10791 */
10792 if (sc->sc_type == WM_T_PCH2)
10793 delay(100);
10794
10795 return 0;
10796 }
10797
10798 /*
10799 * wm_gmii_mdic_writereg: [mii interface function]
10800 *
10801 * Write a PHY register on the GMII.
10802 */
10803 static int
10804 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
10805 {
10806 struct wm_softc *sc = device_private(dev);
10807 uint32_t mdic = 0;
10808 int i;
10809
10810 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
10811 && (reg > MII_ADDRMASK)) {
10812 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
10813 __func__, sc->sc_phytype, reg);
10814 reg &= MII_ADDRMASK;
10815 }
10816
10817 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
10818 MDIC_REGADD(reg) | MDIC_DATA(val));
10819
10820 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
10821 delay(50);
10822 mdic = CSR_READ(sc, WMREG_MDIC);
10823 if (mdic & MDIC_READY)
10824 break;
10825 }
10826
10827 if ((mdic & MDIC_READY) == 0) {
10828 DPRINTF(WM_DEBUG_GMII,
10829 ("%s: MDIC write timed out: phy %d reg %d\n",
10830 device_xname(dev), phy, reg));
10831 return ETIMEDOUT;
10832 } else if (mdic & MDIC_E) {
10833 DPRINTF(WM_DEBUG_GMII,
10834 ("%s: MDIC write error: phy %d reg %d\n",
10835 device_xname(dev), phy, reg));
10836 return -1;
10837 }
10838
10839 /*
10840 * Allow some time after each MDIC transaction to avoid
10841 * reading duplicate data in the next MDIC transaction.
10842 */
10843 if (sc->sc_type == WM_T_PCH2)
10844 delay(100);
10845
10846 return 0;
10847 }
10848
10849 /*
10850 * wm_gmii_i82544_readreg: [mii interface function]
10851 *
10852 * Read a PHY register on the GMII.
10853 */
10854 static int
10855 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
10856 {
10857 struct wm_softc *sc = device_private(dev);
10858 int rv;
10859
10860 if (sc->phy.acquire(sc)) {
10861 device_printf(dev, "%s: failed to get semaphore\n", __func__);
10862 return -1;
10863 }
10864
10865 rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
10866
10867 sc->phy.release(sc);
10868
10869 return rv;
10870 }
10871
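/*
 * wm_gmii_i82544_readreg_locked:
 *
 * Like wm_gmii_i82544_readreg(), but assumes that the PHY semaphore is
 * already held. For IGP PHYs, select the page first when the register
 * number is beyond the multi-page boundary.
 */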
10872 static int
10873 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
10874 {
10875 struct wm_softc *sc = device_private(dev);
10876 int rv;
10877
10878 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
10879 switch (sc->sc_phytype) {
10880 case WMPHY_IGP:
10881 case WMPHY_IGP_2:
10882 case WMPHY_IGP_3:
10883 rv = wm_gmii_mdic_writereg(dev, phy,
10884 MII_IGPHY_PAGE_SELECT, reg);
10885 if (rv != 0)
10886 return rv;
10887 break;
10888 default:
10889 #ifdef WM_DEBUG
10890 device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
10891 __func__, sc->sc_phytype, reg);
10892 #endif
10893 break;
10894 }
10895 }
10896
10897 return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
10898 }
10899
10900 /*
10901 * wm_gmii_i82544_writereg: [mii interface function]
10902 *
10903 * Write a PHY register on the GMII.
10904 */
10905 static int
10906 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
10907 {
10908 struct wm_softc *sc = device_private(dev);
10909 int rv;
10910
10911 if (sc->phy.acquire(sc)) {
10912 device_printf(dev, "%s: failed to get semaphore\n", __func__);
10913 return -1;
10914 }
10915
10916 rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
10917 sc->phy.release(sc);
10918
10919 return rv;
10920 }
10921
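/*
 * wm_gmii_i82544_writereg_locked:
 *
 * Like wm_gmii_i82544_writereg(), but assumes that the PHY semaphore is
 * already held.
 */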
10922 static int
10923 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
10924 {
10925 struct wm_softc *sc = device_private(dev);
10926 int rv;
10927
10928 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
10929 switch (sc->sc_phytype) {
10930 case WMPHY_IGP:
10931 case WMPHY_IGP_2:
10932 case WMPHY_IGP_3:
10933 rv = wm_gmii_mdic_writereg(dev, phy,
10934 MII_IGPHY_PAGE_SELECT, reg);
10935 if (rv != 0)
10936 return rv;
10937 break;
10938 default:
10939 #ifdef WM_DEBUG
10940 device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
10941 __func__, sc->sc_phytype, reg);
10942 #endif
10943 break;
10944 }
10945 }
10946
10947 return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
10948 }
10949
10950 /*
10951 * wm_gmii_i80003_readreg: [mii interface function]
10952 *
10953 * Read a PHY register on the Kumeran bus.
10954 * This could be handled by the PHY layer if we didn't have to lock the
10955 * resource ...
10956 */
10957 static int
10958 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
10959 {
10960 struct wm_softc *sc = device_private(dev);
10961 int page_select;
10962 uint16_t temp, temp2;
10963 int rv = 0;
10964
10965 if (phy != 1) /* Only one PHY on kumeran bus */
10966 return -1;
10967
10968 if (sc->phy.acquire(sc)) {
10969 device_printf(dev, "%s: failed to get semaphore\n", __func__);
10970 return -1;
10971 }
10972
10973 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
10974 page_select = GG82563_PHY_PAGE_SELECT;
10975 else {
10976 /*
10977 * Use Alternative Page Select register to access registers
10978 * 30 and 31.
10979 */
10980 page_select = GG82563_PHY_PAGE_SELECT_ALT;
10981 }
10982 temp = reg >> GG82563_PAGE_SHIFT;
10983 if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
10984 goto out;
10985
10986 if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
10987 /*
10988 * Wait an extra 200us to work around a bug in the ready bit
10989 * of the MDIC register.
10990 */
10991 delay(200);
10992 rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
10993 if ((rv != 0) || (temp2 != temp)) {
10994 device_printf(dev, "%s failed\n", __func__);
10995 rv = -1;
10996 goto out;
10997 }
10998 delay(200);
10999 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11000 delay(200);
11001 } else
11002 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11003
11004 out:
11005 sc->phy.release(sc);
11006 return rv;
11007 }
11008
11009 /*
11010 * wm_gmii_i80003_writereg: [mii interface function]
11011 *
11012 * Write a PHY register on the Kumeran bus.
11013 * This could be handled by the PHY layer if we didn't have to lock the
11014 * resource ...
11015 */
11016 static int
11017 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
11018 {
11019 struct wm_softc *sc = device_private(dev);
11020 int page_select, rv;
11021 uint16_t temp, temp2;
11022
11023 if (phy != 1) /* Only one PHY on kumeran bus */
11024 return -1;
11025
11026 if (sc->phy.acquire(sc)) {
11027 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11028 return -1;
11029 }
11030
11031 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11032 page_select = GG82563_PHY_PAGE_SELECT;
11033 else {
11034 /*
11035 * Use Alternative Page Select register to access registers
11036 * 30 and 31.
11037 */
11038 page_select = GG82563_PHY_PAGE_SELECT_ALT;
11039 }
11040 temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
11041 if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11042 goto out;
11043
11044 if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
11045 /*
11046 * Wait an extra 200us to work around a bug in the ready bit
11047 * of the MDIC register.
11048 */
11049 delay(200);
11050 rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11051 if ((rv != 0) || (temp2 != temp)) {
11052 device_printf(dev, "%s failed\n", __func__);
11053 rv = -1;
11054 goto out;
11055 }
11056 delay(200);
11057 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11058 delay(200);
11059 } else
11060 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11061
11062 out:
11063 sc->phy.release(sc);
11064 return rv;
11065 }
11066
11067 /*
11068 * wm_gmii_bm_readreg: [mii interface function]
11069 *
11070 * Read a PHY register on the BM PHY.
11071 * This could be handled by the PHY layer if we didn't have to lock the
11072 * resource ...
11073 */
11074 static int
11075 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
11076 {
11077 struct wm_softc *sc = device_private(dev);
11078 uint16_t page = reg >> BME1000_PAGE_SHIFT;
11079 int rv;
11080
11081 if (sc->phy.acquire(sc)) {
11082 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11083 return -1;
11084 }
11085
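/*
 * On parts other than the 82574/82583, registers in pages >= 768,
 * register 25 of page 0 and register 31 are accessed through PHY
 * address 1.
 */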
11086 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11087 phy = ((page >= 768) || ((page == 0) && (reg == 25))
11088 || (reg == 31)) ? 1 : phy;
11089 /* Page 800 works differently than the rest so it has its own func */
11090 if (page == BM_WUC_PAGE) {
11091 rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
11092 goto release;
11093 }
11094
11095 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11096 if ((phy == 1) && (sc->sc_type != WM_T_82574)
11097 && (sc->sc_type != WM_T_82583))
11098 rv = wm_gmii_mdic_writereg(dev, phy,
11099 MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11100 else
11101 rv = wm_gmii_mdic_writereg(dev, phy,
11102 BME1000_PHY_PAGE_SELECT, page);
11103 if (rv != 0)
11104 goto release;
11105 }
11106
11107 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11108
11109 release:
11110 sc->phy.release(sc);
11111 return rv;
11112 }
11113
11114 /*
11115 * wm_gmii_bm_writereg: [mii interface function]
11116 *
11117 * Write a PHY register on the BM PHY.
11118 * This could be handled by the PHY layer if we didn't have to lock the
11119 * resource ...
11120 */
11121 static int
11122 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
11123 {
11124 struct wm_softc *sc = device_private(dev);
11125 uint16_t page = reg >> BME1000_PAGE_SHIFT;
11126 int rv;
11127
11128 if (sc->phy.acquire(sc)) {
11129 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11130 return -1;
11131 }
11132
11133 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11134 phy = ((page >= 768) || ((page == 0) && (reg == 25))
11135 || (reg == 31)) ? 1 : phy;
11136 /* Page 800 works differently than the rest so it has its own func */
11137 if (page == BM_WUC_PAGE) {
11138 rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
11139 goto release;
11140 }
11141
11142 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11143 if ((phy == 1) && (sc->sc_type != WM_T_82574)
11144 && (sc->sc_type != WM_T_82583))
11145 rv = wm_gmii_mdic_writereg(dev, phy,
11146 MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11147 else
11148 rv = wm_gmii_mdic_writereg(dev, phy,
11149 BME1000_PHY_PAGE_SELECT, page);
11150 if (rv != 0)
11151 goto release;
11152 }
11153
11154 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11155
11156 release:
11157 sc->phy.release(sc);
11158 return rv;
11159 }
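
/*
 * Editor's note (illustration only, not driver code): the "reg" argument
 * passed to wm_gmii_bm_readreg()/wm_gmii_bm_writereg() packs the page
 * number in its upper bits and the register number in its low bits, as
 * the code above shows (page = reg >> BME1000_PAGE_SHIFT, register =
 * reg & MII_ADDRMASK).  A hypothetical read of register 17 on page 769
 * would therefore be requested roughly as:
 *
 *	uint16_t data;
 *	int rv = wm_gmii_bm_readreg(sc->sc_dev, 1,
 *	    (769 << BME1000_PAGE_SHIFT) | 17, &data);
 */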
11160
11161 /*
11162 * wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
11163 * @dev: pointer to the HW structure
11164 * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
11165 *
11166 * Assumes semaphore already acquired and phy_reg points to a valid memory
11167 * address to store contents of the BM_WUC_ENABLE_REG register.
11168 */
11169 static int
11170 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
11171 {
11172 uint16_t temp;
11173 int rv;
11174
11175 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
11176 device_xname(dev), __func__));
11177
11178 if (!phy_regp)
11179 return -1;
11180
11181 /* All page select, port ctrl and wakeup registers use phy address 1 */
11182
11183 /* Select Port Control Registers page */
11184 rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
11185 BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
11186 if (rv != 0)
11187 return rv;
11188
11189 /* Read WUCE and save it */
11190 rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
11191 if (rv != 0)
11192 return rv;
11193
11194 /* Enable both PHY wakeup mode and Wakeup register page writes.
11195 * Prevent a power state change by disabling ME and Host PHY wakeup.
11196 */
11197 temp = *phy_regp;
11198 temp |= BM_WUC_ENABLE_BIT;
11199 temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
11200
11201 if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
11202 return rv;
11203
11204 /* Select Host Wakeup Registers page - caller now able to write
11205 * registers on the Wakeup registers page
11206 */
11207 return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
11208 BM_WUC_PAGE << IGP3_PAGE_SHIFT);
11209 }
11210
11211 /*
11212 * wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
11213 * @dev: pointer to the HW structure
11214 * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
11215 *
11216 * Restore BM_WUC_ENABLE_REG to its original value.
11217 *
11218 * Assumes semaphore already acquired and *phy_reg is the contents of the
11219 * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
11220 * caller.
11221 */
11222 static int
11223 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
11224 {
11225
11226 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
11227 device_xname(dev), __func__));
11228
11229 if (!phy_regp)
11230 return -1;
11231
11232 /* Select Port Control Registers page */
11233 wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
11234 BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
11235
11236 /* Restore 769.17 to its original value */
11237 wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
11238
11239 return 0;
11240 }
11241
11242 /*
11243 * wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
11244 * @dev: pointer to the HW structure
11245 * @offset: register offset to be read or written
11246 * @val: pointer to the data to read or write
11247 * @rd: determines if operation is read or write
11248 * @page_set: BM_WUC_PAGE already set and access enabled
11249 *
11250 * Read the PHY register at offset and store the retrieved information in
11251 * data, or write data to PHY register at offset. Note the procedure to
11252 * access the PHY wakeup registers is different than reading the other PHY
11253 * registers. It works as such:
11254 * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1
11255 * 2) Set page to 800 for host accesses (801 for manageability accesses)
11256 * 3) Write the address using the address opcode (0x11)
11257 * 4) Read or write the data using the data opcode (0x12)
11258 * 5) Restore 769.17.2 to its original value
11259 *
11260 * Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
11261 * step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
11262 *
11263 * Assumes semaphore is already acquired. When page_set==TRUE, assumes
11264 * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
11265 * is responsible for calls to wm_{enable,disable}_phy_wakeup_reg_access_bm()).
11266 */
11267 static int
11268 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd,
11269 bool page_set)
11270 {
11271 struct wm_softc *sc = device_private(dev);
11272 uint16_t regnum = BM_PHY_REG_NUM(offset);
11273 uint16_t page = BM_PHY_REG_PAGE(offset);
11274 uint16_t wuce;
11275 int rv = 0;
11276
11277 DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
11278 device_xname(dev), __func__));
11279 /* XXX Gig must be disabled for MDIO accesses to page 800 */
11280 if ((sc->sc_type == WM_T_PCH)
11281 && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
11282 device_printf(dev,
11283 "Attempting to access page %d while gig enabled.\n", page);
11284 }
11285
11286 if (!page_set) {
11287 /* Enable access to PHY wakeup registers */
11288 rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
11289 if (rv != 0) {
11290 device_printf(dev,
11291 "%s: Could not enable PHY wakeup reg access\n",
11292 __func__);
11293 return rv;
11294 }
11295 }
11296 DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
11297 device_xname(sc->sc_dev), __func__, page, regnum));
11298
11299 /*
11300 * Steps 3 and 4: access the PHY wakeup register, following the
11301 * procedure described in the function comment above.
11302 */
11303
11304 /* Write the Wakeup register page offset value using opcode 0x11 */
11305 rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
11306 if (rv != 0)
11307 return rv;
11308
11309 if (rd) {
11310 /* Read the Wakeup register page value using opcode 0x12 */
11311 rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
11312 } else {
11313 /* Write the Wakeup register page value using opcode 0x12 */
11314 rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
11315 }
11316 if (rv != 0)
11317 return rv;
11318
11319 if (!page_set)
11320 rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
11321
11322 return rv;
11323 }
11324
11325 /*
11326 * wm_gmii_hv_readreg: [mii interface function]
11327 *
11328 * Read a PHY register on the HV (PCH) PHY.
11329 * This could be handled by the PHY layer if we didn't have to lock the
11330 * resource ...
11331 */
11332 static int
11333 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
11334 {
11335 struct wm_softc *sc = device_private(dev);
11336 int rv;
11337
11338 DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
11339 device_xname(dev), __func__));
11340 if (sc->phy.acquire(sc)) {
11341 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11342 return -1;
11343 }
11344
11345 rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
11346 sc->phy.release(sc);
11347 return rv;
11348 }
11349
11350 static int
11351 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
11352 {
11353 uint16_t page = BM_PHY_REG_PAGE(reg);
11354 uint16_t regnum = BM_PHY_REG_NUM(reg);
11355 int rv;
11356
11357 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
11358
11359 /* Page 800 works differently than the rest so it has its own func */
11360 if (page == BM_WUC_PAGE)
11361 return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
11362
11363 /*
11364 * Pages below 768 (other than page 0) work differently than the
11365 * rest and are not handled here.
11366 */
11367 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
11368 device_printf(dev, "gmii_hv_readreg!!!\n");
11369 return -1;
11370 }
11371
11372 /*
11373 * XXX I21[789] documents say that the SMBus Address register is at
11374 * PHY address 01, Page 0 (not 768), Register 26.
11375 */
11376 if (page == HV_INTC_FC_PAGE_START)
11377 page = 0;
11378
11379 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
11380 rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
11381 page << BME1000_PAGE_SHIFT);
11382 if (rv != 0)
11383 return rv;
11384 }
11385
11386 return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
11387 }
11388
11389 /*
11390 * wm_gmii_hv_writereg: [mii interface function]
11391 *
11392 * Write a PHY register on the HV (PCH) PHY.
11393 * This could be handled by the PHY layer if we didn't have to lock the
11394 * resource ...
11395 */
11396 static int
11397 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
11398 {
11399 struct wm_softc *sc = device_private(dev);
11400 int rv;
11401
11402 DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
11403 device_xname(dev), __func__));
11404
11405 if (sc->phy.acquire(sc)) {
11406 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11407 return -1;
11408 }
11409
11410 rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
11411 sc->phy.release(sc);
11412
11413 return rv;
11414 }
11415
11416 static int
11417 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
11418 {
11419 struct wm_softc *sc = device_private(dev);
11420 uint16_t page = BM_PHY_REG_PAGE(reg);
11421 uint16_t regnum = BM_PHY_REG_NUM(reg);
11422 int rv;
11423
11424 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
11425
11426 /* Page 800 works differently than the rest so it has its own func */
11427 if (page == BM_WUC_PAGE)
11428 return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
11429 false);
11430
11431 /*
11432 * Pages below 768 (other than page 0) work differently than the
11433 * rest and are not handled here.
11434 */
11435 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
11436 device_printf(dev, "gmii_hv_writereg!!!\n");
11437 return -1;
11438 }
11439
11440 {
11441 /*
11442 * XXX I21[789] documents say that the SMBus Address register
11443 * is at PHY address 01, Page 0 (not 768), Register 26.
11444 */
11445 if (page == HV_INTC_FC_PAGE_START)
11446 page = 0;
11447
11448 /*
11449 * XXX Work around MDIO accesses being disabled after entering
11450 * IEEE Power Down (whenever bit 11 of the PHY control
11451 * register is set)
11452 */
11453 if (sc->sc_phytype == WMPHY_82578) {
11454 struct mii_softc *child;
11455
11456 child = LIST_FIRST(&sc->sc_mii.mii_phys);
11457 if ((child != NULL) && (child->mii_mpd_rev >= 1)
11458 && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
11459 && ((val & (1 << 11)) != 0)) {
11460 device_printf(dev, "XXX need workaround\n");
11461 }
11462 }
11463
11464 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
11465 rv = wm_gmii_mdic_writereg(dev, 1,
11466 MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11467 if (rv != 0)
11468 return rv;
11469 }
11470 }
11471
11472 return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
11473 }
11474
11475 /*
11476 * wm_gmii_82580_readreg: [mii interface function]
11477 *
11478 * Read a PHY register on the 82580 and I350.
11479 * This could be handled by the PHY layer if we didn't have to lock the
11480 * resource ...
11481 */
11482 static int
11483 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
11484 {
11485 struct wm_softc *sc = device_private(dev);
11486 int rv;
11487
11488 if (sc->phy.acquire(sc) != 0) {
11489 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11490 return -1;
11491 }
11492
11493 #ifdef DIAGNOSTIC
11494 if (reg > MII_ADDRMASK) {
11495 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11496 __func__, sc->sc_phytype, reg);
11497 reg &= MII_ADDRMASK;
11498 }
11499 #endif
11500 rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
11501
11502 sc->phy.release(sc);
11503 return rv;
11504 }
11505
11506 /*
11507 * wm_gmii_82580_writereg: [mii interface function]
11508 *
11509 * Write a PHY register on the 82580 and I350.
11510 * This could be handled by the PHY layer if we didn't have to lock the
11511 * resource ...
11512 */
11513 static int
11514 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
11515 {
11516 struct wm_softc *sc = device_private(dev);
11517 int rv;
11518
11519 if (sc->phy.acquire(sc) != 0) {
11520 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11521 return -1;
11522 }
11523
11524 #ifdef DIAGNOSTIC
11525 if (reg > MII_ADDRMASK) {
11526 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11527 __func__, sc->sc_phytype, reg);
11528 reg &= MII_ADDRMASK;
11529 }
11530 #endif
11531 rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
11532
11533 sc->phy.release(sc);
11534 return rv;
11535 }
11536
11537 /*
11538 * wm_gmii_gs40g_readreg: [mii interface function]
11539 *
11540 * Read a PHY register on the I210 and I211.
11541 * This could be handled by the PHY layer if we didn't have to lock the
11542 * resource ...
11543 */
11544 static int
11545 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
11546 {
11547 struct wm_softc *sc = device_private(dev);
11548 int page, offset;
11549 int rv;
11550
11551 /* Acquire semaphore */
11552 if (sc->phy.acquire(sc)) {
11553 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11554 return -1;
11555 }
11556
11557 /* Page select */
11558 page = reg >> GS40G_PAGE_SHIFT;
11559 rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
11560 if (rv != 0)
11561 goto release;
11562
11563 /* Read reg */
11564 offset = reg & GS40G_OFFSET_MASK;
11565 rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
11566
11567 release:
11568 sc->phy.release(sc);
11569 return rv;
11570 }
11571
11572 /*
11573 * wm_gmii_gs40g_writereg: [mii interface function]
11574 *
11575 * Write a PHY register on the I210 and I211.
11576 * This could be handled by the PHY layer if we didn't have to lock the
11577 * resource ...
11578 */
11579 static int
11580 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
11581 {
11582 struct wm_softc *sc = device_private(dev);
11583 uint16_t page;
11584 int offset, rv;
11585
11586 /* Acquire semaphore */
11587 if (sc->phy.acquire(sc)) {
11588 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11589 return -1;
11590 }
11591
11592 /* Page select */
11593 page = reg >> GS40G_PAGE_SHIFT;
11594 rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
11595 if (rv != 0)
11596 goto release;
11597
11598 /* Write reg */
11599 offset = reg & GS40G_OFFSET_MASK;
11600 rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
11601
11602 release:
11603 /* Release semaphore */
11604 sc->phy.release(sc);
11605 return rv;
11606 }
11607
11608 /*
11609 * wm_gmii_statchg: [mii interface function]
11610 *
11611 * Callback from MII layer when media changes.
11612 */
11613 static void
11614 wm_gmii_statchg(struct ifnet *ifp)
11615 {
11616 struct wm_softc *sc = ifp->if_softc;
11617 struct mii_data *mii = &sc->sc_mii;
11618
11619 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
11620 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
11621 sc->sc_fcrtl &= ~FCRTL_XONE;
11622
11623 /* Get flow control negotiation result. */
11624 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
11625 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
11626 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
11627 mii->mii_media_active &= ~IFM_ETH_FMASK;
11628 }
11629
11630 if (sc->sc_flowflags & IFM_FLOW) {
11631 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
11632 sc->sc_ctrl |= CTRL_TFCE;
11633 sc->sc_fcrtl |= FCRTL_XONE;
11634 }
11635 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
11636 sc->sc_ctrl |= CTRL_RFCE;
11637 }
11638
11639 if (mii->mii_media_active & IFM_FDX) {
11640 DPRINTF(WM_DEBUG_LINK,
11641 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
11642 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
11643 } else {
11644 DPRINTF(WM_DEBUG_LINK,
11645 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
11646 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
11647 }
11648
11649 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11650 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
11651 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
11652 : WMREG_FCRTL, sc->sc_fcrtl);
11653 if (sc->sc_type == WM_T_80003) {
11654 switch (IFM_SUBTYPE(mii->mii_media_active)) {
11655 case IFM_1000_T:
11656 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
11657 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
11658 sc->sc_tipg = TIPG_1000T_80003_DFLT;
11659 break;
11660 default:
11661 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
11662 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
11663 sc->sc_tipg = TIPG_10_100_80003_DFLT;
11664 break;
11665 }
11666 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
11667 }
11668 }
11669
11670 /* kumeran related (80003, ICH* and PCH*) */
11671
11672 /*
11673 * wm_kmrn_readreg:
11674 *
11675 * Read a kumeran register
11676 */
11677 static int
11678 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
11679 {
11680 int rv;
11681
11682 if (sc->sc_type == WM_T_80003)
11683 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11684 else
11685 rv = sc->phy.acquire(sc);
11686 if (rv != 0) {
11687 device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
11688 __func__);
11689 return rv;
11690 }
11691
11692 rv = wm_kmrn_readreg_locked(sc, reg, val);
11693
11694 if (sc->sc_type == WM_T_80003)
11695 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11696 else
11697 sc->phy.release(sc);
11698
11699 return rv;
11700 }
11701
11702 static int
11703 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
11704 {
11705
11706 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
11707 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
11708 KUMCTRLSTA_REN);
11709 CSR_WRITE_FLUSH(sc);
11710 delay(2);
11711
11712 *val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
11713
11714 return 0;
11715 }
11716
11717 /*
11718 * wm_kmrn_writereg:
11719 *
11720 * Write a kumeran register
11721 */
11722 static int
11723 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
11724 {
11725 int rv;
11726
11727 if (sc->sc_type == WM_T_80003)
11728 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11729 else
11730 rv = sc->phy.acquire(sc);
11731 if (rv != 0) {
11732 device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
11733 __func__);
11734 return rv;
11735 }
11736
11737 rv = wm_kmrn_writereg_locked(sc, reg, val);
11738
11739 if (sc->sc_type == WM_T_80003)
11740 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11741 else
11742 sc->phy.release(sc);
11743
11744 return rv;
11745 }
11746
11747 static int
11748 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
11749 {
11750
11751 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
11752 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
11753
11754 return 0;
11755 }
11756
11757 /*
11758 * EMI register related (82579, WMPHY_I217(PCH2 and newer))
11759 * This access method is different from IEEE MMD.
11760 */
11761 static int
11762 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
11763 {
11764 struct wm_softc *sc = device_private(dev);
11765 int rv;
11766
11767 rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
11768 if (rv != 0)
11769 return rv;
11770
11771 if (rd)
11772 rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
11773 else
11774 rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
11775 return rv;
11776 }
11777
11778 static int
11779 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
11780 {
11781
11782 return wm_access_emi_reg_locked(dev, reg, val, true);
11783 }
11784
11785 static int
11786 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
11787 {
11788
11789 return wm_access_emi_reg_locked(dev, reg, &val, false);
11790 }
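
/*
 * Editor's note (usage sketch, not driver code): the *_locked EMI helpers
 * assume the caller already holds the PHY semaphore, so a read would be
 * bracketed by the acquire/release pair; "SOME_EMI_REG" below is a
 * hypothetical register offset used only for illustration.
 *
 *	uint16_t data;
 *	if (sc->phy.acquire(sc) == 0) {
 *		rv = wm_read_emi_reg_locked(dev, SOME_EMI_REG, &data);
 *		sc->phy.release(sc);
 *	}
 */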
11791
11792 /* SGMII related */
11793
11794 /*
11795 * wm_sgmii_uses_mdio
11796 *
11797 * Check whether the transaction is to the internal PHY or the external
11798 * MDIO interface. Return true if it's MDIO.
11799 */
11800 static bool
11801 wm_sgmii_uses_mdio(struct wm_softc *sc)
11802 {
11803 uint32_t reg;
11804 bool ismdio = false;
11805
11806 switch (sc->sc_type) {
11807 case WM_T_82575:
11808 case WM_T_82576:
11809 reg = CSR_READ(sc, WMREG_MDIC);
11810 ismdio = ((reg & MDIC_DEST) != 0);
11811 break;
11812 case WM_T_82580:
11813 case WM_T_I350:
11814 case WM_T_I354:
11815 case WM_T_I210:
11816 case WM_T_I211:
11817 reg = CSR_READ(sc, WMREG_MDICNFG);
11818 ismdio = ((reg & MDICNFG_DEST) != 0);
11819 break;
11820 default:
11821 break;
11822 }
11823
11824 return ismdio;
11825 }
11826
11827 /*
11828 * wm_sgmii_readreg: [mii interface function]
11829 *
11830 * Read a PHY register on the SGMII
11831 * This could be handled by the PHY layer if we didn't have to lock the
11832 * resource ...
11833 */
11834 static int
11835 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
11836 {
11837 struct wm_softc *sc = device_private(dev);
11838 int rv;
11839
11840 if (sc->phy.acquire(sc)) {
11841 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11842 return -1;
11843 }
11844
11845 rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
11846
11847 sc->phy.release(sc);
11848 return rv;
11849 }
11850
11851 static int
11852 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
11853 {
11854 struct wm_softc *sc = device_private(dev);
11855 uint32_t i2ccmd;
11856 int i, rv = 0;
11857
11858 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
11859 | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
11860 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
11861
11862 /* Poll the ready bit */
11863 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
11864 delay(50);
11865 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
11866 if (i2ccmd & I2CCMD_READY)
11867 break;
11868 }
11869 if ((i2ccmd & I2CCMD_READY) == 0) {
11870 device_printf(dev, "I2CCMD Read did not complete\n");
11871 rv = ETIMEDOUT;
11872 }
11873 if ((i2ccmd & I2CCMD_ERROR) != 0) {
11874 if (!sc->phy.no_errprint)
11875 device_printf(dev, "I2CCMD Error bit set\n");
11876 rv = EIO;
11877 }
11878
11879 *val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
11880
11881 return rv;
11882 }
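
/*
 * Editor's note: the I2CCMD data field is returned in I2C byte order, so
 * the *val assignment in wm_sgmii_readreg_locked() above swaps the two
 * bytes.  For example, a raw value of 0xAB12 in the low 16 bits of I2CCMD
 * yields *val == 0x12AB; wm_sgmii_writereg_locked() below performs the
 * inverse swap before issuing the write.
 */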
11883
11884 /*
11885 * wm_sgmii_writereg: [mii interface function]
11886 *
11887 * Write a PHY register on the SGMII.
11888 * This could be handled by the PHY layer if we didn't have to lock the
11889 * resource ...
11890 */
11891 static int
11892 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
11893 {
11894 struct wm_softc *sc = device_private(dev);
11895 int rv;
11896
11897 if (sc->phy.acquire(sc) != 0) {
11898 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11899 return -1;
11900 }
11901
11902 rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
11903
11904 sc->phy.release(sc);
11905
11906 return rv;
11907 }
11908
11909 static int
11910 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
11911 {
11912 struct wm_softc *sc = device_private(dev);
11913 uint32_t i2ccmd;
11914 uint16_t swapdata;
11915 int rv = 0;
11916 int i;
11917
11918 /* Swap the data bytes for the I2C interface */
11919 swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
11920 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
11921 | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
11922 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
11923
11924 /* Poll the ready bit */
11925 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
11926 delay(50);
11927 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
11928 if (i2ccmd & I2CCMD_READY)
11929 break;
11930 }
11931 if ((i2ccmd & I2CCMD_READY) == 0) {
11932 device_printf(dev, "I2CCMD Write did not complete\n");
11933 rv = ETIMEDOUT;
11934 }
11935 if ((i2ccmd & I2CCMD_ERROR) != 0) {
11936 device_printf(dev, "I2CCMD Error bit set\n");
11937 rv = EIO;
11938 }
11939
11940 return rv;
11941 }
11942
11943 /* TBI related */
11944
11945 static bool
11946 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
11947 {
11948 bool sig;
11949
11950 sig = ctrl & CTRL_SWDPIN(1);
11951
11952 /*
11953 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
11954 * detect a signal, 1 if they don't.
11955 */
11956 if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
11957 sig = !sig;
11958
11959 return sig;
11960 }
11961
11962 /*
11963 * wm_tbi_mediainit:
11964 *
11965 * Initialize media for use on 1000BASE-X devices.
11966 */
11967 static void
11968 wm_tbi_mediainit(struct wm_softc *sc)
11969 {
11970 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
11971 const char *sep = "";
11972
11973 if (sc->sc_type < WM_T_82543)
11974 sc->sc_tipg = TIPG_WM_DFLT;
11975 else
11976 sc->sc_tipg = TIPG_LG_DFLT;
11977
11978 sc->sc_tbi_serdes_anegticks = 5;
11979
11980 /* Initialize our media structures */
11981 sc->sc_mii.mii_ifp = ifp;
11982 sc->sc_ethercom.ec_mii = &sc->sc_mii;
11983
11984 if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
11985 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
11986 ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
11987 wm_serdes_mediachange, wm_serdes_mediastatus,
11988 sc->sc_core_lock);
11989 } else {
11990 ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
11991 wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
11992 }
11993
11994 /*
11995 * SWD Pins:
11996 *
11997 * 0 = Link LED (output)
11998 * 1 = Loss Of Signal (input)
11999 */
12000 sc->sc_ctrl |= CTRL_SWDPIO(0);
12001
12002 /* XXX Perhaps this is only for TBI */
12003 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12004 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
12005
12006 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
12007 sc->sc_ctrl &= ~CTRL_LRST;
12008
12009 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12010
12011 #define ADD(ss, mm, dd) \
12012 do { \
12013 aprint_normal("%s%s", sep, ss); \
12014 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
12015 sep = ", "; \
12016 } while (/*CONSTCOND*/0)
12017
12018 aprint_normal_dev(sc->sc_dev, "");
12019
12020 if (sc->sc_type == WM_T_I354) {
12021 uint32_t status;
12022
12023 status = CSR_READ(sc, WMREG_STATUS);
12024 if (((status & STATUS_2P5_SKU) != 0)
12025 && ((status & STATUS_2P5_SKU_OVER) == 0)) {
12026 ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
12027 } else
12028 ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
12029 } else if (sc->sc_type == WM_T_82545) {
12030 /* Only 82545 is LX (XXX except SFP) */
12031 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12032 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12033 } else if (sc->sc_sfptype != 0) {
12034 /* XXX wm(4) fiber/serdes don't use ifm_data */
12035 switch (sc->sc_sfptype) {
12036 default:
12037 case SFF_SFP_ETH_FLAGS_1000SX:
12038 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12039 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12040 break;
12041 case SFF_SFP_ETH_FLAGS_1000LX:
12042 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12043 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12044 break;
12045 case SFF_SFP_ETH_FLAGS_1000CX:
12046 ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
12047 ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
12048 break;
12049 case SFF_SFP_ETH_FLAGS_1000T:
12050 ADD("1000baseT", IFM_1000_T, 0);
12051 ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
12052 break;
12053 case SFF_SFP_ETH_FLAGS_100FX:
12054 ADD("100baseFX", IFM_100_FX, ANAR_TX);
12055 ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
12056 break;
12057 }
12058 } else {
12059 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12060 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12061 }
12062 ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
12063 aprint_normal("\n");
12064
12065 #undef ADD
12066
12067 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
12068 }
12069
12070 /*
12071 * wm_tbi_mediachange: [ifmedia interface function]
12072 *
12073 * Set hardware to newly-selected media on a 1000BASE-X device.
12074 */
12075 static int
12076 wm_tbi_mediachange(struct ifnet *ifp)
12077 {
12078 struct wm_softc *sc = ifp->if_softc;
12079 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
12080 uint32_t status, ctrl;
12081 bool signal;
12082 int i;
12083
12084 KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
12085 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
12086 /* XXX need some work for >= 82571 and < 82575 */
12087 if (sc->sc_type < WM_T_82575)
12088 return 0;
12089 }
12090
12091 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
12092 || (sc->sc_type >= WM_T_82575))
12093 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
12094
12095 sc->sc_ctrl &= ~CTRL_LRST;
12096 sc->sc_txcw = TXCW_ANE;
12097 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12098 sc->sc_txcw |= TXCW_FD | TXCW_HD;
12099 else if (ife->ifm_media & IFM_FDX)
12100 sc->sc_txcw |= TXCW_FD;
12101 else
12102 sc->sc_txcw |= TXCW_HD;
12103
12104 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
12105 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
12106
12107 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
12108 device_xname(sc->sc_dev), sc->sc_txcw));
12109 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12110 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12111 CSR_WRITE_FLUSH(sc);
12112 delay(1000);
12113
12114 ctrl = CSR_READ(sc, WMREG_CTRL);
12115 signal = wm_tbi_havesignal(sc, ctrl);
12116
12117 DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
12118 signal));
12119
12120 if (signal) {
12121 /* Have signal; wait for the link to come up. */
12122 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
12123 delay(10000);
12124 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
12125 break;
12126 }
12127
12128 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
12129 device_xname(sc->sc_dev), i));
12130
12131 status = CSR_READ(sc, WMREG_STATUS);
12132 DPRINTF(WM_DEBUG_LINK,
12133 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
12134 device_xname(sc->sc_dev), status, STATUS_LU));
12135 if (status & STATUS_LU) {
12136 /* Link is up. */
12137 DPRINTF(WM_DEBUG_LINK,
12138 ("%s: LINK: set media -> link up %s\n",
12139 device_xname(sc->sc_dev),
12140 (status & STATUS_FD) ? "FDX" : "HDX"));
12141
12142 /*
12143 * NOTE: the hardware updates TFCE and RFCE in CTRL
12144 * automatically, so re-read CTRL to keep sc->sc_ctrl in sync.
12145 */
12146 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
12147 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
12148 sc->sc_fcrtl &= ~FCRTL_XONE;
12149 if (status & STATUS_FD)
12150 sc->sc_tctl |=
12151 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
12152 else
12153 sc->sc_tctl |=
12154 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
12155 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
12156 sc->sc_fcrtl |= FCRTL_XONE;
12157 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
12158 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
12159 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
12160 sc->sc_tbi_linkup = 1;
12161 } else {
12162 if (i == WM_LINKUP_TIMEOUT)
12163 wm_check_for_link(sc);
12164 /* Link is down. */
12165 DPRINTF(WM_DEBUG_LINK,
12166 ("%s: LINK: set media -> link down\n",
12167 device_xname(sc->sc_dev)));
12168 sc->sc_tbi_linkup = 0;
12169 }
12170 } else {
12171 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
12172 device_xname(sc->sc_dev)));
12173 sc->sc_tbi_linkup = 0;
12174 }
12175
12176 wm_tbi_serdes_set_linkled(sc);
12177
12178 return 0;
12179 }
12180
12181 /*
12182 * wm_tbi_mediastatus: [ifmedia interface function]
12183 *
12184 * Get the current interface media status on a 1000BASE-X device.
12185 */
12186 static void
12187 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
12188 {
12189 struct wm_softc *sc = ifp->if_softc;
12190 uint32_t ctrl, status;
12191
12192 ifmr->ifm_status = IFM_AVALID;
12193 ifmr->ifm_active = IFM_ETHER;
12194
12195 status = CSR_READ(sc, WMREG_STATUS);
12196 if ((status & STATUS_LU) == 0) {
12197 ifmr->ifm_active |= IFM_NONE;
12198 return;
12199 }
12200
12201 ifmr->ifm_status |= IFM_ACTIVE;
12202 /* Only 82545 is LX */
12203 if (sc->sc_type == WM_T_82545)
12204 ifmr->ifm_active |= IFM_1000_LX;
12205 else
12206 ifmr->ifm_active |= IFM_1000_SX;
12207 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
12208 ifmr->ifm_active |= IFM_FDX;
12209 else
12210 ifmr->ifm_active |= IFM_HDX;
12211 ctrl = CSR_READ(sc, WMREG_CTRL);
12212 if (ctrl & CTRL_RFCE)
12213 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
12214 if (ctrl & CTRL_TFCE)
12215 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
12216 }
12217
12218 /* XXX TBI only */
12219 static int
12220 wm_check_for_link(struct wm_softc *sc)
12221 {
12222 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
12223 uint32_t rxcw;
12224 uint32_t ctrl;
12225 uint32_t status;
12226 bool signal;
12227
12228 DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
12229 device_xname(sc->sc_dev), __func__));
12230
12231 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
12232 /* XXX need some work for >= 82571 */
12233 if (sc->sc_type >= WM_T_82571) {
12234 sc->sc_tbi_linkup = 1;
12235 return 0;
12236 }
12237 }
12238
12239 rxcw = CSR_READ(sc, WMREG_RXCW);
12240 ctrl = CSR_READ(sc, WMREG_CTRL);
12241 status = CSR_READ(sc, WMREG_STATUS);
12242 signal = wm_tbi_havesignal(sc, ctrl);
12243
12244 DPRINTF(WM_DEBUG_LINK,
12245 ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
12246 device_xname(sc->sc_dev), __func__, signal,
12247 ((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
12248
12249 /*
12250 * SWDPIN LU RXCW
12251 * 0 0 0
12252 * 0 0 1 (should not happen)
12253 * 0 1 0 (should not happen)
12254 * 0 1 1 (should not happen)
12255 * 1 0 0 Disable autonego and force linkup
12256 * 1 0 1 got /C/ but not linkup yet
12257 * 1 1 0 (linkup)
12258 * 1 1 1 If IFM_AUTO, back to autonego
12259 *
12260 */
12261 if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
12262 DPRINTF(WM_DEBUG_LINK,
12263 ("%s: %s: force linkup and fullduplex\n",
12264 device_xname(sc->sc_dev), __func__));
12265 sc->sc_tbi_linkup = 0;
12266 /* Disable auto-negotiation in the TXCW register */
12267 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
12268
12269 /*
12270 * Force link-up and also force full-duplex.
12271 *
12272 * NOTE: the hardware may have updated TFCE and RFCE in CTRL
12273 * automatically, so keep sc->sc_ctrl in sync with it.
12274 */
12275 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
12276 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12277 } else if (((status & STATUS_LU) != 0)
12278 && ((rxcw & RXCW_C) != 0)
12279 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
12280 sc->sc_tbi_linkup = 1;
12281 DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
12282 device_xname(sc->sc_dev),
12283 __func__));
12284 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12285 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
12286 } else if (signal && ((rxcw & RXCW_C) != 0)) {
12287 DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/",
12288 device_xname(sc->sc_dev), __func__));
12289 } else {
12290 DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
12291 device_xname(sc->sc_dev), __func__, rxcw, ctrl,
12292 status));
12293 }
12294
12295 return 0;
12296 }
12297
12298 /*
12299 * wm_tbi_tick:
12300 *
12301 * Check the link on TBI devices.
12302 * This function acts as mii_tick().
12303 */
12304 static void
12305 wm_tbi_tick(struct wm_softc *sc)
12306 {
12307 struct mii_data *mii = &sc->sc_mii;
12308 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
12309 uint32_t status;
12310
12311 KASSERT(WM_CORE_LOCKED(sc));
12312
12313 status = CSR_READ(sc, WMREG_STATUS);
12314
12315 /* XXX is this needed? */
12316 (void)CSR_READ(sc, WMREG_RXCW);
12317 (void)CSR_READ(sc, WMREG_CTRL);
12318
12319 /* set link status */
12320 if ((status & STATUS_LU) == 0) {
12321 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
12322 device_xname(sc->sc_dev)));
12323 sc->sc_tbi_linkup = 0;
12324 } else if (sc->sc_tbi_linkup == 0) {
12325 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
12326 device_xname(sc->sc_dev),
12327 (status & STATUS_FD) ? "FDX" : "HDX"));
12328 sc->sc_tbi_linkup = 1;
12329 sc->sc_tbi_serdes_ticks = 0;
12330 }
12331
12332 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
12333 goto setled;
12334
12335 if ((status & STATUS_LU) == 0) {
12336 sc->sc_tbi_linkup = 0;
12337 /* If the timer expired, retry autonegotiation */
12338 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12339 && (++sc->sc_tbi_serdes_ticks
12340 >= sc->sc_tbi_serdes_anegticks)) {
12341 DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
12342 device_xname(sc->sc_dev), __func__));
12343 sc->sc_tbi_serdes_ticks = 0;
12344 /*
12345 * Reset the link, and let autonegotiation do
12346 * its thing
12347 */
12348 sc->sc_ctrl |= CTRL_LRST;
12349 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12350 CSR_WRITE_FLUSH(sc);
12351 delay(1000);
12352 sc->sc_ctrl &= ~CTRL_LRST;
12353 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12354 CSR_WRITE_FLUSH(sc);
12355 delay(1000);
12356 CSR_WRITE(sc, WMREG_TXCW,
12357 sc->sc_txcw & ~TXCW_ANE);
12358 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12359 }
12360 }
12361
12362 setled:
12363 wm_tbi_serdes_set_linkled(sc);
12364 }
12365
12366 /* SERDES related */
12367 static void
12368 wm_serdes_power_up_link_82575(struct wm_softc *sc)
12369 {
12370 uint32_t reg;
12371
12372 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12373 && ((sc->sc_flags & WM_F_SGMII) == 0))
12374 return;
12375
12376 /* Enable PCS to turn on link */
12377 reg = CSR_READ(sc, WMREG_PCS_CFG);
12378 reg |= PCS_CFG_PCS_EN;
12379 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
12380
12381 /* Power up the laser */
12382 reg = CSR_READ(sc, WMREG_CTRL_EXT);
12383 reg &= ~CTRL_EXT_SWDPIN(3);
12384 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12385
12386 /* Flush the write to verify completion */
12387 CSR_WRITE_FLUSH(sc);
12388 delay(1000);
12389 }
12390
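/*
 * wm_serdes_mediachange: [ifmedia interface function]
 *
 * Set hardware to newly-selected media on a SERDES device.
 */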
12391 static int
12392 wm_serdes_mediachange(struct ifnet *ifp)
12393 {
12394 struct wm_softc *sc = ifp->if_softc;
12395 bool pcs_autoneg = true; /* XXX */
12396 uint32_t ctrl_ext, pcs_lctl, reg;
12397
12398 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12399 && ((sc->sc_flags & WM_F_SGMII) == 0))
12400 return 0;
12401
12402 /* XXX Currently, this function is not called on 8257[12] */
12403 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
12404 || (sc->sc_type >= WM_T_82575))
12405 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
12406
12407 /* Power on the sfp cage if present */
12408 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
12409 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
12410 ctrl_ext |= CTRL_EXT_I2C_ENA;
12411 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
12412
12413 sc->sc_ctrl |= CTRL_SLU;
12414
12415 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
12416 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
12417
12418 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
12419 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
12420 case CTRL_EXT_LINK_MODE_SGMII:
12421 /* SGMII mode lets the phy handle forcing speed/duplex */
12422 pcs_autoneg = true;
12423 /* Autoneg timeout should be disabled for SGMII mode */
12424 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
12425 break;
12426 case CTRL_EXT_LINK_MODE_1000KX:
12427 pcs_autoneg = false;
12428 /* FALLTHROUGH */
12429 default:
12430 if ((sc->sc_type == WM_T_82575)
12431 || (sc->sc_type == WM_T_82576)) {
12432 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
12433 pcs_autoneg = false;
12434 }
12435 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
12436 | CTRL_FRCFDX;
12437
12438 /* Set speed of 1000/Full if speed/duplex is forced */
12439 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
12440 }
12441 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12442
12443 pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
12444 PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
12445
12446 if (pcs_autoneg) {
12447 /* Set PCS register for autoneg */
12448 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
12449
12450 /* Disable force flow control for autoneg */
12451 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
12452
12453 /* Configure flow control advertisement for autoneg */
12454 reg = CSR_READ(sc, WMREG_PCS_ANADV);
12455 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
12456 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
12457 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
12458 } else
12459 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
12460
12461 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
12462
12463 return 0;
12464 }
12465
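/*
 * wm_serdes_mediastatus: [ifmedia interface function]
 *
 * Get the current interface media status on a SERDES device.
 */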
12466 static void
12467 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
12468 {
12469 struct wm_softc *sc = ifp->if_softc;
12470 struct mii_data *mii = &sc->sc_mii;
12471 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
12472 uint32_t pcs_adv, pcs_lpab, reg;
12473
12474 ifmr->ifm_status = IFM_AVALID;
12475 ifmr->ifm_active = IFM_ETHER;
12476
12477 /* Check PCS */
12478 reg = CSR_READ(sc, WMREG_PCS_LSTS);
12479 if ((reg & PCS_LSTS_LINKOK) == 0) {
12480 ifmr->ifm_active |= IFM_NONE;
12481 sc->sc_tbi_linkup = 0;
12482 goto setled;
12483 }
12484
12485 sc->sc_tbi_linkup = 1;
12486 ifmr->ifm_status |= IFM_ACTIVE;
12487 if (sc->sc_type == WM_T_I354) {
12488 uint32_t status;
12489
12490 status = CSR_READ(sc, WMREG_STATUS);
12491 if (((status & STATUS_2P5_SKU) != 0)
12492 && ((status & STATUS_2P5_SKU_OVER) == 0)) {
12493 ifmr->ifm_active |= IFM_2500_KX;
12494 } else
12495 ifmr->ifm_active |= IFM_1000_KX;
12496 } else {
12497 switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
12498 case PCS_LSTS_SPEED_10:
12499 ifmr->ifm_active |= IFM_10_T; /* XXX */
12500 break;
12501 case PCS_LSTS_SPEED_100:
12502 ifmr->ifm_active |= IFM_100_FX; /* XXX */
12503 break;
12504 case PCS_LSTS_SPEED_1000:
12505 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
12506 break;
12507 default:
12508 device_printf(sc->sc_dev, "Unknown speed\n");
12509 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
12510 break;
12511 }
12512 }
12513 if ((reg & PCS_LSTS_FDX) != 0)
12514 ifmr->ifm_active |= IFM_FDX;
12515 else
12516 ifmr->ifm_active |= IFM_HDX;
12517 mii->mii_media_active &= ~IFM_ETH_FMASK;
12518 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
12519 /* Check flow */
12520 reg = CSR_READ(sc, WMREG_PCS_LSTS);
12521 if ((reg & PCS_LSTS_AN_COMP) == 0) {
12522 DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
12523 goto setled;
12524 }
12525 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
12526 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
12527 DPRINTF(WM_DEBUG_LINK,
12528 ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
12529 if ((pcs_adv & TXCW_SYM_PAUSE)
12530 && (pcs_lpab & TXCW_SYM_PAUSE)) {
12531 mii->mii_media_active |= IFM_FLOW
12532 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
12533 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
12534 && (pcs_adv & TXCW_ASYM_PAUSE)
12535 && (pcs_lpab & TXCW_SYM_PAUSE)
12536 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
12537 mii->mii_media_active |= IFM_FLOW
12538 | IFM_ETH_TXPAUSE;
12539 } else if ((pcs_adv & TXCW_SYM_PAUSE)
12540 && (pcs_adv & TXCW_ASYM_PAUSE)
12541 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
12542 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
12543 mii->mii_media_active |= IFM_FLOW
12544 | IFM_ETH_RXPAUSE;
12545 }
12546 }
12547 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
12548 | (mii->mii_media_active & IFM_ETH_FMASK);
12549 setled:
12550 wm_tbi_serdes_set_linkled(sc);
12551 }
12552
12553 /*
12554 * wm_serdes_tick:
12555 *
12556 * Check the link on serdes devices.
12557 */
12558 static void
12559 wm_serdes_tick(struct wm_softc *sc)
12560 {
12561 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
12562 struct mii_data *mii = &sc->sc_mii;
12563 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
12564 uint32_t reg;
12565
12566 KASSERT(WM_CORE_LOCKED(sc));
12567
12568 mii->mii_media_status = IFM_AVALID;
12569 mii->mii_media_active = IFM_ETHER;
12570
12571 /* Check PCS */
12572 reg = CSR_READ(sc, WMREG_PCS_LSTS);
12573 if ((reg & PCS_LSTS_LINKOK) != 0) {
12574 mii->mii_media_status |= IFM_ACTIVE;
12575 sc->sc_tbi_linkup = 1;
12576 sc->sc_tbi_serdes_ticks = 0;
12577 mii->mii_media_active |= IFM_1000_SX; /* XXX */
12578 if ((reg & PCS_LSTS_FDX) != 0)
12579 mii->mii_media_active |= IFM_FDX;
12580 else
12581 mii->mii_media_active |= IFM_HDX;
12582 } else {
12583 mii->mii_media_active |= IFM_NONE;
12584 sc->sc_tbi_linkup = 0;
12585 /* If the timer expired, retry autonegotiation */
12586 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12587 && (++sc->sc_tbi_serdes_ticks
12588 >= sc->sc_tbi_serdes_anegticks)) {
12589 DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
12590 device_xname(sc->sc_dev), __func__));
12591 sc->sc_tbi_serdes_ticks = 0;
12592 /* XXX */
12593 wm_serdes_mediachange(ifp);
12594 }
12595 }
12596
12597 wm_tbi_serdes_set_linkled(sc);
12598 }
12599
12600 /* SFP related */
12601
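/*
 * wm_sfp_read_data_byte:
 *
 * Read one byte of SFP module data at "offset" using the I2CCMD register.
 * Returns 0 on success, -1 on timeout or I2C error.
 */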
12602 static int
12603 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
12604 {
12605 uint32_t i2ccmd;
12606 int i;
12607
12608 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
12609 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12610
12611 /* Poll the ready bit */
12612 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12613 delay(50);
12614 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12615 if (i2ccmd & I2CCMD_READY)
12616 break;
12617 }
12618 if ((i2ccmd & I2CCMD_READY) == 0)
12619 return -1;
12620 if ((i2ccmd & I2CCMD_ERROR) != 0)
12621 return -1;
12622
12623 *data = i2ccmd & 0x00ff;
12624
12625 return 0;
12626 }
12627
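/*
 * wm_sfp_get_media_type:
 *
 * Identify the SFP module and return the corresponding WM_MEDIATYPE_*
 * value (WM_MEDIATYPE_UNKNOWN if the module cannot be identified).
 */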
12628 static uint32_t
12629 wm_sfp_get_media_type(struct wm_softc *sc)
12630 {
12631 uint32_t ctrl_ext;
12632 uint8_t val = 0;
12633 int timeout = 3;
12634 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
12635 int rv = -1;
12636
12637 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
12638 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
12639 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
12640 CSR_WRITE_FLUSH(sc);
12641
12642 /* Read SFP module data */
12643 while (timeout) {
12644 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
12645 if (rv == 0)
12646 break;
12647 delay(100*1000); /* XXX too big */
12648 timeout--;
12649 }
12650 if (rv != 0)
12651 goto out;
12652
12653 switch (val) {
12654 case SFF_SFP_ID_SFF:
12655 aprint_normal_dev(sc->sc_dev,
12656 "Module/Connector soldered to board\n");
12657 break;
12658 case SFF_SFP_ID_SFP:
12659 sc->sc_flags |= WM_F_SFP;
12660 break;
12661 case SFF_SFP_ID_UNKNOWN:
12662 goto out;
12663 default:
12664 break;
12665 }
12666
12667 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
12668 if (rv != 0)
12669 goto out;
12670
12671 sc->sc_sfptype = val;
12672 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
12673 mediatype = WM_MEDIATYPE_SERDES;
12674 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
12675 sc->sc_flags |= WM_F_SGMII;
12676 mediatype = WM_MEDIATYPE_COPPER;
12677 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
12678 sc->sc_flags |= WM_F_SGMII;
12679 mediatype = WM_MEDIATYPE_SERDES;
12680 } else {
12681 device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
12682 __func__, sc->sc_sfptype);
12683 sc->sc_sfptype = 0; /* XXX unknown */
12684 }
12685
12686 out:
12687 /* Restore I2C interface setting */
12688 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
12689
12690 return mediatype;
12691 }
12692
12693 /*
12694 * NVM related.
12695 * Microwire, SPI (with or without EERD) and Flash.
12696 */
12697
12698 /* Both spi and uwire */
12699
12700 /*
12701 * wm_eeprom_sendbits:
12702 *
12703 * Send a series of bits to the EEPROM.
12704 */
12705 static void
12706 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
12707 {
12708 uint32_t reg;
12709 int x;
12710
12711 reg = CSR_READ(sc, WMREG_EECD);
12712
12713 for (x = nbits; x > 0; x--) {
12714 if (bits & (1U << (x - 1)))
12715 reg |= EECD_DI;
12716 else
12717 reg &= ~EECD_DI;
12718 CSR_WRITE(sc, WMREG_EECD, reg);
12719 CSR_WRITE_FLUSH(sc);
12720 delay(2);
12721 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
12722 CSR_WRITE_FLUSH(sc);
12723 delay(2);
12724 CSR_WRITE(sc, WMREG_EECD, reg);
12725 CSR_WRITE_FLUSH(sc);
12726 delay(2);
12727 }
12728 }
12729
12730 /*
12731 * wm_eeprom_recvbits:
12732 *
12733 * Receive a series of bits from the EEPROM.
12734 */
12735 static void
12736 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
12737 {
12738 uint32_t reg, val;
12739 int x;
12740
12741 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
12742
12743 val = 0;
12744 for (x = nbits; x > 0; x--) {
12745 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
12746 CSR_WRITE_FLUSH(sc);
12747 delay(2);
12748 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
12749 val |= (1U << (x - 1));
12750 CSR_WRITE(sc, WMREG_EECD, reg);
12751 CSR_WRITE_FLUSH(sc);
12752 delay(2);
12753 }
12754 *valp = val;
12755 }
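
/*
 * Editor's note (illustration only): a complete Microwire READ transaction
 * is composed from the two primitives above; wm_nvm_read_uwire() below does
 * exactly this, together with the chip-select handling:
 *
 *	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);		(opcode)
 *	wm_eeprom_sendbits(sc, word, sc->sc_nvm_addrbits);	(address)
 *	wm_eeprom_recvbits(sc, &val, 16);			(16 data bits)
 */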
12756
12757 /* Microwire */
12758
12759 /*
12760 * wm_nvm_read_uwire:
12761 *
12762 * Read a word from the EEPROM using the MicroWire protocol.
12763 */
12764 static int
12765 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
12766 {
12767 uint32_t reg, val;
12768 int i;
12769
12770 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12771 device_xname(sc->sc_dev), __func__));
12772
12773 if (sc->nvm.acquire(sc) != 0)
12774 return -1;
12775
12776 for (i = 0; i < wordcnt; i++) {
12777 /* Clear SK and DI. */
12778 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
12779 CSR_WRITE(sc, WMREG_EECD, reg);
12780
12781 /*
12782 * XXX: workaround for a bug in qemu-0.12.x and prior,
12783 * and in Xen.
12784 *
12785 * We use this workaround only for the 82540 because qemu's
12786 * e1000 acts as an 82540.
12787 */
12788 if (sc->sc_type == WM_T_82540) {
12789 reg |= EECD_SK;
12790 CSR_WRITE(sc, WMREG_EECD, reg);
12791 reg &= ~EECD_SK;
12792 CSR_WRITE(sc, WMREG_EECD, reg);
12793 CSR_WRITE_FLUSH(sc);
12794 delay(2);
12795 }
12796 /* XXX: end of workaround */
12797
12798 /* Set CHIP SELECT. */
12799 reg |= EECD_CS;
12800 CSR_WRITE(sc, WMREG_EECD, reg);
12801 CSR_WRITE_FLUSH(sc);
12802 delay(2);
12803
12804 /* Shift in the READ command. */
12805 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
12806
12807 /* Shift in address. */
12808 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
12809
12810 /* Shift out the data. */
12811 wm_eeprom_recvbits(sc, &val, 16);
12812 data[i] = val & 0xffff;
12813
12814 /* Clear CHIP SELECT. */
12815 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
12816 CSR_WRITE(sc, WMREG_EECD, reg);
12817 CSR_WRITE_FLUSH(sc);
12818 delay(2);
12819 }
12820
12821 sc->nvm.release(sc);
12822 return 0;
12823 }
12824
12825 /* SPI */
12826
12827 /*
12828 * Set SPI and FLASH related information from the EECD register.
12829 * For 82541 and 82547, the word size is taken from EEPROM.
12830 */
12831 static int
12832 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
12833 {
12834 int size;
12835 uint32_t reg;
12836 uint16_t data;
12837
12838 reg = CSR_READ(sc, WMREG_EECD);
12839 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
12840
12841 /* Read the size of NVM from EECD by default */
12842 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
12843 switch (sc->sc_type) {
12844 case WM_T_82541:
12845 case WM_T_82541_2:
12846 case WM_T_82547:
12847 case WM_T_82547_2:
12848 /* Set dummy value to access EEPROM */
12849 sc->sc_nvm_wordsize = 64;
12850 if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
12851 aprint_error_dev(sc->sc_dev,
12852 "%s: failed to read EEPROM size\n", __func__);
12853 }
12854 reg = data;
12855 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
12856 if (size == 0)
12857 size = 6; /* 64 word size */
12858 else
12859 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
12860 break;
12861 case WM_T_80003:
12862 case WM_T_82571:
12863 case WM_T_82572:
12864 case WM_T_82573: /* SPI case */
12865 case WM_T_82574: /* SPI case */
12866 case WM_T_82583: /* SPI case */
12867 size += NVM_WORD_SIZE_BASE_SHIFT;
12868 if (size > 14)
12869 size = 14;
12870 break;
12871 case WM_T_82575:
12872 case WM_T_82576:
12873 case WM_T_82580:
12874 case WM_T_I350:
12875 case WM_T_I354:
12876 case WM_T_I210:
12877 case WM_T_I211:
12878 size += NVM_WORD_SIZE_BASE_SHIFT;
12879 if (size > 15)
12880 size = 15;
12881 break;
12882 default:
12883 aprint_error_dev(sc->sc_dev,
12884 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
12885 return -1;
12886 break;
12887 }
12888
12889 sc->sc_nvm_wordsize = 1 << size;
12890
12891 return 0;
12892 }
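
/*
 * Editor's note (worked example): sc_nvm_wordsize is always a power of
 * two.  On 82541/82547 a zero size field falls back to 2^6 = 64 words;
 * for the other parts the computed exponent is capped at 14 (16384 words)
 * on the 80003 and 8257[1-4]/82583, and at 15 (32768 words) on the 82575
 * and later families.
 */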
12893
12894 /*
12895 * wm_nvm_ready_spi:
12896 *
12897 * Wait for a SPI EEPROM to be ready for commands.
12898 */
12899 static int
12900 wm_nvm_ready_spi(struct wm_softc *sc)
12901 {
12902 uint32_t val;
12903 int usec;
12904
12905 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12906 device_xname(sc->sc_dev), __func__));
12907
12908 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
12909 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
12910 wm_eeprom_recvbits(sc, &val, 8);
12911 if ((val & SPI_SR_RDY) == 0)
12912 break;
12913 }
12914 if (usec >= SPI_MAX_RETRIES) {
12915 aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
12916 return -1;
12917 }
12918 return 0;
12919 }
12920
12921 /*
12922 * wm_nvm_read_spi:
12923 *
12924 * Read a word from the EEPROM using the SPI protocol.
12925 */
12926 static int
12927 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
12928 {
12929 uint32_t reg, val;
12930 int i;
12931 uint8_t opc;
12932 int rv = 0;
12933
12934 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12935 device_xname(sc->sc_dev), __func__));
12936
12937 if (sc->nvm.acquire(sc) != 0)
12938 return -1;
12939
12940 /* Clear SK and CS. */
12941 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
12942 CSR_WRITE(sc, WMREG_EECD, reg);
12943 CSR_WRITE_FLUSH(sc);
12944 delay(2);
12945
12946 if ((rv = wm_nvm_ready_spi(sc)) != 0)
12947 goto out;
12948
12949 /* Toggle CS to flush commands. */
12950 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
12951 CSR_WRITE_FLUSH(sc);
12952 delay(2);
12953 CSR_WRITE(sc, WMREG_EECD, reg);
12954 CSR_WRITE_FLUSH(sc);
12955 delay(2);
12956
12957 opc = SPI_OPC_READ;
12958 if (sc->sc_nvm_addrbits == 8 && word >= 128)
12959 opc |= SPI_OPC_A8;
12960
12961 wm_eeprom_sendbits(sc, opc, 8);
12962 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
12963
12964 for (i = 0; i < wordcnt; i++) {
12965 wm_eeprom_recvbits(sc, &val, 16);
12966 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
12967 }
12968
12969 /* Raise CS and clear SK. */
12970 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
12971 CSR_WRITE(sc, WMREG_EECD, reg);
12972 CSR_WRITE_FLUSH(sc);
12973 delay(2);
12974
12975 out:
12976 sc->nvm.release(sc);
12977 return rv;
12978 }
12979
12980 /* NVM access using the EERD register */
12981
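/*
 * wm_poll_eerd_eewr_done:
 *
 * Poll the EERD or EEWR register (selected by "rw") until the DONE bit
 * is set.  Returns 0 on success, -1 on timeout.
 */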
12982 static int
12983 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
12984 {
12985 uint32_t attempts = 100000;
12986 uint32_t i, reg = 0;
12987 int32_t done = -1;
12988
12989 for (i = 0; i < attempts; i++) {
12990 reg = CSR_READ(sc, rw);
12991
12992 if (reg & EERD_DONE) {
12993 done = 0;
12994 break;
12995 }
12996 delay(5);
12997 }
12998
12999 return done;
13000 }
13001
13002 static int
13003 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
13004 {
13005 int i, eerd = 0;
13006 int rv = 0;
13007
13008 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
13009 device_xname(sc->sc_dev), __func__));
13010
13011 if (sc->nvm.acquire(sc) != 0)
13012 return -1;
13013
13014 for (i = 0; i < wordcnt; i++) {
13015 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
13016 CSR_WRITE(sc, WMREG_EERD, eerd);
13017 rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
13018 if (rv != 0) {
13019 aprint_error_dev(sc->sc_dev, "EERD polling failed: "
13020 "offset=%d. wordcnt=%d\n", offset, wordcnt);
13021 break;
13022 }
13023 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
13024 }
13025
13026 sc->nvm.release(sc);
13027 return rv;
13028 }
13029
13030 /* Flash */
13031
13032 static int
13033 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
13034 {
13035 uint32_t eecd;
13036 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
13037 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
13038 uint32_t nvm_dword = 0;
13039 uint8_t sig_byte = 0;
13040 int rv;
13041
13042 switch (sc->sc_type) {
13043 case WM_T_PCH_SPT:
13044 case WM_T_PCH_CNP:
13045 bank1_offset = sc->sc_ich8_flash_bank_size * 2;
13046 act_offset = ICH_NVM_SIG_WORD * 2;
13047
13048 /* Set bank to 0 in case flash read fails. */
13049 *bank = 0;
13050
13051 /* Check bank 0 */
13052 rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
13053 if (rv != 0)
13054 return rv;
13055 sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13056 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13057 *bank = 0;
13058 return 0;
13059 }
13060
13061 /* Check bank 1 */
13062 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
13063 		    &nvm_dword);
		if (rv != 0)
			return rv;
13064 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13065 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13066 *bank = 1;
13067 return 0;
13068 }
13069 aprint_error_dev(sc->sc_dev,
13070 "%s: no valid NVM bank present (%u)\n", __func__, *bank);
13071 return -1;
13072 case WM_T_ICH8:
13073 case WM_T_ICH9:
13074 eecd = CSR_READ(sc, WMREG_EECD);
13075 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
13076 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
13077 return 0;
13078 }
13079 /* FALLTHROUGH */
13080 default:
13081 /* Default to 0 */
13082 *bank = 0;
13083
13084 /* Check bank 0 */
13085 wm_read_ich8_byte(sc, act_offset, &sig_byte);
13086 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13087 *bank = 0;
13088 return 0;
13089 }
13090
13091 /* Check bank 1 */
13092 wm_read_ich8_byte(sc, act_offset + bank1_offset,
13093 &sig_byte);
13094 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13095 *bank = 1;
13096 return 0;
13097 }
13098 }
13099
13100 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
13101 device_xname(sc->sc_dev)));
13102 return -1;
13103 }
13104
13105 /******************************************************************************
13106 * This function does initial flash setup so that a new read/write/erase cycle
13107 * can be started.
13108 *
13109 * sc - The pointer to the hw structure
13110 ****************************************************************************/
13111 static int32_t
13112 wm_ich8_cycle_init(struct wm_softc *sc)
13113 {
13114 uint16_t hsfsts;
13115 int32_t error = 1;
13116 int32_t i = 0;
13117
13118 if (sc->sc_type >= WM_T_PCH_SPT)
13119 hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
13120 else
13121 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
13122
13123 	/* Check the Flash Descriptor Valid bit in HW status */
13124 if ((hsfsts & HSFSTS_FLDVAL) == 0)
13125 return error;
13126
13127 /* Clear FCERR in Hw status by writing 1 */
13128 /* Clear DAEL in Hw status by writing a 1 */
13129 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
13130
13131 if (sc->sc_type >= WM_T_PCH_SPT)
13132 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
13133 else
13134 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
13135
13136 	/*
13137 	 * Either we should have a hardware SPI cycle-in-progress bit to
13138 	 * check against before starting a new cycle, or the FDONE bit
13139 	 * should be changed in the hardware so that it reads 1 after a
13140 	 * hardware reset and can then be used to tell whether a cycle is
13141 	 * in progress or has completed.  We should also have some software
13142 	 * semaphore mechanism guarding FDONE or the cycle-in-progress bit
13143 	 * so that accesses to those bits by two threads are serialized and
13144 	 * two threads cannot start a cycle at the same time.
13145 	 */
13146
13147 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
13148 /*
13149 * There is no cycle running at present, so we can start a
13150 * cycle
13151 */
13152
13153 /* Begin by setting Flash Cycle Done. */
13154 hsfsts |= HSFSTS_DONE;
13155 if (sc->sc_type >= WM_T_PCH_SPT)
13156 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13157 hsfsts & 0xffffUL);
13158 else
13159 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
13160 error = 0;
13161 } else {
13162 /*
13163 		 * Otherwise poll for some time so the current cycle has a
13164 * chance to end before giving up.
13165 */
13166 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
13167 if (sc->sc_type >= WM_T_PCH_SPT)
13168 hsfsts = ICH8_FLASH_READ32(sc,
13169 ICH_FLASH_HSFSTS) & 0xffffUL;
13170 else
13171 hsfsts = ICH8_FLASH_READ16(sc,
13172 ICH_FLASH_HSFSTS);
13173 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
13174 error = 0;
13175 break;
13176 }
13177 delay(1);
13178 }
13179 if (error == 0) {
13180 /*
13181 			 * The previous cycle finished within the timeout,
13182 			 * so now set the Flash Cycle Done bit.
13183 */
13184 hsfsts |= HSFSTS_DONE;
13185 if (sc->sc_type >= WM_T_PCH_SPT)
13186 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13187 hsfsts & 0xffffUL);
13188 else
13189 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
13190 hsfsts);
13191 }
13192 }
13193 return error;
13194 }
13195
13196 /******************************************************************************
13197 * This function starts a flash cycle and waits for its completion
13198 *
13199 * sc - The pointer to the hw structure
13200 ****************************************************************************/
13201 static int32_t
13202 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
13203 {
13204 uint16_t hsflctl;
13205 uint16_t hsfsts;
13206 int32_t error = 1;
13207 uint32_t i = 0;
13208
13209 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
13210 if (sc->sc_type >= WM_T_PCH_SPT)
13211 hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
13212 else
13213 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
13214 hsflctl |= HSFCTL_GO;
13215 if (sc->sc_type >= WM_T_PCH_SPT)
13216 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13217 (uint32_t)hsflctl << 16);
13218 else
13219 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
13220
13221 /* Wait till FDONE bit is set to 1 */
13222 do {
13223 if (sc->sc_type >= WM_T_PCH_SPT)
13224 hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
13225 & 0xffffUL;
13226 else
13227 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
13228 if (hsfsts & HSFSTS_DONE)
13229 break;
13230 delay(1);
13231 i++;
13232 } while (i < timeout);
13233 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
13234 error = 0;
13235
13236 return error;
13237 }
13238
13239 /******************************************************************************
13240 * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
13241 *
13242 * sc - The pointer to the hw structure
13243 * index - The index of the byte or word to read.
13244 * size - Size of data to read, 1=byte 2=word, 4=dword
13245 * data - Pointer to the word to store the value read.
13246 *****************************************************************************/
13247 static int32_t
13248 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
13249 uint32_t size, uint32_t *data)
13250 {
13251 uint16_t hsfsts;
13252 uint16_t hsflctl;
13253 uint32_t flash_linear_address;
13254 uint32_t flash_data = 0;
13255 int32_t error = 1;
13256 int32_t count = 0;
13257
13258 	if (size < 1 || size > 4 || data == NULL ||
13259 index > ICH_FLASH_LINEAR_ADDR_MASK)
13260 return error;
13261
13262 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
13263 sc->sc_ich8_flash_base;
13264
13265 do {
13266 delay(1);
13267 /* Steps */
13268 error = wm_ich8_cycle_init(sc);
13269 if (error)
13270 break;
13271
13272 if (sc->sc_type >= WM_T_PCH_SPT)
13273 hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
13274 >> 16;
13275 else
13276 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
13277 		/* The byte count field holds size - 1 (0=1, 1=2, 3=4 bytes) */
13278 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
13279 & HSFCTL_BCOUNT_MASK;
13280 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
13281 if (sc->sc_type >= WM_T_PCH_SPT) {
13282 /*
13283 			 * In SPT, this register is in LAN memory space, not
13284 			 * flash.  Therefore, only 32 bit access is supported.
13285 */
13286 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13287 (uint32_t)hsflctl << 16);
13288 } else
13289 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
13290
13291 /*
13292 * Write the last 24 bits of index into Flash Linear address
13293 * field in Flash Address
13294 */
13295 /* TODO: TBD maybe check the index against the size of flash */
13296
13297 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
13298
13299 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
13300
13301 /*
13302 		 * Check whether FCERR is set.  If it is, clear it and retry
13303 		 * the whole sequence a few more times; otherwise read the
13304 		 * value out of Flash Data0, which is returned least
13305 		 * significant byte first.
13306 */
13307 if (error == 0) {
13308 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
13309 if (size == 1)
13310 *data = (uint8_t)(flash_data & 0x000000FF);
13311 else if (size == 2)
13312 *data = (uint16_t)(flash_data & 0x0000FFFF);
13313 else if (size == 4)
13314 *data = (uint32_t)flash_data;
13315 break;
13316 } else {
13317 /*
13318 * If we've gotten here, then things are probably
13319 * completely hosed, but if the error condition is
13320 * detected, it won't hurt to give it another try...
13321 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
13322 */
13323 if (sc->sc_type >= WM_T_PCH_SPT)
13324 hsfsts = ICH8_FLASH_READ32(sc,
13325 ICH_FLASH_HSFSTS) & 0xffffUL;
13326 else
13327 hsfsts = ICH8_FLASH_READ16(sc,
13328 ICH_FLASH_HSFSTS);
13329
13330 if (hsfsts & HSFSTS_ERR) {
13331 /* Repeat for some time before giving up. */
13332 continue;
13333 } else if ((hsfsts & HSFSTS_DONE) == 0)
13334 break;
13335 }
13336 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
13337
13338 return error;
13339 }
13340
13341 /******************************************************************************
13342 * Reads a single byte from the NVM using the ICH8 flash access registers.
13343 *
13344 * sc - pointer to wm_hw structure
13345 * index - The index of the byte to read.
13346 * data - Pointer to a byte to store the value read.
13347 *****************************************************************************/
13348 static int32_t
13349 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
13350 {
13351 int32_t status;
13352 uint32_t word = 0;
13353
13354 status = wm_read_ich8_data(sc, index, 1, &word);
13355 if (status == 0)
13356 *data = (uint8_t)word;
13357 else
13358 *data = 0;
13359
13360 return status;
13361 }
13362
13363 /******************************************************************************
13364 * Reads a word from the NVM using the ICH8 flash access registers.
13365 *
13366 * sc - pointer to wm_hw structure
13367 * index - The starting byte index of the word to read.
13368 * data - Pointer to a word to store the value read.
13369 *****************************************************************************/
13370 static int32_t
13371 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
13372 {
13373 int32_t status;
13374 uint32_t word = 0;
13375
13376 status = wm_read_ich8_data(sc, index, 2, &word);
13377 if (status == 0)
13378 *data = (uint16_t)word;
13379 else
13380 *data = 0;
13381
13382 return status;
13383 }
13384
13385 /******************************************************************************
13386 * Reads a dword from the NVM using the ICH8 flash access registers.
13387 *
13388 * sc - pointer to wm_hw structure
13389  * index - The starting byte index of the dword to read.
13390  * data - Pointer to a dword to store the value read.
13391 *****************************************************************************/
13392 static int32_t
13393 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
13394 {
13395 int32_t status;
13396
13397 status = wm_read_ich8_data(sc, index, 4, data);
13398 return status;
13399 }
13400
13401 /******************************************************************************
13402 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
13403 * register.
13404 *
13405 * sc - Struct containing variables accessed by shared code
13406 * offset - offset of word in the EEPROM to read
13407 * data - word read from the EEPROM
13408 * words - number of words to read
13409 *****************************************************************************/
13410 static int
13411 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
13412 {
13413 int32_t rv = 0;
13414 uint32_t flash_bank = 0;
13415 uint32_t act_offset = 0;
13416 uint32_t bank_offset = 0;
13417 uint16_t word = 0;
13418 uint16_t i = 0;
13419
13420 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
13421 device_xname(sc->sc_dev), __func__));
13422
13423 if (sc->nvm.acquire(sc) != 0)
13424 return -1;
13425
13426 /*
13427 * We need to know which is the valid flash bank. In the event
13428 * that we didn't allocate eeprom_shadow_ram, we may not be
13429 * managing flash_bank. So it cannot be trusted and needs
13430 * to be updated with each read.
13431 */
13432 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
13433 if (rv) {
13434 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
13435 device_xname(sc->sc_dev)));
13436 flash_bank = 0;
13437 }
13438
13439 /*
13440 * Adjust offset appropriately if we're on bank 1 - adjust for word
13441 * size
13442 */
13443 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
13444
13445 for (i = 0; i < words; i++) {
13446 /* The NVM part needs a byte offset, hence * 2 */
13447 act_offset = bank_offset + ((offset + i) * 2);
13448 rv = wm_read_ich8_word(sc, act_offset, &word);
13449 if (rv) {
13450 aprint_error_dev(sc->sc_dev,
13451 "%s: failed to read NVM\n", __func__);
13452 break;
13453 }
13454 data[i] = word;
13455 }
13456
13457 sc->nvm.release(sc);
13458 return rv;
13459 }
13460
13461 /******************************************************************************
13462 * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
13463 * register.
13464 *
13465 * sc - Struct containing variables accessed by shared code
13466 * offset - offset of word in the EEPROM to read
13467 * data - word read from the EEPROM
13468 * words - number of words to read
13469 *****************************************************************************/
13470 static int
13471 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
13472 {
13473 int32_t rv = 0;
13474 uint32_t flash_bank = 0;
13475 uint32_t act_offset = 0;
13476 uint32_t bank_offset = 0;
13477 uint32_t dword = 0;
13478 uint16_t i = 0;
13479
13480 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
13481 device_xname(sc->sc_dev), __func__));
13482
13483 if (sc->nvm.acquire(sc) != 0)
13484 return -1;
13485
13486 /*
13487 * We need to know which is the valid flash bank. In the event
13488 * that we didn't allocate eeprom_shadow_ram, we may not be
13489 * managing flash_bank. So it cannot be trusted and needs
13490 * to be updated with each read.
13491 */
13492 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
13493 if (rv) {
13494 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
13495 device_xname(sc->sc_dev)));
13496 flash_bank = 0;
13497 }
13498
13499 /*
13500 * Adjust offset appropriately if we're on bank 1 - adjust for word
13501 * size
13502 */
13503 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
13504
13505 for (i = 0; i < words; i++) {
13506 /* The NVM part needs a byte offset, hence * 2 */
13507 act_offset = bank_offset + ((offset + i) * 2);
13508 /* but we must read dword aligned, so mask ... */
13509 rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
13510 if (rv) {
13511 aprint_error_dev(sc->sc_dev,
13512 "%s: failed to read NVM\n", __func__);
13513 break;
13514 }
13515 /* ... and pick out low or high word */
13516 if ((act_offset & 0x2) == 0)
13517 data[i] = (uint16_t)(dword & 0xFFFF);
13518 else
13519 data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
13520 }
13521
13522 sc->nvm.release(sc);
13523 return rv;
13524 }
13525
13526 /* iNVM */
13527
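/*
 * wm_nvm_read_word_invm:
 *
 *	Read one word from the iNVM (integrated NVM) by walking the
 *	autoload records until a word autoload record with the requested
 *	address is found.
 */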
13528 static int
13529 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
13530 {
13531 	int32_t rv = -1;	/* return -1 if the word is not found */
13532 uint32_t invm_dword;
13533 uint16_t i;
13534 uint8_t record_type, word_address;
13535
13536 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
13537 device_xname(sc->sc_dev), __func__));
13538
13539 for (i = 0; i < INVM_SIZE; i++) {
13540 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
13541 /* Get record type */
13542 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
13543 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
13544 break;
13545 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
13546 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
13547 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
13548 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
13549 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
13550 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
13551 if (word_address == address) {
13552 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
13553 rv = 0;
13554 break;
13555 }
13556 }
13557 }
13558
13559 return rv;
13560 }
13561
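/*
 * wm_nvm_read_invm:
 *
 *	Read words from the iNVM.  Words that are not mapped in the iNVM
 *	fall back to the documented I211 default values.
 */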
13562 static int
13563 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
13564 {
13565 int rv = 0;
13566 int i;
13567
13568 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
13569 device_xname(sc->sc_dev), __func__));
13570
13571 if (sc->nvm.acquire(sc) != 0)
13572 return -1;
13573
13574 for (i = 0; i < words; i++) {
13575 switch (offset + i) {
13576 case NVM_OFF_MACADDR:
13577 case NVM_OFF_MACADDR1:
13578 case NVM_OFF_MACADDR2:
13579 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
13580 if (rv != 0) {
13581 data[i] = 0xffff;
13582 rv = -1;
13583 }
13584 break;
13585 case NVM_OFF_CFG2:
13586 rv = wm_nvm_read_word_invm(sc, offset, data);
13587 if (rv != 0) {
13588 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
13589 rv = 0;
13590 }
13591 break;
13592 case NVM_OFF_CFG4:
13593 rv = wm_nvm_read_word_invm(sc, offset, data);
13594 if (rv != 0) {
13595 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
13596 rv = 0;
13597 }
13598 break;
13599 case NVM_OFF_LED_1_CFG:
13600 rv = wm_nvm_read_word_invm(sc, offset, data);
13601 if (rv != 0) {
13602 *data = NVM_LED_1_CFG_DEFAULT_I211;
13603 rv = 0;
13604 }
13605 break;
13606 case NVM_OFF_LED_0_2_CFG:
13607 rv = wm_nvm_read_word_invm(sc, offset, data);
13608 if (rv != 0) {
13609 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
13610 rv = 0;
13611 }
13612 break;
13613 case NVM_OFF_ID_LED_SETTINGS:
13614 rv = wm_nvm_read_word_invm(sc, offset, data);
13615 if (rv != 0) {
13616 *data = ID_LED_RESERVED_FFFF;
13617 rv = 0;
13618 }
13619 break;
13620 default:
13621 DPRINTF(WM_DEBUG_NVM,
13622 ("NVM word 0x%02x is not mapped.\n", offset));
13623 *data = NVM_RESERVED_WORD;
13624 break;
13625 }
13626 }
13627
13628 sc->nvm.release(sc);
13629 return rv;
13630 }
13631
13632 /* Locking, NVM type detection, checksum validation, version and read */
13633
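/*
 * wm_nvm_is_onboard_eeprom:
 *
 *	Return 1 if the 82573/82574/82583 uses the onboard EEPROM,
 *	or 0 if an external flash device is detected instead.
 */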
13634 static int
13635 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
13636 {
13637 uint32_t eecd = 0;
13638
13639 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
13640 || sc->sc_type == WM_T_82583) {
13641 eecd = CSR_READ(sc, WMREG_EECD);
13642
13643 /* Isolate bits 15 & 16 */
13644 eecd = ((eecd >> 15) & 0x03);
13645
13646 /* If both bits are set, device is Flash type */
13647 if (eecd == 0x03)
13648 return 0;
13649 }
13650 return 1;
13651 }
13652
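/*
 * wm_nvm_flash_presence_i210:
 *
 *	Return 1 if the EEC register reports that a flash device is
 *	attached; otherwise return 0 (the caller then uses the iNVM).
 */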
13653 static int
13654 wm_nvm_flash_presence_i210(struct wm_softc *sc)
13655 {
13656 uint32_t eec;
13657
13658 eec = CSR_READ(sc, WMREG_EEC);
13659 if ((eec & EEC_FLASH_DETECTED) != 0)
13660 return 1;
13661
13662 return 0;
13663 }
13664
13665 /*
13666 * wm_nvm_validate_checksum
13667 *
13668 * The checksum is defined as the sum of the first 64 (16 bit) words.
13669 */
13670 static int
13671 wm_nvm_validate_checksum(struct wm_softc *sc)
13672 {
13673 uint16_t checksum;
13674 uint16_t eeprom_data;
13675 #ifdef WM_DEBUG
13676 uint16_t csum_wordaddr, valid_checksum;
13677 #endif
13678 int i;
13679
13680 checksum = 0;
13681
13682 /* Don't check for I211 */
13683 if (sc->sc_type == WM_T_I211)
13684 return 0;
13685
13686 #ifdef WM_DEBUG
13687 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
13688 || (sc->sc_type == WM_T_PCH_CNP)) {
13689 csum_wordaddr = NVM_OFF_COMPAT;
13690 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
13691 } else {
13692 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
13693 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
13694 }
13695
13696 /* Dump EEPROM image for debug */
13697 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
13698 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
13699 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
13700 /* XXX PCH_SPT? */
13701 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
13702 if ((eeprom_data & valid_checksum) == 0)
13703 DPRINTF(WM_DEBUG_NVM,
13704 ("%s: NVM need to be updated (%04x != %04x)\n",
13705 device_xname(sc->sc_dev), eeprom_data,
13706 valid_checksum));
13707 }
13708
13709 if ((wm_debug & WM_DEBUG_NVM) != 0) {
13710 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
13711 for (i = 0; i < NVM_SIZE; i++) {
13712 if (wm_nvm_read(sc, i, 1, &eeprom_data))
13713 printf("XXXX ");
13714 else
13715 printf("%04hx ", eeprom_data);
13716 if (i % 8 == 7)
13717 printf("\n");
13718 }
13719 }
13720
13721 #endif /* WM_DEBUG */
13722
13723 for (i = 0; i < NVM_SIZE; i++) {
13724 if (wm_nvm_read(sc, i, 1, &eeprom_data))
13725 return 1;
13726 checksum += eeprom_data;
13727 }
13728
13729 if (checksum != (uint16_t) NVM_CHECKSUM) {
13730 #ifdef WM_DEBUG
13731 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
13732 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
13733 #endif
13734 }
13735
13736 return 0;
13737 }
13738
13739 static void
13740 wm_nvm_version_invm(struct wm_softc *sc)
13741 {
13742 uint32_t dword;
13743
13744 /*
13745 	 * Linux's code to decode the version is very strange, so we don't
13746 	 * follow that algorithm and just use word 61 as the document says.
13747 * Perhaps it's not perfect though...
13748 *
13749 * Example:
13750 *
13751 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
13752 */
13753 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
13754 dword = __SHIFTOUT(dword, INVM_VER_1);
13755 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
13756 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
13757 }
13758
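/*
 * wm_nvm_version:
 *
 *	Decode and print the NVM image version and, when present, the
 *	option ROM version and the image unique ID.
 */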
13759 static void
13760 wm_nvm_version(struct wm_softc *sc)
13761 {
13762 uint16_t major, minor, build, patch;
13763 uint16_t uid0, uid1;
13764 uint16_t nvm_data;
13765 uint16_t off;
13766 bool check_version = false;
13767 bool check_optionrom = false;
13768 bool have_build = false;
13769 bool have_uid = true;
13770
13771 /*
13772 * Version format:
13773 *
13774 * XYYZ
13775 * X0YZ
13776 * X0YY
13777 *
13778 * Example:
13779 *
13780 * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10)
13781 * 82571 0x50a6 5.10.6?
13782 * 82572 0x506a 5.6.10?
13783 * 82572EI 0x5069 5.6.9?
13784 * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4)
13785 * 0x2013 2.1.3?
13786 * 82583 0x10a0 1.10.0? (document says it's default value)
13787 * ICH8+82567 0x0040 0.4.0?
13788 * ICH9+82566 0x1040 1.4.0?
13789 *ICH10+82567 0x0043 0.4.3?
13790 * PCH+82577 0x00c1 0.12.1?
13791 * PCH2+82579 0x00d3 0.13.3?
13792 * 0x00d4 0.13.4?
13793 * LPT+I218 0x0023 0.2.3?
13794 * SPT+I219 0x0084 0.8.4?
13795 * CNP+I219 0x0054 0.5.4?
13796 */
13797
13798 /*
13799 * XXX
13800 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
13801 * I've never seen on real 82574 hardware with such small SPI ROM.
13802 */
13803 if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
13804 || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
13805 have_uid = false;
13806
13807 switch (sc->sc_type) {
13808 case WM_T_82571:
13809 case WM_T_82572:
13810 case WM_T_82574:
13811 case WM_T_82583:
13812 check_version = true;
13813 check_optionrom = true;
13814 have_build = true;
13815 break;
13816 case WM_T_ICH8:
13817 case WM_T_ICH9:
13818 case WM_T_ICH10:
13819 case WM_T_PCH:
13820 case WM_T_PCH2:
13821 case WM_T_PCH_LPT:
13822 case WM_T_PCH_SPT:
13823 case WM_T_PCH_CNP:
13824 check_version = true;
13825 have_build = true;
13826 have_uid = false;
13827 break;
13828 case WM_T_82575:
13829 case WM_T_82576:
13830 case WM_T_82580:
13831 if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
13832 check_version = true;
13833 break;
13834 case WM_T_I211:
13835 wm_nvm_version_invm(sc);
13836 have_uid = false;
13837 goto printver;
13838 case WM_T_I210:
13839 if (!wm_nvm_flash_presence_i210(sc)) {
13840 wm_nvm_version_invm(sc);
13841 have_uid = false;
13842 goto printver;
13843 }
13844 /* FALLTHROUGH */
13845 case WM_T_I350:
13846 case WM_T_I354:
13847 check_version = true;
13848 check_optionrom = true;
13849 break;
13850 default:
13851 return;
13852 }
13853 if (check_version
13854 && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
13855 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
13856 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
13857 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
13858 build = nvm_data & NVM_BUILD_MASK;
13859 have_build = true;
13860 } else
13861 minor = nvm_data & 0x00ff;
13862
13863 		/* The minor number is BCD encoded; convert it to decimal */
13864 minor = (minor / 16) * 10 + (minor % 16);
13865 sc->sc_nvm_ver_major = major;
13866 sc->sc_nvm_ver_minor = minor;
13867
13868 printver:
13869 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
13870 sc->sc_nvm_ver_minor);
13871 if (have_build) {
13872 sc->sc_nvm_ver_build = build;
13873 aprint_verbose(".%d", build);
13874 }
13875 }
13876
13877 	/* Assume the Option ROM area is above NVM_SIZE */
13878 if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
13879 && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
13880 /* Option ROM Version */
13881 if ((off != 0x0000) && (off != 0xffff)) {
13882 int rv;
13883
13884 off += NVM_COMBO_VER_OFF;
13885 rv = wm_nvm_read(sc, off + 1, 1, &uid1);
13886 rv |= wm_nvm_read(sc, off, 1, &uid0);
13887 if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
13888 && (uid1 != 0) && (uid1 != 0xffff)) {
13889 /* 16bits */
13890 major = uid0 >> 8;
13891 build = (uid0 << 8) | (uid1 >> 8);
13892 patch = uid1 & 0x00ff;
13893 aprint_verbose(", option ROM Version %d.%d.%d",
13894 major, build, patch);
13895 }
13896 }
13897 }
13898
13899 if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
13900 aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
13901 }
13902
13903 /*
13904 * wm_nvm_read:
13905 *
13906 * Read data from the serial EEPROM.
13907 */
13908 static int
13909 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13910 {
13911 int rv;
13912
13913 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
13914 device_xname(sc->sc_dev), __func__));
13915
13916 if (sc->sc_flags & WM_F_EEPROM_INVALID)
13917 return -1;
13918
13919 rv = sc->nvm.read(sc, word, wordcnt, data);
13920
13921 return rv;
13922 }
13923
13924 /*
13925 * Hardware semaphores.
13926  * Very complex...
13927 */
13928
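/*
 * wm_get_null, wm_put_null:
 *
 *	No-op acquire/release functions for devices that need no locking.
 */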
13929 static int
13930 wm_get_null(struct wm_softc *sc)
13931 {
13932
13933 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13934 device_xname(sc->sc_dev), __func__));
13935 return 0;
13936 }
13937
13938 static void
13939 wm_put_null(struct wm_softc *sc)
13940 {
13941
13942 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13943 device_xname(sc->sc_dev), __func__));
13944 return;
13945 }
13946
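/*
 * wm_get_eecd:
 *
 *	Request direct EEPROM access through the EECD register and wait
 *	for the grant (EE_GNT) bit to be set.
 */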
13947 static int
13948 wm_get_eecd(struct wm_softc *sc)
13949 {
13950 uint32_t reg;
13951 int x;
13952
13953 DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
13954 device_xname(sc->sc_dev), __func__));
13955
13956 reg = CSR_READ(sc, WMREG_EECD);
13957
13958 /* Request EEPROM access. */
13959 reg |= EECD_EE_REQ;
13960 CSR_WRITE(sc, WMREG_EECD, reg);
13961
13962 /* ..and wait for it to be granted. */
13963 for (x = 0; x < 1000; x++) {
13964 reg = CSR_READ(sc, WMREG_EECD);
13965 if (reg & EECD_EE_GNT)
13966 break;
13967 delay(5);
13968 }
13969 if ((reg & EECD_EE_GNT) == 0) {
13970 aprint_error_dev(sc->sc_dev,
13971 "could not acquire EEPROM GNT\n");
13972 reg &= ~EECD_EE_REQ;
13973 CSR_WRITE(sc, WMREG_EECD, reg);
13974 return -1;
13975 }
13976
13977 return 0;
13978 }
13979
13980 static void
13981 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
13982 {
13983
13984 *eecd |= EECD_SK;
13985 CSR_WRITE(sc, WMREG_EECD, *eecd);
13986 CSR_WRITE_FLUSH(sc);
13987 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
13988 delay(1);
13989 else
13990 delay(50);
13991 }
13992
13993 static void
13994 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
13995 {
13996
13997 *eecd &= ~EECD_SK;
13998 CSR_WRITE(sc, WMREG_EECD, *eecd);
13999 CSR_WRITE_FLUSH(sc);
14000 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
14001 delay(1);
14002 else
14003 delay(50);
14004 }
14005
14006 static void
14007 wm_put_eecd(struct wm_softc *sc)
14008 {
14009 uint32_t reg;
14010
14011 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14012 device_xname(sc->sc_dev), __func__));
14013
14014 /* Stop nvm */
14015 reg = CSR_READ(sc, WMREG_EECD);
14016 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
14017 /* Pull CS high */
14018 reg |= EECD_CS;
14019 		wm_nvm_eec_clock_lower(sc, &reg);
14020 } else {
14021 /* CS on Microwire is active-high */
14022 reg &= ~(EECD_CS | EECD_DI);
14023 CSR_WRITE(sc, WMREG_EECD, reg);
14024 		wm_nvm_eec_clock_raise(sc, &reg);
14025 		wm_nvm_eec_clock_lower(sc, &reg);
14026 }
14027
14028 reg = CSR_READ(sc, WMREG_EECD);
14029 reg &= ~EECD_EE_REQ;
14030 CSR_WRITE(sc, WMREG_EECD, reg);
14031
14032 return;
14033 }
14034
14035 /*
14036 * Get hardware semaphore.
14037 * Same as e1000_get_hw_semaphore_generic()
14038 */
14039 static int
14040 wm_get_swsm_semaphore(struct wm_softc *sc)
14041 {
14042 int32_t timeout;
14043 uint32_t swsm;
14044
14045 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14046 device_xname(sc->sc_dev), __func__));
14047 KASSERT(sc->sc_nvm_wordsize > 0);
14048
14049 retry:
14050 /* Get the SW semaphore. */
14051 timeout = sc->sc_nvm_wordsize + 1;
14052 while (timeout) {
14053 swsm = CSR_READ(sc, WMREG_SWSM);
14054
14055 if ((swsm & SWSM_SMBI) == 0)
14056 break;
14057
14058 delay(50);
14059 timeout--;
14060 }
14061
14062 if (timeout == 0) {
14063 if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
14064 /*
14065 * In rare circumstances, the SW semaphore may already
14066 * be held unintentionally. Clear the semaphore once
14067 * before giving up.
14068 */
14069 sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
14070 wm_put_swsm_semaphore(sc);
14071 goto retry;
14072 }
14073 aprint_error_dev(sc->sc_dev,
14074 "could not acquire SWSM SMBI\n");
14075 return 1;
14076 }
14077
14078 /* Get the FW semaphore. */
14079 timeout = sc->sc_nvm_wordsize + 1;
14080 while (timeout) {
14081 swsm = CSR_READ(sc, WMREG_SWSM);
14082 swsm |= SWSM_SWESMBI;
14083 CSR_WRITE(sc, WMREG_SWSM, swsm);
14084 /* If we managed to set the bit we got the semaphore. */
14085 swsm = CSR_READ(sc, WMREG_SWSM);
14086 if (swsm & SWSM_SWESMBI)
14087 break;
14088
14089 delay(50);
14090 timeout--;
14091 }
14092
14093 if (timeout == 0) {
14094 aprint_error_dev(sc->sc_dev,
14095 "could not acquire SWSM SWESMBI\n");
14096 /* Release semaphores */
14097 wm_put_swsm_semaphore(sc);
14098 return 1;
14099 }
14100 return 0;
14101 }
14102
14103 /*
14104 * Put hardware semaphore.
14105 * Same as e1000_put_hw_semaphore_generic()
14106 */
14107 static void
14108 wm_put_swsm_semaphore(struct wm_softc *sc)
14109 {
14110 uint32_t swsm;
14111
14112 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14113 device_xname(sc->sc_dev), __func__));
14114
14115 swsm = CSR_READ(sc, WMREG_SWSM);
14116 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
14117 CSR_WRITE(sc, WMREG_SWSM, swsm);
14118 }
14119
14120 /*
14121 * Get SW/FW semaphore.
14122 * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
14123 */
14124 static int
14125 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
14126 {
14127 uint32_t swfw_sync;
14128 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
14129 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
14130 int timeout;
14131
14132 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14133 device_xname(sc->sc_dev), __func__));
14134
14135 if (sc->sc_type == WM_T_80003)
14136 timeout = 50;
14137 else
14138 timeout = 200;
14139
14140 while (timeout) {
14141 if (wm_get_swsm_semaphore(sc)) {
14142 aprint_error_dev(sc->sc_dev,
14143 "%s: failed to get semaphore\n",
14144 __func__);
14145 return 1;
14146 }
14147 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
14148 if ((swfw_sync & (swmask | fwmask)) == 0) {
14149 swfw_sync |= swmask;
14150 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
14151 wm_put_swsm_semaphore(sc);
14152 return 0;
14153 }
14154 wm_put_swsm_semaphore(sc);
14155 delay(5000);
14156 timeout--;
14157 }
14158 device_printf(sc->sc_dev,
14159 "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
14160 mask, swfw_sync);
14161 return 1;
14162 }
14163
14164 static void
14165 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
14166 {
14167 uint32_t swfw_sync;
14168
14169 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14170 device_xname(sc->sc_dev), __func__));
14171
14172 while (wm_get_swsm_semaphore(sc) != 0)
14173 continue;
14174
14175 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
14176 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
14177 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
14178
14179 wm_put_swsm_semaphore(sc);
14180 }
14181
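/*
 * Get the NVM resource on 80003: take the SW/FW EEPROM semaphore and,
 * when WM_F_LOCK_EECD is set, also request direct EECD access.
 */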
14182 static int
14183 wm_get_nvm_80003(struct wm_softc *sc)
14184 {
14185 int rv;
14186
14187 DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
14188 device_xname(sc->sc_dev), __func__));
14189
14190 if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
14191 aprint_error_dev(sc->sc_dev,
14192 "%s: failed to get semaphore(SWFW)\n", __func__);
14193 return rv;
14194 }
14195
14196 if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14197 && (rv = wm_get_eecd(sc)) != 0) {
14198 aprint_error_dev(sc->sc_dev,
14199 "%s: failed to get semaphore(EECD)\n", __func__);
14200 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
14201 return rv;
14202 }
14203
14204 return 0;
14205 }
14206
14207 static void
14208 wm_put_nvm_80003(struct wm_softc *sc)
14209 {
14210
14211 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14212 device_xname(sc->sc_dev), __func__));
14213
14214 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14215 wm_put_eecd(sc);
14216 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
14217 }
14218
14219 static int
14220 wm_get_nvm_82571(struct wm_softc *sc)
14221 {
14222 int rv;
14223
14224 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14225 device_xname(sc->sc_dev), __func__));
14226
14227 if ((rv = wm_get_swsm_semaphore(sc)) != 0)
14228 return rv;
14229
14230 switch (sc->sc_type) {
14231 case WM_T_82573:
14232 break;
14233 default:
14234 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14235 rv = wm_get_eecd(sc);
14236 break;
14237 }
14238
14239 if (rv != 0) {
14240 aprint_error_dev(sc->sc_dev,
14241 "%s: failed to get semaphore\n",
14242 __func__);
14243 wm_put_swsm_semaphore(sc);
14244 }
14245
14246 return rv;
14247 }
14248
14249 static void
14250 wm_put_nvm_82571(struct wm_softc *sc)
14251 {
14252
14253 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14254 device_xname(sc->sc_dev), __func__));
14255
14256 switch (sc->sc_type) {
14257 case WM_T_82573:
14258 break;
14259 default:
14260 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14261 wm_put_eecd(sc);
14262 break;
14263 }
14264
14265 wm_put_swsm_semaphore(sc);
14266 }
14267
14268 static int
14269 wm_get_phy_82575(struct wm_softc *sc)
14270 {
14271
14272 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14273 device_xname(sc->sc_dev), __func__));
14274 return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
14275 }
14276
14277 static void
14278 wm_put_phy_82575(struct wm_softc *sc)
14279 {
14280
14281 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14282 device_xname(sc->sc_dev), __func__));
14283 return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
14284 }
14285
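/*
 * Get the software/firmware/hardware semaphore: take the PHY mutex
 * (shared by PHY and NVM) and then claim the EXTCNFCTR MDIO software
 * ownership bit, retrying for up to about a second.
 */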
14286 static int
14287 wm_get_swfwhw_semaphore(struct wm_softc *sc)
14288 {
14289 uint32_t ext_ctrl;
14290 int timeout = 200;
14291
14292 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14293 device_xname(sc->sc_dev), __func__));
14294
14295 mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14296 for (timeout = 0; timeout < 200; timeout++) {
14297 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14298 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
14299 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14300
14301 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14302 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
14303 return 0;
14304 delay(5000);
14305 }
14306 device_printf(sc->sc_dev,
14307 "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
14308 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14309 return 1;
14310 }
14311
14312 static void
14313 wm_put_swfwhw_semaphore(struct wm_softc *sc)
14314 {
14315 uint32_t ext_ctrl;
14316
14317 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14318 device_xname(sc->sc_dev), __func__));
14319
14320 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14321 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14322 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14323
14324 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14325 }
14326
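/*
 * Get the ICH8LAN software flag: take the PHY mutex, wait for any other
 * owner to drop the EXTCNFCTR MDIO software ownership bit, then claim it.
 */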
14327 static int
14328 wm_get_swflag_ich8lan(struct wm_softc *sc)
14329 {
14330 uint32_t ext_ctrl;
14331 int timeout;
14332
14333 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14334 device_xname(sc->sc_dev), __func__));
14335 mutex_enter(sc->sc_ich_phymtx);
14336 for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
14337 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14338 if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
14339 break;
14340 delay(1000);
14341 }
14342 if (timeout >= WM_PHY_CFG_TIMEOUT) {
14343 device_printf(sc->sc_dev,
14344 "SW has already locked the resource\n");
14345 goto out;
14346 }
14347
14348 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
14349 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14350 for (timeout = 0; timeout < 1000; timeout++) {
14351 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14352 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
14353 break;
14354 delay(1000);
14355 }
14356 if (timeout >= 1000) {
14357 device_printf(sc->sc_dev, "failed to acquire semaphore\n");
14358 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14359 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14360 goto out;
14361 }
14362 return 0;
14363
14364 out:
14365 mutex_exit(sc->sc_ich_phymtx);
14366 return 1;
14367 }
14368
14369 static void
14370 wm_put_swflag_ich8lan(struct wm_softc *sc)
14371 {
14372 uint32_t ext_ctrl;
14373
14374 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14375 device_xname(sc->sc_dev), __func__));
14376 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14377 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
14378 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14379 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14380 } else {
14381 device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
14382 }
14383
14384 mutex_exit(sc->sc_ich_phymtx);
14385 }
14386
14387 static int
14388 wm_get_nvm_ich8lan(struct wm_softc *sc)
14389 {
14390
14391 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14392 device_xname(sc->sc_dev), __func__));
14393 mutex_enter(sc->sc_ich_nvmmtx);
14394
14395 return 0;
14396 }
14397
14398 static void
14399 wm_put_nvm_ich8lan(struct wm_softc *sc)
14400 {
14401
14402 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14403 device_xname(sc->sc_dev), __func__));
14404 mutex_exit(sc->sc_ich_nvmmtx);
14405 }
14406
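/*
 * Get the 82573 hardware semaphore by setting the EXTCNFCTR MDIO software
 * ownership bit and waiting for it to stick.
 */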
14407 static int
14408 wm_get_hw_semaphore_82573(struct wm_softc *sc)
14409 {
14410 int i = 0;
14411 uint32_t reg;
14412
14413 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14414 device_xname(sc->sc_dev), __func__));
14415
14416 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14417 do {
14418 CSR_WRITE(sc, WMREG_EXTCNFCTR,
14419 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
14420 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14421 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
14422 break;
14423 delay(2*1000);
14424 i++;
14425 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
14426
14427 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
14428 wm_put_hw_semaphore_82573(sc);
14429 log(LOG_ERR, "%s: Driver can't access the PHY\n",
14430 device_xname(sc->sc_dev));
14431 return -1;
14432 }
14433
14434 return 0;
14435 }
14436
14437 static void
14438 wm_put_hw_semaphore_82573(struct wm_softc *sc)
14439 {
14440 uint32_t reg;
14441
14442 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14443 device_xname(sc->sc_dev), __func__));
14444
14445 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14446 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14447 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
14448 }
14449
14450 /*
14451 * Management mode and power management related subroutines.
14452 * BMC, AMT, suspend/resume and EEE.
14453 */
14454
14455 #ifdef WM_WOL
14456 static int
14457 wm_check_mng_mode(struct wm_softc *sc)
14458 {
14459 int rv;
14460
14461 switch (sc->sc_type) {
14462 case WM_T_ICH8:
14463 case WM_T_ICH9:
14464 case WM_T_ICH10:
14465 case WM_T_PCH:
14466 case WM_T_PCH2:
14467 case WM_T_PCH_LPT:
14468 case WM_T_PCH_SPT:
14469 case WM_T_PCH_CNP:
14470 rv = wm_check_mng_mode_ich8lan(sc);
14471 break;
14472 case WM_T_82574:
14473 case WM_T_82583:
14474 rv = wm_check_mng_mode_82574(sc);
14475 break;
14476 case WM_T_82571:
14477 case WM_T_82572:
14478 case WM_T_82573:
14479 case WM_T_80003:
14480 rv = wm_check_mng_mode_generic(sc);
14481 break;
14482 default:
14483 		/* Nothing to do */
14484 rv = 0;
14485 break;
14486 }
14487
14488 return rv;
14489 }
14490
14491 static int
14492 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
14493 {
14494 uint32_t fwsm;
14495
14496 fwsm = CSR_READ(sc, WMREG_FWSM);
14497
14498 if (((fwsm & FWSM_FW_VALID) != 0)
14499 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
14500 return 1;
14501
14502 return 0;
14503 }
14504
14505 static int
14506 wm_check_mng_mode_82574(struct wm_softc *sc)
14507 {
14508 uint16_t data;
14509
14510 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
14511
14512 if ((data & NVM_CFG2_MNGM_MASK) != 0)
14513 return 1;
14514
14515 return 0;
14516 }
14517
14518 static int
14519 wm_check_mng_mode_generic(struct wm_softc *sc)
14520 {
14521 uint32_t fwsm;
14522
14523 fwsm = CSR_READ(sc, WMREG_FWSM);
14524
14525 if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
14526 return 1;
14527
14528 return 0;
14529 }
14530 #endif /* WM_WOL */
14531
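/*
 * wm_enable_mng_pass_thru:
 *
 *	Return 1 if manageability firmware is present and configured to
 *	pass received management packets through to the host; the caller
 *	uses this to set WM_F_HAS_MANAGE.
 */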
14532 static int
14533 wm_enable_mng_pass_thru(struct wm_softc *sc)
14534 {
14535 uint32_t manc, fwsm, factps;
14536
14537 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
14538 return 0;
14539
14540 manc = CSR_READ(sc, WMREG_MANC);
14541
14542 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
14543 device_xname(sc->sc_dev), manc));
14544 if ((manc & MANC_RECV_TCO_EN) == 0)
14545 return 0;
14546
14547 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
14548 fwsm = CSR_READ(sc, WMREG_FWSM);
14549 factps = CSR_READ(sc, WMREG_FACTPS);
14550 if (((factps & FACTPS_MNGCG) == 0)
14551 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
14552 return 1;
14553 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
14554 uint16_t data;
14555
14556 factps = CSR_READ(sc, WMREG_FACTPS);
14557 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
14558 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
14559 device_xname(sc->sc_dev), factps, data));
14560 if (((factps & FACTPS_MNGCG) == 0)
14561 && ((data & NVM_CFG2_MNGM_MASK)
14562 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
14563 return 1;
14564 } else if (((manc & MANC_SMBUS_EN) != 0)
14565 && ((manc & MANC_ASF_EN) == 0))
14566 return 1;
14567
14568 return 0;
14569 }
14570
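/*
 * wm_phy_resetisblocked:
 *
 *	Return true if firmware currently blocks a PHY reset
 *	(FWSM RSPCIPHY on ICH/PCH, MANC BLK_PHY_RST_ON_IDE on 8257x/80003).
 */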
14571 static bool
14572 wm_phy_resetisblocked(struct wm_softc *sc)
14573 {
14574 bool blocked = false;
14575 uint32_t reg;
14576 int i = 0;
14577
14578 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14579 device_xname(sc->sc_dev), __func__));
14580
14581 switch (sc->sc_type) {
14582 case WM_T_ICH8:
14583 case WM_T_ICH9:
14584 case WM_T_ICH10:
14585 case WM_T_PCH:
14586 case WM_T_PCH2:
14587 case WM_T_PCH_LPT:
14588 case WM_T_PCH_SPT:
14589 case WM_T_PCH_CNP:
14590 do {
14591 reg = CSR_READ(sc, WMREG_FWSM);
14592 if ((reg & FWSM_RSPCIPHY) == 0) {
14593 blocked = true;
14594 delay(10*1000);
14595 continue;
14596 }
14597 blocked = false;
14598 } while (blocked && (i++ < 30));
14599 return blocked;
14600 break;
14601 case WM_T_82571:
14602 case WM_T_82572:
14603 case WM_T_82573:
14604 case WM_T_82574:
14605 case WM_T_82583:
14606 case WM_T_80003:
14607 reg = CSR_READ(sc, WMREG_MANC);
14608 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
14609 return true;
14610 else
14611 return false;
14612 break;
14613 default:
14614 /* No problem */
14615 break;
14616 }
14617
14618 return false;
14619 }
14620
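/*
 * wm_get_hw_control:
 *
 *	Tell the firmware that the driver has taken over the device by
 *	setting SWSM DRV_LOAD (82573) or CTRL_EXT DRV_LOAD (82571 and
 *	newer).  wm_release_hw_control() below clears the same bit.
 */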
14621 static void
14622 wm_get_hw_control(struct wm_softc *sc)
14623 {
14624 uint32_t reg;
14625
14626 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14627 device_xname(sc->sc_dev), __func__));
14628
14629 if (sc->sc_type == WM_T_82573) {
14630 reg = CSR_READ(sc, WMREG_SWSM);
14631 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
14632 } else if (sc->sc_type >= WM_T_82571) {
14633 reg = CSR_READ(sc, WMREG_CTRL_EXT);
14634 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
14635 }
14636 }
14637
14638 static void
14639 wm_release_hw_control(struct wm_softc *sc)
14640 {
14641 uint32_t reg;
14642
14643 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14644 device_xname(sc->sc_dev), __func__));
14645
14646 if (sc->sc_type == WM_T_82573) {
14647 reg = CSR_READ(sc, WMREG_SWSM);
14648 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
14649 } else if (sc->sc_type >= WM_T_82571) {
14650 reg = CSR_READ(sc, WMREG_CTRL_EXT);
14651 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
14652 }
14653 }
14654
14655 static void
14656 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
14657 {
14658 uint32_t reg;
14659
14660 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14661 device_xname(sc->sc_dev), __func__));
14662
14663 if (sc->sc_type < WM_T_PCH2)
14664 return;
14665
14666 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14667
14668 if (gate)
14669 reg |= EXTCNFCTR_GATE_PHY_CFG;
14670 else
14671 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
14672
14673 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
14674 }
14675
14676 static int
14677 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
14678 {
14679 uint32_t fwsm, reg;
14680 int rv = 0;
14681
14682 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14683 device_xname(sc->sc_dev), __func__));
14684
14685 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
14686 wm_gate_hw_phy_config_ich8lan(sc, true);
14687
14688 /* Disable ULP */
14689 wm_ulp_disable(sc);
14690
14691 /* Acquire PHY semaphore */
14692 rv = sc->phy.acquire(sc);
14693 if (rv != 0) {
14694 DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
14695 device_xname(sc->sc_dev), __func__));
14696 return -1;
14697 }
14698
14699 /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
14700 * inaccessible and resetting the PHY is not blocked, toggle the
14701 * LANPHYPC Value bit to force the interconnect to PCIe mode.
14702 */
14703 fwsm = CSR_READ(sc, WMREG_FWSM);
14704 switch (sc->sc_type) {
14705 case WM_T_PCH_LPT:
14706 case WM_T_PCH_SPT:
14707 case WM_T_PCH_CNP:
14708 if (wm_phy_is_accessible_pchlan(sc))
14709 break;
14710
14711 /* Before toggling LANPHYPC, see if PHY is accessible by
14712 * forcing MAC to SMBus mode first.
14713 */
14714 reg = CSR_READ(sc, WMREG_CTRL_EXT);
14715 reg |= CTRL_EXT_FORCE_SMBUS;
14716 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
14717 #if 0
14718 /* XXX Isn't this required??? */
14719 CSR_WRITE_FLUSH(sc);
14720 #endif
14721 /* Wait 50 milliseconds for MAC to finish any retries
14722 * that it might be trying to perform from previous
14723 * attempts to acknowledge any phy read requests.
14724 */
14725 delay(50 * 1000);
14726 /* FALLTHROUGH */
14727 case WM_T_PCH2:
14728 if (wm_phy_is_accessible_pchlan(sc) == true)
14729 break;
14730 /* FALLTHROUGH */
14731 case WM_T_PCH:
14732 if (sc->sc_type == WM_T_PCH)
14733 if ((fwsm & FWSM_FW_VALID) != 0)
14734 break;
14735
14736 if (wm_phy_resetisblocked(sc) == true) {
14737 device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
14738 break;
14739 }
14740
14741 /* Toggle LANPHYPC Value bit */
14742 wm_toggle_lanphypc_pch_lpt(sc);
14743
14744 if (sc->sc_type >= WM_T_PCH_LPT) {
14745 if (wm_phy_is_accessible_pchlan(sc) == true)
14746 break;
14747
14748 /* Toggling LANPHYPC brings the PHY out of SMBus mode
14749 * so ensure that the MAC is also out of SMBus mode
14750 */
14751 reg = CSR_READ(sc, WMREG_CTRL_EXT);
14752 reg &= ~CTRL_EXT_FORCE_SMBUS;
14753 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
14754
14755 if (wm_phy_is_accessible_pchlan(sc) == true)
14756 break;
14757 rv = -1;
14758 }
14759 break;
14760 default:
14761 break;
14762 }
14763
14764 /* Release semaphore */
14765 sc->phy.release(sc);
14766
14767 if (rv == 0) {
14768 /* Check to see if able to reset PHY. Print error if not */
14769 if (wm_phy_resetisblocked(sc)) {
14770 device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
14771 goto out;
14772 }
14773
14774 /* Reset the PHY before any access to it. Doing so, ensures
14775 * that the PHY is in a known good state before we read/write
14776 * PHY registers. The generic reset is sufficient here,
14777 * because we haven't determined the PHY type yet.
14778 */
14779 if (wm_reset_phy(sc) != 0)
14780 goto out;
14781
14782 /* On a successful reset, possibly need to wait for the PHY
14783 * to quiesce to an accessible state before returning control
14784 * to the calling function. If the PHY does not quiesce, then
14785 * return E1000E_BLK_PHY_RESET, as this is the condition that
14786 * the PHY is in.
14787 */
14788 if (wm_phy_resetisblocked(sc))
14789 device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
14790 }
14791
14792 out:
14793 /* Ungate automatic PHY configuration on non-managed 82579 */
14794 if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
14795 delay(10*1000);
14796 wm_gate_hw_phy_config_ich8lan(sc, false);
14797 }
14798
14799 	return rv;
14800 }
14801
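/*
 * wm_init_manageability:
 *
 *	When manageability firmware is present, disable hardware ARP
 *	interception and allow management packets (ports 623/624) to be
 *	delivered to the host.
 */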
14802 static void
14803 wm_init_manageability(struct wm_softc *sc)
14804 {
14805
14806 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14807 device_xname(sc->sc_dev), __func__));
14808 if (sc->sc_flags & WM_F_HAS_MANAGE) {
14809 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
14810 uint32_t manc = CSR_READ(sc, WMREG_MANC);
14811
14812 /* Disable hardware interception of ARP */
14813 manc &= ~MANC_ARP_EN;
14814
14815 /* Enable receiving management packets to the host */
14816 if (sc->sc_type >= WM_T_82571) {
14817 manc |= MANC_EN_MNG2HOST;
14818 manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
14819 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
14820 }
14821
14822 CSR_WRITE(sc, WMREG_MANC, manc);
14823 }
14824 }
14825
14826 static void
14827 wm_release_manageability(struct wm_softc *sc)
14828 {
14829
14830 if (sc->sc_flags & WM_F_HAS_MANAGE) {
14831 uint32_t manc = CSR_READ(sc, WMREG_MANC);
14832
14833 manc |= MANC_ARP_EN;
14834 if (sc->sc_type >= WM_T_82571)
14835 manc &= ~MANC_EN_MNG2HOST;
14836
14837 CSR_WRITE(sc, WMREG_MANC, manc);
14838 }
14839 }
14840
14841 static void
14842 wm_get_wakeup(struct wm_softc *sc)
14843 {
14844
14845 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
14846 switch (sc->sc_type) {
14847 case WM_T_82573:
14848 case WM_T_82583:
14849 sc->sc_flags |= WM_F_HAS_AMT;
14850 /* FALLTHROUGH */
14851 case WM_T_80003:
14852 case WM_T_82575:
14853 case WM_T_82576:
14854 case WM_T_82580:
14855 case WM_T_I350:
14856 case WM_T_I354:
14857 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
14858 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
14859 /* FALLTHROUGH */
14860 case WM_T_82541:
14861 case WM_T_82541_2:
14862 case WM_T_82547:
14863 case WM_T_82547_2:
14864 case WM_T_82571:
14865 case WM_T_82572:
14866 case WM_T_82574:
14867 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
14868 break;
14869 case WM_T_ICH8:
14870 case WM_T_ICH9:
14871 case WM_T_ICH10:
14872 case WM_T_PCH:
14873 case WM_T_PCH2:
14874 case WM_T_PCH_LPT:
14875 case WM_T_PCH_SPT:
14876 case WM_T_PCH_CNP:
14877 sc->sc_flags |= WM_F_HAS_AMT;
14878 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
14879 break;
14880 default:
14881 break;
14882 }
14883
14884 /* 1: HAS_MANAGE */
14885 if (wm_enable_mng_pass_thru(sc) != 0)
14886 sc->sc_flags |= WM_F_HAS_MANAGE;
14887
14888 /*
14889 	 * Note that the WOL flags are set after the EEPROM related settings
14890 	 * have been reset.
14891 */
14892 }
14893
14894 /*
14895 * Unconfigure Ultra Low Power mode.
14896 * Only for I217 and newer (see below).
14897 */
14898 static int
14899 wm_ulp_disable(struct wm_softc *sc)
14900 {
14901 uint32_t reg;
14902 uint16_t phyreg;
14903 int i = 0, rv = 0;
14904
14905 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14906 device_xname(sc->sc_dev), __func__));
14907 /* Exclude old devices */
14908 if ((sc->sc_type < WM_T_PCH_LPT)
14909 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
14910 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
14911 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
14912 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
14913 return 0;
14914
14915 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
14916 /* Request ME un-configure ULP mode in the PHY */
14917 reg = CSR_READ(sc, WMREG_H2ME);
14918 reg &= ~H2ME_ULP;
14919 reg |= H2ME_ENFORCE_SETTINGS;
14920 CSR_WRITE(sc, WMREG_H2ME, reg);
14921
14922 /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
14923 while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
14924 if (i++ == 30) {
14925 device_printf(sc->sc_dev, "%s timed out\n",
14926 __func__);
14927 return -1;
14928 }
14929 delay(10 * 1000);
14930 }
14931 reg = CSR_READ(sc, WMREG_H2ME);
14932 reg &= ~H2ME_ENFORCE_SETTINGS;
14933 CSR_WRITE(sc, WMREG_H2ME, reg);
14934
14935 return 0;
14936 }
14937
14938 /* Acquire semaphore */
14939 rv = sc->phy.acquire(sc);
14940 if (rv != 0) {
14941 DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
14942 device_xname(sc->sc_dev), __func__));
14943 return -1;
14944 }
14945
14946 /* Toggle LANPHYPC */
14947 wm_toggle_lanphypc_pch_lpt(sc);
14948
14949 /* Unforce SMBus mode in PHY */
14950 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
14951 if (rv != 0) {
14952 uint32_t reg2;
14953
14954 aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
14955 __func__);
14956 reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
14957 reg2 |= CTRL_EXT_FORCE_SMBUS;
14958 CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
14959 delay(50 * 1000);
14960
14961 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
14962 &phyreg);
14963 if (rv != 0)
14964 goto release;
14965 }
14966 phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
14967 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
14968
14969 /* Unforce SMBus mode in MAC */
14970 reg = CSR_READ(sc, WMREG_CTRL_EXT);
14971 reg &= ~CTRL_EXT_FORCE_SMBUS;
14972 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
14973
14974 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
14975 if (rv != 0)
14976 goto release;
14977 phyreg |= HV_PM_CTRL_K1_ENA;
14978 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
14979
14980 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
14981 &phyreg);
14982 if (rv != 0)
14983 goto release;
14984 phyreg &= ~(I218_ULP_CONFIG1_IND
14985 | I218_ULP_CONFIG1_STICKY_ULP
14986 | I218_ULP_CONFIG1_RESET_TO_SMBUS
14987 | I218_ULP_CONFIG1_WOL_HOST
14988 | I218_ULP_CONFIG1_INBAND_EXIT
14989 | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
14990 | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
14991 | I218_ULP_CONFIG1_DIS_SMB_PERST);
14992 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
14993 phyreg |= I218_ULP_CONFIG1_START;
14994 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
14995
14996 reg = CSR_READ(sc, WMREG_FEXTNVM7);
14997 reg &= ~FEXTNVM7_DIS_SMB_PERST;
14998 CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
14999
15000 release:
15001 /* Release semaphore */
15002 sc->phy.release(sc);
15003 wm_gmii_reset(sc);
15004 delay(50 * 1000);
15005
15006 return rv;
15007 }
15008
15009 /* WOL in the newer chipset interfaces (pchlan) */
15010 static int
15011 wm_enable_phy_wakeup(struct wm_softc *sc)
15012 {
15013 device_t dev = sc->sc_dev;
15014 uint32_t mreg, moff;
15015 uint16_t wuce, wuc, wufc, preg;
15016 int i, rv;
15017
15018 KASSERT(sc->sc_type >= WM_T_PCH);
15019
15020 /* Copy MAC RARs to PHY RARs */
15021 wm_copy_rx_addrs_to_phy_ich8lan(sc);
15022
15023 /* Activate PHY wakeup */
15024 rv = sc->phy.acquire(sc);
15025 if (rv != 0) {
15026 device_printf(dev, "%s: failed to acquire semaphore\n",
15027 __func__);
15028 return rv;
15029 }
15030
15031 /*
15032 * Enable access to PHY wakeup registers.
15033 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
15034 */
15035 rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
15036 if (rv != 0) {
15037 device_printf(dev,
15038 "%s: Could not enable PHY wakeup reg access\n", __func__);
15039 goto release;
15040 }
15041
15042 /* Copy MAC MTA to PHY MTA */
15043 for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
15044 uint16_t lo, hi;
15045
15046 mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
15047 lo = (uint16_t)(mreg & 0xffff);
15048 hi = (uint16_t)((mreg >> 16) & 0xffff);
15049 wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
15050 wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
15051 }
15052
15053 /* Configure PHY Rx Control register */
15054 wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
15055 mreg = CSR_READ(sc, WMREG_RCTL);
15056 if (mreg & RCTL_UPE)
15057 preg |= BM_RCTL_UPE;
15058 if (mreg & RCTL_MPE)
15059 preg |= BM_RCTL_MPE;
15060 preg &= ~(BM_RCTL_MO_MASK);
15061 moff = __SHIFTOUT(mreg, RCTL_MO);
15062 if (moff != 0)
15063 preg |= moff << BM_RCTL_MO_SHIFT;
15064 if (mreg & RCTL_BAM)
15065 preg |= BM_RCTL_BAM;
15066 if (mreg & RCTL_PMCF)
15067 preg |= BM_RCTL_PMCF;
15068 mreg = CSR_READ(sc, WMREG_CTRL);
15069 if (mreg & CTRL_RFCE)
15070 preg |= BM_RCTL_RFCE;
15071 wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
15072
15073 wuc = WUC_APME | WUC_PME_EN;
15074 wufc = WUFC_MAG;
15075 /* Enable PHY wakeup in MAC register */
15076 CSR_WRITE(sc, WMREG_WUC,
15077 WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
15078 CSR_WRITE(sc, WMREG_WUFC, wufc);
15079
15080 /* Configure and enable PHY wakeup in PHY registers */
15081 wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
15082 wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
15083
15084 wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
15085 wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
15086
15087 release:
15088 sc->phy.release(sc);
15089
15090 	return rv;
15091 }
15092
15093 /* Power down workaround on D3 */
15094 static void
15095 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
15096 {
15097 uint32_t reg;
15098 uint16_t phyreg;
15099 int i;
15100
15101 for (i = 0; i < 2; i++) {
15102 /* Disable link */
15103 reg = CSR_READ(sc, WMREG_PHY_CTRL);
15104 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
15105 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15106
15107 /*
15108 * Call gig speed drop workaround on Gig disable before
15109 * accessing any PHY registers
15110 */
15111 if (sc->sc_type == WM_T_ICH8)
15112 wm_gig_downshift_workaround_ich8lan(sc);
15113
15114 /* Write VR power-down enable */
15115 sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
15116 phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
15117 phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
15118 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
15119
15120 /* Read it back and test */
15121 sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
15122 phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
15123 if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
15124 break;
15125
15126 /* Issue PHY reset and repeat at most one more time */
15127 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
15128 }
15129 }
15130
15131 /*
15132 * wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
15133 * @sc: pointer to the HW structure
15134 *
15135 * During S0 to Sx transition, it is possible the link remains at gig
15136 * instead of negotiating to a lower speed. Before going to Sx, set
15137 * 'Gig Disable' to force link speed negotiation to a lower speed based on
15138 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
15139 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
15140 * needs to be written.
15141  * Parts that support (and are linked to a partner which supports) EEE in
15142 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
15143 * than 10Mbps w/o EEE.
15144 */
15145 static void
15146 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
15147 {
15148 device_t dev = sc->sc_dev;
15149 struct ethercom *ec = &sc->sc_ethercom;
15150 uint32_t phy_ctrl;
15151 int rv;
15152
15153 phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
15154 phy_ctrl |= PHY_CTRL_GBE_DIS;
15155
15156 KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
15157
15158 if (sc->sc_phytype == WMPHY_I217) {
15159 uint16_t devid = sc->sc_pcidevid;
15160
15161 if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
15162 (devid == PCI_PRODUCT_INTEL_I218_V) ||
15163 (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
15164 (devid == PCI_PRODUCT_INTEL_I218_V3) ||
15165 (sc->sc_type >= WM_T_PCH_SPT))
15166 CSR_WRITE(sc, WMREG_FEXTNVM6,
15167 CSR_READ(sc, WMREG_FEXTNVM6)
15168 & ~FEXTNVM6_REQ_PLL_CLK);
15169
15170 if (sc->phy.acquire(sc) != 0)
15171 goto out;
15172
15173 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
15174 uint16_t eee_advert;
15175
15176 rv = wm_read_emi_reg_locked(dev,
15177 I217_EEE_ADVERTISEMENT, &eee_advert);
15178 if (rv)
15179 goto release;
15180
15181 /*
15182 * Disable LPLU if both link partners support 100BaseT
15183 * EEE and 100Full is advertised on both ends of the
15184 * link, and enable Auto Enable LPI since there will
15185 * be no driver to enable LPI while in Sx.
15186 */
15187 if ((eee_advert & AN_EEEADVERT_100_TX) &&
15188 (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
15189 uint16_t anar, phy_reg;
15190
15191 sc->phy.readreg_locked(dev, 2, MII_ANAR,
15192 &anar);
15193 if (anar & ANAR_TX_FD) {
15194 phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
15195 PHY_CTRL_NOND0A_LPLU);
15196
15197 /* Set Auto Enable LPI after link up */
15198 sc->phy.readreg_locked(dev, 2,
15199 I217_LPI_GPIO_CTRL, &phy_reg);
15200 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
15201 sc->phy.writereg_locked(dev, 2,
15202 I217_LPI_GPIO_CTRL, phy_reg);
15203 }
15204 }
15205 }
15206
15207 /*
15208 * For i217 Intel Rapid Start Technology support,
15209 * when the system is going into Sx and no manageability engine
15210 * is present, the driver must configure proxy to reset only on
15211 * power good. LPI (Low Power Idle) state must also reset only
15212 * on power good, as well as the MTA (Multicast table array).
15213 * The SMBus release must also be disabled on LCD reset.
15214 */
15215
15216 /*
15217 * Enable MTA to reset for Intel Rapid Start Technology
15218 * Support
15219 */
15220
15221 release:
15222 sc->phy.release(sc);
15223 }
15224 out:
15225 CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
15226
15227 if (sc->sc_type == WM_T_ICH8)
15228 wm_gig_downshift_workaround_ich8lan(sc);
15229
15230 if (sc->sc_type >= WM_T_PCH) {
15231 wm_oem_bits_config_ich8lan(sc, false);
15232
15233 /* Reset PHY to activate OEM bits on 82577/8 */
15234 if (sc->sc_type == WM_T_PCH)
15235 wm_reset_phy(sc);
15236
15237 if (sc->phy.acquire(sc) != 0)
15238 return;
15239 wm_write_smbus_addr(sc);
15240 sc->phy.release(sc);
15241 }
15242 }
15243
15244 /*
15245 * wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
15246 * @sc: pointer to the HW structure
15247 *
15248 * During Sx to S0 transitions on non-managed devices or managed devices
15249 * on which PHY resets are not blocked, if the PHY registers cannot be
15250  * accessed properly by the s/w, toggle the LANPHYPC value to power cycle
15251 * the PHY.
15252 * On i217, setup Intel Rapid Start Technology.
15253 */
15254 static int
15255 wm_resume_workarounds_pchlan(struct wm_softc *sc)
15256 {
15257 device_t dev = sc->sc_dev;
15258 int rv;
15259
15260 if (sc->sc_type < WM_T_PCH2)
15261 return 0;
15262
15263 rv = wm_init_phy_workarounds_pchlan(sc);
15264 if (rv != 0)
15265 return -1;
15266
15267 /* For i217 Intel Rapid Start Technology support when the system
15268 * is transitioning from Sx and no manageability engine is present
15269 * configure SMBus to restore on reset, disable proxy, and enable
15270 * the reset on MTA (Multicast table array).
15271 */
15272 if (sc->sc_phytype == WMPHY_I217) {
15273 uint16_t phy_reg;
15274
15275 if (sc->phy.acquire(sc) != 0)
15276 return -1;
15277
15278 /* Clear Auto Enable LPI after link up */
15279 sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
15280 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
15281 sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
15282
15283 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
15284 /* Restore clear on SMB if no manageability engine
15285 * is present
15286 */
15287 rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
15288 &phy_reg);
15289 if (rv != 0)
15290 goto release;
15291 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
15292 sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
15293
15294 /* Disable Proxy */
15295 sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
15296 }
15297 /* Enable reset on MTA */
15298 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
15299 if (rv != 0)
15300 goto release;
15301 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
15302 sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
15303
15304 release:
15305 sc->phy.release(sc);
15306 return rv;
15307 }
15308
15309 return 0;
15310 }
15311
15312 static void
15313 wm_enable_wakeup(struct wm_softc *sc)
15314 {
15315 uint32_t reg, pmreg;
15316 pcireg_t pmode;
15317 int rv = 0;
15318
15319 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15320 device_xname(sc->sc_dev), __func__));
15321
15322 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
15323 &pmreg, NULL) == 0)
15324 return;
15325
15326 if ((sc->sc_flags & WM_F_WOL) == 0)
15327 goto pme;
15328
15329 /* Advertise the wakeup capability */
15330 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
15331 | CTRL_SWDPIN(3));
15332
15333 /* Keep the laser running on fiber adapters */
15334 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
15335 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
15336 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15337 reg |= CTRL_EXT_SWDPIN(3);
15338 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15339 }
15340
15341 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
15342 (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
15343 (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
15344 (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
15345 wm_suspend_workarounds_ich8lan(sc);
15346
15347 #if 0 /* For the multicast packet */
15348 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
15349 reg |= WUFC_MC;
15350 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
15351 #endif
15352
15353 if (sc->sc_type >= WM_T_PCH) {
15354 rv = wm_enable_phy_wakeup(sc);
15355 if (rv != 0)
15356 goto pme;
15357 } else {
15358 /* Enable wakeup by the MAC */
15359 CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
15360 CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
15361 }
15362
15363 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
15364 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
15365 || (sc->sc_type == WM_T_PCH2))
15366 && (sc->sc_phytype == WMPHY_IGP_3))
15367 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
15368
15369 pme:
15370 /* Request PME */
15371 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
15372 pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
15373 if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
15374 /* For WOL */
15375 pmode |= PCI_PMCSR_PME_EN;
15376 } else {
15377 /* Disable WOL */
15378 pmode &= ~PCI_PMCSR_PME_EN;
15379 }
15380 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
15381 }
15382
15383 /* Disable ASPM L0s and/or L1 for workaround */
15384 static void
15385 wm_disable_aspm(struct wm_softc *sc)
15386 {
15387 pcireg_t reg, mask = 0;
15388 	const char *str = "";
15389
15390 /*
15391 	 * Only for PCIe devices which have the PCIe capability in the PCI config
15392 * space.
15393 */
15394 if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
15395 return;
15396
15397 switch (sc->sc_type) {
15398 case WM_T_82571:
15399 case WM_T_82572:
15400 /*
15401 * 8257[12] Errata 13: Device Does Not Support PCIe Active
15402 * State Power management L1 State (ASPM L1).
15403 */
15404 mask = PCIE_LCSR_ASPM_L1;
15405 str = "L1 is";
15406 break;
15407 case WM_T_82573:
15408 case WM_T_82574:
15409 case WM_T_82583:
15410 /*
15411 * The 82573 disappears when PCIe ASPM L0s is enabled.
15412 *
15413 	 * The 82574 and 82583 do not support PCIe ASPM L0s with
15414 	 * some chipsets. The documents for the 82574 and 82583 say that
15415 	 * disabling L0s with some specific chipsets is sufficient,
15416 	 * but we follow what the Intel em driver does.
15417 *
15418 * References:
15419 * Errata 8 of the Specification Update of i82573.
15420 * Errata 20 of the Specification Update of i82574.
15421 * Errata 9 of the Specification Update of i82583.
15422 */
15423 mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
15424 str = "L0s and L1 are";
15425 break;
15426 default:
15427 return;
15428 }
15429
15430 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
15431 sc->sc_pcixe_capoff + PCIE_LCSR);
15432 reg &= ~mask;
15433 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
15434 sc->sc_pcixe_capoff + PCIE_LCSR, reg);
15435
15436 /* Print only in wm_attach() */
15437 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
15438 aprint_verbose_dev(sc->sc_dev,
15439 "ASPM %s disabled to workaround the errata.\n", str);
15440 }
15441
15442 /* LPLU */
15443
15444 static void
15445 wm_lplu_d0_disable(struct wm_softc *sc)
15446 {
15447 struct mii_data *mii = &sc->sc_mii;
15448 uint32_t reg;
15449 uint16_t phyval;
15450
15451 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15452 device_xname(sc->sc_dev), __func__));
15453
15454 if (sc->sc_phytype == WMPHY_IFE)
15455 return;
15456
15457 switch (sc->sc_type) {
15458 case WM_T_82571:
15459 case WM_T_82572:
15460 case WM_T_82573:
15461 case WM_T_82575:
15462 case WM_T_82576:
15463 mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
15464 phyval &= ~PMR_D0_LPLU;
15465 mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
15466 break;
15467 case WM_T_82580:
15468 case WM_T_I350:
15469 case WM_T_I210:
15470 case WM_T_I211:
15471 reg = CSR_READ(sc, WMREG_PHPM);
15472 reg &= ~PHPM_D0A_LPLU;
15473 CSR_WRITE(sc, WMREG_PHPM, reg);
15474 break;
15475 case WM_T_82574:
15476 case WM_T_82583:
15477 case WM_T_ICH8:
15478 case WM_T_ICH9:
15479 case WM_T_ICH10:
15480 reg = CSR_READ(sc, WMREG_PHY_CTRL);
15481 reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
15482 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15483 CSR_WRITE_FLUSH(sc);
15484 break;
15485 case WM_T_PCH:
15486 case WM_T_PCH2:
15487 case WM_T_PCH_LPT:
15488 case WM_T_PCH_SPT:
15489 case WM_T_PCH_CNP:
15490 wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
15491 phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
15492 if (wm_phy_resetisblocked(sc) == false)
15493 phyval |= HV_OEM_BITS_ANEGNOW;
15494 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
15495 break;
15496 default:
15497 break;
15498 }
15499 }
15500
15501 /* EEE */
15502
15503 static int
15504 wm_set_eee_i350(struct wm_softc *sc)
15505 {
15506 struct ethercom *ec = &sc->sc_ethercom;
15507 uint32_t ipcnfg, eeer;
15508 uint32_t ipcnfg_mask
15509 = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
15510 uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
15511
15512 KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
15513
15514 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
15515 eeer = CSR_READ(sc, WMREG_EEER);
15516
15517 /* Enable or disable per user setting */
15518 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
15519 ipcnfg |= ipcnfg_mask;
15520 eeer |= eeer_mask;
15521 } else {
15522 ipcnfg &= ~ipcnfg_mask;
15523 eeer &= ~eeer_mask;
15524 }
15525
15526 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
15527 CSR_WRITE(sc, WMREG_EEER, eeer);
15528 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
15529 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
15530
15531 return 0;
15532 }
15533
15534 static int
15535 wm_set_eee_pchlan(struct wm_softc *sc)
15536 {
15537 device_t dev = sc->sc_dev;
15538 struct ethercom *ec = &sc->sc_ethercom;
15539 uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
15540 int rv = 0;
15541
15542 switch (sc->sc_phytype) {
15543 case WMPHY_82579:
15544 lpa = I82579_EEE_LP_ABILITY;
15545 pcs_status = I82579_EEE_PCS_STATUS;
15546 adv_addr = I82579_EEE_ADVERTISEMENT;
15547 break;
15548 case WMPHY_I217:
15549 lpa = I217_EEE_LP_ABILITY;
15550 pcs_status = I217_EEE_PCS_STATUS;
15551 adv_addr = I217_EEE_ADVERTISEMENT;
15552 break;
15553 default:
15554 return 0;
15555 }
15556
15557 if (sc->phy.acquire(sc)) {
15558 device_printf(dev, "%s: failed to get semaphore\n", __func__);
15559 return 0;
15560 }
15561
15562 rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
15563 if (rv != 0)
15564 goto release;
15565
15566 /* Clear bits that enable EEE in various speeds */
15567 lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
15568
15569 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
15570 /* Save off link partner's EEE ability */
15571 rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
15572 if (rv != 0)
15573 goto release;
15574
15575 /* Read EEE advertisement */
15576 if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
15577 goto release;
15578
15579 /*
15580 * Enable EEE only for speeds in which the link partner is
15581 * EEE capable and for which we advertise EEE.
15582 */
15583 if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
15584 lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
15585 if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
15586 sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
15587 if ((data & ANLPAR_TX_FD) != 0)
15588 lpi_ctrl |= I82579_LPI_CTRL_EN_100;
15589 else {
15590 /*
15591 * EEE is not supported in 100Half, so ignore
15592 * partner's EEE in 100 ability if full-duplex
15593 * is not advertised.
15594 */
15595 sc->eee_lp_ability
15596 &= ~AN_EEEADVERT_100_TX;
15597 }
15598 }
15599 }
15600
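	/* 82579 only: do not let the PHY PLL shut down during 100Mbps LPI */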
15601 if (sc->sc_phytype == WMPHY_82579) {
15602 rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
15603 if (rv != 0)
15604 goto release;
15605
15606 data &= ~I82579_LPI_PLL_SHUT_100;
15607 rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
15608 }
15609
15610 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
15611 if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
15612 goto release;
15613
15614 rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
15615 release:
15616 sc->phy.release(sc);
15617
15618 return rv;
15619 }
15620
15621 static int
15622 wm_set_eee(struct wm_softc *sc)
15623 {
15624 struct ethercom *ec = &sc->sc_ethercom;
15625
15626 if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
15627 return 0;
15628
15629 if (sc->sc_type == WM_T_I354) {
15630 /* I354 uses an external PHY */
15631 return 0; /* not yet */
15632 } else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
15633 return wm_set_eee_i350(sc);
15634 else if (sc->sc_type >= WM_T_PCH2)
15635 return wm_set_eee_pchlan(sc);
15636
15637 return 0;
15638 }
15639
15640 /*
15641 * Workarounds (mainly PHY related).
15642 * Basically, PHY's workarounds are in the PHY drivers.
15643 */
15644
15645 /* Work-around for 82566 Kumeran PCS lock loss */
15646 static int
15647 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
15648 {
15649 struct mii_data *mii = &sc->sc_mii;
15650 uint32_t status = CSR_READ(sc, WMREG_STATUS);
15651 int i, reg, rv;
15652 uint16_t phyreg;
15653
15654 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15655 device_xname(sc->sc_dev), __func__));
15656
15657 /* If the link is not up, do nothing */
15658 if ((status & STATUS_LU) == 0)
15659 return 0;
15660
15661 /* Nothing to do if the link is other than 1Gbps */
15662 if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
15663 return 0;
15664
15665 for (i = 0; i < 10; i++) {
15666 /* read twice */
15667 rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
15668 if (rv != 0)
15669 return rv;
15670 rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
15671 if (rv != 0)
15672 return rv;
15673
15674 if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
15675 goto out; /* GOOD! */
15676
15677 /* Reset the PHY */
15678 wm_reset_phy(sc);
15679 delay(5*1000);
15680 }
15681
15682 /* Disable GigE link negotiation */
15683 reg = CSR_READ(sc, WMREG_PHY_CTRL);
15684 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
15685 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15686
15687 /*
15688 * Call gig speed drop workaround on Gig disable before accessing
15689 * any PHY registers.
15690 */
15691 wm_gig_downshift_workaround_ich8lan(sc);
15692
15693 out:
15694 return 0;
15695 }
15696
15697 /*
15698 * wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
15699 * @sc: pointer to the HW structure
15700 *
15701 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
15702 * LPLU, Gig disable, MDIC PHY reset):
15703 * 1) Set Kumeran Near-end loopback
15704 * 2) Clear Kumeran Near-end loopback
15705 * Should only be called for ICH8[m] devices with any 1G Phy.
15706 */
15707 static void
15708 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
15709 {
15710 uint16_t kmreg;
15711
15712 /* Only for igp3 */
15713 if (sc->sc_phytype == WMPHY_IGP_3) {
15714 if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
15715 return;
15716 kmreg |= KUMCTRLSTA_DIAG_NELPBK;
15717 if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
15718 return;
15719 kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
15720 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
15721 }
15722 }
15723
15724 /*
15725 * Workaround for pch's PHYs
15726 * XXX should be moved to new PHY driver?
15727 */
15728 static int
15729 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
15730 {
15731 device_t dev = sc->sc_dev;
15732 struct mii_data *mii = &sc->sc_mii;
15733 struct mii_softc *child;
15734 uint16_t phy_data, phyrev = 0;
15735 int phytype = sc->sc_phytype;
15736 int rv;
15737
15738 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15739 device_xname(dev), __func__));
15740 KASSERT(sc->sc_type == WM_T_PCH);
15741
15742 /* Set MDIO slow mode before any other MDIO access */
15743 if (phytype == WMPHY_82577)
15744 if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
15745 return rv;
15746
15747 child = LIST_FIRST(&mii->mii_phys);
15748 if (child != NULL)
15749 phyrev = child->mii_mpd_rev;
15750
15751 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
15752 if ((child != NULL) &&
15753 (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
15754 ((phytype == WMPHY_82578) && (phyrev == 1)))) {
15755 /* Disable generation of early preamble (0x4431) */
15756 rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
15757 &phy_data);
15758 if (rv != 0)
15759 return rv;
15760 phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
15761 BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
15762 rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
15763 phy_data);
15764 if (rv != 0)
15765 return rv;
15766
15767 /* Preamble tuning for SSC */
15768 rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
15769 if (rv != 0)
15770 return rv;
15771 }
15772
15773 /* 82578 */
15774 if (phytype == WMPHY_82578) {
15775 /*
15776 * Return registers to default by doing a soft reset then
15777 * writing 0x3140 to the control register
15778 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
15779 */
15780 if ((child != NULL) && (phyrev < 2)) {
15781 PHY_RESET(child);
15782 rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
15783 if (rv != 0)
15784 return rv;
15785 }
15786 }
15787
15788 /* Select page 0 */
15789 if ((rv = sc->phy.acquire(sc)) != 0)
15790 return rv;
15791 rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
15792 sc->phy.release(sc);
15793 if (rv != 0)
15794 return rv;
15795
15796 /*
15797 * Configure the K1 Si workaround during phy reset assuming there is
15798 * link so that it disables K1 if link is in 1Gbps.
15799 */
15800 if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
15801 return rv;
15802
15803 /* Workaround for link disconnects on a busy hub in half duplex */
15804 rv = sc->phy.acquire(sc);
15805 if (rv)
15806 return rv;
15807 rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
15808 if (rv)
15809 goto release;
15810 rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
15811 phy_data & 0x00ff);
15812 if (rv)
15813 goto release;
15814
15815 /* Set MSE higher to enable link to stay up when noise is high */
15816 rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
15817 release:
15818 sc->phy.release(sc);
15819
15820 return rv;
15821 }
15822
15823 /*
15824 * wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
15825 * @sc: pointer to the HW structure
15826 */
15827 static void
15828 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
15829 {
15830 device_t dev = sc->sc_dev;
15831 uint32_t mac_reg;
15832 uint16_t i, wuce;
15833 int count;
15834
15835 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15836 device_xname(sc->sc_dev), __func__));
15837
15838 if (sc->phy.acquire(sc) != 0)
15839 return;
15840 if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
15841 goto release;
15842
15843 /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
15844 count = wm_rar_count(sc);
15845 for (i = 0; i < count; i++) {
15846 uint16_t lo, hi;
15847 mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
15848 lo = (uint16_t)(mac_reg & 0xffff);
15849 hi = (uint16_t)((mac_reg >> 16) & 0xffff);
15850 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
15851 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
15852
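		/* For RAH, only the Address Valid bit is carried into the PHY's RAR control word */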
15853 mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
15854 lo = (uint16_t)(mac_reg & 0xffff);
15855 hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
15856 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
15857 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
15858 }
15859
15860 wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
15861
15862 release:
15863 sc->phy.release(sc);
15864 }
15865
15866 /*
15867 * wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
15868 * done after every PHY reset.
15869 */
15870 static int
15871 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
15872 {
15873 device_t dev = sc->sc_dev;
15874 int rv;
15875
15876 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15877 device_xname(dev), __func__));
15878 KASSERT(sc->sc_type == WM_T_PCH2);
15879
15880 /* Set MDIO slow mode before any other MDIO access */
15881 rv = wm_set_mdio_slow_mode_hv(sc);
15882 if (rv != 0)
15883 return rv;
15884
15885 rv = sc->phy.acquire(sc);
15886 if (rv != 0)
15887 return rv;
15888 /* Set MSE higher to enable link to stay up when noise is high */
15889 rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
15890 if (rv != 0)
15891 goto release;
15892 /* Drop link after 5 times MSE threshold was reached */
15893 rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
15894 release:
15895 sc->phy.release(sc);
15896
15897 return rv;
15898 }
15899
15900 /**
15901 * wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
15902 * @link: link up bool flag
15903 *
15904 * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
15905 * preventing further DMA write requests. Workaround the issue by disabling
15906  * the de-assertion of the clock request when in 1Gbps mode.
15907 * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
15908 * speeds in order to avoid Tx hangs.
15909 **/
15910 static int
15911 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
15912 {
15913 uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
15914 uint32_t status = CSR_READ(sc, WMREG_STATUS);
15915 uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
15916 uint16_t phyreg;
15917
15918 if (link && (speed == STATUS_SPEED_1000)) {
15919 sc->phy.acquire(sc);
15920 int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
15921 &phyreg);
15922 if (rv != 0)
15923 goto release;
15924 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
15925 phyreg & ~KUMCTRLSTA_K1_ENABLE);
15926 if (rv != 0)
15927 goto release;
15928 delay(20);
15929 CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
15930
15931 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
15932 &phyreg);
15933 release:
15934 sc->phy.release(sc);
15935 return rv;
15936 }
15937
15938 fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
15939
15940 struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
15941 if (((child != NULL) && (child->mii_mpd_rev > 5))
15942 || !link
15943 || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
15944 goto update_fextnvm6;
15945
15946 wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
15947
15948 /* Clear link status transmit timeout */
15949 phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
15950 if (speed == STATUS_SPEED_100) {
15951 /* Set inband Tx timeout to 5x10us for 100Half */
15952 phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
15953
15954 /* Do not extend the K1 entry latency for 100Half */
15955 fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
15956 } else {
15957 /* Set inband Tx timeout to 50x10us for 10Full/Half */
15958 phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
15959
15960 /* Extend the K1 entry latency for 10 Mbps */
15961 fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
15962 }
15963
15964 wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
15965
15966 update_fextnvm6:
15967 CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
15968 return 0;
15969 }
15970
15971 /*
15972 * wm_k1_gig_workaround_hv - K1 Si workaround
15973 * @sc: pointer to the HW structure
15974 * @link: link up bool flag
15975 *
15976 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
15977  * from a lower speed. This workaround disables K1 whenever link is at 1Gig.
15978 * If link is down, the function will restore the default K1 setting located
15979 * in the NVM.
15980 */
15981 static int
15982 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
15983 {
15984 int k1_enable = sc->sc_nvm_k1_enabled;
15985
15986 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15987 device_xname(sc->sc_dev), __func__));
15988
15989 if (sc->phy.acquire(sc) != 0)
15990 return -1;
15991
15992 if (link) {
15993 k1_enable = 0;
15994
15995 /* Link stall fix for link up */
15996 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
15997 0x0100);
15998 } else {
15999 /* Link stall fix for link down */
16000 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
16001 0x4100);
16002 }
16003
16004 wm_configure_k1_ich8lan(sc, k1_enable);
16005 sc->phy.release(sc);
16006
16007 return 0;
16008 }
16009
16010 /*
16011 * wm_k1_workaround_lv - K1 Si workaround
16012 * @sc: pointer to the HW structure
16013 *
16014  * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
16015  * Disable K1 for 1000 and 100 speeds.
16016 */
16017 static int
16018 wm_k1_workaround_lv(struct wm_softc *sc)
16019 {
16020 uint32_t reg;
16021 uint16_t phyreg;
16022 int rv;
16023
16024 if (sc->sc_type != WM_T_PCH2)
16025 return 0;
16026
16027 /* Set K1 beacon duration based on 10Mbps speed */
16028 rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
16029 if (rv != 0)
16030 return rv;
16031
16032 if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
16033 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
16034 if (phyreg &
16035 (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
16036 /* LV 1G/100 Packet drop issue wa */
16037 rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
16038 &phyreg);
16039 if (rv != 0)
16040 return rv;
16041 phyreg &= ~HV_PM_CTRL_K1_ENA;
16042 rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
16043 phyreg);
16044 if (rv != 0)
16045 return rv;
16046 } else {
16047 /* For 10Mbps */
16048 reg = CSR_READ(sc, WMREG_FEXTNVM4);
16049 reg &= ~FEXTNVM4_BEACON_DURATION;
16050 reg |= FEXTNVM4_BEACON_DURATION_16US;
16051 CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
16052 }
16053 }
16054
16055 return 0;
16056 }
16057
16058 /*
16059 * wm_link_stall_workaround_hv - Si workaround
16060 * @sc: pointer to the HW structure
16061 *
16062 * This function works around a Si bug where the link partner can get
16063 * a link up indication before the PHY does. If small packets are sent
16064 * by the link partner they can be placed in the packet buffer without
16065 * being properly accounted for by the PHY and will stall preventing
16066 * further packets from being received. The workaround is to clear the
16067 * packet buffer after the PHY detects link up.
16068 */
16069 static int
16070 wm_link_stall_workaround_hv(struct wm_softc *sc)
16071 {
16072 uint16_t phyreg;
16073
16074 if (sc->sc_phytype != WMPHY_82578)
16075 return 0;
16076
16077 /* Do not apply workaround if in PHY loopback bit 14 set */
16078 wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
16079 if ((phyreg & BMCR_LOOP) != 0)
16080 return 0;
16081
16082 /* Check if link is up and at 1Gbps */
16083 wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
16084 phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
16085 | BM_CS_STATUS_SPEED_MASK;
16086 if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
16087 | BM_CS_STATUS_SPEED_1000))
16088 return 0;
16089
16090 delay(200 * 1000); /* XXX too big */
16091
16092 /* Flush the packets in the fifo buffer */
16093 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
16094 HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
16095 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
16096 HV_MUX_DATA_CTRL_GEN_TO_MAC);
16097
16098 return 0;
16099 }
16100
16101 static int
16102 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
16103 {
16104 int rv;
16105 uint16_t reg;
16106
16107 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
16108 if (rv != 0)
16109 return rv;
16110
16111 return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
16112 reg | HV_KMRN_MDIO_SLOW);
16113 }
16114
16115 /*
16116 * wm_configure_k1_ich8lan - Configure K1 power state
16117 * @sc: pointer to the HW structure
16118 * @enable: K1 state to configure
16119 *
16120 * Configure the K1 power state based on the provided parameter.
16121 * Assumes semaphore already acquired.
16122 */
16123 static void
16124 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
16125 {
16126 uint32_t ctrl, ctrl_ext, tmp;
16127 uint16_t kmreg;
16128 int rv;
16129
16130 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
16131
16132 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
16133 if (rv != 0)
16134 return;
16135
16136 if (k1_enable)
16137 kmreg |= KUMCTRLSTA_K1_ENABLE;
16138 else
16139 kmreg &= ~KUMCTRLSTA_K1_ENABLE;
16140
16141 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
16142 if (rv != 0)
16143 return;
16144
16145 delay(20);
16146
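	/*
	 * Briefly force the MAC speed (FRCSPD with SPD_BYPS set) so that
	 * the new K1 configuration takes effect, then restore the original
	 * CTRL and CTRL_EXT values.
	 */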
16147 ctrl = CSR_READ(sc, WMREG_CTRL);
16148 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
16149
16150 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
16151 tmp |= CTRL_FRCSPD;
16152
16153 CSR_WRITE(sc, WMREG_CTRL, tmp);
16154 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
16155 CSR_WRITE_FLUSH(sc);
16156 delay(20);
16157
16158 CSR_WRITE(sc, WMREG_CTRL, ctrl);
16159 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
16160 CSR_WRITE_FLUSH(sc);
16161 delay(20);
16162
16163 return;
16164 }
16165
16166 /* special case - for 82575 - need to do manual init ... */
16167 static void
16168 wm_reset_init_script_82575(struct wm_softc *sc)
16169 {
16170 /*
16171 	 * Remark: this is untested code - we have no board without EEPROM.
16172 	 * The setup is the same as mentioned in the FreeBSD driver for the i82575.
16173 */
16174
16175 /* SerDes configuration via SERDESCTRL */
16176 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
16177 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
16178 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
16179 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
16180
16181 /* CCM configuration via CCMCTL register */
16182 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
16183 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
16184
16185 /* PCIe lanes configuration */
16186 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
16187 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
16188 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
16189 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
16190
16191 /* PCIe PLL Configuration */
16192 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
16193 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
16194 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
16195 }
16196
16197 static void
16198 wm_reset_mdicnfg_82580(struct wm_softc *sc)
16199 {
16200 uint32_t reg;
16201 uint16_t nvmword;
16202 int rv;
16203
16204 if (sc->sc_type != WM_T_82580)
16205 return;
16206 if ((sc->sc_flags & WM_F_SGMII) == 0)
16207 return;
16208
16209 rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
16210 + NVM_OFF_CFG3_PORTA, 1, &nvmword);
16211 if (rv != 0) {
16212 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
16213 __func__);
16214 return;
16215 }
16216
16217 reg = CSR_READ(sc, WMREG_MDICNFG);
16218 if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
16219 reg |= MDICNFG_DEST;
16220 if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
16221 reg |= MDICNFG_COM_MDIO;
16222 CSR_WRITE(sc, WMREG_MDICNFG, reg);
16223 }
16224
16225 #define MII_INVALIDID(x) (((x) == 0x0000) || ((x) == 0xffff))
16226
16227 static bool
16228 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
16229 {
16230 uint32_t reg;
16231 uint16_t id1, id2;
16232 int i, rv;
16233
16234 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
16235 device_xname(sc->sc_dev), __func__));
16236 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
16237
16238 id1 = id2 = 0xffff;
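	/*
	 * Read the PHY ID registers, retrying once; the first access after
	 * a reset may fail or return an invalid ID.
	 */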
16239 for (i = 0; i < 2; i++) {
16240 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
16241 &id1);
16242 if ((rv != 0) || MII_INVALIDID(id1))
16243 continue;
16244 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
16245 &id2);
16246 if ((rv != 0) || MII_INVALIDID(id2))
16247 continue;
16248 break;
16249 }
16250 if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
16251 goto out;
16252
16253 /*
16254 * In case the PHY needs to be in mdio slow mode,
16255 * set slow mode and try to get the PHY id again.
16256 */
16257 rv = 0;
16258 if (sc->sc_type < WM_T_PCH_LPT) {
16259 sc->phy.release(sc);
16260 wm_set_mdio_slow_mode_hv(sc);
16261 rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
16262 rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
16263 sc->phy.acquire(sc);
16264 }
16265 if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
16266 device_printf(sc->sc_dev, "XXX return with false\n");
16267 return false;
16268 }
16269 out:
16270 if (sc->sc_type >= WM_T_PCH_LPT) {
16271 /* Only unforce SMBus if ME is not active */
16272 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
16273 uint16_t phyreg;
16274
16275 /* Unforce SMBus mode in PHY */
16276 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
16277 CV_SMB_CTRL, &phyreg);
16278 phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
16279 wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
16280 CV_SMB_CTRL, phyreg);
16281
16282 /* Unforce SMBus mode in MAC */
16283 reg = CSR_READ(sc, WMREG_CTRL_EXT);
16284 reg &= ~CTRL_EXT_FORCE_SMBUS;
16285 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16286 }
16287 }
16288 return true;
16289 }
16290
16291 static void
16292 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
16293 {
16294 uint32_t reg;
16295 int i;
16296
16297 /* Set PHY Config Counter to 50msec */
16298 reg = CSR_READ(sc, WMREG_FEXTNVM3);
16299 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
16300 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
16301 CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
16302
16303 /* Toggle LANPHYPC */
16304 reg = CSR_READ(sc, WMREG_CTRL);
16305 reg |= CTRL_LANPHYPC_OVERRIDE;
16306 reg &= ~CTRL_LANPHYPC_VALUE;
16307 CSR_WRITE(sc, WMREG_CTRL, reg);
16308 CSR_WRITE_FLUSH(sc);
16309 delay(1000);
16310 reg &= ~CTRL_LANPHYPC_OVERRIDE;
16311 CSR_WRITE(sc, WMREG_CTRL, reg);
16312 CSR_WRITE_FLUSH(sc);
16313
16314 if (sc->sc_type < WM_T_PCH_LPT)
16315 delay(50 * 1000);
16316 else {
16317 i = 20;
16318
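		/*
		 * Poll up to ~100ms for the LANPHYPC cycle done (LPCD)
		 * indication, then allow additional settle time.
		 */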
16319 do {
16320 delay(5 * 1000);
16321 } while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
16322 && i--);
16323
16324 delay(30 * 1000);
16325 }
16326 }
16327
16328 static int
16329 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
16330 {
16331 uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
16332 | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
16333 uint32_t rxa;
16334 uint16_t scale = 0, lat_enc = 0;
16335 int32_t obff_hwm = 0;
16336 int64_t lat_ns, value;
16337
16338 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
16339 device_xname(sc->sc_dev), __func__));
16340
16341 if (link) {
16342 uint16_t max_snoop, max_nosnoop, max_ltr_enc;
16343 uint32_t status;
16344 uint16_t speed;
16345 pcireg_t preg;
16346
16347 status = CSR_READ(sc, WMREG_STATUS);
16348 switch (__SHIFTOUT(status, STATUS_SPEED)) {
16349 case STATUS_SPEED_10:
16350 speed = 10;
16351 break;
16352 case STATUS_SPEED_100:
16353 speed = 100;
16354 break;
16355 case STATUS_SPEED_1000:
16356 speed = 1000;
16357 break;
16358 default:
16359 device_printf(sc->sc_dev, "Unknown speed "
16360 "(status = %08x)\n", status);
16361 return -1;
16362 }
16363
16364 /* Rx Packet Buffer Allocation size (KB) */
16365 rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
16366
16367 /*
16368 * Determine the maximum latency tolerated by the device.
16369 *
16370 * Per the PCIe spec, the tolerated latencies are encoded as
16371 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
16372 * a 10-bit value (0-1023) to provide a range from 1 ns to
16373 * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
16374 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
16375 */
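		/*
		 * The tolerable latency is the time to drain the Rx packet
		 * buffer (minus room for two maximum-sized frames) at the
		 * current link speed, converted from bytes to nanoseconds.
		 */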
16376 lat_ns = ((int64_t)rxa * 1024 -
16377 (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
16378 + ETHER_HDR_LEN))) * 8 * 1000;
16379 if (lat_ns < 0)
16380 lat_ns = 0;
16381 else
16382 lat_ns /= speed;
16383 value = lat_ns;
16384
16385 while (value > LTRV_VALUE) {
16386 			scale++;
16387 value = howmany(value, __BIT(5));
16388 }
16389 if (scale > LTRV_SCALE_MAX) {
16390 device_printf(sc->sc_dev,
16391 "Invalid LTR latency scale %d\n", scale);
16392 return -1;
16393 }
16394 lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
16395
16396 /* Determine the maximum latency tolerated by the platform */
16397 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
16398 WM_PCI_LTR_CAP_LPT);
16399 max_snoop = preg & 0xffff;
16400 max_nosnoop = preg >> 16;
16401
16402 max_ltr_enc = MAX(max_snoop, max_nosnoop);
16403
16404 if (lat_enc > max_ltr_enc) {
16405 lat_enc = max_ltr_enc;
16406 lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
16407 * PCI_LTR_SCALETONS(
16408 __SHIFTOUT(lat_enc,
16409 PCI_LTR_MAXSNOOPLAT_SCALE));
16410 }
16411
16412 if (lat_ns) {
16413 lat_ns *= speed * 1000;
16414 lat_ns /= 8;
16415 lat_ns /= 1000000000;
16416 obff_hwm = (int32_t)(rxa - lat_ns);
16417 }
16418 if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
16419 device_printf(sc->sc_dev, "Invalid high water mark %d"
16420 			    " (rxa = %d, lat_ns = %d)\n",
16421 obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
16422 return -1;
16423 }
16424 }
16425 /* Snoop and No-Snoop latencies the same */
16426 reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
16427 CSR_WRITE(sc, WMREG_LTRV, reg);
16428
16429 /* Set OBFF high water mark */
16430 reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
16431 reg |= obff_hwm;
16432 CSR_WRITE(sc, WMREG_SVT, reg);
16433
16434 /* Enable OBFF */
16435 reg = CSR_READ(sc, WMREG_SVCR);
16436 reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
16437 CSR_WRITE(sc, WMREG_SVCR, reg);
16438
16439 return 0;
16440 }
16441
16442 /*
16443 * I210 Errata 25 and I211 Errata 10
16444 * Slow System Clock.
16445 */
16446 static int
16447 wm_pll_workaround_i210(struct wm_softc *sc)
16448 {
16449 uint32_t mdicnfg, wuc;
16450 uint32_t reg;
16451 pcireg_t pcireg;
16452 uint32_t pmreg;
16453 uint16_t nvmword, tmp_nvmword;
16454 uint16_t phyval;
16455 bool wa_done = false;
16456 int i, rv = 0;
16457
16458 /* Get Power Management cap offset */
16459 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
16460 &pmreg, NULL) == 0)
16461 return -1;
16462
16463 /* Save WUC and MDICNFG registers */
16464 wuc = CSR_READ(sc, WMREG_WUC);
16465 mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
16466
16467 reg = mdicnfg & ~MDICNFG_DEST;
16468 CSR_WRITE(sc, WMREG_MDICNFG, reg);
16469
16470 if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
16471 nvmword = INVM_DEFAULT_AL;
16472 tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
16473
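	/*
	 * tmp_nvmword carries the PLL workaround bit.  It is programmed via
	 * EEARBC during each reset/power-cycle attempt below; the original
	 * autoload word is restored afterwards.
	 */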
16474 for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
16475 wm_gmii_gs40g_readreg(sc->sc_dev, 1,
16476 GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
16477
16478 if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
16479 rv = 0;
16480 break; /* OK */
16481 } else
16482 rv = -1;
16483
16484 wa_done = true;
16485 /* Directly reset the internal PHY */
16486 reg = CSR_READ(sc, WMREG_CTRL);
16487 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
16488
16489 reg = CSR_READ(sc, WMREG_CTRL_EXT);
16490 reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
16491 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16492
16493 CSR_WRITE(sc, WMREG_WUC, 0);
16494 reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
16495 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
16496
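		/*
		 * Bounce the function through D3hot and back to D0 so that
		 * the internal PHY and PLL restart with the workaround value.
		 */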
16497 pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
16498 pmreg + PCI_PMCSR);
16499 pcireg |= PCI_PMCSR_STATE_D3;
16500 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
16501 pmreg + PCI_PMCSR, pcireg);
16502 delay(1000);
16503 pcireg &= ~PCI_PMCSR_STATE_D3;
16504 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
16505 pmreg + PCI_PMCSR, pcireg);
16506
16507 reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
16508 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
16509
16510 /* Restore WUC register */
16511 CSR_WRITE(sc, WMREG_WUC, wuc);
16512 }
16513
16514 /* Restore MDICNFG setting */
16515 CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
16516 if (wa_done)
16517 aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
16518 return rv;
16519 }
16520
16521 static void
16522 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
16523 {
16524 uint32_t reg;
16525
16526 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
16527 device_xname(sc->sc_dev), __func__));
16528 KASSERT((sc->sc_type == WM_T_PCH_SPT)
16529 || (sc->sc_type == WM_T_PCH_CNP));
16530
16531 reg = CSR_READ(sc, WMREG_FEXTNVM7);
16532 reg |= FEXTNVM7_SIDE_CLK_UNGATE;
16533 CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
16534
16535 reg = CSR_READ(sc, WMREG_FEXTNVM9);
16536 reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
16537 CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
16538 }
16539