/*	$NetBSD: if_wm.c,v 1.436 2016/10/31 02:44:54 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Advanced Receive Descriptor
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.436 2016/10/31 02:44:54 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
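
/*
 * Illustrative usage note (not from the original source): the second
 * DPRINTF() argument carries its own parentheses so that it can expand
 * to a full printf() argument list, e.g.:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> link up\n",
 *		device_xname(sc->sc_dev)));
 *
 * When WM_DEBUG is not defined the whole statement compiles away.
 */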

#ifdef NET_MPSAFE
#define	WM_MPSAFE	1
#endif

/*
 * The maximum number of interrupts this driver can use.
 */
#define	WM_MAX_NQUEUEINTR	16
#define	WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
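
/*
 * Worked example (illustrative): because WM_NTXDESC() is a power of
 * two, the "& mask" in WM_NEXTTX() is a cheap modular increment.
 * With WM_NTXDESC_82544 (4096) descriptors:
 *
 *	WM_NEXTTX(txq, 4094) == 4095
 *	WM_NEXTTX(txq, 4095) == 0	(wraps to the ring head)
 */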

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
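
/*
 * Worked example (illustrative) for the sizing comment above: a
 * 9018-byte jumbo frame split across 2K (MCLBYTES) buffers needs
 * howmany(9018, 2048) == 5 descriptors, so the 256 descriptors
 * allocated here hold roughly 256 / 5 == 51, i.e. the "room for
 * 50 jumbo packets" quoted above.
 */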

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(x)		(sizeof(wiseman_rxdesc_t) * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
#endif /* WM_EVENT_COUNTERS */
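
/*
 * Expansion sketch (illustrative): WM_Q_EVCNT_DEFINE(txq, txdw)
 * declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * Note that "##" inside the string literal is not token pasting; the
 * literal only reserves enough space for names like "txq00txdw",
 * which WM_Q_EVCNT_ATTACH() later formats with "%s%02d%s".
 */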

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall) /* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall) /* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall) /* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	 /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	 /* Tx queue empty interrupts */
					 /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)	 /* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)	 /* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6) /* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)	 /* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)	 /* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain) /* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)	 /* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)	 /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	wiseman_rxdesc_t *rxq_descs;

	/* RX control data structures. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
	size_t rxq_desc_size;		/* control data size */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);	/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum); /* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum); /* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
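
/*
 * Sketch (illustrative) of the tail-pointer idiom above: rxq_tailp
 * always points at the m_next slot where the next mbuf will be hung,
 * so appending is O(1) with no empty-list special case:
 *
 *	WM_RXCHAIN_RESET(rxq);		tailp = &rxq_head
 *	WM_RXCHAIN_LINK(rxq, m1);	rxq_head = m1, tailp = &m1->m_next
 *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next = m2, tailp = &m2->m_next
 */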

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
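
/*
 * CSR_WRITE_FLUSH() exploits a PCI ordering rule: a read forces any
 * posted writes ahead of it to complete, so reading the harmless
 * STATUS register right after CSR_WRITE() guarantees the write has
 * reached the chip before, say, a delay() is timed (illustrative):
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10);
 */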

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

#define WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
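
/*
 * Example (illustrative): the _LO/_HI pairs split a descriptor-ring
 * DMA address for the chip's 64-bit base-address register pairs.  On
 * a 32-bit bus_addr_t the high half is always 0; on a 64-bit one,
 * e.g. 0x0000000123456000 splits into HI 0x1 and LO 0x23456000.
 */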

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static inline int	wm_nq_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using the EERD register */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *); /* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#ifdef WM_WOL
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
#endif
	{ 0,			0,
	  NULL,
	  0,			0 },
};

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}
1466
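/*
 * Load a DMA address into a descriptor address field, splitting it into
 * 32-bit halves. For example (a hypothetical address), v == 0x123456789a
 * yields wa_low == 0x3456789a and wa_high == 0x12; when bus_addr_t is
 * only 32 bits wide, the high half is simply zero.
 */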
1467 static inline void
1468 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1469 {
1470 wa->wa_low = htole32(v & 0xffffffffU);
1471 if (sizeof(bus_addr_t) == 8)
1472 wa->wa_high = htole32((uint64_t) v >> 32);
1473 else
1474 wa->wa_high = 0;
1475 }
1476
1477 /*
1478 * Descriptor sync/init functions.
1479 */
1480 static inline void
1481 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1482 {
1483 struct wm_softc *sc = txq->txq_sc;
1484
1485 /* If it will wrap around, sync to the end of the ring. */
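	/*
	 * For example (hypothetical values): with WM_NTXDESC(txq) == 256,
	 * start == 250 and num == 10, this sync covers descriptors
	 * 250..255 and the one below covers descriptors 0..3.
	 */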
1486 if ((start + num) > WM_NTXDESC(txq)) {
1487 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1488 WM_CDTXOFF(txq, start), txq->txq_descsize *
1489 (WM_NTXDESC(txq) - start), ops);
1490 num -= (WM_NTXDESC(txq) - start);
1491 start = 0;
1492 }
1493
1494 /* Now sync whatever is left. */
1495 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1496 WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1497 }
1498
1499 static inline void
1500 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1501 {
1502 struct wm_softc *sc = rxq->rxq_sc;
1503
1504 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1505 WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
1506 }
1507
1508 static inline void
1509 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1510 {
1511 struct wm_softc *sc = rxq->rxq_sc;
1512 struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1513 wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1514 struct mbuf *m = rxs->rxs_mbuf;
1515
1516 /*
1517 * Note: We scoot the packet forward 2 bytes in the buffer
1518 * so that the payload after the Ethernet header is aligned
1519 * to a 4-byte boundary.
1520 	 *
1521 * XXX BRAINDAMAGE ALERT!
1522 * The stupid chip uses the same size for every buffer, which
1523 * is set in the Receive Control register. We are using the 2K
1524 * size option, but what we REALLY want is (2K - 2)! For this
1525 * reason, we can't "scoot" packets longer than the standard
1526 * Ethernet MTU. On strict-alignment platforms, if the total
1527 * size exceeds (2K - 2) we set align_tweak to 0 and let
1528 * the upper layer copy the headers.
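	 *
	 * For example (hypothetical sizes): a standard 1514-byte frame
	 * plus align_tweak == 2 is 1516 bytes, which fits in the 2K
	 * buffer, so the scoot is safe; a 9000-byte jumbo frame would
	 * exceed (2K - 2), so align_tweak must be 0 for it.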
1529 */
1530 m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1531
1532 wm_set_dma_addr(&rxd->wrx_addr,
1533 rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1534 rxd->wrx_len = 0;
1535 rxd->wrx_cksum = 0;
1536 rxd->wrx_status = 0;
1537 rxd->wrx_errors = 0;
1538 rxd->wrx_special = 0;
1539 wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1540
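	/* Update the ring tail register (RDT) to hand this descriptor
	 * back to the hardware. */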
1541 CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1542 }
1543
1544 /*
1545 * Device driver interface functions and commonly used functions.
1546 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1547 */
1548
1549 /* Look up a device in the supported device table */
1550 static const struct wm_product *
1551 wm_lookup(const struct pci_attach_args *pa)
1552 {
1553 const struct wm_product *wmp;
1554
1555 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1556 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1557 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1558 return wmp;
1559 }
1560 return NULL;
1561 }
1562
1563 /* The match function (ca_match) */
1564 static int
1565 wm_match(device_t parent, cfdata_t cf, void *aux)
1566 {
1567 struct pci_attach_args *pa = aux;
1568
1569 if (wm_lookup(pa) != NULL)
1570 return 1;
1571
1572 return 0;
1573 }
1574
1575 /* The attach function (ca_attach) */
1576 static void
1577 wm_attach(device_t parent, device_t self, void *aux)
1578 {
1579 struct wm_softc *sc = device_private(self);
1580 struct pci_attach_args *pa = aux;
1581 prop_dictionary_t dict;
1582 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1583 pci_chipset_tag_t pc = pa->pa_pc;
1584 int counts[PCI_INTR_TYPE_SIZE];
1585 pci_intr_type_t max_type;
1586 const char *eetype, *xname;
1587 bus_space_tag_t memt;
1588 bus_space_handle_t memh;
1589 bus_size_t memsize;
1590 int memh_valid;
1591 int i, error;
1592 const struct wm_product *wmp;
1593 prop_data_t ea;
1594 prop_number_t pn;
1595 uint8_t enaddr[ETHER_ADDR_LEN];
1596 uint16_t cfg1, cfg2, swdpin, nvmword;
1597 pcireg_t preg, memtype;
1598 uint16_t eeprom_data, apme_mask;
1599 bool force_clear_smbi;
1600 uint32_t link_mode;
1601 uint32_t reg;
1602
1603 sc->sc_dev = self;
1604 callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1605 sc->sc_core_stopping = false;
1606
1607 wmp = wm_lookup(pa);
1608 #ifdef DIAGNOSTIC
1609 if (wmp == NULL) {
1610 printf("\n");
1611 panic("wm_attach: impossible");
1612 }
1613 #endif
1614 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1615
1616 sc->sc_pc = pa->pa_pc;
1617 sc->sc_pcitag = pa->pa_tag;
1618
1619 if (pci_dma64_available(pa))
1620 sc->sc_dmat = pa->pa_dmat64;
1621 else
1622 sc->sc_dmat = pa->pa_dmat;
1623
1624 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1625 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
1626 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1627
1628 sc->sc_type = wmp->wmp_type;
1629
1630 /* Set default function pointers */
1631 sc->phy.acquire = wm_get_null;
1632 sc->phy.release = wm_put_null;
1633
1634 if (sc->sc_type < WM_T_82543) {
1635 if (sc->sc_rev < 2) {
1636 aprint_error_dev(sc->sc_dev,
1637 "i82542 must be at least rev. 2\n");
1638 return;
1639 }
1640 if (sc->sc_rev < 3)
1641 sc->sc_type = WM_T_82542_2_0;
1642 }
1643
1644 /*
1645 * Disable MSI for Errata:
1646 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1647 *
1648 * 82544: Errata 25
1649 * 82540: Errata 6 (easy to reproduce device timeout)
1650 * 82545: Errata 4 (easy to reproduce device timeout)
1651 * 82546: Errata 26 (easy to reproduce device timeout)
1652 * 82541: Errata 7 (easy to reproduce device timeout)
1653 *
1654 * "Byte Enables 2 and 3 are not set on MSI writes"
1655 *
1656 * 82571 & 82572: Errata 63
1657 */
1658 if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1659 || (sc->sc_type == WM_T_82572))
1660 pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1661
1662 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1663 || (sc->sc_type == WM_T_82580)
1664 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1665 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1666 sc->sc_flags |= WM_F_NEWQUEUE;
1667
1668 /* Set device properties (mactype) */
1669 dict = device_properties(sc->sc_dev);
1670 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1671
1672 /*
1673 	 * Map the device. All devices support memory-mapped access,
1674 * and it is really required for normal operation.
1675 */
1676 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1677 switch (memtype) {
1678 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1679 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1680 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1681 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1682 break;
1683 default:
1684 memh_valid = 0;
1685 break;
1686 }
1687
1688 if (memh_valid) {
1689 sc->sc_st = memt;
1690 sc->sc_sh = memh;
1691 sc->sc_ss = memsize;
1692 } else {
1693 aprint_error_dev(sc->sc_dev,
1694 "unable to map device registers\n");
1695 return;
1696 }
1697
1698 /*
1699 * In addition, i82544 and later support I/O mapped indirect
1700 * register access. It is not desirable (nor supported in
1701 * this driver) to use it for normal operation, though it is
1702 * required to work around bugs in some chip versions.
1703 */
1704 if (sc->sc_type >= WM_T_82544) {
1705 /* First we have to find the I/O BAR. */
1706 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1707 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1708 if (memtype == PCI_MAPREG_TYPE_IO)
1709 break;
1710 if (PCI_MAPREG_MEM_TYPE(memtype) ==
1711 PCI_MAPREG_MEM_TYPE_64BIT)
1712 i += 4; /* skip high bits, too */
1713 }
1714 if (i < PCI_MAPREG_END) {
1715 /*
1716 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1717 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1718 			 * That's not a problem, because those newer chips
1719 			 * don't have this bug.
1720 			 *
1721 			 * The i8254x apparently doesn't respond when the
1722 			 * I/O BAR is 0, which looks somewhat like it hasn't
1723 			 * been configured.
1724 */
1725 preg = pci_conf_read(pc, pa->pa_tag, i);
1726 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1727 aprint_error_dev(sc->sc_dev,
1728 "WARNING: I/O BAR at zero.\n");
1729 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1730 0, &sc->sc_iot, &sc->sc_ioh,
1731 NULL, &sc->sc_ios) == 0) {
1732 sc->sc_flags |= WM_F_IOH_VALID;
1733 } else {
1734 aprint_error_dev(sc->sc_dev,
1735 "WARNING: unable to map I/O space\n");
1736 }
1737 }
1738
1739 }
1740
1741 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1742 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1743 preg |= PCI_COMMAND_MASTER_ENABLE;
1744 if (sc->sc_type < WM_T_82542_2_1)
1745 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1746 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1747
1748 /* power up chip */
1749 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1750 NULL)) && error != EOPNOTSUPP) {
1751 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1752 return;
1753 }
1754
1755 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1756
1757 /* Allocation settings */
1758 max_type = PCI_INTR_TYPE_MSIX;
1759 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
1760 counts[PCI_INTR_TYPE_MSI] = 1;
1761 counts[PCI_INTR_TYPE_INTX] = 1;
1762
1763 alloc_retry:
1764 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1765 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1766 return;
1767 }
1768
1769 if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1770 error = wm_setup_msix(sc);
1771 if (error) {
1772 pci_intr_release(pc, sc->sc_intrs,
1773 counts[PCI_INTR_TYPE_MSIX]);
1774
1775 /* Setup for MSI: Disable MSI-X */
1776 max_type = PCI_INTR_TYPE_MSI;
1777 counts[PCI_INTR_TYPE_MSI] = 1;
1778 counts[PCI_INTR_TYPE_INTX] = 1;
1779 goto alloc_retry;
1780 }
1781 } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1782 wm_adjust_qnum(sc, 0); /* must not use multiqueue */
1783 error = wm_setup_legacy(sc);
1784 if (error) {
1785 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1786 counts[PCI_INTR_TYPE_MSI]);
1787
1788 /* The next try is for INTx: Disable MSI */
1789 max_type = PCI_INTR_TYPE_INTX;
1790 counts[PCI_INTR_TYPE_INTX] = 1;
1791 goto alloc_retry;
1792 }
1793 } else {
1794 wm_adjust_qnum(sc, 0); /* must not use multiqueue */
1795 error = wm_setup_legacy(sc);
1796 if (error) {
1797 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1798 counts[PCI_INTR_TYPE_INTX]);
1799 return;
1800 }
1801 }
1802
1803 /*
1804 * Check the function ID (unit number of the chip).
1805 */
1806 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1807 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1808 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1809 || (sc->sc_type == WM_T_82580)
1810 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1811 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1812 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1813 else
1814 sc->sc_funcid = 0;
1815
1816 /*
1817 * Determine a few things about the bus we're connected to.
1818 */
1819 if (sc->sc_type < WM_T_82543) {
1820 /* We don't really know the bus characteristics here. */
1821 sc->sc_bus_speed = 33;
1822 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1823 /*
1824 * CSA (Communication Streaming Architecture) is about as fast
1825 		 * as a 32-bit 66MHz PCI bus.
1826 */
1827 sc->sc_flags |= WM_F_CSA;
1828 sc->sc_bus_speed = 66;
1829 aprint_verbose_dev(sc->sc_dev,
1830 "Communication Streaming Architecture\n");
1831 if (sc->sc_type == WM_T_82547) {
1832 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1833 callout_setfunc(&sc->sc_txfifo_ch,
1834 wm_82547_txfifo_stall, sc);
1835 aprint_verbose_dev(sc->sc_dev,
1836 "using 82547 Tx FIFO stall work-around\n");
1837 }
1838 } else if (sc->sc_type >= WM_T_82571) {
1839 sc->sc_flags |= WM_F_PCIE;
1840 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1841 && (sc->sc_type != WM_T_ICH10)
1842 && (sc->sc_type != WM_T_PCH)
1843 && (sc->sc_type != WM_T_PCH2)
1844 && (sc->sc_type != WM_T_PCH_LPT)
1845 && (sc->sc_type != WM_T_PCH_SPT)) {
1846 /* ICH* and PCH* have no PCIe capability registers */
1847 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1848 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1849 NULL) == 0)
1850 aprint_error_dev(sc->sc_dev,
1851 "unable to find PCIe capability\n");
1852 }
1853 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1854 } else {
1855 reg = CSR_READ(sc, WMREG_STATUS);
1856 if (reg & STATUS_BUS64)
1857 sc->sc_flags |= WM_F_BUS64;
1858 if ((reg & STATUS_PCIX_MODE) != 0) {
1859 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1860
1861 sc->sc_flags |= WM_F_PCIX;
1862 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1863 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1864 aprint_error_dev(sc->sc_dev,
1865 "unable to find PCIX capability\n");
1866 else if (sc->sc_type != WM_T_82545_3 &&
1867 sc->sc_type != WM_T_82546_3) {
1868 /*
1869 * Work around a problem caused by the BIOS
1870 * setting the max memory read byte count
1871 * incorrectly.
1872 */
1873 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1874 sc->sc_pcixe_capoff + PCIX_CMD);
1875 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1876 sc->sc_pcixe_capoff + PCIX_STATUS);
1877
1878 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1879 PCIX_CMD_BYTECNT_SHIFT;
1880 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1881 PCIX_STATUS_MAXB_SHIFT;
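				/*
				 * Both fields encode 512 << n bytes, so
				 * clamp the commanded burst size (MMRBC) to
				 * the device's advertised maximum.
				 */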
1882 if (bytecnt > maxb) {
1883 aprint_verbose_dev(sc->sc_dev,
1884 "resetting PCI-X MMRBC: %d -> %d\n",
1885 512 << bytecnt, 512 << maxb);
1886 pcix_cmd = (pcix_cmd &
1887 ~PCIX_CMD_BYTECNT_MASK) |
1888 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1889 pci_conf_write(pa->pa_pc, pa->pa_tag,
1890 sc->sc_pcixe_capoff + PCIX_CMD,
1891 pcix_cmd);
1892 }
1893 }
1894 }
1895 /*
1896 * The quad port adapter is special; it has a PCIX-PCIX
1897 * bridge on the board, and can run the secondary bus at
1898 * a higher speed.
1899 */
1900 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1901 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1902 : 66;
1903 } else if (sc->sc_flags & WM_F_PCIX) {
1904 switch (reg & STATUS_PCIXSPD_MASK) {
1905 case STATUS_PCIXSPD_50_66:
1906 sc->sc_bus_speed = 66;
1907 break;
1908 case STATUS_PCIXSPD_66_100:
1909 sc->sc_bus_speed = 100;
1910 break;
1911 case STATUS_PCIXSPD_100_133:
1912 sc->sc_bus_speed = 133;
1913 break;
1914 default:
1915 aprint_error_dev(sc->sc_dev,
1916 "unknown PCIXSPD %d; assuming 66MHz\n",
1917 reg & STATUS_PCIXSPD_MASK);
1918 sc->sc_bus_speed = 66;
1919 break;
1920 }
1921 } else
1922 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1923 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1924 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1925 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1926 }
1927
1928 /* clear interesting stat counters */
1929 CSR_READ(sc, WMREG_COLC);
1930 CSR_READ(sc, WMREG_RXERRC);
1931
1932 /* get PHY control from SMBus to PCIe */
1933 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1934 || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
1935 wm_smbustopci(sc);
1936
1937 if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
1938 || (sc->sc_type >= WM_T_ICH8))
1939 sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
1940 if (sc->sc_type >= WM_T_ICH8)
1941 sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
1942
1943 /* Set PHY, NVM mutex related stuff */
1944 switch (sc->sc_type) {
1945 case WM_T_82542_2_0:
1946 case WM_T_82542_2_1:
1947 case WM_T_82543:
1948 case WM_T_82544:
1949 /* Microwire */
1950 sc->sc_nvm_wordsize = 64;
1951 sc->sc_nvm_addrbits = 6;
1952 break;
1953 case WM_T_82540:
1954 case WM_T_82545:
1955 case WM_T_82545_3:
1956 case WM_T_82546:
1957 case WM_T_82546_3:
1958 /* Microwire */
1959 reg = CSR_READ(sc, WMREG_EECD);
1960 if (reg & EECD_EE_SIZE) {
1961 sc->sc_nvm_wordsize = 256;
1962 sc->sc_nvm_addrbits = 8;
1963 } else {
1964 sc->sc_nvm_wordsize = 64;
1965 sc->sc_nvm_addrbits = 6;
1966 }
1967 sc->sc_flags |= WM_F_LOCK_EECD;
1968 break;
1969 case WM_T_82541:
1970 case WM_T_82541_2:
1971 case WM_T_82547:
1972 case WM_T_82547_2:
1973 sc->sc_flags |= WM_F_LOCK_EECD;
1974 reg = CSR_READ(sc, WMREG_EECD);
1975 if (reg & EECD_EE_TYPE) {
1976 /* SPI */
1977 sc->sc_flags |= WM_F_EEPROM_SPI;
1978 wm_nvm_set_addrbits_size_eecd(sc);
1979 } else {
1980 /* Microwire */
1981 if ((reg & EECD_EE_ABITS) != 0) {
1982 sc->sc_nvm_wordsize = 256;
1983 sc->sc_nvm_addrbits = 8;
1984 } else {
1985 sc->sc_nvm_wordsize = 64;
1986 sc->sc_nvm_addrbits = 6;
1987 }
1988 }
1989 break;
1990 case WM_T_82571:
1991 case WM_T_82572:
1992 /* SPI */
1993 sc->sc_flags |= WM_F_EEPROM_SPI;
1994 wm_nvm_set_addrbits_size_eecd(sc);
1995 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1996 sc->phy.acquire = wm_get_swsm_semaphore;
1997 sc->phy.release = wm_put_swsm_semaphore;
1998 break;
1999 case WM_T_82573:
2000 case WM_T_82574:
2001 case WM_T_82583:
2002 if (sc->sc_type == WM_T_82573) {
2003 sc->sc_flags |= WM_F_LOCK_SWSM;
2004 sc->phy.acquire = wm_get_swsm_semaphore;
2005 sc->phy.release = wm_put_swsm_semaphore;
2006 } else {
2007 sc->sc_flags |= WM_F_LOCK_EXTCNF;
2008 /* Both PHY and NVM use the same semaphore. */
2009 			sc->phy.acquire = wm_get_swfwhw_semaphore;
2010 			sc->phy.release = wm_put_swfwhw_semaphore;
2013 }
2014 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2015 sc->sc_flags |= WM_F_EEPROM_FLASH;
2016 sc->sc_nvm_wordsize = 2048;
2017 } else {
2018 /* SPI */
2019 sc->sc_flags |= WM_F_EEPROM_SPI;
2020 wm_nvm_set_addrbits_size_eecd(sc);
2021 }
2022 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
2023 break;
2024 case WM_T_82575:
2025 case WM_T_82576:
2026 case WM_T_82580:
2027 case WM_T_I350:
2028 case WM_T_I354:
2029 case WM_T_80003:
2030 /* SPI */
2031 sc->sc_flags |= WM_F_EEPROM_SPI;
2032 wm_nvm_set_addrbits_size_eecd(sc);
2033 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
2034 | WM_F_LOCK_SWSM;
2035 sc->phy.acquire = wm_get_phy_82575;
2036 sc->phy.release = wm_put_phy_82575;
2037 break;
2038 case WM_T_ICH8:
2039 case WM_T_ICH9:
2040 case WM_T_ICH10:
2041 case WM_T_PCH:
2042 case WM_T_PCH2:
2043 case WM_T_PCH_LPT:
2044 /* FLASH */
2045 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
2046 sc->sc_nvm_wordsize = 2048;
2047 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
2048 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2049 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2050 aprint_error_dev(sc->sc_dev,
2051 "can't map FLASH registers\n");
2052 goto out;
2053 }
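		/*
		 * GFPREG encodes the flash base (low half) and limit (high
		 * half), both masked with ICH_GFPREG_BASE_MASK and counted
		 * in ICH_FLASH_SECTOR_SIZE units. The bank size computed
		 * below is (limit + 1 - base) sectors, converted to bytes
		 * and then to 16-bit words, halved for the two flash banks.
		 */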
2054 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2055 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2056 ICH_FLASH_SECTOR_SIZE;
2057 sc->sc_ich8_flash_bank_size =
2058 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2059 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2060 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2061 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2062 sc->sc_flashreg_offset = 0;
2063 sc->phy.acquire = wm_get_swflag_ich8lan;
2064 sc->phy.release = wm_put_swflag_ich8lan;
2065 break;
2066 case WM_T_PCH_SPT:
2067 /* SPT has no GFPREG; flash registers mapped through BAR0 */
2068 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
2069 sc->sc_flasht = sc->sc_st;
2070 sc->sc_flashh = sc->sc_sh;
2071 sc->sc_ich8_flash_base = 0;
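		/*
		 * The NVM size comes from the STRAP register: the 5-bit
		 * field at bit 1 encodes (n + 1) * NVM_SIZE_MULTIPLIER
		 * bytes. E.g. (hypothetically) a field value of 7 gives
		 * 8 * NVM_SIZE_MULTIPLIER bytes, i.e. half that in words.
		 */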
2072 sc->sc_nvm_wordsize =
2073 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2074 * NVM_SIZE_MULTIPLIER;
2075 	/* It is the size in bytes; we want words */
2076 sc->sc_nvm_wordsize /= 2;
2077 /* assume 2 banks */
2078 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2079 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2080 sc->phy.acquire = wm_get_swflag_ich8lan;
2081 sc->phy.release = wm_put_swflag_ich8lan;
2082 break;
2083 case WM_T_I210:
2084 case WM_T_I211:
2085 if (wm_nvm_get_flash_presence_i210(sc)) {
2086 wm_nvm_set_addrbits_size_eecd(sc);
2087 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2088 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
2089 } else {
2090 sc->sc_nvm_wordsize = INVM_SIZE;
2091 sc->sc_flags |= WM_F_EEPROM_INVM;
2092 }
2093 sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
2094 sc->phy.acquire = wm_get_phy_82575;
2095 sc->phy.release = wm_put_phy_82575;
2096 break;
2097 default:
2098 break;
2099 }
2100
2101 /* Reset the chip to a known state. */
2102 wm_reset(sc);
2103
2104 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2105 switch (sc->sc_type) {
2106 case WM_T_82571:
2107 case WM_T_82572:
2108 reg = CSR_READ(sc, WMREG_SWSM2);
2109 if ((reg & SWSM2_LOCK) == 0) {
2110 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2111 force_clear_smbi = true;
2112 } else
2113 force_clear_smbi = false;
2114 break;
2115 case WM_T_82573:
2116 case WM_T_82574:
2117 case WM_T_82583:
2118 force_clear_smbi = true;
2119 break;
2120 default:
2121 force_clear_smbi = false;
2122 break;
2123 }
2124 if (force_clear_smbi) {
2125 reg = CSR_READ(sc, WMREG_SWSM);
2126 if ((reg & SWSM_SMBI) != 0)
2127 aprint_error_dev(sc->sc_dev,
2128 "Please update the Bootagent\n");
2129 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2130 }
2131
2132 /*
2133 	 * Defer printing the EEPROM type until after verifying the checksum.
2134 * This allows the EEPROM type to be printed correctly in the case
2135 * that no EEPROM is attached.
2136 */
2137 /*
2138 * Validate the EEPROM checksum. If the checksum fails, flag
2139 * this for later, so we can fail future reads from the EEPROM.
2140 */
2141 if (wm_nvm_validate_checksum(sc)) {
2142 /*
2143 		 * Validate the checksum again, because some PCI-e parts fail
2144 		 * the first check due to the link being in a sleep state.
2145 */
2146 if (wm_nvm_validate_checksum(sc))
2147 sc->sc_flags |= WM_F_EEPROM_INVALID;
2148 }
2149
2150 /* Set device properties (macflags) */
2151 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2152
2153 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2154 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2155 else {
2156 aprint_verbose_dev(sc->sc_dev, "%u words ",
2157 sc->sc_nvm_wordsize);
2158 if (sc->sc_flags & WM_F_EEPROM_INVM)
2159 aprint_verbose("iNVM");
2160 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2161 aprint_verbose("FLASH(HW)");
2162 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2163 aprint_verbose("FLASH");
2164 else {
2165 if (sc->sc_flags & WM_F_EEPROM_SPI)
2166 eetype = "SPI";
2167 else
2168 eetype = "MicroWire";
2169 aprint_verbose("(%d address bits) %s EEPROM",
2170 sc->sc_nvm_addrbits, eetype);
2171 }
2172 }
2173 wm_nvm_version(sc);
2174 aprint_verbose("\n");
2175
2176 /* Check for I21[01] PLL workaround */
2177 if (sc->sc_type == WM_T_I210)
2178 sc->sc_flags |= WM_F_PLL_WA_I210;
2179 if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2180 /* NVM image release 3.25 has a workaround */
2181 if ((sc->sc_nvm_ver_major < 3)
2182 || ((sc->sc_nvm_ver_major == 3)
2183 && (sc->sc_nvm_ver_minor < 25))) {
2184 aprint_verbose_dev(sc->sc_dev,
2185 "ROM image version %d.%d is older than 3.25\n",
2186 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2187 sc->sc_flags |= WM_F_PLL_WA_I210;
2188 }
2189 }
2190 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2191 wm_pll_workaround_i210(sc);
2192
2193 wm_get_wakeup(sc);
2194 switch (sc->sc_type) {
2195 case WM_T_82571:
2196 case WM_T_82572:
2197 case WM_T_82573:
2198 case WM_T_82574:
2199 case WM_T_82583:
2200 case WM_T_80003:
2201 case WM_T_ICH8:
2202 case WM_T_ICH9:
2203 case WM_T_ICH10:
2204 case WM_T_PCH:
2205 case WM_T_PCH2:
2206 case WM_T_PCH_LPT:
2207 case WM_T_PCH_SPT:
2208 /* Non-AMT based hardware can now take control from firmware */
2209 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2210 wm_get_hw_control(sc);
2211 break;
2212 default:
2213 break;
2214 }
2215
2216 /*
2217 	 * Read the Ethernet address from the EEPROM, unless it was
2218 	 * already found in the device properties.
2219 */
2220 ea = prop_dictionary_get(dict, "mac-address");
2221 if (ea != NULL) {
2222 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2223 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2224 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2225 } else {
2226 if (wm_read_mac_addr(sc, enaddr) != 0) {
2227 aprint_error_dev(sc->sc_dev,
2228 "unable to read Ethernet address\n");
2229 goto out;
2230 }
2231 }
2232
2233 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2234 ether_sprintf(enaddr));
2235
2236 /*
2237 * Read the config info from the EEPROM, and set up various
2238 * bits in the control registers based on their contents.
2239 */
2240 pn = prop_dictionary_get(dict, "i82543-cfg1");
2241 if (pn != NULL) {
2242 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2243 cfg1 = (uint16_t) prop_number_integer_value(pn);
2244 } else {
2245 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2246 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2247 goto out;
2248 }
2249 }
2250
2251 pn = prop_dictionary_get(dict, "i82543-cfg2");
2252 if (pn != NULL) {
2253 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2254 cfg2 = (uint16_t) prop_number_integer_value(pn);
2255 } else {
2256 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2257 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2258 goto out;
2259 }
2260 }
2261
2262 /* check for WM_F_WOL */
2263 switch (sc->sc_type) {
2264 case WM_T_82542_2_0:
2265 case WM_T_82542_2_1:
2266 case WM_T_82543:
2267 /* dummy? */
2268 eeprom_data = 0;
2269 apme_mask = NVM_CFG3_APME;
2270 break;
2271 case WM_T_82544:
2272 apme_mask = NVM_CFG2_82544_APM_EN;
2273 eeprom_data = cfg2;
2274 break;
2275 case WM_T_82546:
2276 case WM_T_82546_3:
2277 case WM_T_82571:
2278 case WM_T_82572:
2279 case WM_T_82573:
2280 case WM_T_82574:
2281 case WM_T_82583:
2282 case WM_T_80003:
2283 default:
2284 apme_mask = NVM_CFG3_APME;
2285 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2286 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2287 break;
2288 case WM_T_82575:
2289 case WM_T_82576:
2290 case WM_T_82580:
2291 case WM_T_I350:
2292 case WM_T_I354: /* XXX ok? */
2293 case WM_T_ICH8:
2294 case WM_T_ICH9:
2295 case WM_T_ICH10:
2296 case WM_T_PCH:
2297 case WM_T_PCH2:
2298 case WM_T_PCH_LPT:
2299 case WM_T_PCH_SPT:
2300 /* XXX The funcid should be checked on some devices */
2301 apme_mask = WUC_APME;
2302 eeprom_data = CSR_READ(sc, WMREG_WUC);
2303 break;
2304 }
2305
2306 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2307 if ((eeprom_data & apme_mask) != 0)
2308 sc->sc_flags |= WM_F_WOL;
2309 #ifdef WM_DEBUG
2310 if ((sc->sc_flags & WM_F_WOL) != 0)
2311 printf("WOL\n");
2312 #endif
2313
2314 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2315 /* Check NVM for autonegotiation */
2316 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2317 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2318 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2319 }
2320 }
2321
2322 /*
2323 	 * XXX need special handling for some multi-port cards
2324 	 * to disable a particular port.
2325 */
2326
2327 if (sc->sc_type >= WM_T_82544) {
2328 pn = prop_dictionary_get(dict, "i82543-swdpin");
2329 if (pn != NULL) {
2330 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2331 swdpin = (uint16_t) prop_number_integer_value(pn);
2332 } else {
2333 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2334 aprint_error_dev(sc->sc_dev,
2335 "unable to read SWDPIN\n");
2336 goto out;
2337 }
2338 }
2339 }
2340
2341 if (cfg1 & NVM_CFG1_ILOS)
2342 sc->sc_ctrl |= CTRL_ILOS;
2343
2344 /*
2345 * XXX
2346 	 * This code isn't correct, because pins 2 and 3 are located
2347 	 * at different positions on newer chips. Check all the datasheets.
2348 	 *
2349 	 * Until this is resolved, only apply it to chips < 82580.
2350 */
2351 if (sc->sc_type <= WM_T_82580) {
2352 if (sc->sc_type >= WM_T_82544) {
2353 sc->sc_ctrl |=
2354 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2355 CTRL_SWDPIO_SHIFT;
2356 sc->sc_ctrl |=
2357 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2358 CTRL_SWDPINS_SHIFT;
2359 } else {
2360 sc->sc_ctrl |=
2361 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2362 CTRL_SWDPIO_SHIFT;
2363 }
2364 }
2365
2366 /* XXX For other than 82580? */
2367 if (sc->sc_type == WM_T_82580) {
2368 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2369 if (nvmword & __BIT(13))
2370 sc->sc_ctrl |= CTRL_ILOS;
2371 }
2372
2373 #if 0
2374 if (sc->sc_type >= WM_T_82544) {
2375 if (cfg1 & NVM_CFG1_IPS0)
2376 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2377 if (cfg1 & NVM_CFG1_IPS1)
2378 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2379 sc->sc_ctrl_ext |=
2380 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2381 CTRL_EXT_SWDPIO_SHIFT;
2382 sc->sc_ctrl_ext |=
2383 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2384 CTRL_EXT_SWDPINS_SHIFT;
2385 } else {
2386 sc->sc_ctrl_ext |=
2387 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2388 CTRL_EXT_SWDPIO_SHIFT;
2389 }
2390 #endif
2391
2392 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2393 #if 0
2394 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2395 #endif
2396
2397 if (sc->sc_type == WM_T_PCH) {
2398 uint16_t val;
2399
2400 /* Save the NVM K1 bit setting */
2401 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2402
2403 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2404 sc->sc_nvm_k1_enabled = 1;
2405 else
2406 sc->sc_nvm_k1_enabled = 0;
2407 }
2408
2409 /*
2410 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
2411 * media structures accordingly.
2412 */
2413 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2414 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2415 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2416 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
2417 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2418 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2419 wm_gmii_mediainit(sc, wmp->wmp_product);
2420 } else if (sc->sc_type < WM_T_82543 ||
2421 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2422 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2423 aprint_error_dev(sc->sc_dev,
2424 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2425 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2426 }
2427 wm_tbi_mediainit(sc);
2428 } else {
2429 switch (sc->sc_type) {
2430 case WM_T_82575:
2431 case WM_T_82576:
2432 case WM_T_82580:
2433 case WM_T_I350:
2434 case WM_T_I354:
2435 case WM_T_I210:
2436 case WM_T_I211:
2437 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2438 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2439 switch (link_mode) {
2440 case CTRL_EXT_LINK_MODE_1000KX:
2441 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2442 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2443 break;
2444 case CTRL_EXT_LINK_MODE_SGMII:
2445 if (wm_sgmii_uses_mdio(sc)) {
2446 aprint_verbose_dev(sc->sc_dev,
2447 "SGMII(MDIO)\n");
2448 sc->sc_flags |= WM_F_SGMII;
2449 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2450 break;
2451 }
2452 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2453 /*FALLTHROUGH*/
2454 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2455 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2456 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2457 if (link_mode
2458 == CTRL_EXT_LINK_MODE_SGMII) {
2459 sc->sc_mediatype
2460 = WM_MEDIATYPE_COPPER;
2461 sc->sc_flags |= WM_F_SGMII;
2462 } else {
2463 sc->sc_mediatype
2464 = WM_MEDIATYPE_SERDES;
2465 aprint_verbose_dev(sc->sc_dev,
2466 "SERDES\n");
2467 }
2468 break;
2469 }
2470 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2471 aprint_verbose_dev(sc->sc_dev,
2472 "SERDES\n");
2473
2474 /* Change current link mode setting */
2475 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2476 switch (sc->sc_mediatype) {
2477 case WM_MEDIATYPE_COPPER:
2478 reg |= CTRL_EXT_LINK_MODE_SGMII;
2479 break;
2480 case WM_MEDIATYPE_SERDES:
2481 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2482 break;
2483 default:
2484 break;
2485 }
2486 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2487 break;
2488 case CTRL_EXT_LINK_MODE_GMII:
2489 default:
2490 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2491 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2492 break;
2493 }
2494
2495 			reg &= ~CTRL_EXT_I2C_ENA;
2496 			if ((sc->sc_flags & WM_F_SGMII) != 0)
2497 				reg |= CTRL_EXT_I2C_ENA;
2500 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2501
2502 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2503 wm_gmii_mediainit(sc, wmp->wmp_product);
2504 else
2505 wm_tbi_mediainit(sc);
2506 break;
2507 default:
2508 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2509 aprint_error_dev(sc->sc_dev,
2510 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2511 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2512 wm_gmii_mediainit(sc, wmp->wmp_product);
2513 }
2514 }
2515
2516 ifp = &sc->sc_ethercom.ec_if;
2517 xname = device_xname(sc->sc_dev);
2518 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2519 ifp->if_softc = sc;
2520 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2521 ifp->if_extflags = IFEF_START_MPSAFE;
2522 ifp->if_ioctl = wm_ioctl;
2523 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2524 ifp->if_start = wm_nq_start;
2525 if (sc->sc_nqueues > 1)
2526 ifp->if_transmit = wm_nq_transmit;
2527 } else
2528 ifp->if_start = wm_start;
2529 ifp->if_watchdog = wm_watchdog;
2530 ifp->if_init = wm_init;
2531 ifp->if_stop = wm_stop;
2532 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2533 IFQ_SET_READY(&ifp->if_snd);
2534
2535 /* Check for jumbo frame */
2536 switch (sc->sc_type) {
2537 case WM_T_82573:
2538 /* XXX limited to 9234 if ASPM is disabled */
2539 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2540 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2541 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2542 break;
2543 case WM_T_82571:
2544 case WM_T_82572:
2545 case WM_T_82574:
2546 case WM_T_82575:
2547 case WM_T_82576:
2548 case WM_T_82580:
2549 case WM_T_I350:
2550 case WM_T_I354: /* XXXX ok? */
2551 case WM_T_I210:
2552 case WM_T_I211:
2553 case WM_T_80003:
2554 case WM_T_ICH9:
2555 case WM_T_ICH10:
2556 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2557 case WM_T_PCH_LPT:
2558 case WM_T_PCH_SPT:
2559 /* XXX limited to 9234 */
2560 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2561 break;
2562 case WM_T_PCH:
2563 /* XXX limited to 4096 */
2564 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2565 break;
2566 case WM_T_82542_2_0:
2567 case WM_T_82542_2_1:
2568 case WM_T_82583:
2569 case WM_T_ICH8:
2570 /* No support for jumbo frame */
2571 break;
2572 default:
2573 /* ETHER_MAX_LEN_JUMBO */
2574 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2575 break;
2576 }
2577
2578 	/* If we're an i82543 or greater, we can support VLANs. */
2579 if (sc->sc_type >= WM_T_82543)
2580 sc->sc_ethercom.ec_capabilities |=
2581 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2582
2583 /*
2584 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
2585 	 * on the i82543 and later.
2586 */
2587 if (sc->sc_type >= WM_T_82543) {
2588 ifp->if_capabilities |=
2589 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2590 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2591 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2592 IFCAP_CSUM_TCPv6_Tx |
2593 IFCAP_CSUM_UDPv6_Tx;
2594 }
2595
2596 /*
2597 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2598 *
2599 * 82541GI (8086:1076) ... no
2600 * 82572EI (8086:10b9) ... yes
2601 */
2602 if (sc->sc_type >= WM_T_82571) {
2603 ifp->if_capabilities |=
2604 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2605 }
2606
2607 /*
2608 	 * If we're an i82544 or greater (except the i82547), we can do
2609 * TCP segmentation offload.
2610 */
2611 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2612 ifp->if_capabilities |= IFCAP_TSOv4;
2613 }
2614
2615 if (sc->sc_type >= WM_T_82571) {
2616 ifp->if_capabilities |= IFCAP_TSOv6;
2617 }
2618
2619 #ifdef WM_MPSAFE
2620 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2621 #else
2622 sc->sc_core_lock = NULL;
2623 #endif
2624
2625 /* Attach the interface. */
2626 if_initialize(ifp);
2627 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
2628 ether_ifattach(ifp, enaddr);
2629 if_register(ifp);
2630 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2631 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2632 RND_FLAG_DEFAULT);
2633
2634 #ifdef WM_EVENT_COUNTERS
2635 /* Attach event counters. */
2636 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2637 NULL, xname, "linkintr");
2638
2639 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2640 NULL, xname, "tx_xoff");
2641 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2642 NULL, xname, "tx_xon");
2643 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2644 NULL, xname, "rx_xoff");
2645 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2646 NULL, xname, "rx_xon");
2647 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2648 NULL, xname, "rx_macctl");
2649 #endif /* WM_EVENT_COUNTERS */
2650
2651 if (pmf_device_register(self, wm_suspend, wm_resume))
2652 pmf_class_network_register(self, ifp);
2653 else
2654 aprint_error_dev(self, "couldn't establish power handler\n");
2655
2656 sc->sc_flags |= WM_F_ATTACHED;
2657 out:
2658 return;
2659 }
2660
2661 /* The detach function (ca_detach) */
2662 static int
2663 wm_detach(device_t self, int flags __unused)
2664 {
2665 struct wm_softc *sc = device_private(self);
2666 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2667 int i;
2668
2669 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2670 return 0;
2671
2672 /* Stop the interface. Callouts are stopped in it. */
2673 wm_stop(ifp, 1);
2674
2675 pmf_device_deregister(self);
2676
2677 /* Tell the firmware about the release */
2678 WM_CORE_LOCK(sc);
2679 wm_release_manageability(sc);
2680 wm_release_hw_control(sc);
2681 WM_CORE_UNLOCK(sc);
2682
2683 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2684
2685 /* Delete all remaining media. */
2686 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2687
2688 ether_ifdetach(ifp);
2689 if_detach(ifp);
2690 if_percpuq_destroy(sc->sc_ipq);
2691
2692 /* Unload RX dmamaps and free mbufs */
2693 for (i = 0; i < sc->sc_nqueues; i++) {
2694 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
2695 mutex_enter(rxq->rxq_lock);
2696 wm_rxdrain(rxq);
2697 mutex_exit(rxq->rxq_lock);
2698 }
2699 /* Must unlock here */
2700
2701 /* Disestablish the interrupt handler */
2702 for (i = 0; i < sc->sc_nintrs; i++) {
2703 if (sc->sc_ihs[i] != NULL) {
2704 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2705 sc->sc_ihs[i] = NULL;
2706 }
2707 }
2708 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2709
2710 wm_free_txrx_queues(sc);
2711
2712 /* Unmap the registers */
2713 if (sc->sc_ss) {
2714 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2715 sc->sc_ss = 0;
2716 }
2717 if (sc->sc_ios) {
2718 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2719 sc->sc_ios = 0;
2720 }
2721 if (sc->sc_flashs) {
2722 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2723 sc->sc_flashs = 0;
2724 }
2725
2726 if (sc->sc_core_lock)
2727 mutex_obj_free(sc->sc_core_lock);
2728 if (sc->sc_ich_phymtx)
2729 mutex_obj_free(sc->sc_ich_phymtx);
2730 if (sc->sc_ich_nvmmtx)
2731 mutex_obj_free(sc->sc_ich_nvmmtx);
2732
2733 return 0;
2734 }
2735
2736 static bool
2737 wm_suspend(device_t self, const pmf_qual_t *qual)
2738 {
2739 struct wm_softc *sc = device_private(self);
2740
2741 wm_release_manageability(sc);
2742 wm_release_hw_control(sc);
2743 #ifdef WM_WOL
2744 wm_enable_wakeup(sc);
2745 #endif
2746
2747 return true;
2748 }
2749
2750 static bool
2751 wm_resume(device_t self, const pmf_qual_t *qual)
2752 {
2753 struct wm_softc *sc = device_private(self);
2754
2755 wm_init_manageability(sc);
2756
2757 return true;
2758 }
2759
2760 /*
2761 * wm_watchdog: [ifnet interface function]
2762 *
2763 * Watchdog timer handler.
2764 */
2765 static void
2766 wm_watchdog(struct ifnet *ifp)
2767 {
2768 int qid;
2769 struct wm_softc *sc = ifp->if_softc;
2770
2771 for (qid = 0; qid < sc->sc_nqueues; qid++) {
2772 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
2773
2774 wm_watchdog_txq(ifp, txq);
2775 }
2776
2777 /* Reset the interface. */
2778 (void) wm_init(ifp);
2779
2780 /*
2781 	 * Some upper-layer processing (e.g. ALTQ) still calls
2782 	 * ifp->if_start() directly.
2783 */
2784 /* Try to get more packets going. */
2785 ifp->if_start(ifp);
2786 }
2787
2788 static void
2789 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
2790 {
2791 struct wm_softc *sc = ifp->if_softc;
2792
2793 /*
2794 * Since we're using delayed interrupts, sweep up
2795 * before we report an error.
2796 */
2797 mutex_enter(txq->txq_lock);
2798 wm_txeof(sc, txq);
2799 mutex_exit(txq->txq_lock);
2800
2801 if (txq->txq_free != WM_NTXDESC(txq)) {
2802 #ifdef WM_DEBUG
2803 int i, j;
2804 struct wm_txsoft *txs;
2805 #endif
2806 log(LOG_ERR,
2807 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2808 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
2809 txq->txq_next);
2810 ifp->if_oerrors++;
2811 #ifdef WM_DEBUG
2812 for (i = txq->txq_sdirty; i != txq->txq_snext ;
2813 i = WM_NEXTTXS(txq, i)) {
2814 txs = &txq->txq_soft[i];
2815 printf("txs %d tx %d -> %d\n",
2816 i, txs->txs_firstdesc, txs->txs_lastdesc);
2817 for (j = txs->txs_firstdesc; ;
2818 j = WM_NEXTTX(txq, j)) {
2819 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2820 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
2821 printf("\t %#08x%08x\n",
2822 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
2823 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
2824 if (j == txs->txs_lastdesc)
2825 break;
2826 }
2827 }
2828 #endif
2829 }
2830 }
2831
2832 /*
2833 * wm_tick:
2834 *
2835 * One second timer, used to check link status, sweep up
2836 * completed transmit jobs, etc.
2837 */
2838 static void
2839 wm_tick(void *arg)
2840 {
2841 struct wm_softc *sc = arg;
2842 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2843 #ifndef WM_MPSAFE
2844 int s = splnet();
2845 #endif
2846
2847 WM_CORE_LOCK(sc);
2848
2849 if (sc->sc_core_stopping)
2850 goto out;
2851
2852 if (sc->sc_type >= WM_T_82542_2_1) {
2853 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2854 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2855 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2856 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2857 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2858 }
2859
2860 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2861 	ifp->if_ierrors += 0ULL /* ensure quad_t */
2862 + CSR_READ(sc, WMREG_CRCERRS)
2863 + CSR_READ(sc, WMREG_ALGNERRC)
2864 + CSR_READ(sc, WMREG_SYMERRC)
2865 + CSR_READ(sc, WMREG_RXERRC)
2866 + CSR_READ(sc, WMREG_SEC)
2867 + CSR_READ(sc, WMREG_CEXTERR)
2868 + CSR_READ(sc, WMREG_RLEC);
2869 /*
2870 	 * WMREG_RNBC is incremented when there are no available buffers in
2871 	 * host memory. It does not count dropped packets, because the
2872 	 * Ethernet controller can still receive packets in that case as long
2873 	 * as there is space in the PHY's FIFO.
2874 	 *
2875 	 * If you want to track WMREG_RNBC, you should use a dedicated EVCNT
2876 	 * instead of if_iqdrops.
2877 */
2878 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
2879
2880 if (sc->sc_flags & WM_F_HAS_MII)
2881 mii_tick(&sc->sc_mii);
2882 else if ((sc->sc_type >= WM_T_82575)
2883 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2884 wm_serdes_tick(sc);
2885 else
2886 wm_tbi_tick(sc);
2887
2888 out:
2889 WM_CORE_UNLOCK(sc);
2890 #ifndef WM_MPSAFE
2891 splx(s);
2892 #endif
2893
2894 if (!sc->sc_core_stopping)
2895 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2896 }
2897
2898 static int
2899 wm_ifflags_cb(struct ethercom *ec)
2900 {
2901 struct ifnet *ifp = &ec->ec_if;
2902 struct wm_softc *sc = ifp->if_softc;
2903 int rc = 0;
2904
2905 WM_CORE_LOCK(sc);
2906
2907 int change = ifp->if_flags ^ sc->sc_if_flags;
2908 sc->sc_if_flags = ifp->if_flags;
2909
2910 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
2911 rc = ENETRESET;
2912 goto out;
2913 }
2914
2915 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2916 wm_set_filter(sc);
2917
2918 wm_set_vlan(sc);
2919
2920 out:
2921 WM_CORE_UNLOCK(sc);
2922
2923 return rc;
2924 }
2925
2926 /*
2927 * wm_ioctl: [ifnet interface function]
2928 *
2929 * Handle control requests from the operator.
2930 */
2931 static int
2932 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2933 {
2934 struct wm_softc *sc = ifp->if_softc;
2935 struct ifreq *ifr = (struct ifreq *) data;
2936 struct ifaddr *ifa = (struct ifaddr *)data;
2937 struct sockaddr_dl *sdl;
2938 int s, error;
2939
2940 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
2941 device_xname(sc->sc_dev), __func__));
2942
2943 #ifndef WM_MPSAFE
2944 s = splnet();
2945 #endif
2946 switch (cmd) {
2947 case SIOCSIFMEDIA:
2948 case SIOCGIFMEDIA:
2949 WM_CORE_LOCK(sc);
2950 /* Flow control requires full-duplex mode. */
2951 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2952 (ifr->ifr_media & IFM_FDX) == 0)
2953 ifr->ifr_media &= ~IFM_ETH_FMASK;
2954 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2955 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2956 /* We can do both TXPAUSE and RXPAUSE. */
2957 ifr->ifr_media |=
2958 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2959 }
2960 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2961 }
2962 WM_CORE_UNLOCK(sc);
2963 #ifdef WM_MPSAFE
2964 s = splnet();
2965 #endif
2966 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2967 #ifdef WM_MPSAFE
2968 splx(s);
2969 #endif
2970 break;
2971 case SIOCINITIFADDR:
2972 WM_CORE_LOCK(sc);
2973 if (ifa->ifa_addr->sa_family == AF_LINK) {
2974 sdl = satosdl(ifp->if_dl->ifa_addr);
2975 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2976 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2977 /* unicast address is first multicast entry */
2978 wm_set_filter(sc);
2979 error = 0;
2980 WM_CORE_UNLOCK(sc);
2981 break;
2982 }
2983 WM_CORE_UNLOCK(sc);
2984 /*FALLTHROUGH*/
2985 default:
2986 #ifdef WM_MPSAFE
2987 s = splnet();
2988 #endif
2989 /* It may call wm_start, so unlock here */
2990 error = ether_ioctl(ifp, cmd, data);
2991 #ifdef WM_MPSAFE
2992 splx(s);
2993 #endif
2994 if (error != ENETRESET)
2995 break;
2996
2997 error = 0;
2998
2999 if (cmd == SIOCSIFCAP) {
3000 error = (*ifp->if_init)(ifp);
3001 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3002 ;
3003 else if (ifp->if_flags & IFF_RUNNING) {
3004 /*
3005 * Multicast list has changed; set the hardware filter
3006 * accordingly.
3007 */
3008 WM_CORE_LOCK(sc);
3009 wm_set_filter(sc);
3010 WM_CORE_UNLOCK(sc);
3011 }
3012 break;
3013 }
3014
3015 #ifndef WM_MPSAFE
3016 splx(s);
3017 #endif
3018 return error;
3019 }
3020
3021 /* MAC address related */
3022
3023 /*
3024  * Get the offset of the MAC address and return it.
3025  * If an error occurs, use offset 0.
3026 */
3027 static uint16_t
3028 wm_check_alt_mac_addr(struct wm_softc *sc)
3029 {
3030 uint16_t myea[ETHER_ADDR_LEN / 2];
3031 uint16_t offset = NVM_OFF_MACADDR;
3032
3033 /* Try to read alternative MAC address pointer */
3034 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3035 return 0;
3036
3037 	/* Check whether the pointer is valid. */
3038 if ((offset == 0x0000) || (offset == 0xffff))
3039 return 0;
3040
3041 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3042 /*
3043 	 * Check whether the alternative MAC address is valid.
3044 	 * Some cards have a non-0xffff pointer but don't actually
3045 	 * use an alternative MAC address.
3046 	 *
3047 	 * Check whether the broadcast bit is set.
3048 */
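	/*
	 * The low byte of myea[0] is the first octet of the address; if
	 * bit 0 is set there, the address would be multicast/broadcast,
	 * which is never a valid station address.
	 */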
3049 if (wm_nvm_read(sc, offset, 1, myea) == 0)
3050 if (((myea[0] & 0xff) & 0x01) == 0)
3051 return offset; /* Found */
3052
3053 /* Not found */
3054 return 0;
3055 }
3056
3057 static int
3058 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3059 {
3060 uint16_t myea[ETHER_ADDR_LEN / 2];
3061 uint16_t offset = NVM_OFF_MACADDR;
3062 int do_invert = 0;
3063
3064 switch (sc->sc_type) {
3065 case WM_T_82580:
3066 case WM_T_I350:
3067 case WM_T_I354:
3068 /* EEPROM Top Level Partitioning */
3069 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3070 break;
3071 case WM_T_82571:
3072 case WM_T_82575:
3073 case WM_T_82576:
3074 case WM_T_80003:
3075 case WM_T_I210:
3076 case WM_T_I211:
3077 offset = wm_check_alt_mac_addr(sc);
3078 if (offset == 0)
3079 if ((sc->sc_funcid & 0x01) == 1)
3080 do_invert = 1;
3081 break;
3082 default:
3083 if ((sc->sc_funcid & 0x01) == 1)
3084 do_invert = 1;
3085 break;
3086 }
3087
3088 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
3089 goto bad;
3090
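	/*
	 * NVM words hold the address in little-endian byte order. For
	 * example (a hypothetical address), words {0x1100, 0x3322, 0x5544}
	 * unpack to 00:11:22:33:44:55.
	 */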
3091 enaddr[0] = myea[0] & 0xff;
3092 enaddr[1] = myea[0] >> 8;
3093 enaddr[2] = myea[1] & 0xff;
3094 enaddr[3] = myea[1] >> 8;
3095 enaddr[4] = myea[2] & 0xff;
3096 enaddr[5] = myea[2] >> 8;
3097
3098 /*
3099 * Toggle the LSB of the MAC address on the second port
3100 * of some dual port cards.
3101 */
3102 if (do_invert != 0)
3103 enaddr[5] ^= 1;
3104
3105 return 0;
3106
3107 bad:
3108 return -1;
3109 }
3110
3111 /*
3112 * wm_set_ral:
3113 *
3114  *	Set an entry in the receive address list.
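 *
 *	For example (a hypothetical address), 00:11:22:33:44:55 packs as
 *	ral_lo == 0x33221100 and ral_hi == 0x5544 | RAL_AV, where RAL_AV
 *	marks the entry as valid.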
3115 */
3116 static void
3117 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3118 {
3119 uint32_t ral_lo, ral_hi;
3120
3121 if (enaddr != NULL) {
3122 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3123 (enaddr[3] << 24);
3124 ral_hi = enaddr[4] | (enaddr[5] << 8);
3125 ral_hi |= RAL_AV;
3126 } else {
3127 ral_lo = 0;
3128 ral_hi = 0;
3129 }
3130
3131 if (sc->sc_type >= WM_T_82544) {
3132 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3133 ral_lo);
3134 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3135 ral_hi);
3136 } else {
3137 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3138 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3139 }
3140 }
3141
3142 /*
3143 * wm_mchash:
3144 *
3145 * Compute the hash of the multicast address for the 4096-bit
3146 * multicast filter.
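 *
 *	The hash mixes the high bits of enaddr[4] with the low bits of
 *	enaddr[5]. E.g. (hypothetically) with sc_mchash_type == 0 on a
 *	non-ICH/PCH chip, aa:bb:cc:dd:ee:ff hashes to
 *	((0xee >> 4) | (0xff << 4)) & 0xfff == 0xffe.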
3147 */
3148 static uint32_t
3149 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3150 {
3151 static const int lo_shift[4] = { 4, 3, 2, 0 };
3152 static const int hi_shift[4] = { 4, 5, 6, 8 };
3153 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3154 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3155 uint32_t hash;
3156
3157 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3158 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3159 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3160 || (sc->sc_type == WM_T_PCH_SPT)) {
3161 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3162 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3163 return (hash & 0x3ff);
3164 }
3165 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3166 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3167
3168 return (hash & 0xfff);
3169 }
3170
3171 /*
3172 * wm_set_filter:
3173 *
3174 * Set up the receive filter.
3175 */
3176 static void
3177 wm_set_filter(struct wm_softc *sc)
3178 {
3179 struct ethercom *ec = &sc->sc_ethercom;
3180 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3181 struct ether_multi *enm;
3182 struct ether_multistep step;
3183 bus_addr_t mta_reg;
3184 uint32_t hash, reg, bit;
3185 int i, size, ralmax;
3186
3187 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3188 device_xname(sc->sc_dev), __func__));
3189
3190 if (sc->sc_type >= WM_T_82544)
3191 mta_reg = WMREG_CORDOVA_MTA;
3192 else
3193 mta_reg = WMREG_MTA;
3194
3195 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3196
3197 if (ifp->if_flags & IFF_BROADCAST)
3198 sc->sc_rctl |= RCTL_BAM;
3199 if (ifp->if_flags & IFF_PROMISC) {
3200 sc->sc_rctl |= RCTL_UPE;
3201 goto allmulti;
3202 }
3203
3204 /*
3205 * Set the station address in the first RAL slot, and
3206 * clear the remaining slots.
3207 */
3208 if (sc->sc_type == WM_T_ICH8)
3209 		size = WM_RAL_TABSIZE_ICH8 - 1;
3210 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3211 || (sc->sc_type == WM_T_PCH))
3212 size = WM_RAL_TABSIZE_ICH8;
3213 else if (sc->sc_type == WM_T_PCH2)
3214 size = WM_RAL_TABSIZE_PCH2;
3215 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
3216 size = WM_RAL_TABSIZE_PCH_LPT;
3217 else if (sc->sc_type == WM_T_82575)
3218 size = WM_RAL_TABSIZE_82575;
3219 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3220 size = WM_RAL_TABSIZE_82576;
3221 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3222 size = WM_RAL_TABSIZE_I350;
3223 else
3224 size = WM_RAL_TABSIZE;
3225 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3226
3227 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
3228 i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3229 switch (i) {
3230 case 0:
3231 /* We can use all entries */
3232 ralmax = size;
3233 break;
3234 case 1:
3235 /* Only RAR[0] */
3236 ralmax = 1;
3237 break;
3238 default:
3239 /* available SHRA + RAR[0] */
3240 ralmax = i + 1;
3241 }
3242 } else
3243 ralmax = size;
3244 for (i = 1; i < size; i++) {
3245 if (i < ralmax)
3246 wm_set_ral(sc, NULL, i);
3247 }
3248
3249 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3250 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3251 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3252 || (sc->sc_type == WM_T_PCH_SPT))
3253 size = WM_ICH8_MC_TABSIZE;
3254 else
3255 size = WM_MC_TABSIZE;
3256 /* Clear out the multicast table. */
3257 for (i = 0; i < size; i++)
3258 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3259
3260 ETHER_FIRST_MULTI(step, ec, enm);
3261 while (enm != NULL) {
3262 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3263 /*
3264 * We must listen to a range of multicast addresses.
3265 * For now, just accept all multicasts, rather than
3266 * trying to set only those filter bits needed to match
3267 * the range. (At this time, the only use of address
3268 * ranges is for IP multicast routing, for which the
3269 * range is big enough to require all bits set.)
3270 */
3271 goto allmulti;
3272 }
3273
3274 hash = wm_mchash(sc, enm->enm_addrlo);
3275
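		/*
		 * The hash selects one bit in the MTA: the upper bits pick
		 * a 32-bit register and the low 5 bits pick a bit within
		 * it. E.g. hash 0x123 (hypothetical) gives reg 9, bit 3.
		 */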
3276 reg = (hash >> 5);
3277 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3278 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3279 || (sc->sc_type == WM_T_PCH2)
3280 || (sc->sc_type == WM_T_PCH_LPT)
3281 || (sc->sc_type == WM_T_PCH_SPT))
3282 reg &= 0x1f;
3283 else
3284 reg &= 0x7f;
3285 bit = hash & 0x1f;
3286
3287 hash = CSR_READ(sc, mta_reg + (reg << 2));
3288 hash |= 1U << bit;
3289
3290 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3291 /*
3292 * 82544 Errata 9: Certain register cannot be written
3293 * with particular alignments in PCI-X bus operation
3294 * (FCAH, MTA and VFTA).
3295 */
3296 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3297 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3298 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3299 } else
3300 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3301
3302 ETHER_NEXT_MULTI(step, enm);
3303 }
3304
3305 ifp->if_flags &= ~IFF_ALLMULTI;
3306 goto setit;
3307
3308 allmulti:
3309 ifp->if_flags |= IFF_ALLMULTI;
3310 sc->sc_rctl |= RCTL_MPE;
3311
3312 setit:
3313 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3314 }
3315
3316 /* Reset and init related */
3317
3318 static void
3319 wm_set_vlan(struct wm_softc *sc)
3320 {
3321
3322 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3323 device_xname(sc->sc_dev), __func__));
3324
3325 /* Deal with VLAN enables. */
3326 if (VLAN_ATTACHED(&sc->sc_ethercom))
3327 sc->sc_ctrl |= CTRL_VME;
3328 else
3329 sc->sc_ctrl &= ~CTRL_VME;
3330
3331 /* Write the control registers. */
3332 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3333 }
3334
3335 static void
3336 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3337 {
3338 uint32_t gcr;
3339 pcireg_t ctrl2;
3340
3341 gcr = CSR_READ(sc, WMREG_GCR);
3342
3343 /* Only take action if timeout value is defaulted to 0 */
3344 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3345 goto out;
3346
3347 if ((gcr & GCR_CAP_VER2) == 0) {
3348 gcr |= GCR_CMPL_TMOUT_10MS;
3349 goto out;
3350 }
3351
3352 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3353 sc->sc_pcixe_capoff + PCIE_DCSR2);
3354 ctrl2 |= WM_PCIE_DCSR2_16MS;
3355 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3356 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3357
3358 out:
3359 /* Disable completion timeout resend */
3360 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3361
3362 CSR_WRITE(sc, WMREG_GCR, gcr);
3363 }
3364
3365 void
3366 wm_get_auto_rd_done(struct wm_softc *sc)
3367 {
3368 int i;
3369
3370 /* wait for eeprom to reload */
3371 switch (sc->sc_type) {
3372 case WM_T_82571:
3373 case WM_T_82572:
3374 case WM_T_82573:
3375 case WM_T_82574:
3376 case WM_T_82583:
3377 case WM_T_82575:
3378 case WM_T_82576:
3379 case WM_T_82580:
3380 case WM_T_I350:
3381 case WM_T_I354:
3382 case WM_T_I210:
3383 case WM_T_I211:
3384 case WM_T_80003:
3385 case WM_T_ICH8:
3386 case WM_T_ICH9:
3387 for (i = 0; i < 10; i++) {
3388 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3389 break;
3390 delay(1000);
3391 }
3392 if (i == 10) {
3393 log(LOG_ERR, "%s: auto read from eeprom failed to "
3394 "complete\n", device_xname(sc->sc_dev));
3395 }
3396 break;
3397 default:
3398 break;
3399 }
3400 }
3401
3402 void
3403 wm_lan_init_done(struct wm_softc *sc)
3404 {
3405 uint32_t reg = 0;
3406 int i;
3407
3408 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3409 device_xname(sc->sc_dev), __func__));
3410
3411 /* Wait for eeprom to reload */
3412 switch (sc->sc_type) {
3413 case WM_T_ICH10:
3414 case WM_T_PCH:
3415 case WM_T_PCH2:
3416 case WM_T_PCH_LPT:
3417 case WM_T_PCH_SPT:
3418 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3419 reg = CSR_READ(sc, WMREG_STATUS);
3420 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3421 break;
3422 delay(100);
3423 }
3424 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3425 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3426 "complete\n", device_xname(sc->sc_dev), __func__);
3427 }
3428 break;
3429 default:
3430 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3431 __func__);
3432 break;
3433 }
3434
3435 reg &= ~STATUS_LAN_INIT_DONE;
3436 CSR_WRITE(sc, WMREG_STATUS, reg);
3437 }
3438
3439 void
3440 wm_get_cfg_done(struct wm_softc *sc)
3441 {
3442 int mask;
3443 uint32_t reg;
3444 int i;
3445
3446 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3447 device_xname(sc->sc_dev), __func__));
3448
3449 /* Wait for eeprom to reload */
3450 switch (sc->sc_type) {
3451 case WM_T_82542_2_0:
3452 case WM_T_82542_2_1:
3453 /* null */
3454 break;
3455 case WM_T_82543:
3456 case WM_T_82544:
3457 case WM_T_82540:
3458 case WM_T_82545:
3459 case WM_T_82545_3:
3460 case WM_T_82546:
3461 case WM_T_82546_3:
3462 case WM_T_82541:
3463 case WM_T_82541_2:
3464 case WM_T_82547:
3465 case WM_T_82547_2:
3466 case WM_T_82573:
3467 case WM_T_82574:
3468 case WM_T_82583:
3469 /* generic */
3470 delay(10*1000);
3471 break;
3472 case WM_T_80003:
3473 case WM_T_82571:
3474 case WM_T_82572:
3475 case WM_T_82575:
3476 case WM_T_82576:
3477 case WM_T_82580:
3478 case WM_T_I350:
3479 case WM_T_I354:
3480 case WM_T_I210:
3481 case WM_T_I211:
3482 if (sc->sc_type == WM_T_82571) {
3483 /* Only 82571 shares port 0 */
3484 mask = EEMNGCTL_CFGDONE_0;
3485 } else
3486 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3487 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3488 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3489 break;
3490 delay(1000);
3491 }
3492 if (i >= WM_PHY_CFG_TIMEOUT) {
3493 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3494 device_xname(sc->sc_dev), __func__));
3495 }
3496 break;
3497 case WM_T_ICH8:
3498 case WM_T_ICH9:
3499 case WM_T_ICH10:
3500 case WM_T_PCH:
3501 case WM_T_PCH2:
3502 case WM_T_PCH_LPT:
3503 case WM_T_PCH_SPT:
3504 delay(10*1000);
3505 if (sc->sc_type >= WM_T_ICH10)
3506 wm_lan_init_done(sc);
3507 else
3508 wm_get_auto_rd_done(sc);
3509
3510 reg = CSR_READ(sc, WMREG_STATUS);
3511 if ((reg & STATUS_PHYRA) != 0)
3512 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3513 break;
3514 default:
3515 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3516 __func__);
3517 break;
3518 }
3519 }
3520
3521 /* Init hardware bits */
3522 void
3523 wm_initialize_hardware_bits(struct wm_softc *sc)
3524 {
3525 uint32_t tarc0, tarc1, reg;
3526
3527 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3528 device_xname(sc->sc_dev), __func__));
3529
3530 /* For 82571 variant, 80003 and ICHs */
3531 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3532 || (sc->sc_type >= WM_T_80003)) {
3533
3534 /* Transmit Descriptor Control 0 */
3535 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3536 reg |= TXDCTL_COUNT_DESC;
3537 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3538
3539 /* Transmit Descriptor Control 1 */
3540 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3541 reg |= TXDCTL_COUNT_DESC;
3542 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3543
3544 /* TARC0 */
3545 tarc0 = CSR_READ(sc, WMREG_TARC0);
3546 switch (sc->sc_type) {
3547 case WM_T_82571:
3548 case WM_T_82572:
3549 case WM_T_82573:
3550 case WM_T_82574:
3551 case WM_T_82583:
3552 case WM_T_80003:
3553 /* Clear bits 30..27 */
3554 tarc0 &= ~__BITS(30, 27);
3555 break;
3556 default:
3557 break;
3558 }
3559
3560 switch (sc->sc_type) {
3561 case WM_T_82571:
3562 case WM_T_82572:
3563 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3564
3565 tarc1 = CSR_READ(sc, WMREG_TARC1);
3566 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3567 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3568 /* 8257[12] Errata No.7 */
3569 			tarc1 |= __BIT(22);	 /* TARC1 bit 22 */
3570
3571 /* TARC1 bit 28 */
3572 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3573 tarc1 &= ~__BIT(28);
3574 else
3575 tarc1 |= __BIT(28);
3576 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3577
3578 /*
3579 * 8257[12] Errata No.13
3580 			 * Disable Dynamic Clock Gating.
3581 */
3582 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3583 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3584 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3585 break;
3586 case WM_T_82573:
3587 case WM_T_82574:
3588 case WM_T_82583:
3589 if ((sc->sc_type == WM_T_82574)
3590 || (sc->sc_type == WM_T_82583))
3591 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3592
3593 /* Extended Device Control */
3594 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3595 reg &= ~__BIT(23); /* Clear bit 23 */
3596 reg |= __BIT(22); /* Set bit 22 */
3597 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3598
3599 /* Device Control */
3600 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3601 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3602
3603 /* PCIe Control Register */
3604 /*
3605 * 82573 Errata (unknown).
3606 *
3607 * 82574 Errata 25 and 82583 Errata 12
3608 * "Dropped Rx Packets":
3609 			 * NVM image version 2.1.4 and newer does not have this bug.
3610 */
3611 reg = CSR_READ(sc, WMREG_GCR);
3612 reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
3613 CSR_WRITE(sc, WMREG_GCR, reg);
3614
3615 if ((sc->sc_type == WM_T_82574)
3616 || (sc->sc_type == WM_T_82583)) {
3617 /*
3618 				 * The documentation says this bit must be
3619 				 * set for proper operation.
3620 */
3621 reg = CSR_READ(sc, WMREG_GCR);
3622 reg |= __BIT(22);
3623 CSR_WRITE(sc, WMREG_GCR, reg);
3624
3625 /*
3626 				 * Apply a workaround for the hardware erratum
3627 				 * documented in the errata docs. It fixes an
3628 				 * issue where some error-prone or unreliable
3629 				 * PCIe completions occur, particularly with
3630 				 * ASPM enabled. Without the fix, the issue
3631 				 * can cause Tx timeouts.
3632 */
3633 reg = CSR_READ(sc, WMREG_GCR2);
3634 reg |= __BIT(0);
3635 CSR_WRITE(sc, WMREG_GCR2, reg);
3636 }
3637 break;
3638 case WM_T_80003:
3639 /* TARC0 */
3640 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3641 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3642 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3643
3644 /* TARC1 bit 28 */
3645 tarc1 = CSR_READ(sc, WMREG_TARC1);
3646 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3647 tarc1 &= ~__BIT(28);
3648 else
3649 tarc1 |= __BIT(28);
3650 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3651 break;
3652 case WM_T_ICH8:
3653 case WM_T_ICH9:
3654 case WM_T_ICH10:
3655 case WM_T_PCH:
3656 case WM_T_PCH2:
3657 case WM_T_PCH_LPT:
3658 case WM_T_PCH_SPT:
3659 /* TARC0 */
3660 if ((sc->sc_type == WM_T_ICH8)
3661 || (sc->sc_type == WM_T_PCH_SPT)) {
3662 /* Set TARC0 bits 29 and 28 */
3663 tarc0 |= __BITS(29, 28);
3664 }
3665 /* Set TARC0 bits 23,24,26,27 */
3666 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3667
3668 /* CTRL_EXT */
3669 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3670 reg |= __BIT(22); /* Set bit 22 */
3671 /*
3672 * Enable PHY low-power state when MAC is at D3
3673 * w/o WoL
3674 */
3675 if (sc->sc_type >= WM_T_PCH)
3676 reg |= CTRL_EXT_PHYPDEN;
3677 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3678
3679 /* TARC1 */
3680 tarc1 = CSR_READ(sc, WMREG_TARC1);
3681 /* bit 28 */
3682 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3683 tarc1 &= ~__BIT(28);
3684 else
3685 tarc1 |= __BIT(28);
3686 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3687 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3688
3689 /* Device Status */
3690 if (sc->sc_type == WM_T_ICH8) {
3691 reg = CSR_READ(sc, WMREG_STATUS);
3692 reg &= ~__BIT(31);
3693 CSR_WRITE(sc, WMREG_STATUS, reg);
3694
3695 }
3696
3697 /* IOSFPC */
3698 if (sc->sc_type == WM_T_PCH_SPT) {
3699 reg = CSR_READ(sc, WMREG_IOSFPC);
3700 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
3701 CSR_WRITE(sc, WMREG_IOSFPC, reg);
3702 }
3703 /*
3704 			 * Work around a descriptor data corruption issue during
3705 			 * NFS v2 UDP traffic by simply disabling the NFS
3706 			 * filtering capability.
3707 */
3708 reg = CSR_READ(sc, WMREG_RFCTL);
3709 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3710 CSR_WRITE(sc, WMREG_RFCTL, reg);
3711 break;
3712 default:
3713 break;
3714 }
3715 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3716
3717 /*
3718 * 8257[12] Errata No.52 and some others.
3719 * Avoid RSS Hash Value bug.
3720 */
3721 switch (sc->sc_type) {
3722 case WM_T_82571:
3723 case WM_T_82572:
3724 case WM_T_82573:
3725 case WM_T_80003:
3726 case WM_T_ICH8:
3727 reg = CSR_READ(sc, WMREG_RFCTL);
3728 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
3729 CSR_WRITE(sc, WMREG_RFCTL, reg);
3730 break;
3731 default:
3732 break;
3733 }
3734 }
3735 }
3736
3737 static uint32_t
3738 wm_rxpbs_adjust_82580(uint32_t val)
3739 {
3740 uint32_t rv = 0;
3741
3742 if (val < __arraycount(wm_82580_rxpbs_table))
3743 rv = wm_82580_rxpbs_table[val];
3744
3745 return rv;
3746 }
3747
3748 /*
3749 * wm_reset:
3750 *
3751 * Reset the i82542 chip.
3752 */
3753 static void
3754 wm_reset(struct wm_softc *sc)
3755 {
3756 int phy_reset = 0;
3757 int i, error = 0;
3758 uint32_t reg;
3759
3760 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3761 device_xname(sc->sc_dev), __func__));
3762 KASSERT(sc->sc_type != 0);
3763
3764 /*
3765 * Allocate on-chip memory according to the MTU size.
3766 * The Packet Buffer Allocation register must be written
3767 * before the chip is reset.
3768 */
3769 switch (sc->sc_type) {
3770 case WM_T_82547:
3771 case WM_T_82547_2:
3772 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3773 PBA_22K : PBA_30K;
3774 for (i = 0; i < sc->sc_nqueues; i++) {
3775 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3776 txq->txq_fifo_head = 0;
3777 txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3778 txq->txq_fifo_size =
3779 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3780 txq->txq_fifo_stall = 0;
3781 }
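		/*
		 * Illustrative reading of the split (assuming the PBA_*
		 * constants encode kilobyte units): with an MTU <= 8192
		 * the Rx side gets PBA_30K, leaving 40K - 30K = 10KB of
		 * the packet buffer as the Tx FIFO used by the 82547
		 * stall workaround.
		 */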
3782 break;
3783 case WM_T_82571:
3784 case WM_T_82572:
3785 	case WM_T_82575: /* XXX need special handling for jumbo frames */
3786 case WM_T_80003:
3787 sc->sc_pba = PBA_32K;
3788 break;
3789 case WM_T_82573:
3790 sc->sc_pba = PBA_12K;
3791 break;
3792 case WM_T_82574:
3793 case WM_T_82583:
3794 sc->sc_pba = PBA_20K;
3795 break;
3796 case WM_T_82576:
3797 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3798 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3799 break;
3800 case WM_T_82580:
3801 case WM_T_I350:
3802 case WM_T_I354:
3803 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3804 break;
3805 case WM_T_I210:
3806 case WM_T_I211:
3807 sc->sc_pba = PBA_34K;
3808 break;
3809 case WM_T_ICH8:
3810 /* Workaround for a bit corruption issue in FIFO memory */
3811 sc->sc_pba = PBA_8K;
3812 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3813 break;
3814 case WM_T_ICH9:
3815 case WM_T_ICH10:
3816 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3817 PBA_14K : PBA_10K;
3818 break;
3819 case WM_T_PCH:
3820 case WM_T_PCH2:
3821 case WM_T_PCH_LPT:
3822 case WM_T_PCH_SPT:
3823 sc->sc_pba = PBA_26K;
3824 break;
3825 default:
3826 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3827 PBA_40K : PBA_48K;
3828 break;
3829 }
3830 /*
3831 	 * Only old or non-multiqueue devices have the PBA register.
3832 * XXX Need special handling for 82575.
3833 */
3834 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3835 || (sc->sc_type == WM_T_82575))
3836 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3837
3838 /* Prevent the PCI-E bus from sticking */
3839 if (sc->sc_flags & WM_F_PCIE) {
3840 int timeout = 800;
3841
3842 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3843 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3844
3845 while (timeout--) {
3846 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3847 == 0)
3848 break;
3849 delay(100);
3850 }
3851 }
3852
3853 /* Set the completion timeout for interface */
3854 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3855 || (sc->sc_type == WM_T_82580)
3856 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3857 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3858 wm_set_pcie_completion_timeout(sc);
3859
3860 /* Clear interrupt */
3861 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3862 if (sc->sc_nintrs > 1) {
3863 if (sc->sc_type != WM_T_82574) {
3864 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3865 CSR_WRITE(sc, WMREG_EIAC, 0);
3866 } else {
3867 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3868 }
3869 }
3870
3871 /* Stop the transmit and receive processes. */
3872 CSR_WRITE(sc, WMREG_RCTL, 0);
3873 sc->sc_rctl &= ~RCTL_EN;
3874 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3875 CSR_WRITE_FLUSH(sc);
3876
3877 /* XXX set_tbi_sbp_82543() */
3878
3879 delay(10*1000);
3880
3881 /* Must acquire the MDIO ownership before MAC reset */
3882 switch (sc->sc_type) {
3883 case WM_T_82573:
3884 case WM_T_82574:
3885 case WM_T_82583:
3886 error = wm_get_hw_semaphore_82573(sc);
3887 break;
3888 default:
3889 break;
3890 }
3891
3892 /*
3893 * 82541 Errata 29? & 82547 Errata 28?
3894 * See also the description about PHY_RST bit in CTRL register
3895 * in 8254x_GBe_SDM.pdf.
3896 */
3897 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3898 CSR_WRITE(sc, WMREG_CTRL,
3899 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3900 CSR_WRITE_FLUSH(sc);
3901 delay(5000);
3902 }
3903
3904 switch (sc->sc_type) {
3905 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3906 case WM_T_82541:
3907 case WM_T_82541_2:
3908 case WM_T_82547:
3909 case WM_T_82547_2:
3910 /*
3911 * On some chipsets, a reset through a memory-mapped write
3912 * cycle can cause the chip to reset before completing the
3913 		 * write cycle. This causes a major headache that can be
3914 * avoided by issuing the reset via indirect register writes
3915 * through I/O space.
3916 *
3917 * So, if we successfully mapped the I/O BAR at attach time,
3918 * use that. Otherwise, try our luck with a memory-mapped
3919 * reset.
3920 */
3921 if (sc->sc_flags & WM_F_IOH_VALID)
3922 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3923 else
3924 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3925 break;
3926 case WM_T_82545_3:
3927 case WM_T_82546_3:
3928 /* Use the shadow control register on these chips. */
3929 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3930 break;
3931 case WM_T_80003:
3932 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3933 sc->phy.acquire(sc);
3934 CSR_WRITE(sc, WMREG_CTRL, reg);
3935 sc->phy.release(sc);
3936 break;
3937 case WM_T_ICH8:
3938 case WM_T_ICH9:
3939 case WM_T_ICH10:
3940 case WM_T_PCH:
3941 case WM_T_PCH2:
3942 case WM_T_PCH_LPT:
3943 case WM_T_PCH_SPT:
3944 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3945 if (wm_phy_resetisblocked(sc) == false) {
3946 /*
3947 * Gate automatic PHY configuration by hardware on
3948 * non-managed 82579
3949 */
3950 if ((sc->sc_type == WM_T_PCH2)
3951 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3952 == 0))
3953 wm_gate_hw_phy_config_ich8lan(sc, true);
3954
3955 reg |= CTRL_PHY_RESET;
3956 phy_reset = 1;
3957 } else
3958 printf("XXX reset is blocked!!!\n");
3959 sc->phy.acquire(sc);
3960 CSR_WRITE(sc, WMREG_CTRL, reg);
3961 		/* Don't insert a completion barrier during reset */
3962 delay(20*1000);
3963 mutex_exit(sc->sc_ich_phymtx);
3964 break;
3965 case WM_T_82580:
3966 case WM_T_I350:
3967 case WM_T_I354:
3968 case WM_T_I210:
3969 case WM_T_I211:
3970 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3971 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3972 CSR_WRITE_FLUSH(sc);
3973 delay(5000);
3974 break;
3975 case WM_T_82542_2_0:
3976 case WM_T_82542_2_1:
3977 case WM_T_82543:
3978 case WM_T_82540:
3979 case WM_T_82545:
3980 case WM_T_82546:
3981 case WM_T_82571:
3982 case WM_T_82572:
3983 case WM_T_82573:
3984 case WM_T_82574:
3985 case WM_T_82575:
3986 case WM_T_82576:
3987 case WM_T_82583:
3988 default:
3989 /* Everything else can safely use the documented method. */
3990 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3991 break;
3992 }
3993
3994 /* Must release the MDIO ownership after MAC reset */
3995 switch (sc->sc_type) {
3996 case WM_T_82573:
3997 case WM_T_82574:
3998 case WM_T_82583:
3999 if (error == 0)
4000 wm_put_hw_semaphore_82573(sc);
4001 break;
4002 default:
4003 break;
4004 }
4005
4006 if (phy_reset != 0) {
4007 wm_get_cfg_done(sc);
4008 delay(10 * 1000);
4009 if (sc->sc_type >= WM_T_PCH) {
4010 reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
4011 BM_PORT_GEN_CFG);
4012 reg &= ~BM_WUC_HOST_WU_BIT;
4013 wm_gmii_hv_writereg(sc->sc_dev, 2,
4014 BM_PORT_GEN_CFG, reg);
4015 }
4016 }
4017
4018 /* reload EEPROM */
4019 switch (sc->sc_type) {
4020 case WM_T_82542_2_0:
4021 case WM_T_82542_2_1:
4022 case WM_T_82543:
4023 case WM_T_82544:
4024 delay(10);
4025 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4026 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4027 CSR_WRITE_FLUSH(sc);
4028 delay(2000);
4029 break;
4030 case WM_T_82540:
4031 case WM_T_82545:
4032 case WM_T_82545_3:
4033 case WM_T_82546:
4034 case WM_T_82546_3:
4035 delay(5*1000);
4036 /* XXX Disable HW ARPs on ASF enabled adapters */
4037 break;
4038 case WM_T_82541:
4039 case WM_T_82541_2:
4040 case WM_T_82547:
4041 case WM_T_82547_2:
4042 delay(20000);
4043 /* XXX Disable HW ARPs on ASF enabled adapters */
4044 break;
4045 case WM_T_82571:
4046 case WM_T_82572:
4047 case WM_T_82573:
4048 case WM_T_82574:
4049 case WM_T_82583:
4050 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4051 delay(10);
4052 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4053 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4054 CSR_WRITE_FLUSH(sc);
4055 }
4056 /* check EECD_EE_AUTORD */
4057 wm_get_auto_rd_done(sc);
4058 /*
4059 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
4060 * is set.
4061 */
4062 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4063 || (sc->sc_type == WM_T_82583))
4064 delay(25*1000);
4065 break;
4066 case WM_T_82575:
4067 case WM_T_82576:
4068 case WM_T_82580:
4069 case WM_T_I350:
4070 case WM_T_I354:
4071 case WM_T_I210:
4072 case WM_T_I211:
4073 case WM_T_80003:
4074 /* check EECD_EE_AUTORD */
4075 wm_get_auto_rd_done(sc);
4076 break;
4077 case WM_T_ICH8:
4078 case WM_T_ICH9:
4079 case WM_T_ICH10:
4080 case WM_T_PCH:
4081 case WM_T_PCH2:
4082 case WM_T_PCH_LPT:
4083 case WM_T_PCH_SPT:
4084 break;
4085 default:
4086 panic("%s: unknown type\n", __func__);
4087 }
4088
4089 /* Check whether EEPROM is present or not */
4090 switch (sc->sc_type) {
4091 case WM_T_82575:
4092 case WM_T_82576:
4093 case WM_T_82580:
4094 case WM_T_I350:
4095 case WM_T_I354:
4096 case WM_T_ICH8:
4097 case WM_T_ICH9:
4098 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4099 /* Not found */
4100 sc->sc_flags |= WM_F_EEPROM_INVALID;
4101 if (sc->sc_type == WM_T_82575)
4102 wm_reset_init_script_82575(sc);
4103 }
4104 break;
4105 default:
4106 break;
4107 }
4108
4109 if ((sc->sc_type == WM_T_82580)
4110 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4111 /* clear global device reset status bit */
4112 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4113 }
4114
4115 /* Clear any pending interrupt events. */
4116 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4117 reg = CSR_READ(sc, WMREG_ICR);
4118 if (sc->sc_nintrs > 1) {
4119 if (sc->sc_type != WM_T_82574) {
4120 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4121 CSR_WRITE(sc, WMREG_EIAC, 0);
4122 } else
4123 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4124 }
4125
4126 /* reload sc_ctrl */
4127 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4128
4129 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
4130 wm_set_eee_i350(sc);
4131
4132 /* dummy read from WUC */
4133 if (sc->sc_type == WM_T_PCH)
4134 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4135 /*
4136 * For PCH, this write will make sure that any noise will be detected
4137 * as a CRC error and be dropped rather than show up as a bad packet
4138 * to the DMA engine
4139 */
4140 if (sc->sc_type == WM_T_PCH)
4141 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4142
4143 if (sc->sc_type >= WM_T_82544)
4144 CSR_WRITE(sc, WMREG_WUC, 0);
4145
4146 wm_reset_mdicnfg_82580(sc);
4147
4148 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
4149 wm_pll_workaround_i210(sc);
4150 }
4151
4152 /*
4153 * wm_add_rxbuf:
4154 *
4155  *	Add a receive buffer to the indicated descriptor.
4156 */
4157 static int
4158 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
4159 {
4160 struct wm_softc *sc = rxq->rxq_sc;
4161 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
4162 struct mbuf *m;
4163 int error;
4164
4165 KASSERT(mutex_owned(rxq->rxq_lock));
4166
4167 MGETHDR(m, M_DONTWAIT, MT_DATA);
4168 if (m == NULL)
4169 return ENOBUFS;
4170
4171 MCLGET(m, M_DONTWAIT);
4172 if ((m->m_flags & M_EXT) == 0) {
4173 m_freem(m);
4174 return ENOBUFS;
4175 }
4176
4177 if (rxs->rxs_mbuf != NULL)
4178 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4179
4180 rxs->rxs_mbuf = m;
4181
4182 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4183 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4184 BUS_DMA_READ | BUS_DMA_NOWAIT);
4185 if (error) {
4186 /* XXX XXX XXX */
4187 aprint_error_dev(sc->sc_dev,
4188 "unable to load rx DMA map %d, error = %d\n",
4189 idx, error);
4190 panic("wm_add_rxbuf");
4191 }
4192
4193 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4194 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4195
4196 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4197 if ((sc->sc_rctl & RCTL_EN) != 0)
4198 wm_init_rxdesc(rxq, idx);
4199 } else
4200 wm_init_rxdesc(rxq, idx);
4201
4202 return 0;
4203 }
4204
4205 /*
4206 * wm_rxdrain:
4207 *
4208 * Drain the receive queue.
4209 */
4210 static void
4211 wm_rxdrain(struct wm_rxqueue *rxq)
4212 {
4213 struct wm_softc *sc = rxq->rxq_sc;
4214 struct wm_rxsoft *rxs;
4215 int i;
4216
4217 KASSERT(mutex_owned(rxq->rxq_lock));
4218
4219 for (i = 0; i < WM_NRXDESC; i++) {
4220 rxs = &rxq->rxq_soft[i];
4221 if (rxs->rxs_mbuf != NULL) {
4222 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4223 m_freem(rxs->rxs_mbuf);
4224 rxs->rxs_mbuf = NULL;
4225 }
4226 }
4227 }
4228
4229
4230 /*
4231  * XXX copied from FreeBSD's sys/net/rss_config.c
4232 */
4233 /*
4234 * RSS secret key, intended to prevent attacks on load-balancing. Its
4235 * effectiveness may be limited by algorithm choice and available entropy
4236 * during the boot.
4237 *
4238 * XXXRW: And that we don't randomize it yet!
4239 *
4240 * This is the default Microsoft RSS specification key which is also
4241 * the Chelsio T5 firmware default key.
4242 */
4243 #define RSS_KEYSIZE 40
4244 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
4245 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
4246 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
4247 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
4248 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
4249 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
4250 };
4251
4252 /*
4253  * The caller must pass an array of size sizeof(wm_rss_key).
4254  *
4255  * XXX
4256  * As if_ixgbe may also use this function, it should not be
4257  * an if_wm-specific function.
4258 */
4259 static void
4260 wm_rss_getkey(uint8_t *key)
4261 {
4262
4263 memcpy(key, wm_rss_key, sizeof(wm_rss_key));
4264 }
4265
4266 /*
4267  * Set up registers for RSS.
4268  *
4269  * XXX no VMDq support yet.
4270 */
4271 static void
4272 wm_init_rss(struct wm_softc *sc)
4273 {
4274 uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4275 int i;
4276
4277 CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
4278
4279 for (i = 0; i < RETA_NUM_ENTRIES; i++) {
4280 int qid, reta_ent;
4281
4282 qid = i % sc->sc_nqueues;
4283 		switch (sc->sc_type) {
4284 case WM_T_82574:
4285 reta_ent = __SHIFTIN(qid,
4286 RETA_ENT_QINDEX_MASK_82574);
4287 break;
4288 case WM_T_82575:
4289 reta_ent = __SHIFTIN(qid,
4290 RETA_ENT_QINDEX1_MASK_82575);
4291 break;
4292 default:
4293 reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
4294 break;
4295 }
4296
4297 reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
4298 reta_reg &= ~RETA_ENTRY_MASK_Q(i);
4299 reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
4300 CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
4301 }
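	/*
	 * E.g. with sc_nqueues == 4, the RETA entries above cycle
	 * through queues 0, 1, 2, 3, 0, 1, ..., so RSS hash values
	 * are spread round-robin across the queues.
	 */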
4302
4303 wm_rss_getkey((uint8_t *)rss_key);
4304 for (i = 0; i < RSSRK_NUM_REGS; i++)
4305 CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
4306
4307 if (sc->sc_type == WM_T_82574)
4308 mrqc = MRQC_ENABLE_RSS_MQ_82574;
4309 else
4310 mrqc = MRQC_ENABLE_RSS_MQ;
4311
4312 	/* XXX
4313 	 * The same as FreeBSD's igb.
4314 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
4315 */
4316 mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
4317 mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
4318 mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
4319 mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
4320
4321 CSR_WRITE(sc, WMREG_MRQC, mrqc);
4322 }
4323
4324 /*
4325  * Adjust the TX and RX queue numbers which the system actually uses.
4326  *
4327  * The numbers are affected by the parameters below.
4328  * - The number of hardware queues
4329 * - The number of MSI-X vectors (= "nvectors" argument)
4330 * - ncpu
4331 */
4332 static void
4333 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
4334 {
4335 int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
4336
4337 if (nvectors < 2) {
4338 sc->sc_nqueues = 1;
4339 return;
4340 }
4341
4342 	switch (sc->sc_type) {
4343 case WM_T_82572:
4344 hw_ntxqueues = 2;
4345 hw_nrxqueues = 2;
4346 break;
4347 case WM_T_82574:
4348 hw_ntxqueues = 2;
4349 hw_nrxqueues = 2;
4350 break;
4351 case WM_T_82575:
4352 hw_ntxqueues = 4;
4353 hw_nrxqueues = 4;
4354 break;
4355 case WM_T_82576:
4356 hw_ntxqueues = 16;
4357 hw_nrxqueues = 16;
4358 break;
4359 case WM_T_82580:
4360 case WM_T_I350:
4361 case WM_T_I354:
4362 hw_ntxqueues = 8;
4363 hw_nrxqueues = 8;
4364 break;
4365 case WM_T_I210:
4366 hw_ntxqueues = 4;
4367 hw_nrxqueues = 4;
4368 break;
4369 case WM_T_I211:
4370 hw_ntxqueues = 2;
4371 hw_nrxqueues = 2;
4372 break;
4373 /*
4374 	 * As the ethernet controllers below do not support MSI-X,
4375 	 * this driver does not use multiqueue on them.
4376 * - WM_T_80003
4377 * - WM_T_ICH8
4378 * - WM_T_ICH9
4379 * - WM_T_ICH10
4380 * - WM_T_PCH
4381 * - WM_T_PCH2
4382 * - WM_T_PCH_LPT
4383 */
4384 default:
4385 hw_ntxqueues = 1;
4386 hw_nrxqueues = 1;
4387 break;
4388 }
4389
4390 hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
4391
4392 /*
4393 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
4394 	 * the number of queues actually used.
4395 */
4396 if (nvectors < hw_nqueues + 1) {
4397 sc->sc_nqueues = nvectors - 1;
4398 } else {
4399 sc->sc_nqueues = hw_nqueues;
4400 }
4401
4402 /*
4403 	 * As more queues than CPUs cannot improve scaling, we limit
4404 	 * the number of queues actually used.
4405 */
4406 if (ncpu < sc->sc_nqueues)
4407 sc->sc_nqueues = ncpu;
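	/*
	 * Worked example (hypothetical configuration): an 82576
	 * reports 16 hardware queues; with nvectors == 5 the limit
	 * above yields 5 - 1 = 4 queues (one vector is reserved for
	 * the link interrupt), and on a 2-CPU system this is further
	 * clamped to 2.
	 */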
4408 }
4409
4410 /*
4411  * Both single-interrupt MSI and INTx can use this function.
4412 */
4413 static int
4414 wm_setup_legacy(struct wm_softc *sc)
4415 {
4416 pci_chipset_tag_t pc = sc->sc_pc;
4417 const char *intrstr = NULL;
4418 char intrbuf[PCI_INTRSTR_LEN];
4419 int error;
4420
4421 error = wm_alloc_txrx_queues(sc);
4422 if (error) {
4423 		aprint_error_dev(sc->sc_dev, "cannot allocate queues, error = %d\n",
4424 error);
4425 return ENOMEM;
4426 }
4427 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
4428 sizeof(intrbuf));
4429 #ifdef WM_MPSAFE
4430 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
4431 #endif
4432 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
4433 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
4434 if (sc->sc_ihs[0] == NULL) {
4435 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
4436 (pci_intr_type(pc, sc->sc_intrs[0])
4437 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
4438 return ENOMEM;
4439 }
4440
4441 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4442 sc->sc_nintrs = 1;
4443 return 0;
4444 }
4445
4446 static int
4447 wm_setup_msix(struct wm_softc *sc)
4448 {
4449 void *vih;
4450 kcpuset_t *affinity;
4451 int qidx, error, intr_idx, txrx_established;
4452 pci_chipset_tag_t pc = sc->sc_pc;
4453 const char *intrstr = NULL;
4454 char intrbuf[PCI_INTRSTR_LEN];
4455 char intr_xname[INTRDEVNAMEBUF];
4456
4457 if (sc->sc_nqueues < ncpu) {
4458 /*
4459 * To avoid other devices' interrupts, the affinity of Tx/Rx
4460 		 * interrupts starts from CPU#1.
4461 */
4462 sc->sc_affinity_offset = 1;
4463 } else {
4464 /*
4465 		 * In this case, this device uses all CPUs, so we unify the
4466 		 * affinitized cpu_index with the MSI-X vector number for readability.
4467 */
4468 sc->sc_affinity_offset = 0;
4469 }
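	/*
	 * Example of the offsets above (hypothetical counts): with
	 * 4 queues on an 8-CPU system, the TXRX0..TXRX3 interrupts
	 * land on CPU#1..CPU#4 and the LINK interrupt keeps the
	 * default affinity.
	 */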
4470
4471 error = wm_alloc_txrx_queues(sc);
4472 if (error) {
4473 		aprint_error_dev(sc->sc_dev, "cannot allocate queues, error = %d\n",
4474 error);
4475 return ENOMEM;
4476 }
4477
4478 kcpuset_create(&affinity, false);
4479 intr_idx = 0;
4480
4481 /*
4482 * TX and RX
4483 */
4484 txrx_established = 0;
4485 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
4486 struct wm_queue *wmq = &sc->sc_queue[qidx];
4487 int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
4488
4489 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4490 sizeof(intrbuf));
4491 #ifdef WM_MPSAFE
4492 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4493 PCI_INTR_MPSAFE, true);
4494 #endif
4495 memset(intr_xname, 0, sizeof(intr_xname));
4496 snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
4497 device_xname(sc->sc_dev), qidx);
4498 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4499 IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
4500 if (vih == NULL) {
4501 aprint_error_dev(sc->sc_dev,
4502 "unable to establish MSI-X(for TX and RX)%s%s\n",
4503 intrstr ? " at " : "",
4504 intrstr ? intrstr : "");
4505
4506 goto fail;
4507 }
4508 kcpuset_zero(affinity);
4509 /* Round-robin affinity */
4510 kcpuset_set(affinity, affinity_to);
4511 error = interrupt_distribute(vih, affinity, NULL);
4512 if (error == 0) {
4513 aprint_normal_dev(sc->sc_dev,
4514 "for TX and RX interrupting at %s affinity to %u\n",
4515 intrstr, affinity_to);
4516 } else {
4517 aprint_normal_dev(sc->sc_dev,
4518 "for TX and RX interrupting at %s\n", intrstr);
4519 }
4520 sc->sc_ihs[intr_idx] = vih;
4521 		wmq->wmq_id = qidx;
4522 wmq->wmq_intr_idx = intr_idx;
4523
4524 txrx_established++;
4525 intr_idx++;
4526 }
4527
4528 /*
4529 * LINK
4530 */
4531 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4532 sizeof(intrbuf));
4533 #ifdef WM_MPSAFE
4534 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
4535 #endif
4536 memset(intr_xname, 0, sizeof(intr_xname));
4537 snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
4538 device_xname(sc->sc_dev));
4539 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4540 IPL_NET, wm_linkintr_msix, sc, intr_xname);
4541 if (vih == NULL) {
4542 aprint_error_dev(sc->sc_dev,
4543 "unable to establish MSI-X(for LINK)%s%s\n",
4544 intrstr ? " at " : "",
4545 intrstr ? intrstr : "");
4546
4547 goto fail;
4548 }
4549 	/* Keep the default affinity for the LINK interrupt */
4550 aprint_normal_dev(sc->sc_dev,
4551 "for LINK interrupting at %s\n", intrstr);
4552 sc->sc_ihs[intr_idx] = vih;
4553 sc->sc_link_intr_idx = intr_idx;
4554
4555 sc->sc_nintrs = sc->sc_nqueues + 1;
4556 kcpuset_destroy(affinity);
4557 return 0;
4558
4559 fail:
4560 for (qidx = 0; qidx < txrx_established; qidx++) {
4561 struct wm_queue *wmq = &sc->sc_queue[qidx];
4562 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
4563 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
4564 }
4565
4566 kcpuset_destroy(affinity);
4567 return ENOMEM;
4568 }
4569
4570 static void
4571 wm_turnon(struct wm_softc *sc)
4572 {
4573 int i;
4574
4575 KASSERT(WM_CORE_LOCKED(sc));
4576
4577 	for (i = 0; i < sc->sc_nqueues; i++) {
4578 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4579 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
4580
4581 mutex_enter(txq->txq_lock);
4582 txq->txq_stopping = false;
4583 mutex_exit(txq->txq_lock);
4584
4585 mutex_enter(rxq->rxq_lock);
4586 rxq->rxq_stopping = false;
4587 mutex_exit(rxq->rxq_lock);
4588 }
4589
4590 sc->sc_core_stopping = false;
4591 }
4592
4593 static void
4594 wm_turnoff(struct wm_softc *sc)
4595 {
4596 int i;
4597
4598 KASSERT(WM_CORE_LOCKED(sc));
4599
4600 sc->sc_core_stopping = true;
4601
4602 	for (i = 0; i < sc->sc_nqueues; i++) {
4603 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
4604 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4605
4606 mutex_enter(rxq->rxq_lock);
4607 rxq->rxq_stopping = true;
4608 mutex_exit(rxq->rxq_lock);
4609
4610 mutex_enter(txq->txq_lock);
4611 txq->txq_stopping = true;
4612 mutex_exit(txq->txq_lock);
4613 }
4614 }
4615
4616 /*
4617 * wm_init: [ifnet interface function]
4618 *
4619 * Initialize the interface.
4620 */
4621 static int
4622 wm_init(struct ifnet *ifp)
4623 {
4624 struct wm_softc *sc = ifp->if_softc;
4625 int ret;
4626
4627 WM_CORE_LOCK(sc);
4628 ret = wm_init_locked(ifp);
4629 WM_CORE_UNLOCK(sc);
4630
4631 return ret;
4632 }
4633
4634 static int
4635 wm_init_locked(struct ifnet *ifp)
4636 {
4637 struct wm_softc *sc = ifp->if_softc;
4638 int i, j, trynum, error = 0;
4639 uint32_t reg;
4640
4641 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4642 device_xname(sc->sc_dev), __func__));
4643 KASSERT(WM_CORE_LOCKED(sc));
4644
4645 /*
4646 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4647 	 * There is a small but measurable benefit to avoiding the adjustment
4648 * of the descriptor so that the headers are aligned, for normal mtu,
4649 * on such platforms. One possibility is that the DMA itself is
4650 * slightly more efficient if the front of the entire packet (instead
4651 * of the front of the headers) is aligned.
4652 *
4653 * Note we must always set align_tweak to 0 if we are using
4654 * jumbo frames.
4655 */
4656 #ifdef __NO_STRICT_ALIGNMENT
4657 sc->sc_align_tweak = 0;
4658 #else
4659 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4660 sc->sc_align_tweak = 0;
4661 else
4662 sc->sc_align_tweak = 2;
4663 #endif /* __NO_STRICT_ALIGNMENT */
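	/*
	 * Example (standard MTU on a strict-alignment platform): with
	 * sc_align_tweak == 2, receive data starts two bytes into the
	 * buffer, so the IP header following the 14-byte Ethernet
	 * header lands on a 4-byte boundary.
	 */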
4664
4665 /* Cancel any pending I/O. */
4666 wm_stop_locked(ifp, 0);
4667
4668 /* update statistics before reset */
4669 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4670 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4671
4672 /* Reset the chip to a known state. */
4673 wm_reset(sc);
4674
4675 switch (sc->sc_type) {
4676 case WM_T_82571:
4677 case WM_T_82572:
4678 case WM_T_82573:
4679 case WM_T_82574:
4680 case WM_T_82583:
4681 case WM_T_80003:
4682 case WM_T_ICH8:
4683 case WM_T_ICH9:
4684 case WM_T_ICH10:
4685 case WM_T_PCH:
4686 case WM_T_PCH2:
4687 case WM_T_PCH_LPT:
4688 case WM_T_PCH_SPT:
4689 /* AMT based hardware can now take control from firmware */
4690 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
4691 wm_get_hw_control(sc);
4692 break;
4693 default:
4694 break;
4695 }
4696
4697 /* Init hardware bits */
4698 wm_initialize_hardware_bits(sc);
4699
4700 /* Reset the PHY. */
4701 if (sc->sc_flags & WM_F_HAS_MII)
4702 wm_gmii_reset(sc);
4703
4704 /* Calculate (E)ITR value */
4705 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4706 sc->sc_itr = 450; /* For EITR */
4707 } else if (sc->sc_type >= WM_T_82543) {
4708 /*
4709 * Set up the interrupt throttling register (units of 256ns)
4710 * Note that a footnote in Intel's documentation says this
4711 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4712 		 * or 10Mbit mode. Empirically, this also appears to be
4713 		 * true for the 1024ns units of the other interrupt-related
4714 		 * timer registers -- so, really, we ought
4715 * to divide this value by 4 when the link speed is low.
4716 *
4717 * XXX implement this division at link speed change!
4718 */
4719
4720 /*
4721 * For N interrupts/sec, set this value to:
4722 * 1000000000 / (N * 256). Note that we set the
4723 * absolute and packet timer values to this value
4724 * divided by 4 to get "simple timer" behavior.
4725 */
4726
4727 sc->sc_itr = 1500; /* 2604 ints/sec */
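		/*
		 * Worked example of the formula above:
		 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec, and
		 * the absolute/packet timers would be programmed with
		 * 1500 / 4 = 375 for "simple timer" behavior.
		 */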
4728 }
4729
4730 error = wm_init_txrx_queues(sc);
4731 if (error)
4732 goto out;
4733
4734 /*
4735 * Clear out the VLAN table -- we don't use it (yet).
4736 */
4737 CSR_WRITE(sc, WMREG_VET, 0);
4738 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4739 trynum = 10; /* Due to hw errata */
4740 else
4741 trynum = 1;
4742 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4743 for (j = 0; j < trynum; j++)
4744 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4745
4746 /*
4747 * Set up flow-control parameters.
4748 *
4749 * XXX Values could probably stand some tuning.
4750 */
4751 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4752 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4753 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
4754 && (sc->sc_type != WM_T_PCH_SPT)) {
4755 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4756 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4757 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4758 }
4759
4760 sc->sc_fcrtl = FCRTL_DFLT;
4761 if (sc->sc_type < WM_T_82543) {
4762 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4763 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4764 } else {
4765 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4766 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4767 }
4768
4769 if (sc->sc_type == WM_T_80003)
4770 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4771 else
4772 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4773
4774 /* Writes the control register. */
4775 wm_set_vlan(sc);
4776
4777 if (sc->sc_flags & WM_F_HAS_MII) {
4778 int val;
4779
4780 switch (sc->sc_type) {
4781 case WM_T_80003:
4782 case WM_T_ICH8:
4783 case WM_T_ICH9:
4784 case WM_T_ICH10:
4785 case WM_T_PCH:
4786 case WM_T_PCH2:
4787 case WM_T_PCH_LPT:
4788 case WM_T_PCH_SPT:
4789 /*
4790 			 * Set the MAC to wait the maximum time between
4791 			 * each iteration and increase the max iterations
4792 			 * when polling the PHY; this fixes erroneous
4793 			 * timeouts at 10Mbps.
4794 */
4795 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4796 0xFFFF);
4797 val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
4798 val |= 0x3F;
4799 wm_kmrn_writereg(sc,
4800 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4801 break;
4802 default:
4803 break;
4804 }
4805
4806 if (sc->sc_type == WM_T_80003) {
4807 val = CSR_READ(sc, WMREG_CTRL_EXT);
4808 val &= ~CTRL_EXT_LINK_MODE_MASK;
4809 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4810
4811 /* Bypass RX and TX FIFO's */
4812 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4813 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4814 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4815 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4816 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4817 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4818 }
4819 }
4820 #if 0
4821 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4822 #endif
4823
4824 /* Set up checksum offload parameters. */
4825 reg = CSR_READ(sc, WMREG_RXCSUM);
4826 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4827 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4828 reg |= RXCSUM_IPOFL;
4829 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4830 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4831 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4832 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4833 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4834
4835 /* Set up MSI-X */
4836 if (sc->sc_nintrs > 1) {
4837 uint32_t ivar;
4838 struct wm_queue *wmq;
4839 int qid, qintr_idx;
4840
4841 if (sc->sc_type == WM_T_82575) {
4842 /* Interrupt control */
4843 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4844 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4845 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4846
4847 /* TX and RX */
4848 for (i = 0; i < sc->sc_nqueues; i++) {
4849 wmq = &sc->sc_queue[i];
4850 CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
4851 EITR_TX_QUEUE(wmq->wmq_id)
4852 | EITR_RX_QUEUE(wmq->wmq_id));
4853 }
4854 /* Link status */
4855 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
4856 EITR_OTHER);
4857 } else if (sc->sc_type == WM_T_82574) {
4858 /* Interrupt control */
4859 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4860 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4861 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4862
4863 ivar = 0;
4864 /* TX and RX */
4865 for (i = 0; i < sc->sc_nqueues; i++) {
4866 wmq = &sc->sc_queue[i];
4867 qid = wmq->wmq_id;
4868 qintr_idx = wmq->wmq_intr_idx;
4869
4870 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
4871 IVAR_TX_MASK_Q_82574(qid));
4872 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
4873 IVAR_RX_MASK_Q_82574(qid));
4874 }
4875 /* Link status */
4876 ivar |= __SHIFTIN((IVAR_VALID_82574
4877 | sc->sc_link_intr_idx), IVAR_OTHER_MASK);
4878 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4879 } else {
4880 /* Interrupt control */
4881 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
4882 | GPIE_EIAME | GPIE_PBA);
4883
4884 switch (sc->sc_type) {
4885 case WM_T_82580:
4886 case WM_T_I350:
4887 case WM_T_I354:
4888 case WM_T_I210:
4889 case WM_T_I211:
4890 /* TX and RX */
4891 for (i = 0; i < sc->sc_nqueues; i++) {
4892 wmq = &sc->sc_queue[i];
4893 qid = wmq->wmq_id;
4894 qintr_idx = wmq->wmq_intr_idx;
4895
4896 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4897 ivar &= ~IVAR_TX_MASK_Q(qid);
4898 ivar |= __SHIFTIN((qintr_idx
4899 | IVAR_VALID),
4900 IVAR_TX_MASK_Q(qid));
4901 ivar &= ~IVAR_RX_MASK_Q(qid);
4902 ivar |= __SHIFTIN((qintr_idx
4903 | IVAR_VALID),
4904 IVAR_RX_MASK_Q(qid));
4905 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
4906 }
4907 break;
4908 case WM_T_82576:
4909 /* TX and RX */
4910 for (i = 0; i < sc->sc_nqueues; i++) {
4911 wmq = &sc->sc_queue[i];
4912 qid = wmq->wmq_id;
4913 qintr_idx = wmq->wmq_intr_idx;
4914
4915 ivar = CSR_READ(sc,
4916 WMREG_IVAR_Q_82576(qid));
4917 ivar &= ~IVAR_TX_MASK_Q_82576(qid);
4918 ivar |= __SHIFTIN((qintr_idx
4919 | IVAR_VALID),
4920 IVAR_TX_MASK_Q_82576(qid));
4921 ivar &= ~IVAR_RX_MASK_Q_82576(qid);
4922 ivar |= __SHIFTIN((qintr_idx
4923 | IVAR_VALID),
4924 IVAR_RX_MASK_Q_82576(qid));
4925 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
4926 ivar);
4927 }
4928 break;
4929 default:
4930 break;
4931 }
4932
4933 /* Link status */
4934 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
4935 IVAR_MISC_OTHER);
4936 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
4937 }
4938
4939 if (sc->sc_nqueues > 1) {
4940 wm_init_rss(sc);
4941
4942 /*
4943 			 * NOTE: Receive Full-Packet Checksum Offload
4944 			 * is mutually exclusive with Multiqueue. However,
4945 			 * this is not the same as TCP/IP checksums, which
4946 			 * still work.
4947 */
4948 reg = CSR_READ(sc, WMREG_RXCSUM);
4949 reg |= RXCSUM_PCSD;
4950 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4951 }
4952 }
4953
4954 /* Set up the interrupt registers. */
4955 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4956 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4957 ICR_RXO | ICR_RXT0;
4958 if (sc->sc_nintrs > 1) {
4959 uint32_t mask;
4960 struct wm_queue *wmq;
4961
4962 switch (sc->sc_type) {
4963 case WM_T_82574:
4964 CSR_WRITE(sc, WMREG_EIAC_82574,
4965 WMREG_EIAC_82574_MSIX_MASK);
4966 sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
4967 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4968 break;
4969 default:
4970 if (sc->sc_type == WM_T_82575) {
4971 mask = 0;
4972 for (i = 0; i < sc->sc_nqueues; i++) {
4973 wmq = &sc->sc_queue[i];
4974 mask |= EITR_TX_QUEUE(wmq->wmq_id);
4975 mask |= EITR_RX_QUEUE(wmq->wmq_id);
4976 }
4977 mask |= EITR_OTHER;
4978 } else {
4979 mask = 0;
4980 for (i = 0; i < sc->sc_nqueues; i++) {
4981 wmq = &sc->sc_queue[i];
4982 mask |= 1 << wmq->wmq_intr_idx;
4983 }
4984 mask |= 1 << sc->sc_link_intr_idx;
4985 }
4986 CSR_WRITE(sc, WMREG_EIAC, mask);
4987 CSR_WRITE(sc, WMREG_EIAM, mask);
4988 CSR_WRITE(sc, WMREG_EIMS, mask);
4989 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
4990 break;
4991 }
4992 } else
4993 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4994
4995 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4996 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4997 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4998 || (sc->sc_type == WM_T_PCH_SPT)) {
4999 reg = CSR_READ(sc, WMREG_KABGTXD);
5000 reg |= KABGTXD_BGSQLBIAS;
5001 CSR_WRITE(sc, WMREG_KABGTXD, reg);
5002 }
5003
5004 /* Set up the inter-packet gap. */
5005 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
5006
5007 if (sc->sc_type >= WM_T_82543) {
5008 /*
5009 		 * XXX 82574 has both ITR and EITR. Set EITR when we use
5010 		 * the multiqueue function with MSI-X.
5011 */
5012 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5013 int qidx;
5014 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5015 struct wm_queue *wmq = &sc->sc_queue[qidx];
5016 CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
5017 sc->sc_itr);
5018 }
5019 /*
5020 			 * Link interrupts occur much less frequently than
5021 			 * TX and RX interrupts, so we don't tune the
5022 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
5023 			 * FreeBSD's if_igb does.
5024 */
5025 } else
5026 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
5027 }
5028
5029 /* Set the VLAN ethernetype. */
5030 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
5031
5032 /*
5033 * Set up the transmit control register; we start out with
5034 	 * a collision distance suitable for FDX, but update it when
5035 * we resolve the media type.
5036 */
5037 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
5038 | TCTL_CT(TX_COLLISION_THRESHOLD)
5039 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5040 if (sc->sc_type >= WM_T_82571)
5041 sc->sc_tctl |= TCTL_MULR;
5042 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5043
5044 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5045 		/* Write TDT after TCTL.EN is set. See the documentation. */
5046 CSR_WRITE(sc, WMREG_TDT(0), 0);
5047 }
5048
5049 if (sc->sc_type == WM_T_80003) {
5050 reg = CSR_READ(sc, WMREG_TCTL_EXT);
5051 reg &= ~TCTL_EXT_GCEX_MASK;
5052 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
5053 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
5054 }
5055
5056 /* Set the media. */
5057 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
5058 goto out;
5059
5060 /* Configure for OS presence */
5061 wm_init_manageability(sc);
5062
5063 /*
5064 * Set up the receive control register; we actually program
5065 * the register when we set the receive filter. Use multicast
5066 * address offset type 0.
5067 *
5068 * Only the i82544 has the ability to strip the incoming
5069 * CRC, so we don't enable that feature.
5070 */
5071 sc->sc_mchash_type = 0;
5072 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
5073 | RCTL_MO(sc->sc_mchash_type);
5074
5075 /*
5076 * The I350 has a bug where it always strips the CRC whether
5077 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
5078 */
5079 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5080 || (sc->sc_type == WM_T_I210))
5081 sc->sc_rctl |= RCTL_SECRC;
5082
5083 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
5084 && (ifp->if_mtu > ETHERMTU)) {
5085 sc->sc_rctl |= RCTL_LPE;
5086 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5087 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
5088 }
5089
5090 if (MCLBYTES == 2048) {
5091 sc->sc_rctl |= RCTL_2k;
5092 } else {
5093 if (sc->sc_type >= WM_T_82543) {
5094 switch (MCLBYTES) {
5095 case 4096:
5096 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
5097 break;
5098 case 8192:
5099 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
5100 break;
5101 case 16384:
5102 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
5103 break;
5104 default:
5105 panic("wm_init: MCLBYTES %d unsupported",
5106 MCLBYTES);
5107 break;
5108 }
5109 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
5110 }
5111
5112 /* Set the receive filter. */
5113 wm_set_filter(sc);
5114
5115 /* Enable ECC */
5116 switch (sc->sc_type) {
5117 case WM_T_82571:
5118 reg = CSR_READ(sc, WMREG_PBA_ECC);
5119 reg |= PBA_ECC_CORR_EN;
5120 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
5121 break;
5122 case WM_T_PCH_LPT:
5123 case WM_T_PCH_SPT:
5124 reg = CSR_READ(sc, WMREG_PBECCSTS);
5125 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
5126 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
5127
5128 reg = CSR_READ(sc, WMREG_CTRL);
5129 reg |= CTRL_MEHE;
5130 CSR_WRITE(sc, WMREG_CTRL, reg);
5131 break;
5132 default:
5133 break;
5134 }
5135
5136 /* On 575 and later set RDT only if RX enabled */
5137 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5138 int qidx;
5139 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5140 struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
5141 for (i = 0; i < WM_NRXDESC; i++) {
5142 mutex_enter(rxq->rxq_lock);
5143 wm_init_rxdesc(rxq, i);
5144 mutex_exit(rxq->rxq_lock);
5145
5146 }
5147 }
5148 }
5149
5150 wm_turnon(sc);
5151
5152 /* Start the one second link check clock. */
5153 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
5154
5155 /* ...all done! */
5156 ifp->if_flags |= IFF_RUNNING;
5157 ifp->if_flags &= ~IFF_OACTIVE;
5158
5159 out:
5160 sc->sc_if_flags = ifp->if_flags;
5161 if (error)
5162 log(LOG_ERR, "%s: interface not running\n",
5163 device_xname(sc->sc_dev));
5164 return error;
5165 }
5166
5167 /*
5168 * wm_stop: [ifnet interface function]
5169 *
5170 * Stop transmission on the interface.
5171 */
5172 static void
5173 wm_stop(struct ifnet *ifp, int disable)
5174 {
5175 struct wm_softc *sc = ifp->if_softc;
5176
5177 WM_CORE_LOCK(sc);
5178 wm_stop_locked(ifp, disable);
5179 WM_CORE_UNLOCK(sc);
5180 }
5181
5182 static void
5183 wm_stop_locked(struct ifnet *ifp, int disable)
5184 {
5185 struct wm_softc *sc = ifp->if_softc;
5186 struct wm_txsoft *txs;
5187 int i, qidx;
5188
5189 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5190 device_xname(sc->sc_dev), __func__));
5191 KASSERT(WM_CORE_LOCKED(sc));
5192
5193 wm_turnoff(sc);
5194
5195 /* Stop the one second clock. */
5196 callout_stop(&sc->sc_tick_ch);
5197
5198 /* Stop the 82547 Tx FIFO stall check timer. */
5199 if (sc->sc_type == WM_T_82547)
5200 callout_stop(&sc->sc_txfifo_ch);
5201
5202 if (sc->sc_flags & WM_F_HAS_MII) {
5203 /* Down the MII. */
5204 mii_down(&sc->sc_mii);
5205 } else {
5206 #if 0
5207 /* Should we clear PHY's status properly? */
5208 wm_reset(sc);
5209 #endif
5210 }
5211
5212 /* Stop the transmit and receive processes. */
5213 CSR_WRITE(sc, WMREG_TCTL, 0);
5214 CSR_WRITE(sc, WMREG_RCTL, 0);
5215 sc->sc_rctl &= ~RCTL_EN;
5216
5217 /*
5218 * Clear the interrupt mask to ensure the device cannot assert its
5219 * interrupt line.
5220 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
5221 * service any currently pending or shared interrupt.
5222 */
5223 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5224 sc->sc_icr = 0;
5225 if (sc->sc_nintrs > 1) {
5226 if (sc->sc_type != WM_T_82574) {
5227 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5228 CSR_WRITE(sc, WMREG_EIAC, 0);
5229 } else
5230 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5231 }
5232
5233 /* Release any queued transmit buffers. */
5234 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5235 struct wm_queue *wmq = &sc->sc_queue[qidx];
5236 struct wm_txqueue *txq = &wmq->wmq_txq;
5237 mutex_enter(txq->txq_lock);
5238 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5239 txs = &txq->txq_soft[i];
5240 if (txs->txs_mbuf != NULL) {
5241 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5242 m_freem(txs->txs_mbuf);
5243 txs->txs_mbuf = NULL;
5244 }
5245 }
5246 if (sc->sc_type == WM_T_PCH_SPT) {
5247 pcireg_t preg;
5248 uint32_t reg;
5249 int nexttx;
5250
5251 /* First, disable MULR fix in FEXTNVM11 */
5252 reg = CSR_READ(sc, WMREG_FEXTNVM11);
5253 reg |= FEXTNVM11_DIS_MULRFIX;
5254 CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
5255
5256 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
5257 WM_PCI_DESCRING_STATUS);
5258 reg = CSR_READ(sc, WMREG_TDLEN(0));
5259 printf("XXX RST: FLUSH = %08x, len = %u\n",
5260 (uint32_t)(preg & DESCRING_STATUS_FLUSH_REQ), reg);
5261 if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0)
5262 && (reg != 0)) {
5263 /* TX */
5264 printf("XXX need TX flush (reg = %08x)\n",
5265 preg);
5266 wm_init_tx_descs(sc, txq);
5267 wm_init_tx_regs(sc, wmq, txq);
5268 nexttx = txq->txq_next;
5269 wm_set_dma_addr(
5270 &txq->txq_descs[nexttx].wtx_addr,
5271 WM_CDTXADDR(txq, nexttx));
5272 txq->txq_descs[nexttx].wtx_cmdlen
5273 = htole32(WTX_CMD_IFCS | 512);
5274 wm_cdtxsync(txq, nexttx, 1,
5275 BUS_DMASYNC_PREREAD |BUS_DMASYNC_PREWRITE);
5276 CSR_WRITE(sc, WMREG_TCTL, TCTL_EN);
5277 CSR_WRITE(sc, WMREG_TDT(0), nexttx);
5278 CSR_WRITE_FLUSH(sc);
5279 delay(250);
5280 CSR_WRITE(sc, WMREG_TCTL, 0);
5281 }
5282 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
5283 WM_PCI_DESCRING_STATUS);
5284 if (preg & DESCRING_STATUS_FLUSH_REQ) {
5285 /* RX */
5286 printf("XXX need RX flush\n");
5287 }
5288 }
5289 mutex_exit(txq->txq_lock);
5290 }
5291
5292 /* Mark the interface as down and cancel the watchdog timer. */
5293 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5294 ifp->if_timer = 0;
5295
5296 if (disable) {
5297 for (i = 0; i < sc->sc_nqueues; i++) {
5298 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5299 mutex_enter(rxq->rxq_lock);
5300 wm_rxdrain(rxq);
5301 mutex_exit(rxq->rxq_lock);
5302 }
5303 }
5304
5305 #if 0 /* notyet */
5306 if (sc->sc_type >= WM_T_82544)
5307 CSR_WRITE(sc, WMREG_WUC, 0);
5308 #endif
5309 }
5310
5311 static void
5312 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5313 {
5314 struct mbuf *m;
5315 int i;
5316
5317 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5318 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5319 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5320 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5321 m->m_data, m->m_len, m->m_flags);
5322 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5323 i, i == 1 ? "" : "s");
5324 }
5325
5326 /*
5327 * wm_82547_txfifo_stall:
5328 *
5329 * Callout used to wait for the 82547 Tx FIFO to drain,
5330 * reset the FIFO pointers, and restart packet transmission.
5331 */
5332 static void
5333 wm_82547_txfifo_stall(void *arg)
5334 {
5335 struct wm_softc *sc = arg;
5336 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5337
5338 mutex_enter(txq->txq_lock);
5339
5340 if (txq->txq_stopping)
5341 goto out;
5342
5343 if (txq->txq_fifo_stall) {
5344 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
5345 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5346 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5347 /*
5348 * Packets have drained. Stop transmitter, reset
5349 * FIFO pointers, restart transmitter, and kick
5350 * the packet queue.
5351 */
5352 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5353 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5354 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
5355 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
5356 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
5357 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
5358 CSR_WRITE(sc, WMREG_TCTL, tctl);
5359 CSR_WRITE_FLUSH(sc);
5360
5361 txq->txq_fifo_head = 0;
5362 txq->txq_fifo_stall = 0;
5363 wm_start_locked(&sc->sc_ethercom.ec_if);
5364 } else {
5365 /*
5366 * Still waiting for packets to drain; try again in
5367 * another tick.
5368 */
5369 callout_schedule(&sc->sc_txfifo_ch, 1);
5370 }
5371 }
5372
5373 out:
5374 mutex_exit(txq->txq_lock);
5375 }
5376
5377 /*
5378 * wm_82547_txfifo_bugchk:
5379 *
5380 * Check for bug condition in the 82547 Tx FIFO. We need to
5381 * prevent enqueueing a packet that would wrap around the end
5382  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
5383 *
5384 * We do this by checking the amount of space before the end
5385 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
5386 * the Tx FIFO, wait for all remaining packets to drain, reset
5387 * the internal FIFO pointers to the beginning, and restart
5388 * transmission on the interface.
5389 */
5390 #define WM_FIFO_HDR 0x10
5391 #define WM_82547_PAD_LEN 0x3e0
5392 static int
5393 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5394 {
5395 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5396 int space = txq->txq_fifo_size - txq->txq_fifo_head;
5397 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
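	/*
	 * E.g. a 100-byte packet consumes
	 * roundup(100 + 0x10, 0x10) = 128 bytes of FIFO space.
	 */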
5398
5399 /* Just return if already stalled. */
5400 if (txq->txq_fifo_stall)
5401 return 1;
5402
5403 if (sc->sc_mii.mii_media_active & IFM_FDX) {
5404 /* Stall only occurs in half-duplex mode. */
5405 goto send_packet;
5406 }
5407
5408 if (len >= WM_82547_PAD_LEN + space) {
5409 txq->txq_fifo_stall = 1;
5410 callout_schedule(&sc->sc_txfifo_ch, 1);
5411 return 1;
5412 }
5413
5414 send_packet:
5415 txq->txq_fifo_head += len;
5416 if (txq->txq_fifo_head >= txq->txq_fifo_size)
5417 txq->txq_fifo_head -= txq->txq_fifo_size;
5418
5419 return 0;
5420 }
5421
5422 static int
5423 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5424 {
5425 int error;
5426
5427 /*
5428 * Allocate the control data structures, and create and load the
5429 * DMA map for it.
5430 *
5431 * NOTE: All Tx descriptors must be in the same 4G segment of
5432 * memory. So must Rx descriptors. We simplify by allocating
5433 * both sets within the same 4G segment.
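	 * The 4G constraint is enforced by the 4GB boundary argument
	 * (0x100000000ULL) passed to bus_dmamem_alloc() below.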
5434 */
5435 if (sc->sc_type < WM_T_82544)
5436 WM_NTXDESC(txq) = WM_NTXDESC_82542;
5437 else
5438 WM_NTXDESC(txq) = WM_NTXDESC_82544;
5439 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5440 txq->txq_descsize = sizeof(nq_txdesc_t);
5441 else
5442 txq->txq_descsize = sizeof(wiseman_txdesc_t);
5443
5444 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
5445 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
5446 1, &txq->txq_desc_rseg, 0)) != 0) {
5447 aprint_error_dev(sc->sc_dev,
5448 "unable to allocate TX control data, error = %d\n",
5449 error);
5450 goto fail_0;
5451 }
5452
5453 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
5454 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
5455 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
5456 aprint_error_dev(sc->sc_dev,
5457 "unable to map TX control data, error = %d\n", error);
5458 goto fail_1;
5459 }
5460
5461 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
5462 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
5463 aprint_error_dev(sc->sc_dev,
5464 "unable to create TX control data DMA map, error = %d\n",
5465 error);
5466 goto fail_2;
5467 }
5468
5469 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
5470 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
5471 aprint_error_dev(sc->sc_dev,
5472 "unable to load TX control data DMA map, error = %d\n",
5473 error);
5474 goto fail_3;
5475 }
5476
5477 return 0;
5478
5479 fail_3:
5480 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5481 fail_2:
5482 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5483 WM_TXDESCS_SIZE(txq));
5484 fail_1:
5485 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5486 fail_0:
5487 return error;
5488 }
5489
5490 static void
5491 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5492 {
5493
5494 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
5495 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5496 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5497 WM_TXDESCS_SIZE(txq));
5498 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5499 }
5500
5501 static int
5502 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5503 {
5504 int error;
5505
5506 /*
5507 * Allocate the control data structures, and create and load the
5508 * DMA map for it.
5509 *
5510 * NOTE: All Tx descriptors must be in the same 4G segment of
5511 * memory. So must Rx descriptors. We simplify by allocating
5512 * both sets within the same 4G segment.
5513 */
5514 rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
5515 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
5516 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
5517 1, &rxq->rxq_desc_rseg, 0)) != 0) {
5518 aprint_error_dev(sc->sc_dev,
5519 "unable to allocate RX control data, error = %d\n",
5520 error);
5521 goto fail_0;
5522 }
5523
5524 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
5525 rxq->rxq_desc_rseg, rxq->rxq_desc_size,
5526 (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
5527 aprint_error_dev(sc->sc_dev,
5528 "unable to map RX control data, error = %d\n", error);
5529 goto fail_1;
5530 }
5531
5532 if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
5533 rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
5534 aprint_error_dev(sc->sc_dev,
5535 "unable to create RX control data DMA map, error = %d\n",
5536 error);
5537 goto fail_2;
5538 }
5539
5540 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
5541 rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
5542 aprint_error_dev(sc->sc_dev,
5543 "unable to load RX control data DMA map, error = %d\n",
5544 error);
5545 goto fail_3;
5546 }
5547
5548 return 0;
5549
5550 fail_3:
5551 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5552 fail_2:
5553 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5554 rxq->rxq_desc_size);
5555 fail_1:
5556 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5557 fail_0:
5558 return error;
5559 }
5560
5561 static void
5562 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5563 {
5564
5565 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
5566 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5567 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5568 rxq->rxq_desc_size);
5569 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5570 }
5571
5572
5573 static int
5574 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5575 {
5576 int i, error;
5577
5578 /* Create the transmit buffer DMA maps. */
5579 WM_TXQUEUELEN(txq) =
5580 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
5581 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
5582 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5583 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
5584 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
5585 &txq->txq_soft[i].txs_dmamap)) != 0) {
5586 aprint_error_dev(sc->sc_dev,
5587 "unable to create Tx DMA map %d, error = %d\n",
5588 i, error);
5589 goto fail;
5590 }
5591 }
5592
5593 return 0;
5594
5595 fail:
5596 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5597 if (txq->txq_soft[i].txs_dmamap != NULL)
5598 bus_dmamap_destroy(sc->sc_dmat,
5599 txq->txq_soft[i].txs_dmamap);
5600 }
5601 return error;
5602 }
5603
5604 static void
5605 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5606 {
5607 int i;
5608
5609 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5610 if (txq->txq_soft[i].txs_dmamap != NULL)
5611 bus_dmamap_destroy(sc->sc_dmat,
5612 txq->txq_soft[i].txs_dmamap);
5613 }
5614 }
5615
5616 static int
5617 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5618 {
5619 int i, error;
5620
5621 /* Create the receive buffer DMA maps. */
5622 for (i = 0; i < WM_NRXDESC; i++) {
5623 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
5624 MCLBYTES, 0, 0,
5625 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
5626 aprint_error_dev(sc->sc_dev,
5627 "unable to create Rx DMA map %d error = %d\n",
5628 i, error);
5629 goto fail;
5630 }
5631 rxq->rxq_soft[i].rxs_mbuf = NULL;
5632 }
5633
5634 return 0;
5635
5636 fail:
5637 for (i = 0; i < WM_NRXDESC; i++) {
5638 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5639 bus_dmamap_destroy(sc->sc_dmat,
5640 rxq->rxq_soft[i].rxs_dmamap);
5641 }
5642 return error;
5643 }
5644
5645 static void
5646 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5647 {
5648 int i;
5649
5650 for (i = 0; i < WM_NRXDESC; i++) {
5651 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5652 bus_dmamap_destroy(sc->sc_dmat,
5653 rxq->rxq_soft[i].rxs_dmamap);
5654 }
5655 }
5656
5657 /*
5658	 * wm_alloc_txrx_queues:
5659	 *	Allocate {tx,rx} descriptors and {tx,rx} buffers
5660 */
5661 static int
5662 wm_alloc_txrx_queues(struct wm_softc *sc)
5663 {
5664 int i, error, tx_done, rx_done;
5665
5666 sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
5667 KM_SLEEP);
5668 if (sc->sc_queue == NULL) {
5669 aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
5670 error = ENOMEM;
5671 goto fail_0;
5672 }
5673
5674 /*
5675 * For transmission
5676 */
5677 error = 0;
5678 tx_done = 0;
5679 for (i = 0; i < sc->sc_nqueues; i++) {
5680 #ifdef WM_EVENT_COUNTERS
5681 int j;
5682 const char *xname;
5683 #endif
5684 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5685 txq->txq_sc = sc;
5686 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5687
5688 error = wm_alloc_tx_descs(sc, txq);
5689 if (error)
5690 break;
5691 error = wm_alloc_tx_buffer(sc, txq);
5692 if (error) {
5693 wm_free_tx_descs(sc, txq);
5694 break;
5695 }
5696 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
5697 if (txq->txq_interq == NULL) {
5698 wm_free_tx_descs(sc, txq);
5699 wm_free_tx_buffer(sc, txq);
5700 error = ENOMEM;
5701 break;
5702 }
5703
5704 #ifdef WM_EVENT_COUNTERS
5705 xname = device_xname(sc->sc_dev);
5706
5707 WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
5708 WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
5709 WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
5710 WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
5711 WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
5712
5713 WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
5714 WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
5715 WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
5716 WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
5717 WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
5718 WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
5719
5720 for (j = 0; j < WM_NTXSEGS; j++) {
5721 snprintf(txq->txq_txseg_evcnt_names[j],
5722 sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
5723 evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
5724 NULL, xname, txq->txq_txseg_evcnt_names[j]);
5725 }
5726
5727 WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
5728
5729 WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
5730 #endif /* WM_EVENT_COUNTERS */
5731
5732 tx_done++;
5733 }
5734 if (error)
5735 goto fail_1;
5736
5737 /*
5738	 * For receive
5739 */
5740 error = 0;
5741 rx_done = 0;
5742 for (i = 0; i < sc->sc_nqueues; i++) {
5743 #ifdef WM_EVENT_COUNTERS
5744 const char *xname;
5745 #endif
5746 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5747 rxq->rxq_sc = sc;
5748 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5749
5750 error = wm_alloc_rx_descs(sc, rxq);
5751 if (error)
5752 break;
5753
5754 error = wm_alloc_rx_buffer(sc, rxq);
5755 if (error) {
5756 wm_free_rx_descs(sc, rxq);
5757 break;
5758 }
5759
5760 #ifdef WM_EVENT_COUNTERS
5761 xname = device_xname(sc->sc_dev);
5762
5763 WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
5764
5765 WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
5766 WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
5767 #endif /* WM_EVENT_COUNTERS */
5768
5769 rx_done++;
5770 }
5771 if (error)
5772 goto fail_2;
5773
5774 return 0;
5775
5776 fail_2:
5777 for (i = 0; i < rx_done; i++) {
5778 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5779 wm_free_rx_buffer(sc, rxq);
5780 wm_free_rx_descs(sc, rxq);
5781 if (rxq->rxq_lock)
5782 mutex_obj_free(rxq->rxq_lock);
5783 }
5784 fail_1:
5785 for (i = 0; i < tx_done; i++) {
5786 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5787 pcq_destroy(txq->txq_interq);
5788 wm_free_tx_buffer(sc, txq);
5789 wm_free_tx_descs(sc, txq);
5790 if (txq->txq_lock)
5791 mutex_obj_free(txq->txq_lock);
5792 }
5793
5794 kmem_free(sc->sc_queue,
5795 sizeof(struct wm_queue) * sc->sc_nqueues);
5796 fail_0:
5797 return error;
5798 }
5799
5800 /*
5801	 * wm_free_txrx_queues:
5802	 *	Free {tx,rx} descriptors and {tx,rx} buffers
5803 */
5804 static void
5805 wm_free_txrx_queues(struct wm_softc *sc)
5806 {
5807 int i;
5808
5809 for (i = 0; i < sc->sc_nqueues; i++) {
5810 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5811 wm_free_rx_buffer(sc, rxq);
5812 wm_free_rx_descs(sc, rxq);
5813 if (rxq->rxq_lock)
5814 mutex_obj_free(rxq->rxq_lock);
5815 }
5816
5817 for (i = 0; i < sc->sc_nqueues; i++) {
5818 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5819 wm_free_tx_buffer(sc, txq);
5820 wm_free_tx_descs(sc, txq);
5821 if (txq->txq_lock)
5822 mutex_obj_free(txq->txq_lock);
5823 }
5824
5825 kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
5826 }
5827
5828 static void
5829 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5830 {
5831
5832 KASSERT(mutex_owned(txq->txq_lock));
5833
5834 /* Initialize the transmit descriptor ring. */
5835 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
5836 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5837 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5838 txq->txq_free = WM_NTXDESC(txq);
5839 txq->txq_next = 0;
5840 }
5841
5842 static void
5843 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
5844 struct wm_txqueue *txq)
5845 {
5846
5847 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5848 device_xname(sc->sc_dev), __func__));
5849 KASSERT(mutex_owned(txq->txq_lock));
5850
5851 if (sc->sc_type < WM_T_82543) {
5852 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
5853 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
5854 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
5855 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
5856 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
5857 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
5858 } else {
5859 int qid = wmq->wmq_id;
5860
5861 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
5862 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
5863 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
5864 CSR_WRITE(sc, WMREG_TDH(qid), 0);
5865
5866 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5867 /*
5868 * Don't write TDT before TCTL.EN is set.
5869			 * See the documentation.
5870 */
5871 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
5872 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
5873 | TXDCTL_WTHRESH(0));
5874 else {
5875 /* ITR / 4 */
5876 CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
5877 if (sc->sc_type >= WM_T_82540) {
5878 /* should be same */
5879 CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
5880 }
5881
5882 CSR_WRITE(sc, WMREG_TDT(qid), 0);
5883 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
5884 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
5885 }
5886 }
5887 }
5888
5889 static void
5890 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5891 {
5892 int i;
5893
5894 KASSERT(mutex_owned(txq->txq_lock));
5895
5896 /* Initialize the transmit job descriptors. */
5897 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
5898 txq->txq_soft[i].txs_mbuf = NULL;
5899 txq->txq_sfree = WM_TXQUEUELEN(txq);
5900 txq->txq_snext = 0;
5901 txq->txq_sdirty = 0;
5902 }
5903
5904 static void
5905 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
5906 struct wm_txqueue *txq)
5907 {
5908
5909 KASSERT(mutex_owned(txq->txq_lock));
5910
5911 /*
5912 * Set up some register offsets that are different between
5913 * the i82542 and the i82543 and later chips.
5914 */
5915 if (sc->sc_type < WM_T_82543)
5916 txq->txq_tdt_reg = WMREG_OLD_TDT;
5917 else
5918 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
5919
5920 wm_init_tx_descs(sc, txq);
5921 wm_init_tx_regs(sc, wmq, txq);
5922 wm_init_tx_buffer(sc, txq);
5923 }
5924
5925 static void
5926 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
5927 struct wm_rxqueue *rxq)
5928 {
5929
5930 KASSERT(mutex_owned(rxq->rxq_lock));
5931
5932 /*
5933 * Initialize the receive descriptor and receive job
5934 * descriptor rings.
5935 */
5936 if (sc->sc_type < WM_T_82543) {
5937 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
5938 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
5939 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
5940 sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
5941 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
5942 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
5943 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
5944
5945 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
5946 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
5947 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
5948 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
5949 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
5950 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
5951 } else {
5952 int qid = wmq->wmq_id;
5953
5954 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
5955 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
5956 CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
5957
5958 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5959 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
5960				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
5961 CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
5962 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
5963 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
5964 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
5965 | RXDCTL_WTHRESH(1));
5966 CSR_WRITE(sc, WMREG_RDH(qid), 0);
5967 CSR_WRITE(sc, WMREG_RDT(qid), 0);
5968 } else {
5969 CSR_WRITE(sc, WMREG_RDH(qid), 0);
5970 CSR_WRITE(sc, WMREG_RDT(qid), 0);
5971 /* ITR / 4 */
5972 CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
5973 /* MUST be same */
5974 CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
5975 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
5976 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
5977 }
5978 }
5979 }
5980
5981 static int
5982 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5983 {
5984 struct wm_rxsoft *rxs;
5985 int error, i;
5986
5987 KASSERT(mutex_owned(rxq->rxq_lock));
5988
5989 for (i = 0; i < WM_NRXDESC; i++) {
5990 rxs = &rxq->rxq_soft[i];
5991 if (rxs->rxs_mbuf == NULL) {
5992 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
5993 log(LOG_ERR, "%s: unable to allocate or map "
5994 "rx buffer %d, error = %d\n",
5995 device_xname(sc->sc_dev), i, error);
5996 /*
5997 * XXX Should attempt to run with fewer receive
5998 * XXX buffers instead of just failing.
5999 */
6000 wm_rxdrain(rxq);
6001 return ENOMEM;
6002 }
6003 } else {
6004 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
6005 wm_init_rxdesc(rxq, i);
6006 /*
6007			 * For 82575 and newer devices, the Rx descriptors
6008			 * must be initialized after RCTL.EN is set in
6009			 * wm_set_filter().
6010 */
6011 }
6012 }
6013 rxq->rxq_ptr = 0;
6014 rxq->rxq_discard = 0;
6015 WM_RXCHAIN_RESET(rxq);
6016
6017 return 0;
6018 }
6019
6020 static int
6021 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
6022 struct wm_rxqueue *rxq)
6023 {
6024
6025 KASSERT(mutex_owned(rxq->rxq_lock));
6026
6027 /*
6028 * Set up some register offsets that are different between
6029 * the i82542 and the i82543 and later chips.
6030 */
6031 if (sc->sc_type < WM_T_82543)
6032 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
6033 else
6034 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
6035
6036 wm_init_rx_regs(sc, wmq, rxq);
6037 return wm_init_rx_buffer(sc, rxq);
6038 }
6039
6040 /*
6041	 * wm_init_txrx_queues:
6042	 *	Initialize {tx,rx} descriptors and {tx,rx} buffers
6043 */
6044 static int
6045 wm_init_txrx_queues(struct wm_softc *sc)
6046 {
6047 int i, error = 0;
6048
6049 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6050 device_xname(sc->sc_dev), __func__));
6051
6052 for (i = 0; i < sc->sc_nqueues; i++) {
6053 struct wm_queue *wmq = &sc->sc_queue[i];
6054 struct wm_txqueue *txq = &wmq->wmq_txq;
6055 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6056
6057 mutex_enter(txq->txq_lock);
6058 wm_init_tx_queue(sc, wmq, txq);
6059 mutex_exit(txq->txq_lock);
6060
6061 mutex_enter(rxq->rxq_lock);
6062 error = wm_init_rx_queue(sc, wmq, rxq);
6063 mutex_exit(rxq->rxq_lock);
6064 if (error)
6065 break;
6066 }
6067
6068 return error;
6069 }
6070
6071 /*
6072 * wm_tx_offload:
6073 *
6074 * Set up TCP/IP checksumming parameters for the
6075 * specified packet.
6076 */
6077 static int
6078 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
6079 uint8_t *fieldsp)
6080 {
6081 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6082 struct mbuf *m0 = txs->txs_mbuf;
6083 struct livengood_tcpip_ctxdesc *t;
6084 uint32_t ipcs, tucs, cmd, cmdlen, seg;
6085 uint32_t ipcse;
6086 struct ether_header *eh;
6087 int offset, iphl;
6088 uint8_t fields;
6089
6090 /*
6091 * XXX It would be nice if the mbuf pkthdr had offset
6092 * fields for the protocol headers.
6093 */
6094
6095 eh = mtod(m0, struct ether_header *);
6096 switch (htons(eh->ether_type)) {
6097 case ETHERTYPE_IP:
6098 case ETHERTYPE_IPV6:
6099 offset = ETHER_HDR_LEN;
6100 break;
6101
6102 case ETHERTYPE_VLAN:
6103 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6104 break;
6105
6106 default:
6107 /*
6108 * Don't support this protocol or encapsulation.
6109 */
6110 *fieldsp = 0;
6111 *cmdp = 0;
6112 return 0;
6113 }
6114
6115 if ((m0->m_pkthdr.csum_flags &
6116 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
6117 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6118 } else {
6119 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6120 }
6121 ipcse = offset + iphl - 1;
6122
6123 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
6124 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
6125 seg = 0;
6126 fields = 0;
6127
6128 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6129 int hlen = offset + iphl;
6130 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6131
6132 if (__predict_false(m0->m_len <
6133 (hlen + sizeof(struct tcphdr)))) {
6134 /*
6135 * TCP/IP headers are not in the first mbuf; we need
6136 * to do this the slow and painful way. Let's just
6137 * hope this doesn't happen very often.
6138 */
6139 struct tcphdr th;
6140
6141 WM_Q_EVCNT_INCR(txq, txtsopain);
6142
6143 m_copydata(m0, hlen, sizeof(th), &th);
6144 if (v4) {
6145 struct ip ip;
6146
6147 m_copydata(m0, offset, sizeof(ip), &ip);
6148 ip.ip_len = 0;
6149 m_copyback(m0,
6150 offset + offsetof(struct ip, ip_len),
6151 sizeof(ip.ip_len), &ip.ip_len);
6152 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6153 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6154 } else {
6155 struct ip6_hdr ip6;
6156
6157 m_copydata(m0, offset, sizeof(ip6), &ip6);
6158 ip6.ip6_plen = 0;
6159 m_copyback(m0,
6160 offset + offsetof(struct ip6_hdr, ip6_plen),
6161 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6162 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6163 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6164 }
6165 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6166 sizeof(th.th_sum), &th.th_sum);
6167
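			/* th_off counts 32-bit words; << 2 converts to bytes. */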
6168 hlen += th.th_off << 2;
6169 } else {
6170 /*
6171 * TCP/IP headers are in the first mbuf; we can do
6172 * this the easy way.
6173 */
6174 struct tcphdr *th;
6175
6176 if (v4) {
6177 struct ip *ip =
6178 (void *)(mtod(m0, char *) + offset);
6179 th = (void *)(mtod(m0, char *) + hlen);
6180
6181 ip->ip_len = 0;
6182 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6183 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6184 } else {
6185 struct ip6_hdr *ip6 =
6186 (void *)(mtod(m0, char *) + offset);
6187 th = (void *)(mtod(m0, char *) + hlen);
6188
6189 ip6->ip6_plen = 0;
6190 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6191 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6192 }
6193 hlen += th->th_off << 2;
6194 }
6195
6196 if (v4) {
6197 WM_Q_EVCNT_INCR(txq, txtso);
6198 cmdlen |= WTX_TCPIP_CMD_IP;
6199 } else {
6200 WM_Q_EVCNT_INCR(txq, txtso6);
6201 ipcse = 0;
6202 }
6203 cmd |= WTX_TCPIP_CMD_TSE;
6204 cmdlen |= WTX_TCPIP_CMD_TSE |
6205 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
6206 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
6207 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
6208 }
6209
6210 /*
6211 * NOTE: Even if we're not using the IP or TCP/UDP checksum
6212 * offload feature, if we load the context descriptor, we
6213 * MUST provide valid values for IPCSS and TUCSS fields.
6214 */
6215
6216 ipcs = WTX_TCPIP_IPCSS(offset) |
6217 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
6218 WTX_TCPIP_IPCSE(ipcse);
6219 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
6220 WM_Q_EVCNT_INCR(txq, txipsum);
6221 fields |= WTX_IXSM;
6222 }
6223
6224 offset += iphl;
6225
6226 if (m0->m_pkthdr.csum_flags &
6227 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
6228 WM_Q_EVCNT_INCR(txq, txtusum);
6229 fields |= WTX_TXSM;
6230 tucs = WTX_TCPIP_TUCSS(offset) |
6231 WTX_TCPIP_TUCSO(offset +
6232 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
6233 WTX_TCPIP_TUCSE(0) /* rest of packet */;
6234 } else if ((m0->m_pkthdr.csum_flags &
6235 (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
6236 WM_Q_EVCNT_INCR(txq, txtusum6);
6237 fields |= WTX_TXSM;
6238 tucs = WTX_TCPIP_TUCSS(offset) |
6239 WTX_TCPIP_TUCSO(offset +
6240 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
6241 WTX_TCPIP_TUCSE(0) /* rest of packet */;
6242 } else {
6243 /* Just initialize it to a valid TCP context. */
6244 tucs = WTX_TCPIP_TUCSS(offset) |
6245 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
6246 WTX_TCPIP_TUCSE(0) /* rest of packet */;
6247 }
6248
6249 /* Fill in the context descriptor. */
6250 t = (struct livengood_tcpip_ctxdesc *)
6251 &txq->txq_descs[txq->txq_next];
6252 t->tcpip_ipcs = htole32(ipcs);
6253 t->tcpip_tucs = htole32(tucs);
6254 t->tcpip_cmdlen = htole32(cmdlen);
6255 t->tcpip_seg = htole32(seg);
6256 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6257
6258 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6259 txs->txs_ndesc++;
6260
6261 *cmdp = cmd;
6262 *fieldsp = fields;
6263
6264 return 0;
6265 }
6266
6267 /*
6268 * wm_start: [ifnet interface function]
6269 *
6270 * Start packet transmission on the interface.
6271 */
6272 static void
6273 wm_start(struct ifnet *ifp)
6274 {
6275 struct wm_softc *sc = ifp->if_softc;
6276 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6277
6278 KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
6279
6280 mutex_enter(txq->txq_lock);
6281 if (!txq->txq_stopping)
6282 wm_start_locked(ifp);
6283 mutex_exit(txq->txq_lock);
6284 }
6285
6286 static void
6287 wm_start_locked(struct ifnet *ifp)
6288 {
6289 struct wm_softc *sc = ifp->if_softc;
6290 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6291 struct mbuf *m0;
6292 struct m_tag *mtag;
6293 struct wm_txsoft *txs;
6294 bus_dmamap_t dmamap;
6295 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
6296 bus_addr_t curaddr;
6297 bus_size_t seglen, curlen;
6298 uint32_t cksumcmd;
6299 uint8_t cksumfields;
6300
6301 KASSERT(mutex_owned(txq->txq_lock));
6302
6303 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6304 return;
6305
6306 /* Remember the previous number of free descriptors. */
6307 ofree = txq->txq_free;
6308
6309 /*
6310 * Loop through the send queue, setting up transmit descriptors
6311 * until we drain the queue, or use up all available transmit
6312 * descriptors.
6313 */
6314 for (;;) {
6315 m0 = NULL;
6316
6317 /* Get a work queue entry. */
6318 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6319 wm_txeof(sc, txq);
6320 if (txq->txq_sfree == 0) {
6321 DPRINTF(WM_DEBUG_TX,
6322 ("%s: TX: no free job descriptors\n",
6323 device_xname(sc->sc_dev)));
6324 WM_Q_EVCNT_INCR(txq, txsstall);
6325 break;
6326 }
6327 }
6328
6329 /* Grab a packet off the queue. */
6330 IFQ_DEQUEUE(&ifp->if_snd, m0);
6331 if (m0 == NULL)
6332 break;
6333
6334 DPRINTF(WM_DEBUG_TX,
6335 ("%s: TX: have packet to transmit: %p\n",
6336 device_xname(sc->sc_dev), m0));
6337
6338 txs = &txq->txq_soft[txq->txq_snext];
6339 dmamap = txs->txs_dmamap;
6340
6341 use_tso = (m0->m_pkthdr.csum_flags &
6342 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
6343
6344 /*
6345 * So says the Linux driver:
6346 * The controller does a simple calculation to make sure
6347 * there is enough room in the FIFO before initiating the
6348 * DMA for each buffer. The calc is:
6349 * 4 = ceil(buffer len / MSS)
6350 * To make sure we don't overrun the FIFO, adjust the max
6351 * buffer len if the MSS drops.
6352 */
6353 dmamap->dm_maxsegsz =
6354 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
6355 ? m0->m_pkthdr.segsz << 2
6356 : WTX_MAX_LEN;
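		/*
		 * For example, an MSS of 1448 yields a per-buffer cap of
		 * 1448 << 2 = 5792 bytes, so each DMA segment covers at
		 * most four MSS-sized chunks of FIFO space.
		 */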
6357
6358 /*
6359 * Load the DMA map. If this fails, the packet either
6360 * didn't fit in the allotted number of segments, or we
6361 * were short on resources. For the too-many-segments
6362 * case, we simply report an error and drop the packet,
6363 * since we can't sanely copy a jumbo packet to a single
6364 * buffer.
6365 */
6366 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6367 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6368 if (error) {
6369 if (error == EFBIG) {
6370 WM_Q_EVCNT_INCR(txq, txdrop);
6371 log(LOG_ERR, "%s: Tx packet consumes too many "
6372 "DMA segments, dropping...\n",
6373 device_xname(sc->sc_dev));
6374 wm_dump_mbuf_chain(sc, m0);
6375 m_freem(m0);
6376 continue;
6377 }
6378 /* Short on resources, just stop for now. */
6379 DPRINTF(WM_DEBUG_TX,
6380 ("%s: TX: dmamap load failed: %d\n",
6381 device_xname(sc->sc_dev), error));
6382 break;
6383 }
6384
6385 segs_needed = dmamap->dm_nsegs;
6386 if (use_tso) {
6387 /* For sentinel descriptor; see below. */
6388 segs_needed++;
6389 }
6390
6391 /*
6392 * Ensure we have enough descriptors free to describe
6393 * the packet. Note, we always reserve one descriptor
6394 * at the end of the ring due to the semantics of the
6395 * TDT register, plus one more in the event we need
6396 * to load offload context.
6397 */
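		/* Equivalently: proceed only when txq_free >= segs_needed + 2. */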
6398 if (segs_needed > txq->txq_free - 2) {
6399 /*
6400 * Not enough free descriptors to transmit this
6401 * packet. We haven't committed anything yet,
6402 * so just unload the DMA map, put the packet
6403			 * back on the queue, and punt.  Notify the upper
6404 * layer that there are no more slots left.
6405 */
6406 DPRINTF(WM_DEBUG_TX,
6407 ("%s: TX: need %d (%d) descriptors, have %d\n",
6408 device_xname(sc->sc_dev), dmamap->dm_nsegs,
6409 segs_needed, txq->txq_free - 1));
6410 ifp->if_flags |= IFF_OACTIVE;
6411 bus_dmamap_unload(sc->sc_dmat, dmamap);
6412 WM_Q_EVCNT_INCR(txq, txdstall);
6413 break;
6414 }
6415
6416 /*
6417 * Check for 82547 Tx FIFO bug. We need to do this
6418 * once we know we can transmit the packet, since we
6419 * do some internal FIFO space accounting here.
6420 */
6421 if (sc->sc_type == WM_T_82547 &&
6422 wm_82547_txfifo_bugchk(sc, m0)) {
6423 DPRINTF(WM_DEBUG_TX,
6424 ("%s: TX: 82547 Tx FIFO bug detected\n",
6425 device_xname(sc->sc_dev)));
6426 ifp->if_flags |= IFF_OACTIVE;
6427 bus_dmamap_unload(sc->sc_dmat, dmamap);
6428 WM_Q_EVCNT_INCR(txq, txfifo_stall);
6429 break;
6430 }
6431
6432 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6433
6434 DPRINTF(WM_DEBUG_TX,
6435 ("%s: TX: packet has %d (%d) DMA segments\n",
6436 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6437
6438 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
6439
6440 /*
6441 * Store a pointer to the packet so that we can free it
6442 * later.
6443 *
6444 * Initially, we consider the number of descriptors the
6445 * packet uses the number of DMA segments. This may be
6446 * incremented by 1 if we do checksum offload (a descriptor
6447 * is used to set the checksum context).
6448 */
6449 txs->txs_mbuf = m0;
6450 txs->txs_firstdesc = txq->txq_next;
6451 txs->txs_ndesc = segs_needed;
6452
6453 /* Set up offload parameters for this packet. */
6454 if (m0->m_pkthdr.csum_flags &
6455 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6456 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6457 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6458 if (wm_tx_offload(sc, txs, &cksumcmd,
6459 &cksumfields) != 0) {
6460 /* Error message already displayed. */
6461 bus_dmamap_unload(sc->sc_dmat, dmamap);
6462 continue;
6463 }
6464 } else {
6465 cksumcmd = 0;
6466 cksumfields = 0;
6467 }
6468
6469 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
6470
6471 /* Sync the DMA map. */
6472 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6473 BUS_DMASYNC_PREWRITE);
6474
6475 /* Initialize the transmit descriptor. */
6476 for (nexttx = txq->txq_next, seg = 0;
6477 seg < dmamap->dm_nsegs; seg++) {
6478 for (seglen = dmamap->dm_segs[seg].ds_len,
6479 curaddr = dmamap->dm_segs[seg].ds_addr;
6480 seglen != 0;
6481 curaddr += curlen, seglen -= curlen,
6482 nexttx = WM_NEXTTX(txq, nexttx)) {
6483 curlen = seglen;
6484
6485 /*
6486 * So says the Linux driver:
6487 * Work around for premature descriptor
6488 * write-backs in TSO mode. Append a
6489 * 4-byte sentinel descriptor.
6490 */
6491 if (use_tso && seg == dmamap->dm_nsegs - 1 &&
6492 curlen > 8)
6493 curlen -= 4;
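				/*
				 * Trimming 4 bytes here leaves seglen nonzero
				 * after this pass, so the loop emits one more
				 * 4-byte descriptor: the TSO sentinel counted
				 * in segs_needed above.
				 */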
6494
6495 wm_set_dma_addr(
6496 &txq->txq_descs[nexttx].wtx_addr, curaddr);
6497 txq->txq_descs[nexttx].wtx_cmdlen
6498 = htole32(cksumcmd | curlen);
6499 txq->txq_descs[nexttx].wtx_fields.wtxu_status
6500 = 0;
6501 txq->txq_descs[nexttx].wtx_fields.wtxu_options
6502 = cksumfields;
6503				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6504 lasttx = nexttx;
6505
6506 DPRINTF(WM_DEBUG_TX,
6507 ("%s: TX: desc %d: low %#" PRIx64 ", "
6508 "len %#04zx\n",
6509 device_xname(sc->sc_dev), nexttx,
6510 (uint64_t)curaddr, curlen));
6511 }
6512 }
6513
6514 KASSERT(lasttx != -1);
6515
6516 /*
6517 * Set up the command byte on the last descriptor of
6518 * the packet. If we're in the interrupt delay window,
6519 * delay the interrupt.
6520 */
6521 txq->txq_descs[lasttx].wtx_cmdlen |=
6522 htole32(WTX_CMD_EOP | WTX_CMD_RS);
6523
6524 /*
6525 * If VLANs are enabled and the packet has a VLAN tag, set
6526 * up the descriptor to encapsulate the packet for us.
6527 *
6528 * This is only valid on the last descriptor of the packet.
6529 */
6530 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6531 txq->txq_descs[lasttx].wtx_cmdlen |=
6532 htole32(WTX_CMD_VLE);
6533 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
6534 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6535 }
6536
6537 txs->txs_lastdesc = lasttx;
6538
6539 DPRINTF(WM_DEBUG_TX,
6540 ("%s: TX: desc %d: cmdlen 0x%08x\n",
6541 device_xname(sc->sc_dev),
6542 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6543
6544 /* Sync the descriptors we're using. */
6545 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6546 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6547
6548 /* Give the packet to the chip. */
6549 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6550
6551 DPRINTF(WM_DEBUG_TX,
6552 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6553
6554 DPRINTF(WM_DEBUG_TX,
6555 ("%s: TX: finished transmitting packet, job %d\n",
6556 device_xname(sc->sc_dev), txq->txq_snext));
6557
6558 /* Advance the tx pointer. */
6559 txq->txq_free -= txs->txs_ndesc;
6560 txq->txq_next = nexttx;
6561
6562 txq->txq_sfree--;
6563 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6564
6565 /* Pass the packet to any BPF listeners. */
6566 bpf_mtap(ifp, m0);
6567 }
6568
6569 if (m0 != NULL) {
6570 ifp->if_flags |= IFF_OACTIVE;
6571 WM_Q_EVCNT_INCR(txq, txdrop);
6572 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
6573 __func__));
6574 m_freem(m0);
6575 }
6576
6577 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6578 /* No more slots; notify upper layer. */
6579 ifp->if_flags |= IFF_OACTIVE;
6580 }
6581
6582 if (txq->txq_free != ofree) {
6583 /* Set a watchdog timer in case the chip flakes out. */
6584 ifp->if_timer = 5;
6585 }
6586 }
6587
6588 /*
6589 * wm_nq_tx_offload:
6590 *
6591 * Set up TCP/IP checksumming parameters for the
6592 * specified packet, for NEWQUEUE devices
6593 */
6594 static int
6595 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
6596 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
6597 {
6598 struct mbuf *m0 = txs->txs_mbuf;
6599 struct m_tag *mtag;
6600 uint32_t vl_len, mssidx, cmdc;
6601 struct ether_header *eh;
6602 int offset, iphl;
6603
6604 /*
6605 * XXX It would be nice if the mbuf pkthdr had offset
6606 * fields for the protocol headers.
6607 */
6608 *cmdlenp = 0;
6609 *fieldsp = 0;
6610
6611 eh = mtod(m0, struct ether_header *);
6612 switch (htons(eh->ether_type)) {
6613 case ETHERTYPE_IP:
6614 case ETHERTYPE_IPV6:
6615 offset = ETHER_HDR_LEN;
6616 break;
6617
6618 case ETHERTYPE_VLAN:
6619 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6620 break;
6621
6622 default:
6623 /* Don't support this protocol or encapsulation. */
6624 *do_csum = false;
6625 return 0;
6626 }
6627 *do_csum = true;
6628 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
6629 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
6630
6631 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
6632 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
6633
6634 if ((m0->m_pkthdr.csum_flags &
6635 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
6636 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6637 } else {
6638 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6639 }
6640 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
6641 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
6642
6643 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6644 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
6645 << NQTXC_VLLEN_VLAN_SHIFT);
6646 *cmdlenp |= NQTX_CMD_VLE;
6647 }
6648
6649 mssidx = 0;
6650
6651 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6652 int hlen = offset + iphl;
6653 int tcp_hlen;
6654 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6655
6656 if (__predict_false(m0->m_len <
6657 (hlen + sizeof(struct tcphdr)))) {
6658 /*
6659 * TCP/IP headers are not in the first mbuf; we need
6660 * to do this the slow and painful way. Let's just
6661 * hope this doesn't happen very often.
6662 */
6663 struct tcphdr th;
6664
6665 WM_Q_EVCNT_INCR(txq, txtsopain);
6666
6667 m_copydata(m0, hlen, sizeof(th), &th);
6668 if (v4) {
6669 struct ip ip;
6670
6671 m_copydata(m0, offset, sizeof(ip), &ip);
6672 ip.ip_len = 0;
6673 m_copyback(m0,
6674 offset + offsetof(struct ip, ip_len),
6675 sizeof(ip.ip_len), &ip.ip_len);
6676 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6677 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6678 } else {
6679 struct ip6_hdr ip6;
6680
6681 m_copydata(m0, offset, sizeof(ip6), &ip6);
6682 ip6.ip6_plen = 0;
6683 m_copyback(m0,
6684 offset + offsetof(struct ip6_hdr, ip6_plen),
6685 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6686 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6687 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6688 }
6689 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6690 sizeof(th.th_sum), &th.th_sum);
6691
6692 tcp_hlen = th.th_off << 2;
6693 } else {
6694 /*
6695 * TCP/IP headers are in the first mbuf; we can do
6696 * this the easy way.
6697 */
6698 struct tcphdr *th;
6699
6700 if (v4) {
6701 struct ip *ip =
6702 (void *)(mtod(m0, char *) + offset);
6703 th = (void *)(mtod(m0, char *) + hlen);
6704
6705 ip->ip_len = 0;
6706 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6707 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6708 } else {
6709 struct ip6_hdr *ip6 =
6710 (void *)(mtod(m0, char *) + offset);
6711 th = (void *)(mtod(m0, char *) + hlen);
6712
6713 ip6->ip6_plen = 0;
6714 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6715 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6716 }
6717 tcp_hlen = th->th_off << 2;
6718 }
6719 hlen += tcp_hlen;
6720 *cmdlenp |= NQTX_CMD_TSE;
6721
6722 if (v4) {
6723 WM_Q_EVCNT_INCR(txq, txtso);
6724 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
6725 } else {
6726 WM_Q_EVCNT_INCR(txq, txtso6);
6727 *fieldsp |= NQTXD_FIELDS_TUXSM;
6728 }
6729 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
6730 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6731 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
6732 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
6733 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
6734 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
6735 } else {
6736 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
6737 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6738 }
6739
6740 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
6741 *fieldsp |= NQTXD_FIELDS_IXSM;
6742 cmdc |= NQTXC_CMD_IP4;
6743 }
6744
6745 if (m0->m_pkthdr.csum_flags &
6746 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6747 WM_Q_EVCNT_INCR(txq, txtusum);
6748 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6749 cmdc |= NQTXC_CMD_TCP;
6750 } else {
6751 cmdc |= NQTXC_CMD_UDP;
6752 }
6753 cmdc |= NQTXC_CMD_IP4;
6754 *fieldsp |= NQTXD_FIELDS_TUXSM;
6755 }
6756 if (m0->m_pkthdr.csum_flags &
6757 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6758 WM_Q_EVCNT_INCR(txq, txtusum6);
6759 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6760 cmdc |= NQTXC_CMD_TCP;
6761 } else {
6762 cmdc |= NQTXC_CMD_UDP;
6763 }
6764 cmdc |= NQTXC_CMD_IP6;
6765 *fieldsp |= NQTXD_FIELDS_TUXSM;
6766 }
6767
6768 /* Fill in the context descriptor. */
6769 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
6770 htole32(vl_len);
6771 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
6772 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
6773 htole32(cmdc);
6774 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
6775 htole32(mssidx);
6776 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6777 DPRINTF(WM_DEBUG_TX,
6778 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
6779 txq->txq_next, 0, vl_len));
6780 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
6781 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6782 txs->txs_ndesc++;
6783 return 0;
6784 }
6785
6786 /*
6787 * wm_nq_start: [ifnet interface function]
6788 *
6789 * Start packet transmission on the interface for NEWQUEUE devices
6790 */
6791 static void
6792 wm_nq_start(struct ifnet *ifp)
6793 {
6794 struct wm_softc *sc = ifp->if_softc;
6795 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6796
6797 KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
6798
6799 mutex_enter(txq->txq_lock);
6800 if (!txq->txq_stopping)
6801 wm_nq_start_locked(ifp);
6802 mutex_exit(txq->txq_lock);
6803 }
6804
6805 static void
6806 wm_nq_start_locked(struct ifnet *ifp)
6807 {
6808 struct wm_softc *sc = ifp->if_softc;
6809 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6810
6811 wm_nq_send_common_locked(ifp, txq, false);
6812 }
6813
6814 static inline int
6815 wm_nq_select_txqueue(struct ifnet *ifp, struct mbuf *m)
6816 {
6817 struct wm_softc *sc = ifp->if_softc;
6818 u_int cpuid = cpu_index(curcpu());
6819
6820 /*
6821	 * Currently, a simple distribution strategy.
6822	 * TODO:
6823	 *	distribute by flowid (RSS hash value).
6824 */
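	/*
	 * For example, with sc_nqueues == 4 and sc_affinity_offset == 0,
	 * a packet sent from the CPU with index 5 maps to txq 1.
	 */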
6825 return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
6826 }
6827
6828 static int
6829 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
6830 {
6831 int qid;
6832 struct wm_softc *sc = ifp->if_softc;
6833 struct wm_txqueue *txq;
6834
6835 qid = wm_nq_select_txqueue(ifp, m);
6836 txq = &sc->sc_queue[qid].wmq_txq;
6837
6838 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
6839 m_freem(m);
6840 WM_Q_EVCNT_INCR(txq, txdrop);
6841 return ENOBUFS;
6842 }
6843
6844 if (mutex_tryenter(txq->txq_lock)) {
6845 /* XXXX should be per TX queue */
6846 ifp->if_obytes += m->m_pkthdr.len;
6847 if (m->m_flags & M_MCAST)
6848 ifp->if_omcasts++;
6849
6850 if (!txq->txq_stopping)
6851 wm_nq_transmit_locked(ifp, txq);
6852 mutex_exit(txq->txq_lock);
6853 }
6854
6855 return 0;
6856 }
6857
6858 static void
6859 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
6860 {
6861
6862 wm_nq_send_common_locked(ifp, txq, true);
6863 }
6864
6865 static void
6866 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
6867 bool is_transmit)
6868 {
6869 struct wm_softc *sc = ifp->if_softc;
6870 struct mbuf *m0;
6871 struct m_tag *mtag;
6872 struct wm_txsoft *txs;
6873 bus_dmamap_t dmamap;
6874 int error, nexttx, lasttx = -1, seg, segs_needed;
6875 bool do_csum, sent;
6876
6877 KASSERT(mutex_owned(txq->txq_lock));
6878
6879 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6880 return;
6881 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
6882 return;
6883
6884 sent = false;
6885
6886 /*
6887 * Loop through the send queue, setting up transmit descriptors
6888 * until we drain the queue, or use up all available transmit
6889 * descriptors.
6890 */
6891 for (;;) {
6892 m0 = NULL;
6893
6894 /* Get a work queue entry. */
6895 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6896 wm_txeof(sc, txq);
6897 if (txq->txq_sfree == 0) {
6898 DPRINTF(WM_DEBUG_TX,
6899 ("%s: TX: no free job descriptors\n",
6900 device_xname(sc->sc_dev)));
6901 WM_Q_EVCNT_INCR(txq, txsstall);
6902 break;
6903 }
6904 }
6905
6906 /* Grab a packet off the queue. */
6907 if (is_transmit)
6908 m0 = pcq_get(txq->txq_interq);
6909 else
6910 IFQ_DEQUEUE(&ifp->if_snd, m0);
6911 if (m0 == NULL)
6912 break;
6913
6914 DPRINTF(WM_DEBUG_TX,
6915 ("%s: TX: have packet to transmit: %p\n",
6916 device_xname(sc->sc_dev), m0));
6917
6918 txs = &txq->txq_soft[txq->txq_snext];
6919 dmamap = txs->txs_dmamap;
6920
6921 /*
6922 * Load the DMA map. If this fails, the packet either
6923 * didn't fit in the allotted number of segments, or we
6924 * were short on resources. For the too-many-segments
6925 * case, we simply report an error and drop the packet,
6926 * since we can't sanely copy a jumbo packet to a single
6927 * buffer.
6928 */
6929 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6930 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6931 if (error) {
6932 if (error == EFBIG) {
6933 WM_Q_EVCNT_INCR(txq, txdrop);
6934 log(LOG_ERR, "%s: Tx packet consumes too many "
6935 "DMA segments, dropping...\n",
6936 device_xname(sc->sc_dev));
6937 wm_dump_mbuf_chain(sc, m0);
6938 m_freem(m0);
6939 continue;
6940 }
6941 /* Short on resources, just stop for now. */
6942 DPRINTF(WM_DEBUG_TX,
6943 ("%s: TX: dmamap load failed: %d\n",
6944 device_xname(sc->sc_dev), error));
6945 break;
6946 }
6947
6948 segs_needed = dmamap->dm_nsegs;
6949
6950 /*
6951 * Ensure we have enough descriptors free to describe
6952 * the packet. Note, we always reserve one descriptor
6953 * at the end of the ring due to the semantics of the
6954 * TDT register, plus one more in the event we need
6955 * to load offload context.
6956 */
6957 if (segs_needed > txq->txq_free - 2) {
6958 /*
6959 * Not enough free descriptors to transmit this
6960 * packet. We haven't committed anything yet,
6961 * so just unload the DMA map, put the packet
6962			 * back on the queue, and punt.  Notify the upper
6963 * layer that there are no more slots left.
6964 */
6965 DPRINTF(WM_DEBUG_TX,
6966 ("%s: TX: need %d (%d) descriptors, have %d\n",
6967 device_xname(sc->sc_dev), dmamap->dm_nsegs,
6968 segs_needed, txq->txq_free - 1));
6969 txq->txq_flags |= WM_TXQ_NO_SPACE;
6970 bus_dmamap_unload(sc->sc_dmat, dmamap);
6971 WM_Q_EVCNT_INCR(txq, txdstall);
6972 break;
6973 }
6974
6975 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6976
6977 DPRINTF(WM_DEBUG_TX,
6978 ("%s: TX: packet has %d (%d) DMA segments\n",
6979 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6980
6981 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
6982
6983 /*
6984 * Store a pointer to the packet so that we can free it
6985 * later.
6986 *
6987 * Initially, we consider the number of descriptors the
6988 * packet uses the number of DMA segments. This may be
6989 * incremented by 1 if we do checksum offload (a descriptor
6990 * is used to set the checksum context).
6991 */
6992 txs->txs_mbuf = m0;
6993 txs->txs_firstdesc = txq->txq_next;
6994 txs->txs_ndesc = segs_needed;
6995
6996 /* Set up offload parameters for this packet. */
6997 uint32_t cmdlen, fields, dcmdlen;
6998 if (m0->m_pkthdr.csum_flags &
6999 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
7000 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7001 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
7002 if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
7003 &do_csum) != 0) {
7004 /* Error message already displayed. */
7005 bus_dmamap_unload(sc->sc_dmat, dmamap);
7006 continue;
7007 }
7008 } else {
7009 do_csum = false;
7010 cmdlen = 0;
7011 fields = 0;
7012 }
7013
7014 /* Sync the DMA map. */
7015 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
7016 BUS_DMASYNC_PREWRITE);
7017
7018 /* Initialize the first transmit descriptor. */
7019 nexttx = txq->txq_next;
7020 if (!do_csum) {
7021 /* setup a legacy descriptor */
7022 wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
7023 dmamap->dm_segs[0].ds_addr);
7024 txq->txq_descs[nexttx].wtx_cmdlen =
7025 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
7026 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
7027 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
7028 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
7029 NULL) {
7030 txq->txq_descs[nexttx].wtx_cmdlen |=
7031 htole32(WTX_CMD_VLE);
7032 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
7033 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
7034 } else {
7035				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
7036 }
7037 dcmdlen = 0;
7038 } else {
7039 /* setup an advanced data descriptor */
7040 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7041 htole64(dmamap->dm_segs[0].ds_addr);
7042 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
7043 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7044			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
7045 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
7046 htole32(fields);
7047 DPRINTF(WM_DEBUG_TX,
7048 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
7049 device_xname(sc->sc_dev), nexttx,
7050 (uint64_t)dmamap->dm_segs[0].ds_addr));
7051 DPRINTF(WM_DEBUG_TX,
7052 ("\t 0x%08x%08x\n", fields,
7053 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
7054 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
7055 }
7056
7057 lasttx = nexttx;
7058 nexttx = WM_NEXTTX(txq, nexttx);
7059 /*
7060		 * Fill in the next descriptors.  The legacy and advanced
7061		 * formats are the same here.
7062 */
7063 for (seg = 1; seg < dmamap->dm_nsegs;
7064 seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
7065 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7066 htole64(dmamap->dm_segs[seg].ds_addr);
7067 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7068 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
7069 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
7070 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
7071 lasttx = nexttx;
7072
7073 DPRINTF(WM_DEBUG_TX,
7074 ("%s: TX: desc %d: %#" PRIx64 ", "
7075 "len %#04zx\n",
7076 device_xname(sc->sc_dev), nexttx,
7077 (uint64_t)dmamap->dm_segs[seg].ds_addr,
7078 dmamap->dm_segs[seg].ds_len));
7079 }
7080
7081 KASSERT(lasttx != -1);
7082
7083 /*
7084 * Set up the command byte on the last descriptor of
7085 * the packet. If we're in the interrupt delay window,
7086 * delay the interrupt.
7087 */
7088 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
7089 (NQTX_CMD_EOP | NQTX_CMD_RS));
7090 txq->txq_descs[lasttx].wtx_cmdlen |=
7091 htole32(WTX_CMD_EOP | WTX_CMD_RS);
7092
7093 txs->txs_lastdesc = lasttx;
7094
7095 DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
7096 device_xname(sc->sc_dev),
7097 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
7098
7099 /* Sync the descriptors we're using. */
7100 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
7101 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7102
7103 /* Give the packet to the chip. */
7104 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
7105 sent = true;
7106
7107 DPRINTF(WM_DEBUG_TX,
7108 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
7109
7110 DPRINTF(WM_DEBUG_TX,
7111 ("%s: TX: finished transmitting packet, job %d\n",
7112 device_xname(sc->sc_dev), txq->txq_snext));
7113
7114 /* Advance the tx pointer. */
7115 txq->txq_free -= txs->txs_ndesc;
7116 txq->txq_next = nexttx;
7117
7118 txq->txq_sfree--;
7119 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
7120
7121 /* Pass the packet to any BPF listeners. */
7122 bpf_mtap(ifp, m0);
7123 }
7124
7125 if (m0 != NULL) {
7126 txq->txq_flags |= WM_TXQ_NO_SPACE;
7127 WM_Q_EVCNT_INCR(txq, txdrop);
7128 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
7129 __func__));
7130 m_freem(m0);
7131 }
7132
7133 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
7134 /* No more slots; notify upper layer. */
7135 txq->txq_flags |= WM_TXQ_NO_SPACE;
7136 }
7137
7138 if (sent) {
7139 /* Set a watchdog timer in case the chip flakes out. */
7140 ifp->if_timer = 5;
7141 }
7142 }
7143
7144 /* Interrupt */
7145
7146 /*
7147 * wm_txeof:
7148 *
7149 * Helper; handle transmit interrupts.
7150 */
7151 static int
7152 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
7153 {
7154 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7155 struct wm_txsoft *txs;
7156 bool processed = false;
7157 int count = 0;
7158 int i;
7159 uint8_t status;
7160
7161 KASSERT(mutex_owned(txq->txq_lock));
7162
7163 if (txq->txq_stopping)
7164 return 0;
7165
7166 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7167 txq->txq_flags &= ~WM_TXQ_NO_SPACE;
7168 else
7169 ifp->if_flags &= ~IFF_OACTIVE;
7170
7171 /*
7172 * Go through the Tx list and free mbufs for those
7173 * frames which have been transmitted.
7174 */
7175 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
7176 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
7177 txs = &txq->txq_soft[i];
7178
7179 DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
7180 device_xname(sc->sc_dev), i));
7181
7182 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
7183 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
7184
7185 status =
7186 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
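		/*
		 * WTX_ST_DD ("descriptor done") is set by the hardware once
		 * it has finished processing the descriptor.
		 */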
7187 if ((status & WTX_ST_DD) == 0) {
7188 wm_cdtxsync(txq, txs->txs_lastdesc, 1,
7189 BUS_DMASYNC_PREREAD);
7190 break;
7191 }
7192
7193 processed = true;
7194 count++;
7195 DPRINTF(WM_DEBUG_TX,
7196 ("%s: TX: job %d done: descs %d..%d\n",
7197 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
7198 txs->txs_lastdesc));
7199
7200 /*
7201 * XXX We should probably be using the statistics
7202 * XXX registers, but I don't know if they exist
7203 * XXX on chips before the i82544.
7204 */
7205
7206 #ifdef WM_EVENT_COUNTERS
7207 if (status & WTX_ST_TU)
7208 WM_Q_EVCNT_INCR(txq, tu);
7209 #endif /* WM_EVENT_COUNTERS */
7210
7211 if (status & (WTX_ST_EC | WTX_ST_LC)) {
7212 ifp->if_oerrors++;
7213 if (status & WTX_ST_LC)
7214 log(LOG_WARNING, "%s: late collision\n",
7215 device_xname(sc->sc_dev));
7216 else if (status & WTX_ST_EC) {
7217 ifp->if_collisions += 16;
7218 log(LOG_WARNING, "%s: excessive collisions\n",
7219 device_xname(sc->sc_dev));
7220 }
7221 } else
7222 ifp->if_opackets++;
7223
7224 txq->txq_free += txs->txs_ndesc;
7225 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
7226 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
7227 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
7228 m_freem(txs->txs_mbuf);
7229 txs->txs_mbuf = NULL;
7230 }
7231
7232 /* Update the dirty transmit buffer pointer. */
7233 txq->txq_sdirty = i;
7234 DPRINTF(WM_DEBUG_TX,
7235 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
7236
7237 if (count != 0)
7238 rnd_add_uint32(&sc->rnd_source, count);
7239
7240 /*
7241 * If there are no more pending transmissions, cancel the watchdog
7242 * timer.
7243 */
7244 if (txq->txq_sfree == WM_TXQUEUELEN(txq))
7245 ifp->if_timer = 0;
7246
7247 return processed;
7248 }
7249
7250 /*
7251 * wm_rxeof:
7252 *
7253 * Helper; handle receive interrupts.
7254 */
7255 static void
7256 wm_rxeof(struct wm_rxqueue *rxq)
7257 {
7258 struct wm_softc *sc = rxq->rxq_sc;
7259 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7260 struct wm_rxsoft *rxs;
7261 struct mbuf *m;
7262 int i, len;
7263 int count = 0;
7264 uint8_t status, errors;
7265 uint16_t vlantag;
7266
7267 KASSERT(mutex_owned(rxq->rxq_lock));
7268
7269 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
7270 rxs = &rxq->rxq_soft[i];
7271
7272 DPRINTF(WM_DEBUG_RX,
7273 ("%s: RX: checking descriptor %d\n",
7274 device_xname(sc->sc_dev), i));
7275
7276 wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
7277
7278 status = rxq->rxq_descs[i].wrx_status;
7279 errors = rxq->rxq_descs[i].wrx_errors;
7280 len = le16toh(rxq->rxq_descs[i].wrx_len);
7281 vlantag = rxq->rxq_descs[i].wrx_special;
7282
7283 if ((status & WRX_ST_DD) == 0) {
7284 /* We have processed all of the receive descriptors. */
7285 wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
7286 break;
7287 }
7288
7289 count++;
7290 if (__predict_false(rxq->rxq_discard)) {
7291 DPRINTF(WM_DEBUG_RX,
7292 ("%s: RX: discarding contents of descriptor %d\n",
7293 device_xname(sc->sc_dev), i));
7294 wm_init_rxdesc(rxq, i);
7295 if (status & WRX_ST_EOP) {
7296 /* Reset our state. */
7297 DPRINTF(WM_DEBUG_RX,
7298 ("%s: RX: resetting rxdiscard -> 0\n",
7299 device_xname(sc->sc_dev)));
7300 rxq->rxq_discard = 0;
7301 }
7302 continue;
7303 }
7304
7305 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7306 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
7307
7308 m = rxs->rxs_mbuf;
7309
7310 /*
7311 * Add a new receive buffer to the ring, unless of
7312 * course the length is zero. Treat the latter as a
7313 * failed mapping.
7314 */
7315 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
7316 /*
7317 * Failed, throw away what we've done so
7318 * far, and discard the rest of the packet.
7319 */
7320 ifp->if_ierrors++;
7321 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7322 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
7323 wm_init_rxdesc(rxq, i);
7324 if ((status & WRX_ST_EOP) == 0)
7325 rxq->rxq_discard = 1;
7326 if (rxq->rxq_head != NULL)
7327 m_freem(rxq->rxq_head);
7328 WM_RXCHAIN_RESET(rxq);
7329 DPRINTF(WM_DEBUG_RX,
7330 ("%s: RX: Rx buffer allocation failed, "
7331 "dropping packet%s\n", device_xname(sc->sc_dev),
7332 rxq->rxq_discard ? " (discard)" : ""));
7333 continue;
7334 }
7335
7336 m->m_len = len;
7337 rxq->rxq_len += len;
7338 DPRINTF(WM_DEBUG_RX,
7339 ("%s: RX: buffer at %p len %d\n",
7340 device_xname(sc->sc_dev), m->m_data, len));
7341
7342 /* If this is not the end of the packet, keep looking. */
7343 if ((status & WRX_ST_EOP) == 0) {
7344 WM_RXCHAIN_LINK(rxq, m);
7345 DPRINTF(WM_DEBUG_RX,
7346 ("%s: RX: not yet EOP, rxlen -> %d\n",
7347 device_xname(sc->sc_dev), rxq->rxq_len));
7348 continue;
7349 }
7350
7351 /*
7352		 * Okay, we have the entire packet now.  The chip is
7353		 * configured to include the FCS except on I350, I354 and
7354		 * I21[01] (not all chips can be configured to strip it),
7355		 * so we need to trim it here.  We may need to adjust the
7356		 * length of the previous mbuf in the chain if the current
7357		 * mbuf is too short.  Due to an erratum, the RCTL_SECRC
7358		 * bit in the RCTL register is always set on I350, so the
7359		 * FCS is already stripped there and we must not trim it.
7360 */
7361 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
7362 && (sc->sc_type != WM_T_I210)
7363 && (sc->sc_type != WM_T_I211)) {
7364 if (m->m_len < ETHER_CRC_LEN) {
7365 rxq->rxq_tail->m_len
7366 -= (ETHER_CRC_LEN - m->m_len);
7367 m->m_len = 0;
7368 } else
7369 m->m_len -= ETHER_CRC_LEN;
7370 len = rxq->rxq_len - ETHER_CRC_LEN;
7371 } else
7372 len = rxq->rxq_len;
7373
7374 WM_RXCHAIN_LINK(rxq, m);
7375
7376 *rxq->rxq_tailp = NULL;
7377 m = rxq->rxq_head;
7378
7379 WM_RXCHAIN_RESET(rxq);
7380
7381 DPRINTF(WM_DEBUG_RX,
7382 ("%s: RX: have entire packet, len -> %d\n",
7383 device_xname(sc->sc_dev), len));
7384
7385 /* If an error occurred, update stats and drop the packet. */
7386 if (errors &
7387 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
7388 if (errors & WRX_ER_SE)
7389 log(LOG_WARNING, "%s: symbol error\n",
7390 device_xname(sc->sc_dev));
7391 else if (errors & WRX_ER_SEQ)
7392 log(LOG_WARNING, "%s: receive sequence error\n",
7393 device_xname(sc->sc_dev));
7394 else if (errors & WRX_ER_CE)
7395 log(LOG_WARNING, "%s: CRC error\n",
7396 device_xname(sc->sc_dev));
7397 m_freem(m);
7398 continue;
7399 }
7400
7401 /* No errors. Receive the packet. */
7402 m_set_rcvif(m, ifp);
7403 m->m_pkthdr.len = len;
7404
7405 /*
7406 * If VLANs are enabled, VLAN packets have been unwrapped
7407 * for us. Associate the tag with the packet.
7408 */
		/* XXX should check for I350 and I354 */
7410 if ((status & WRX_ST_VP) != 0) {
7411 VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
7412 }
7413
7414 /* Set up checksum info for this packet. */
7415 if ((status & WRX_ST_IXSM) == 0) {
7416 if (status & WRX_ST_IPCS) {
7417 WM_Q_EVCNT_INCR(rxq, rxipsum);
7418 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
7419 if (errors & WRX_ER_IPE)
7420 m->m_pkthdr.csum_flags |=
7421 M_CSUM_IPv4_BAD;
7422 }
7423 if (status & WRX_ST_TCPCS) {
7424 /*
7425 * Note: we don't know if this was TCP or UDP,
7426 * so we just set both bits, and expect the
7427 * upper layers to deal.
7428 */
7429 WM_Q_EVCNT_INCR(rxq, rxtusum);
7430 m->m_pkthdr.csum_flags |=
7431 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7432 M_CSUM_TCPv6 | M_CSUM_UDPv6;
7433 if (errors & WRX_ER_TCPE)
7434 m->m_pkthdr.csum_flags |=
7435 M_CSUM_TCP_UDP_BAD;
7436 }
7437 }
7438
7439 ifp->if_ipackets++;
7440
7441 mutex_exit(rxq->rxq_lock);
7442
7443 /* Pass this up to any BPF listeners. */
7444 bpf_mtap(ifp, m);
7445
7446 /* Pass it on. */
7447 if_percpuq_enqueue(sc->sc_ipq, m);
7448
7449 mutex_enter(rxq->rxq_lock);
7450
7451 if (rxq->rxq_stopping)
7452 break;
7453 }
7454
7455 /* Update the receive pointer. */
7456 rxq->rxq_ptr = i;
7457 if (count != 0)
7458 rnd_add_uint32(&sc->rnd_source, count);
7459
7460 DPRINTF(WM_DEBUG_RX,
7461 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
7462 }
7463
7464 /*
7465 * wm_linkintr_gmii:
7466 *
7467 * Helper; handle link interrupts for GMII.
7468 */
7469 static void
7470 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
7471 {
7472
7473 KASSERT(WM_CORE_LOCKED(sc));
7474
7475 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7476 __func__));
7477
7478 if (icr & ICR_LSC) {
7479 uint32_t status = CSR_READ(sc, WMREG_STATUS);
7480
7481 if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
7482 wm_gig_downshift_workaround_ich8lan(sc);
7483
7484 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
7485 device_xname(sc->sc_dev)));
7486 mii_pollstat(&sc->sc_mii);
7487 if (sc->sc_type == WM_T_82543) {
7488 int miistatus, active;
7489
7490 /*
7491 * With 82543, we need to force speed and
7492 * duplex on the MAC equal to what the PHY
7493 * speed and duplex configuration is.
7494 */
7495 miistatus = sc->sc_mii.mii_media_status;
7496
7497 if (miistatus & IFM_ACTIVE) {
7498 active = sc->sc_mii.mii_media_active;
7499 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7500 switch (IFM_SUBTYPE(active)) {
7501 case IFM_10_T:
7502 sc->sc_ctrl |= CTRL_SPEED_10;
7503 break;
7504 case IFM_100_TX:
7505 sc->sc_ctrl |= CTRL_SPEED_100;
7506 break;
7507 case IFM_1000_T:
7508 sc->sc_ctrl |= CTRL_SPEED_1000;
7509 break;
7510 default:
				/*
				 * Fiber?
				 * Should not get here.
				 */
7515 printf("unknown media (%x)\n", active);
7516 break;
7517 }
7518 if (active & IFM_FDX)
7519 sc->sc_ctrl |= CTRL_FD;
7520 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7521 }
7522 } else if ((sc->sc_type == WM_T_ICH8)
7523 && (sc->sc_phytype == WMPHY_IGP_3)) {
7524 wm_kmrn_lock_loss_workaround_ich8lan(sc);
7525 } else if (sc->sc_type == WM_T_PCH) {
7526 wm_k1_gig_workaround_hv(sc,
7527 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
7528 }
7529
7530 if ((sc->sc_phytype == WMPHY_82578)
7531 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
7532 == IFM_1000_T)) {
7533
7534 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
7535 delay(200*1000); /* XXX too big */
7536
7537 /* Link stall fix for link up */
7538 wm_gmii_hv_writereg(sc->sc_dev, 1,
7539 HV_MUX_DATA_CTRL,
7540 HV_MUX_DATA_CTRL_GEN_TO_MAC
7541 | HV_MUX_DATA_CTRL_FORCE_SPEED);
7542 wm_gmii_hv_writereg(sc->sc_dev, 1,
7543 HV_MUX_DATA_CTRL,
7544 HV_MUX_DATA_CTRL_GEN_TO_MAC);
7545 }
7546 }
7547 } else if (icr & ICR_RXSEQ) {
7548 DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
7549 device_xname(sc->sc_dev)));
7550 }
7551 }
7552
7553 /*
7554 * wm_linkintr_tbi:
7555 *
7556 * Helper; handle link interrupts for TBI mode.
7557 */
7558 static void
7559 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
7560 {
7561 uint32_t status;
7562
7563 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7564 __func__));
7565
7566 status = CSR_READ(sc, WMREG_STATUS);
7567 if (icr & ICR_LSC) {
7568 if (status & STATUS_LU) {
7569 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
7570 device_xname(sc->sc_dev),
7571 (status & STATUS_FD) ? "FDX" : "HDX"));
			/*
			 * NOTE: the MAC updates TFCE and RFCE in CTRL
			 * automatically, so we should re-read CTRL into
			 * sc->sc_ctrl here.
			 */
7576
7577 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7578 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7579 sc->sc_fcrtl &= ~FCRTL_XONE;
7580 if (status & STATUS_FD)
7581 sc->sc_tctl |=
7582 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7583 else
7584 sc->sc_tctl |=
7585 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7586 if (sc->sc_ctrl & CTRL_TFCE)
7587 sc->sc_fcrtl |= FCRTL_XONE;
7588 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7589 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7590 WMREG_OLD_FCRTL : WMREG_FCRTL,
7591 sc->sc_fcrtl);
7592 sc->sc_tbi_linkup = 1;
7593 } else {
7594 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
7595 device_xname(sc->sc_dev)));
7596 sc->sc_tbi_linkup = 0;
7597 }
7598 /* Update LED */
7599 wm_tbi_serdes_set_linkled(sc);
7600 } else if (icr & ICR_RXSEQ) {
7601 DPRINTF(WM_DEBUG_LINK,
7602 ("%s: LINK: Receive sequence error\n",
7603 device_xname(sc->sc_dev)));
7604 }
7605 }
7606
7607 /*
7608 * wm_linkintr_serdes:
7609 *
 * Helper; handle link interrupts for SERDES mode.
7611 */
7612 static void
7613 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
7614 {
7615 struct mii_data *mii = &sc->sc_mii;
7616 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7617 uint32_t pcs_adv, pcs_lpab, reg;
7618
7619 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7620 __func__));
7621
7622 if (icr & ICR_LSC) {
7623 /* Check PCS */
7624 reg = CSR_READ(sc, WMREG_PCS_LSTS);
7625 if ((reg & PCS_LSTS_LINKOK) != 0) {
7626 mii->mii_media_status |= IFM_ACTIVE;
7627 sc->sc_tbi_linkup = 1;
7628 } else {
7629 mii->mii_media_status |= IFM_NONE;
7630 sc->sc_tbi_linkup = 0;
7631 wm_tbi_serdes_set_linkled(sc);
7632 return;
7633 }
7634 mii->mii_media_active |= IFM_1000_SX;
7635 if ((reg & PCS_LSTS_FDX) != 0)
7636 mii->mii_media_active |= IFM_FDX;
7637 else
7638 mii->mii_media_active |= IFM_HDX;
7639 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7640 /* Check flow */
7641 reg = CSR_READ(sc, WMREG_PCS_LSTS);
7642 if ((reg & PCS_LSTS_AN_COMP) == 0) {
7643 DPRINTF(WM_DEBUG_LINK,
7644 ("XXX LINKOK but not ACOMP\n"));
7645 return;
7646 }
7647 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
7648 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
7649 DPRINTF(WM_DEBUG_LINK,
7650 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
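			/*
			 * Resolve pause per IEEE 802.3 annex 28B, which
			 * is what the if-chain below implements:
			 *
			 *	local		partner
			 *	SYM  ASYM	SYM  ASYM	result
			 *	 1    x		 1    x		TX+RX pause
			 *	 0    1		 1    1		TX pause
			 *	 1    1		 0    1		RX pause
			 */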
7651 if ((pcs_adv & TXCW_SYM_PAUSE)
7652 && (pcs_lpab & TXCW_SYM_PAUSE)) {
7653 mii->mii_media_active |= IFM_FLOW
7654 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
7655 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
7656 && (pcs_adv & TXCW_ASYM_PAUSE)
7657 && (pcs_lpab & TXCW_SYM_PAUSE)
7658 && (pcs_lpab & TXCW_ASYM_PAUSE))
7659 mii->mii_media_active |= IFM_FLOW
7660 | IFM_ETH_TXPAUSE;
7661 else if ((pcs_adv & TXCW_SYM_PAUSE)
7662 && (pcs_adv & TXCW_ASYM_PAUSE)
7663 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
7664 && (pcs_lpab & TXCW_ASYM_PAUSE))
7665 mii->mii_media_active |= IFM_FLOW
7666 | IFM_ETH_RXPAUSE;
7667 }
7668 /* Update LED */
7669 wm_tbi_serdes_set_linkled(sc);
7670 } else {
7671 DPRINTF(WM_DEBUG_LINK,
7672 ("%s: LINK: Receive sequence error\n",
7673 device_xname(sc->sc_dev)));
7674 }
7675 }
7676
7677 /*
7678 * wm_linkintr:
7679 *
7680 * Helper; handle link interrupts.
7681 */
7682 static void
7683 wm_linkintr(struct wm_softc *sc, uint32_t icr)
7684 {
7685
7686 KASSERT(WM_CORE_LOCKED(sc));
7687
7688 if (sc->sc_flags & WM_F_HAS_MII)
7689 wm_linkintr_gmii(sc, icr);
7690 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7691 && (sc->sc_type >= WM_T_82575))
7692 wm_linkintr_serdes(sc, icr);
7693 else
7694 wm_linkintr_tbi(sc, icr);
7695 }
7696
7697 /*
7698 * wm_intr_legacy:
7699 *
7700 * Interrupt service routine for INTx and MSI.
7701 */
7702 static int
7703 wm_intr_legacy(void *arg)
7704 {
7705 struct wm_softc *sc = arg;
7706 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7707 struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
7708 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7709 uint32_t icr, rndval = 0;
7710 int handled = 0;
7711
7712 DPRINTF(WM_DEBUG_TX,
7713 ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
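	/*
	 * Note that ICR is read-to-clear: each CSR_READ() below both
	 * fetches and acknowledges the pending interrupt causes, so we
	 * simply loop until none of the causes we care about (sc_icr)
	 * remain set.
	 */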
7714 while (1 /* CONSTCOND */) {
7715 icr = CSR_READ(sc, WMREG_ICR);
7716 if ((icr & sc->sc_icr) == 0)
7717 break;
7718 if (rndval == 0)
7719 rndval = icr;
7720
7721 mutex_enter(rxq->rxq_lock);
7722
7723 if (rxq->rxq_stopping) {
7724 mutex_exit(rxq->rxq_lock);
7725 break;
7726 }
7727
7728 handled = 1;
7729
7730 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7731 if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
7732 DPRINTF(WM_DEBUG_RX,
7733 ("%s: RX: got Rx intr 0x%08x\n",
7734 device_xname(sc->sc_dev),
7735 icr & (ICR_RXDMT0 | ICR_RXT0)));
7736 WM_Q_EVCNT_INCR(rxq, rxintr);
7737 }
7738 #endif
7739 wm_rxeof(rxq);
7740
7741 mutex_exit(rxq->rxq_lock);
7742 mutex_enter(txq->txq_lock);
7743
7744 if (txq->txq_stopping) {
7745 mutex_exit(txq->txq_lock);
7746 break;
7747 }
7748
7749 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7750 if (icr & ICR_TXDW) {
7751 DPRINTF(WM_DEBUG_TX,
7752 ("%s: TX: got TXDW interrupt\n",
7753 device_xname(sc->sc_dev)));
7754 WM_Q_EVCNT_INCR(txq, txdw);
7755 }
7756 #endif
7757 wm_txeof(sc, txq);
7758
7759 mutex_exit(txq->txq_lock);
7760 WM_CORE_LOCK(sc);
7761
7762 if (sc->sc_core_stopping) {
7763 WM_CORE_UNLOCK(sc);
7764 break;
7765 }
7766
7767 if (icr & (ICR_LSC | ICR_RXSEQ)) {
7768 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7769 wm_linkintr(sc, icr);
7770 }
7771
7772 WM_CORE_UNLOCK(sc);
7773
7774 if (icr & ICR_RXO) {
7775 #if defined(WM_DEBUG)
7776 log(LOG_WARNING, "%s: Receive overrun\n",
7777 device_xname(sc->sc_dev));
7778 #endif /* defined(WM_DEBUG) */
7779 }
7780 }
7781
7782 rnd_add_uint32(&sc->rnd_source, rndval);
7783
7784 if (handled) {
7785 /* Try to get more packets going. */
7786 ifp->if_start(ifp);
7787 }
7788
7789 return handled;
7790 }
7791
7792 static int
7793 wm_txrxintr_msix(void *arg)
7794 {
7795 struct wm_queue *wmq = arg;
7796 struct wm_txqueue *txq = &wmq->wmq_txq;
7797 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
7798 struct wm_softc *sc = txq->txq_sc;
7799 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7800
7801 KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
7802
7803 DPRINTF(WM_DEBUG_TX,
7804 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
7805
	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMC,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMC,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
	else
		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
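	/*
	 * This queue's interrupt stays masked (via IMC/EIMC above) while
	 * we process it, and is unmasked again (via IMS/EIMS) at the
	 * bottom of this function, so the hardware won't re-raise it
	 * mid-processing.
	 */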
7812
7813 mutex_enter(txq->txq_lock);
7814
7815 if (txq->txq_stopping) {
7816 mutex_exit(txq->txq_lock);
7817 return 0;
7818 }
7819
7820 WM_Q_EVCNT_INCR(txq, txdw);
7821 wm_txeof(sc, txq);
7822
7823 /* Try to get more packets going. */
7824 if (pcq_peek(txq->txq_interq) != NULL)
7825 wm_nq_transmit_locked(ifp, txq);
	/*
	 * Some upper-layer processing, e.g. ALTQ, still calls
	 * ifp->if_start() directly.
	 */
7830 if (wmq->wmq_id == 0) {
7831 if (!IFQ_IS_EMPTY(&ifp->if_snd))
7832 wm_nq_start_locked(ifp);
7833 }
7834
7835 mutex_exit(txq->txq_lock);
7836
7837 DPRINTF(WM_DEBUG_RX,
7838 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
7839 mutex_enter(rxq->rxq_lock);
7840
7841 if (rxq->rxq_stopping) {
7842 mutex_exit(rxq->rxq_lock);
7843 return 0;
7844 }
7845
7846 WM_Q_EVCNT_INCR(rxq, rxintr);
7847 wm_rxeof(rxq);
7848 mutex_exit(rxq->rxq_lock);
7849
	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMS,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMS,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
	else
		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
7856
7857 return 1;
7858 }
7859
7860 /*
7861 * wm_linkintr_msix:
7862 *
7863 * Interrupt service routine for link status change for MSI-X.
7864 */
7865 static int
7866 wm_linkintr_msix(void *arg)
7867 {
7868 struct wm_softc *sc = arg;
7869 uint32_t reg;
7870
7871 DPRINTF(WM_DEBUG_LINK,
7872 ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
7873
7874 reg = CSR_READ(sc, WMREG_ICR);
7875 WM_CORE_LOCK(sc);
7876 if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
7877 goto out;
7878
7879 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7880 wm_linkintr(sc, ICR_LSC);
7881
7882 out:
7883 WM_CORE_UNLOCK(sc);
7884
7885 if (sc->sc_type == WM_T_82574)
7886 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
7887 else if (sc->sc_type == WM_T_82575)
7888 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
7889 else
7890 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
7891
7892 return 1;
7893 }
7894
7895 /*
7896 * Media related.
7897 * GMII, SGMII, TBI (and SERDES)
7898 */
7899
7900 /* Common */
7901
7902 /*
7903 * wm_tbi_serdes_set_linkled:
7904 *
7905 * Update the link LED on TBI and SERDES devices.
7906 */
7907 static void
7908 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
7909 {
7910
7911 if (sc->sc_tbi_linkup)
7912 sc->sc_ctrl |= CTRL_SWDPIN(0);
7913 else
7914 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7915
7916 /* 82540 or newer devices are active low */
7917 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
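	/*
	 * The XOR above inverts the LED bit on active-low parts instead
	 * of branching: with linkup set, the pin is driven low on 82540
	 * and newer and high on older chips.
	 */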
7918
7919 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7920 }
7921
7922 /* GMII related */
7923
7924 /*
7925 * wm_gmii_reset:
7926 *
7927 * Reset the PHY.
7928 */
7929 static void
7930 wm_gmii_reset(struct wm_softc *sc)
7931 {
7932 uint32_t reg;
7933 int rv;
7934
7935 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
7936 device_xname(sc->sc_dev), __func__));
7937
7938 rv = sc->phy.acquire(sc);
7939 if (rv != 0) {
7940 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7941 __func__);
7942 return;
7943 }
7944
7945 switch (sc->sc_type) {
7946 case WM_T_82542_2_0:
7947 case WM_T_82542_2_1:
7948 /* null */
7949 break;
7950 case WM_T_82543:
7951 /*
7952 * With 82543, we need to force speed and duplex on the MAC
7953 * equal to what the PHY speed and duplex configuration is.
7954 * In addition, we need to perform a hardware reset on the PHY
7955 * to take it out of reset.
7956 */
7957 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
7958 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7959
7960 /* The PHY reset pin is active-low. */
7961 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7962 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
7963 CTRL_EXT_SWDPIN(4));
7964 reg |= CTRL_EXT_SWDPIO(4);
7965
7966 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7967 CSR_WRITE_FLUSH(sc);
7968 delay(10*1000);
7969
7970 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
7971 CSR_WRITE_FLUSH(sc);
7972 delay(150);
7973 #if 0
7974 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
7975 #endif
7976 delay(20*1000); /* XXX extra delay to get PHY ID? */
7977 break;
7978 case WM_T_82544: /* reset 10000us */
7979 case WM_T_82540:
7980 case WM_T_82545:
7981 case WM_T_82545_3:
7982 case WM_T_82546:
7983 case WM_T_82546_3:
7984 case WM_T_82541:
7985 case WM_T_82541_2:
7986 case WM_T_82547:
7987 case WM_T_82547_2:
7988 case WM_T_82571: /* reset 100us */
7989 case WM_T_82572:
7990 case WM_T_82573:
7991 case WM_T_82574:
7992 case WM_T_82575:
7993 case WM_T_82576:
7994 case WM_T_82580:
7995 case WM_T_I350:
7996 case WM_T_I354:
7997 case WM_T_I210:
7998 case WM_T_I211:
7999 case WM_T_82583:
8000 case WM_T_80003:
8001 /* generic reset */
8002 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8003 CSR_WRITE_FLUSH(sc);
8004 delay(20000);
8005 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8006 CSR_WRITE_FLUSH(sc);
8007 delay(20000);
8008
8009 if ((sc->sc_type == WM_T_82541)
8010 || (sc->sc_type == WM_T_82541_2)
8011 || (sc->sc_type == WM_T_82547)
8012 || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP PHYs are done in igp_reset() */
			/* XXX add code to set LED after phy reset */
8015 }
8016 break;
8017 case WM_T_ICH8:
8018 case WM_T_ICH9:
8019 case WM_T_ICH10:
8020 case WM_T_PCH:
8021 case WM_T_PCH2:
8022 case WM_T_PCH_LPT:
8023 case WM_T_PCH_SPT:
8024 /* generic reset */
8025 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8026 CSR_WRITE_FLUSH(sc);
8027 delay(100);
8028 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8029 CSR_WRITE_FLUSH(sc);
8030 delay(150);
8031 break;
8032 default:
8033 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
8034 __func__);
8035 break;
8036 }
8037
8038 sc->phy.release(sc);
8039
8040 /* get_cfg_done */
8041 wm_get_cfg_done(sc);
8042
8043 /* extra setup */
8044 switch (sc->sc_type) {
8045 case WM_T_82542_2_0:
8046 case WM_T_82542_2_1:
8047 case WM_T_82543:
8048 case WM_T_82544:
8049 case WM_T_82540:
8050 case WM_T_82545:
8051 case WM_T_82545_3:
8052 case WM_T_82546:
8053 case WM_T_82546_3:
8054 case WM_T_82541_2:
8055 case WM_T_82547_2:
8056 case WM_T_82571:
8057 case WM_T_82572:
8058 case WM_T_82573:
8059 case WM_T_82575:
8060 case WM_T_82576:
8061 case WM_T_82580:
8062 case WM_T_I350:
8063 case WM_T_I354:
8064 case WM_T_I210:
8065 case WM_T_I211:
8066 case WM_T_80003:
8067 /* null */
8068 break;
8069 case WM_T_82574:
8070 case WM_T_82583:
8071 wm_lplu_d0_disable(sc);
8072 break;
8073 case WM_T_82541:
8074 case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
8076 break;
8077 case WM_T_ICH8:
8078 case WM_T_ICH9:
8079 case WM_T_ICH10:
8080 case WM_T_PCH:
8081 case WM_T_PCH2:
8082 case WM_T_PCH_LPT:
8083 case WM_T_PCH_SPT:
		/* Allow time for h/w to get to a quiescent state after reset */
8085 delay(10*1000);
8086
8087 if (sc->sc_type == WM_T_PCH)
8088 wm_hv_phy_workaround_ich8lan(sc);
8089
8090 if (sc->sc_type == WM_T_PCH2)
8091 wm_lv_phy_workaround_ich8lan(sc);
8092
8093 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
8094 /*
8095 * dummy read to clear the phy wakeup bit after lcd
8096 * reset
8097 */
8098 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
8099 }
8100
		/*
		 * XXX Configure the LCD with the extended configuration
		 * region in NVM
		 */
8105
8106 /* Disable D0 LPLU. */
8107 if (sc->sc_type >= WM_T_PCH) /* PCH* */
8108 wm_lplu_d0_disable_pch(sc);
8109 else
8110 wm_lplu_d0_disable(sc); /* ICH* */
8111 break;
8112 default:
8113 panic("%s: unknown type\n", __func__);
8114 break;
8115 }
8116 }
8117
8118 /*
8119 * wm_get_phy_id_82575:
8120 *
8121 * Return PHY ID. Return -1 if it failed.
8122 */
8123 static int
8124 wm_get_phy_id_82575(struct wm_softc *sc)
8125 {
8126 uint32_t reg;
8127 int phyid = -1;
8128
8129 /* XXX */
8130 if ((sc->sc_flags & WM_F_SGMII) == 0)
8131 return -1;
8132
8133 if (wm_sgmii_uses_mdio(sc)) {
8134 switch (sc->sc_type) {
8135 case WM_T_82575:
8136 case WM_T_82576:
8137 reg = CSR_READ(sc, WMREG_MDIC);
8138 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
8139 break;
8140 case WM_T_82580:
8141 case WM_T_I350:
8142 case WM_T_I354:
8143 case WM_T_I210:
8144 case WM_T_I211:
8145 reg = CSR_READ(sc, WMREG_MDICNFG);
8146 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
8147 break;
8148 default:
8149 return -1;
8150 }
8151 }
8152
8153 return phyid;
8154 }
8155
8156
8157 /*
8158 * wm_gmii_mediainit:
8159 *
8160 * Initialize media for use on 1000BASE-T devices.
8161 */
8162 static void
8163 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
8164 {
8165 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8166 struct mii_data *mii = &sc->sc_mii;
8167 uint32_t reg;
8168
8169 DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
8170 device_xname(sc->sc_dev), __func__));
8171
8172 /* We have GMII. */
8173 sc->sc_flags |= WM_F_HAS_MII;
8174
8175 if (sc->sc_type == WM_T_80003)
8176 sc->sc_tipg = TIPG_1000T_80003_DFLT;
8177 else
8178 sc->sc_tipg = TIPG_1000T_DFLT;
8179
8180 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
8181 if ((sc->sc_type == WM_T_82580)
8182 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
8183 || (sc->sc_type == WM_T_I211)) {
8184 reg = CSR_READ(sc, WMREG_PHPM);
8185 reg &= ~PHPM_GO_LINK_D;
8186 CSR_WRITE(sc, WMREG_PHPM, reg);
8187 }
8188
8189 /*
8190 * Let the chip set speed/duplex on its own based on
8191 * signals from the PHY.
8192 * XXXbouyer - I'm not sure this is right for the 80003,
8193 * the em driver only sets CTRL_SLU here - but it seems to work.
8194 */
8195 sc->sc_ctrl |= CTRL_SLU;
8196 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8197
8198 /* Initialize our media structures and probe the GMII. */
8199 mii->mii_ifp = ifp;
8200
8201 /*
8202 * Determine the PHY access method.
8203 *
8204 * For SGMII, use SGMII specific method.
8205 *
8206 * For some devices, we can determine the PHY access method
8207 * from sc_type.
8208 *
8209 * For ICH and PCH variants, it's difficult to determine the PHY
8210 * access method by sc_type, so use the PCI product ID for some
8211 * devices.
	 * For other ICH8 variants, try igp's method first.  If the PHY
	 * can't be detected that way, fall back to bm's method.
8214 */
8215 switch (prodid) {
8216 case PCI_PRODUCT_INTEL_PCH_M_LM:
8217 case PCI_PRODUCT_INTEL_PCH_M_LC:
8218 /* 82577 */
8219 sc->sc_phytype = WMPHY_82577;
8220 break;
8221 case PCI_PRODUCT_INTEL_PCH_D_DM:
8222 case PCI_PRODUCT_INTEL_PCH_D_DC:
8223 /* 82578 */
8224 sc->sc_phytype = WMPHY_82578;
8225 break;
8226 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
8227 case PCI_PRODUCT_INTEL_PCH2_LV_V:
8228 /* 82579 */
8229 sc->sc_phytype = WMPHY_82579;
8230 break;
8231 case PCI_PRODUCT_INTEL_82801H_82567V_3:
8232 case PCI_PRODUCT_INTEL_82801I_BM:
8233 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
8234 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
8235 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
8236 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
8237 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
8238 /* ICH8, 9, 10 with 82567 */
8239 sc->sc_phytype = WMPHY_BM;
8240 mii->mii_readreg = wm_gmii_bm_readreg;
8241 mii->mii_writereg = wm_gmii_bm_writereg;
8242 break;
8243 default:
8244 if (((sc->sc_flags & WM_F_SGMII) != 0)
8245 && !wm_sgmii_uses_mdio(sc)){
8246 /* SGMII */
8247 mii->mii_readreg = wm_sgmii_readreg;
8248 mii->mii_writereg = wm_sgmii_writereg;
8249 } else if ((sc->sc_type == WM_T_82574)
8250 || (sc->sc_type == WM_T_82583)) {
8251 /* BM2 (phyaddr == 1) */
8252 sc->sc_phytype = WMPHY_BM;
8253 mii->mii_readreg = wm_gmii_bm_readreg;
8254 mii->mii_writereg = wm_gmii_bm_writereg;
8255 } else if (sc->sc_type >= WM_T_ICH8) {
8256 /* non-82567 ICH8, 9 and 10 */
8257 mii->mii_readreg = wm_gmii_i82544_readreg;
8258 mii->mii_writereg = wm_gmii_i82544_writereg;
8259 } else if (sc->sc_type >= WM_T_80003) {
8260 /* 80003 */
8261 sc->sc_phytype = WMPHY_GG82563;
8262 mii->mii_readreg = wm_gmii_i80003_readreg;
8263 mii->mii_writereg = wm_gmii_i80003_writereg;
8264 } else if (sc->sc_type >= WM_T_I210) {
8265 /* I210 and I211 */
8266 sc->sc_phytype = WMPHY_210;
8267 mii->mii_readreg = wm_gmii_gs40g_readreg;
8268 mii->mii_writereg = wm_gmii_gs40g_writereg;
8269 } else if (sc->sc_type >= WM_T_82580) {
8270 /* 82580, I350 and I354 */
8271 sc->sc_phytype = WMPHY_82580;
8272 mii->mii_readreg = wm_gmii_82580_readreg;
8273 mii->mii_writereg = wm_gmii_82580_writereg;
8274 } else if (sc->sc_type >= WM_T_82544) {
			/* 82544, 8254[056], 8254[17], 8257[1234], 82583 */
8276 mii->mii_readreg = wm_gmii_i82544_readreg;
8277 mii->mii_writereg = wm_gmii_i82544_writereg;
8278 } else {
8279 mii->mii_readreg = wm_gmii_i82543_readreg;
8280 mii->mii_writereg = wm_gmii_i82543_writereg;
8281 }
8282 break;
8283 }
8284 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
8285 /* All PCH* use _hv_ */
8286 mii->mii_readreg = wm_gmii_hv_readreg;
8287 mii->mii_writereg = wm_gmii_hv_writereg;
8288 }
8289 mii->mii_statchg = wm_gmii_statchg;
8290
8291 wm_gmii_reset(sc);
8292
8293 sc->sc_ethercom.ec_mii = &sc->sc_mii;
8294 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
8295 wm_gmii_mediastatus);
8296
8297 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
8298 || (sc->sc_type == WM_T_82580)
8299 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
8300 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
8301 if ((sc->sc_flags & WM_F_SGMII) == 0) {
8302 /* Attach only one port */
8303 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
8304 MII_OFFSET_ANY, MIIF_DOPAUSE);
8305 } else {
8306 int i, id;
8307 uint32_t ctrl_ext;
8308
8309 id = wm_get_phy_id_82575(sc);
8310 if (id != -1) {
8311 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
8312 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
8313 }
8314 if ((id == -1)
8315 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
8316 /* Power on sgmii phy if it is disabled */
8317 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8318 CSR_WRITE(sc, WMREG_CTRL_EXT,
8319 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
8320 CSR_WRITE_FLUSH(sc);
8321 delay(300*1000); /* XXX too long */
8322
8323 /* from 1 to 8 */
8324 for (i = 1; i < 8; i++)
8325 mii_attach(sc->sc_dev, &sc->sc_mii,
8326 0xffffffff, i, MII_OFFSET_ANY,
8327 MIIF_DOPAUSE);
8328
8329 /* restore previous sfp cage power state */
8330 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8331 }
8332 }
8333 } else {
8334 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8335 MII_OFFSET_ANY, MIIF_DOPAUSE);
8336 }
8337
8338 /*
8339 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
8340 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
8341 */
8342 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
8343 (LIST_FIRST(&mii->mii_phys) == NULL)) {
8344 wm_set_mdio_slow_mode_hv(sc);
8345 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8346 MII_OFFSET_ANY, MIIF_DOPAUSE);
8347 }
8348
8349 /*
8350 * (For ICH8 variants)
8351 * If PHY detection failed, use BM's r/w function and retry.
8352 */
8353 if (LIST_FIRST(&mii->mii_phys) == NULL) {
8354 /* if failed, retry with *_bm_* */
8355 mii->mii_readreg = wm_gmii_bm_readreg;
8356 mii->mii_writereg = wm_gmii_bm_writereg;
8357
8358 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8359 MII_OFFSET_ANY, MIIF_DOPAUSE);
8360 }
8361
8362 if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
8364 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
8365 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
8366 sc->sc_phytype = WMPHY_NONE;
8367 } else {
8368 /*
8369 * PHY Found!
8370 * Check PHY type.
8371 */
8372 uint32_t model;
8373 struct mii_softc *child;
8374
8375 child = LIST_FIRST(&mii->mii_phys);
8376 model = child->mii_mpd_model;
8377 if (model == MII_MODEL_yyINTEL_I82566)
8378 sc->sc_phytype = WMPHY_IGP_3;
8379
8380 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
8381 }
8382 }
8383
8384 /*
8385 * wm_gmii_mediachange: [ifmedia interface function]
8386 *
8387 * Set hardware to newly-selected media on a 1000BASE-T device.
8388 */
8389 static int
8390 wm_gmii_mediachange(struct ifnet *ifp)
8391 {
8392 struct wm_softc *sc = ifp->if_softc;
8393 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8394 int rc;
8395
8396 DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
8397 device_xname(sc->sc_dev), __func__));
8398 if ((ifp->if_flags & IFF_UP) == 0)
8399 return 0;
8400
8401 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
8402 sc->sc_ctrl |= CTRL_SLU;
8403 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8404 || (sc->sc_type > WM_T_82543)) {
8405 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
8406 } else {
8407 sc->sc_ctrl &= ~CTRL_ASDE;
8408 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
8409 if (ife->ifm_media & IFM_FDX)
8410 sc->sc_ctrl |= CTRL_FD;
8411 switch (IFM_SUBTYPE(ife->ifm_media)) {
8412 case IFM_10_T:
8413 sc->sc_ctrl |= CTRL_SPEED_10;
8414 break;
8415 case IFM_100_TX:
8416 sc->sc_ctrl |= CTRL_SPEED_100;
8417 break;
8418 case IFM_1000_T:
8419 sc->sc_ctrl |= CTRL_SPEED_1000;
8420 break;
8421 default:
8422 panic("wm_gmii_mediachange: bad media 0x%x",
8423 ife->ifm_media);
8424 }
8425 }
8426 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8427 if (sc->sc_type <= WM_T_82543)
8428 wm_gmii_reset(sc);
8429
8430 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
8431 return 0;
8432 return rc;
8433 }
8434
8435 /*
8436 * wm_gmii_mediastatus: [ifmedia interface function]
8437 *
8438 * Get the current interface media status on a 1000BASE-T device.
8439 */
8440 static void
8441 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8442 {
8443 struct wm_softc *sc = ifp->if_softc;
8444
8445 ether_mediastatus(ifp, ifmr);
8446 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8447 | sc->sc_flowflags;
8448 }
8449
8450 #define MDI_IO CTRL_SWDPIN(2)
8451 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
8452 #define MDI_CLK CTRL_SWDPIN(3)
8453
8454 static void
8455 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
8456 {
8457 uint32_t i, v;
8458
8459 v = CSR_READ(sc, WMREG_CTRL);
8460 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8461 v |= MDI_DIR | CTRL_SWDPIO(3);
8462
8463 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
8464 if (data & i)
8465 v |= MDI_IO;
8466 else
8467 v &= ~MDI_IO;
8468 CSR_WRITE(sc, WMREG_CTRL, v);
8469 CSR_WRITE_FLUSH(sc);
8470 delay(10);
8471 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8472 CSR_WRITE_FLUSH(sc);
8473 delay(10);
8474 CSR_WRITE(sc, WMREG_CTRL, v);
8475 CSR_WRITE_FLUSH(sc);
8476 delay(10);
8477 }
8478 }
8479
8480 static uint32_t
8481 wm_i82543_mii_recvbits(struct wm_softc *sc)
8482 {
8483 uint32_t v, i, data = 0;
8484
8485 v = CSR_READ(sc, WMREG_CTRL);
8486 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8487 v |= CTRL_SWDPIO(3);
8488
8489 CSR_WRITE(sc, WMREG_CTRL, v);
8490 CSR_WRITE_FLUSH(sc);
8491 delay(10);
8492 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8493 CSR_WRITE_FLUSH(sc);
8494 delay(10);
8495 CSR_WRITE(sc, WMREG_CTRL, v);
8496 CSR_WRITE_FLUSH(sc);
8497 delay(10);
8498
8499 for (i = 0; i < 16; i++) {
8500 data <<= 1;
8501 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8502 CSR_WRITE_FLUSH(sc);
8503 delay(10);
8504 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
8505 data |= 1;
8506 CSR_WRITE(sc, WMREG_CTRL, v);
8507 CSR_WRITE_FLUSH(sc);
8508 delay(10);
8509 }
8510
8511 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8512 CSR_WRITE_FLUSH(sc);
8513 delay(10);
8514 CSR_WRITE(sc, WMREG_CTRL, v);
8515 CSR_WRITE_FLUSH(sc);
8516 delay(10);
8517
8518 return data;
8519 }
8520
8521 #undef MDI_IO
8522 #undef MDI_DIR
8523 #undef MDI_CLK
8524
8525 /*
8526 * wm_gmii_i82543_readreg: [mii interface function]
8527 *
8528 * Read a PHY register on the GMII (i82543 version).
8529 */
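/*
 * A sketch of the IEEE 802.3 clause 22 management frame the bit-bang
 * helpers above shift out: 32 preamble bits of 1, then the 14 bits
 * built below as
 *
 *	(MII_COMMAND_START << 12) | (MII_COMMAND_READ << 10) |
 *	    (phy << 5) | reg
 *
 * i.e. start, opcode, a 5-bit PHY address and a 5-bit register
 * address, followed by a turnaround and 16 data bits clocked back in
 * by wm_i82543_mii_recvbits().
 */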
8530 static int
8531 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
8532 {
8533 struct wm_softc *sc = device_private(self);
8534 int rv;
8535
8536 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8537 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
8538 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
8539 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
8540
8541 DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
8542 device_xname(sc->sc_dev), phy, reg, rv));
8543
8544 return rv;
8545 }
8546
8547 /*
8548 * wm_gmii_i82543_writereg: [mii interface function]
8549 *
8550 * Write a PHY register on the GMII (i82543 version).
8551 */
8552 static void
8553 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
8554 {
8555 struct wm_softc *sc = device_private(self);
8556
8557 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8558 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
8559 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
8560 (MII_COMMAND_START << 30), 32);
8561 }
8562
8563 /*
8564 * wm_gmii_mdic_readreg: [mii interface function]
8565 *
8566 * Read a PHY register on the GMII.
8567 */
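/*
 * A minimal usage sketch (the PHY address and register here are just
 * examples): the MDIC register drives the whole frame in hardware, so
 * reading the status register of the PHY at address 1 is simply
 *
 *	val = wm_gmii_mdic_readreg(self, 1, MII_BMSR);
 *
 * The helper writes the opcode, PHY address and register address,
 * polls MDIC_READY (50us per poll), checks MDIC_E for an MDIO error
 * and returns the data from the low 16 bits.
 */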
8568 static int
8569 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
8570 {
8571 struct wm_softc *sc = device_private(self);
8572 uint32_t mdic = 0;
8573 int i, rv;
8574
8575 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
8576 MDIC_REGADD(reg));
8577
8578 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8579 mdic = CSR_READ(sc, WMREG_MDIC);
8580 if (mdic & MDIC_READY)
8581 break;
8582 delay(50);
8583 }
8584
8585 if ((mdic & MDIC_READY) == 0) {
8586 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
8587 device_xname(sc->sc_dev), phy, reg);
8588 rv = 0;
8589 } else if (mdic & MDIC_E) {
8590 #if 0 /* This is normal if no PHY is present. */
8591 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
8592 device_xname(sc->sc_dev), phy, reg);
8593 #endif
8594 rv = 0;
8595 } else {
8596 rv = MDIC_DATA(mdic);
8597 if (rv == 0xffff)
8598 rv = 0;
8599 }
8600
8601 return rv;
8602 }
8603
8604 /*
8605 * wm_gmii_mdic_writereg: [mii interface function]
8606 *
8607 * Write a PHY register on the GMII.
8608 */
8609 static void
8610 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
8611 {
8612 struct wm_softc *sc = device_private(self);
8613 uint32_t mdic = 0;
8614 int i;
8615
8616 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
8617 MDIC_REGADD(reg) | MDIC_DATA(val));
8618
8619 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8620 mdic = CSR_READ(sc, WMREG_MDIC);
8621 if (mdic & MDIC_READY)
8622 break;
8623 delay(50);
8624 }
8625
8626 if ((mdic & MDIC_READY) == 0)
8627 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
8628 device_xname(sc->sc_dev), phy, reg);
8629 else if (mdic & MDIC_E)
8630 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
8631 device_xname(sc->sc_dev), phy, reg);
8632 }
8633
8634 /*
8635 * wm_gmii_i82544_readreg: [mii interface function]
8636 *
8637 * Read a PHY register on the GMII.
8638 */
8639 static int
8640 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
8641 {
8642 struct wm_softc *sc = device_private(self);
8643 int rv;
8644
8645 if (sc->phy.acquire(sc)) {
8646 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8647 __func__);
8648 return 0;
8649 }
8650 rv = wm_gmii_mdic_readreg(self, phy, reg);
8651 sc->phy.release(sc);
8652
8653 return rv;
8654 }
8655
8656 /*
8657 * wm_gmii_i82544_writereg: [mii interface function]
8658 *
8659 * Write a PHY register on the GMII.
8660 */
8661 static void
8662 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
8663 {
8664 struct wm_softc *sc = device_private(self);
8665
	if (sc->phy.acquire(sc)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}
8670 wm_gmii_mdic_writereg(self, phy, reg, val);
8671 sc->phy.release(sc);
8672 }
8673
8674 /*
8675 * wm_gmii_i80003_readreg: [mii interface function]
8676 *
 * Read a PHY register on the Kumeran bus (80003).
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
8680 */
8681 static int
8682 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
8683 {
8684 struct wm_softc *sc = device_private(self);
8685 int rv;
8686
8687 if (phy != 1) /* only one PHY on kumeran bus */
8688 return 0;
8689
8690 if (sc->phy.acquire(sc)) {
8691 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8692 __func__);
8693 return 0;
8694 }
8695
8696 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
8697 wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8698 reg >> GG82563_PAGE_SHIFT);
8699 } else {
8700 wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8701 reg >> GG82563_PAGE_SHIFT);
8702 }
	/*
	 * Wait another 200us to work around a bug with the ready bit
	 * in the MDIC register.
	 */
8704 delay(200);
8705 rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
8706 delay(200);
8707 sc->phy.release(sc);
8708
8709 return rv;
8710 }
8711
8712 /*
8713 * wm_gmii_i80003_writereg: [mii interface function]
8714 *
 * Write a PHY register on the Kumeran bus (80003).
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
8718 */
8719 static void
8720 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
8721 {
8722 struct wm_softc *sc = device_private(self);
8723
8724 if (phy != 1) /* only one PHY on kumeran bus */
8725 return;
8726
8727 if (sc->phy.acquire(sc)) {
8728 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8729 __func__);
8730 return;
8731 }
8732
8733 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
8734 wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8735 reg >> GG82563_PAGE_SHIFT);
8736 } else {
8737 wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8738 reg >> GG82563_PAGE_SHIFT);
8739 }
	/*
	 * Wait another 200us to work around a bug with the ready bit
	 * in the MDIC register.
	 */
8741 delay(200);
8742 wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
8743 delay(200);
8744
8745 sc->phy.release(sc);
8746 }
8747
8748 /*
8749 * wm_gmii_bm_readreg: [mii interface function]
8750 *
 * Read a PHY register on the BM PHY (e.g. ICH8's 82567).
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
8754 */
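/*
 * BM register numbers encode the page in the bits above
 * BME1000_PAGE_SHIFT and the register offset in the low bits.  For
 * example (values hypothetical), a caller asking for register 17 on
 * page 2 passes (2 << BME1000_PAGE_SHIFT) | 17, and the code below
 * splits the two apart again.  Registers above
 * BME1000_MAX_MULTI_PAGE_REG additionally require an explicit
 * page-select write first.
 */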
8755 static int
8756 wm_gmii_bm_readreg(device_t self, int phy, int reg)
8757 {
8758 struct wm_softc *sc = device_private(self);
8759 uint16_t page = reg >> BME1000_PAGE_SHIFT;
8760 uint16_t val;
8761 int rv;
8762
8763 if (sc->phy.acquire(sc)) {
8764 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8765 __func__);
8766 return 0;
8767 }
8768
8769 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
8770 phy = ((page >= 768) || ((page == 0) && (reg == 25))
8771 || (reg == 31)) ? 1 : phy;
8772 /* Page 800 works differently than the rest so it has its own func */
8773 if (page == BM_WUC_PAGE) {
8774 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
8775 rv = val;
8776 goto release;
8777 }
8778
8779 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8780 if ((phy == 1) && (sc->sc_type != WM_T_82574)
8781 && (sc->sc_type != WM_T_82583))
8782 wm_gmii_mdic_writereg(self, phy,
8783 MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
8784 else
8785 wm_gmii_mdic_writereg(self, phy,
8786 BME1000_PHY_PAGE_SELECT, page);
8787 }
8788
8789 rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
8790
8791 release:
8792 sc->phy.release(sc);
8793 return rv;
8794 }
8795
8796 /*
8797 * wm_gmii_bm_writereg: [mii interface function]
8798 *
 * Write a PHY register on the BM PHY (e.g. ICH8's 82567).
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
8802 */
8803 static void
8804 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
8805 {
8806 struct wm_softc *sc = device_private(self);
8807 uint16_t page = reg >> BME1000_PAGE_SHIFT;
8808
8809 if (sc->phy.acquire(sc)) {
8810 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8811 __func__);
8812 return;
8813 }
8814
8815 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
8816 phy = ((page >= 768) || ((page == 0) && (reg == 25))
8817 || (reg == 31)) ? 1 : phy;
8818 /* Page 800 works differently than the rest so it has its own func */
8819 if (page == BM_WUC_PAGE) {
8820 uint16_t tmp;
8821
8822 tmp = val;
8823 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
8824 goto release;
8825 }
8826
8827 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8828 if ((phy == 1) && (sc->sc_type != WM_T_82574)
8829 && (sc->sc_type != WM_T_82583))
8830 wm_gmii_mdic_writereg(self, phy,
8831 MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
8832 else
8833 wm_gmii_mdic_writereg(self, phy,
8834 BME1000_PHY_PAGE_SELECT, page);
8835 }
8836
8837 wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
8838
8839 release:
8840 sc->phy.release(sc);
8841 }
8842
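/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write (rd == 0) a BM wakeup register on
 *	page 800.  The sequence below is: select page 769 and set the
 *	enable bit in BM_WUC_ENABLE_REG, select page 800, write the
 *	register number to the address opcode register, access the
 *	data opcode register, then restore the original enable bits.
 */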
8843 static void
8844 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
8845 {
8846 struct wm_softc *sc = device_private(self);
8847 uint16_t regnum = BM_PHY_REG_NUM(offset);
8848 uint16_t wuce;
8849
8850 DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
8851 device_xname(sc->sc_dev), __func__));
8852 /* XXX Gig must be disabled for MDIO accesses to page 800 */
8853 if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
8855 }
8856
8857 /* Set page 769 */
8858 wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8859 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8860
8861 wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
8862
8863 wuce &= ~BM_WUC_HOST_WU_BIT;
8864 wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG,
8865 wuce | BM_WUC_ENABLE_BIT);
8866
8867 /* Select page 800 */
8868 wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8869 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
8870
8871 /* Write page 800 */
8872 wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
8873
8874 if (rd)
8875 *val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
8876 else
8877 wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
8878
8879 /* Set page 769 */
8880 wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8881 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8882
8883 wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
8884 }
8885
8886 /*
8887 * wm_gmii_hv_readreg: [mii interface function]
8888 *
 * Read a PHY register on the HV (PCH) PHY.
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
8892 */
8893 static int
8894 wm_gmii_hv_readreg(device_t self, int phy, int reg)
8895 {
8896 struct wm_softc *sc = device_private(self);
8897 int rv;
8898
8899 DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
8900 device_xname(sc->sc_dev), __func__));
8901 if (sc->phy.acquire(sc)) {
8902 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8903 __func__);
8904 return 0;
8905 }
8906
8907 rv = wm_gmii_hv_readreg_locked(self, phy, reg);
8908 sc->phy.release(sc);
8909 return rv;
8910 }
8911
8912 static int
8913 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
8914 {
8915 struct wm_softc *sc = device_private(self);
8916 uint16_t page = BM_PHY_REG_PAGE(reg);
8917 uint16_t regnum = BM_PHY_REG_NUM(reg);
8918 uint16_t val;
8919 int rv;
8920
8921 /* XXX Workaround failure in MDIO access while cable is disconnected */
8922 if (sc->sc_phytype == WMPHY_82577) {
8923 /* XXX must write */
8924 }
8925
8926 /* Page 800 works differently than the rest so it has its own func */
8927 if (page == BM_WUC_PAGE) {
8928 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
8929 return val;
8930 }
8931
	/*
	 * Pages lower than 768 work differently than the rest, so
	 * they have their own function.
	 */
8936 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8937 printf("gmii_hv_readreg!!!\n");
8938 return 0;
8939 }
8940
8941 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8942 wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8943 page << BME1000_PAGE_SHIFT);
8944 }
8945
8946 rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
8947 return rv;
8948 }
8949
8950 /*
8951 * wm_gmii_hv_writereg: [mii interface function]
8952 *
 * Write a PHY register on the HV (PCH) PHY.
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
8956 */
8957 static void
8958 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
8959 {
8960 struct wm_softc *sc = device_private(self);
8961
8962 DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
8963 device_xname(sc->sc_dev), __func__));
8964
8965 if (sc->phy.acquire(sc)) {
8966 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8967 __func__);
8968 return;
8969 }
8970
8971 wm_gmii_hv_writereg_locked(self, phy, reg, val);
8972 sc->phy.release(sc);
8973 }
8974
8975 static void
8976 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
8977 {
8978 uint16_t page = BM_PHY_REG_PAGE(reg);
8979 uint16_t regnum = BM_PHY_REG_NUM(reg);
8980
8981 /* XXX Workaround failure in MDIO access while cable is disconnected */
8982
8983 /* Page 800 works differently than the rest so it has its own func */
8984 if (page == BM_WUC_PAGE) {
8985 uint16_t tmp;
8986
8987 tmp = val;
8988 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
8989 return;
8990 }
8991
	/*
	 * Pages lower than 768 work differently than the rest, so
	 * they have their own function.
	 */
8996 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8997 printf("gmii_hv_writereg!!!\n");
8998 return;
8999 }
9000
9001 /*
9002 * XXX Workaround MDIO accesses being disabled after entering IEEE
9003 * Power Down (whenever bit 11 of the PHY control register is set)
9004 */
9005
9006 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
9007 wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9008 page << BME1000_PAGE_SHIFT);
9009 }
9010
9011 wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
9012 }
9013
9014 /*
9015 * wm_gmii_82580_readreg: [mii interface function]
9016 *
9017 * Read a PHY register on the 82580 and I350.
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
9020 */
9021 static int
9022 wm_gmii_82580_readreg(device_t self, int phy, int reg)
9023 {
9024 struct wm_softc *sc = device_private(self);
9025 int rv;
9026
9027 if (sc->phy.acquire(sc) != 0) {
9028 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9029 __func__);
9030 return 0;
9031 }
9032
9033 rv = wm_gmii_mdic_readreg(self, phy, reg);
9034
9035 sc->phy.release(sc);
9036 return rv;
9037 }
9038
9039 /*
9040 * wm_gmii_82580_writereg: [mii interface function]
9041 *
9042 * Write a PHY register on the 82580 and I350.
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
9045 */
9046 static void
9047 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
9048 {
9049 struct wm_softc *sc = device_private(self);
9050
9051 if (sc->phy.acquire(sc) != 0) {
9052 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9053 __func__);
9054 return;
9055 }
9056
9057 wm_gmii_mdic_writereg(self, phy, reg, val);
9058
9059 sc->phy.release(sc);
9060 }
9061
9062 /*
9063 * wm_gmii_gs40g_readreg: [mii interface function]
9064 *
 * Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
9068 */
9069 static int
9070 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
9071 {
9072 struct wm_softc *sc = device_private(self);
9073 int page, offset;
9074 int rv;
9075
9076 /* Acquire semaphore */
9077 if (sc->phy.acquire(sc)) {
9078 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9079 __func__);
9080 return 0;
9081 }
9082
9083 /* Page select */
9084 page = reg >> GS40G_PAGE_SHIFT;
9085 wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
9086
9087 /* Read reg */
9088 offset = reg & GS40G_OFFSET_MASK;
9089 rv = wm_gmii_mdic_readreg(self, phy, offset);
9090
9091 sc->phy.release(sc);
9092 return rv;
9093 }
9094
9095 /*
9096 * wm_gmii_gs40g_writereg: [mii interface function]
9097 *
9098 * Write a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
9101 */
9102 static void
9103 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
9104 {
9105 struct wm_softc *sc = device_private(self);
9106 int page, offset;
9107
9108 /* Acquire semaphore */
9109 if (sc->phy.acquire(sc)) {
9110 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9111 __func__);
9112 return;
9113 }
9114
9115 /* Page select */
9116 page = reg >> GS40G_PAGE_SHIFT;
9117 wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
9118
9119 /* Write reg */
9120 offset = reg & GS40G_OFFSET_MASK;
9121 wm_gmii_mdic_writereg(self, phy, offset, val);
9122
9123 /* Release semaphore */
9124 sc->phy.release(sc);
9125 }
9126
9127 /*
9128 * wm_gmii_statchg: [mii interface function]
9129 *
9130 * Callback from MII layer when media changes.
9131 */
9132 static void
9133 wm_gmii_statchg(struct ifnet *ifp)
9134 {
9135 struct wm_softc *sc = ifp->if_softc;
9136 struct mii_data *mii = &sc->sc_mii;
9137
9138 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
9139 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9140 sc->sc_fcrtl &= ~FCRTL_XONE;
9141
	/* Get the flow control negotiation result. */
9145 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
9146 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
9147 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
9148 mii->mii_media_active &= ~IFM_ETH_FMASK;
9149 }
9150
9151 if (sc->sc_flowflags & IFM_FLOW) {
9152 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
9153 sc->sc_ctrl |= CTRL_TFCE;
9154 sc->sc_fcrtl |= FCRTL_XONE;
9155 }
9156 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
9157 sc->sc_ctrl |= CTRL_RFCE;
9158 }
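	/*
	 * The mapping applied above: IFM_ETH_TXPAUSE means we may send
	 * pause frames, so enable TX flow control (CTRL_TFCE) and XON
	 * frame generation (FCRTL_XONE); IFM_ETH_RXPAUSE means we honor
	 * pause frames we receive, so enable RX flow control (CTRL_RFCE).
	 */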
9159
9160 if (sc->sc_mii.mii_media_active & IFM_FDX) {
9161 DPRINTF(WM_DEBUG_LINK,
9162 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
9163 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9164 } else {
9165 DPRINTF(WM_DEBUG_LINK,
9166 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
9167 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9168 }
9169
9170 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9171 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9172 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
9173 : WMREG_FCRTL, sc->sc_fcrtl);
9174 if (sc->sc_type == WM_T_80003) {
9175 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
9176 case IFM_1000_T:
9177 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
9178 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
9179 sc->sc_tipg = TIPG_1000T_80003_DFLT;
9180 break;
9181 default:
9182 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
9183 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
9184 sc->sc_tipg = TIPG_10_100_80003_DFLT;
9185 break;
9186 }
9187 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
9188 }
9189 }
9190
9191 /*
9192 * wm_kmrn_readreg:
9193 *
9194 * Read a kumeran register
9195 */
9196 static int
9197 wm_kmrn_readreg(struct wm_softc *sc, int reg)
9198 {
9199 int rv;
9200
9201 if (sc->sc_type == WM_T_80003)
9202 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9203 else
9204 rv = sc->phy.acquire(sc);
9205 if (rv != 0) {
9206 aprint_error_dev(sc->sc_dev,
9207 "%s: failed to get semaphore\n", __func__);
9208 return 0;
9209 }
9210
9211 rv = wm_kmrn_readreg_locked(sc, reg);
9212
9213 if (sc->sc_type == WM_T_80003)
9214 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9215 else
9216 sc->phy.release(sc);
9217
9218 return rv;
9219 }
9220
9221 static int
9222 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
9223 {
9224 int rv;
9225
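	/*
	 * A Kumeran read is a single KUMCTRLSTA access: write the
	 * register offset with KUMCTRLSTA_REN (read enable) set, give
	 * the hardware a moment to latch the data, then read the low
	 * 16 bits back.  A write (wm_kmrn_writereg_locked()) is the
	 * same access with the data in place of KUMCTRLSTA_REN.
	 */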
9226 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
9227 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
9228 KUMCTRLSTA_REN);
9229 CSR_WRITE_FLUSH(sc);
9230 delay(2);
9231
9232 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
9233
9234 return rv;
9235 }
9236
9237 /*
9238 * wm_kmrn_writereg:
9239 *
9240 * Write a kumeran register
9241 */
9242 static void
9243 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
9244 {
9245 int rv;
9246
9247 if (sc->sc_type == WM_T_80003)
9248 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9249 else
9250 rv = sc->phy.acquire(sc);
9251 if (rv != 0) {
9252 aprint_error_dev(sc->sc_dev,
9253 "%s: failed to get semaphore\n", __func__);
9254 return;
9255 }
9256
9257 wm_kmrn_writereg_locked(sc, reg, val);
9258
9259 if (sc->sc_type == WM_T_80003)
9260 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9261 else
9262 sc->phy.release(sc);
9263 }
9264
9265 static void
9266 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
9267 {
9268
9269 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
9270 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
9271 (val & KUMCTRLSTA_MASK));
9272 }
9273
9274 /* SGMII related */
9275
9276 /*
9277 * wm_sgmii_uses_mdio
9278 *
9279 * Check whether the transaction is to the internal PHY or the external
9280 * MDIO interface. Return true if it's MDIO.
9281 */
9282 static bool
9283 wm_sgmii_uses_mdio(struct wm_softc *sc)
9284 {
9285 uint32_t reg;
9286 bool ismdio = false;
9287
9288 switch (sc->sc_type) {
9289 case WM_T_82575:
9290 case WM_T_82576:
9291 reg = CSR_READ(sc, WMREG_MDIC);
9292 ismdio = ((reg & MDIC_DEST) != 0);
9293 break;
9294 case WM_T_82580:
9295 case WM_T_I350:
9296 case WM_T_I354:
9297 case WM_T_I210:
9298 case WM_T_I211:
9299 reg = CSR_READ(sc, WMREG_MDICNFG);
9300 ismdio = ((reg & MDICNFG_DEST) != 0);
9301 break;
9302 default:
9303 break;
9304 }
9305
9306 return ismdio;
9307 }
9308
9309 /*
9310 * wm_sgmii_readreg: [mii interface function]
9311 *
9312 * Read a PHY register on the SGMII
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
9315 */
9316 static int
9317 wm_sgmii_readreg(device_t self, int phy, int reg)
9318 {
9319 struct wm_softc *sc = device_private(self);
9320 uint32_t i2ccmd;
9321 int i, rv;
9322
9323 if (sc->phy.acquire(sc)) {
9324 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9325 __func__);
9326 return 0;
9327 }
9328
9329 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9330 | (phy << I2CCMD_PHY_ADDR_SHIFT)
9331 | I2CCMD_OPCODE_READ;
9332 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9333
9334 /* Poll the ready bit */
9335 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9336 delay(50);
9337 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9338 if (i2ccmd & I2CCMD_READY)
9339 break;
9340 }
9341 if ((i2ccmd & I2CCMD_READY) == 0)
9342 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
9343 if ((i2ccmd & I2CCMD_ERROR) != 0)
9344 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9345
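	/* The data comes back over I2C MSB first; swap the two bytes. */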
9346 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
9347
9348 sc->phy.release(sc);
9349 return rv;
9350 }
9351
9352 /*
9353 * wm_sgmii_writereg: [mii interface function]
9354 *
9355 * Write a PHY register on the SGMII.
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
9358 */
9359 static void
9360 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
9361 {
9362 struct wm_softc *sc = device_private(self);
9363 uint32_t i2ccmd;
9364 int i;
9365 int val_swapped;
9366
9367 if (sc->phy.acquire(sc) != 0) {
9368 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9369 __func__);
9370 return;
9371 }
9372 /* Swap the data bytes for the I2C interface */
9373 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
9374 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9375 | (phy << I2CCMD_PHY_ADDR_SHIFT)
9376 | I2CCMD_OPCODE_WRITE | val_swapped;
9377 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9378
9379 /* Poll the ready bit */
9380 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9381 delay(50);
9382 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9383 if (i2ccmd & I2CCMD_READY)
9384 break;
9385 }
9386 if ((i2ccmd & I2CCMD_READY) == 0)
9387 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
9388 if ((i2ccmd & I2CCMD_ERROR) != 0)
9389 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9390
9391 sc->phy.release(sc);
9392 }
9393
9394 /* TBI related */
9395
9396 /*
9397 * wm_tbi_mediainit:
9398 *
9399 * Initialize media for use on 1000BASE-X devices.
9400 */
9401 static void
9402 wm_tbi_mediainit(struct wm_softc *sc)
9403 {
9404 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9405 const char *sep = "";
9406
9407 if (sc->sc_type < WM_T_82543)
9408 sc->sc_tipg = TIPG_WM_DFLT;
9409 else
9410 sc->sc_tipg = TIPG_LG_DFLT;
9411
9412 sc->sc_tbi_serdes_anegticks = 5;
9413
9414 /* Initialize our media structures */
9415 sc->sc_mii.mii_ifp = ifp;
9416 sc->sc_ethercom.ec_mii = &sc->sc_mii;
9417
9418 if ((sc->sc_type >= WM_T_82575)
9419 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
9420 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9421 wm_serdes_mediachange, wm_serdes_mediastatus);
9422 else
9423 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9424 wm_tbi_mediachange, wm_tbi_mediastatus);
9425
9426 /*
9427 * SWD Pins:
9428 *
9429 * 0 = Link LED (output)
9430 * 1 = Loss Of Signal (input)
9431 */
9432 sc->sc_ctrl |= CTRL_SWDPIO(0);
9433
9434 /* XXX Perhaps this is only for TBI */
9435 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9436 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
9437
9438 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9439 sc->sc_ctrl &= ~CTRL_LRST;
9440
9441 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9442
9443 #define ADD(ss, mm, dd) \
9444 do { \
9445 aprint_normal("%s%s", sep, ss); \
9446 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
9447 sep = ", "; \
9448 } while (/*CONSTCOND*/0)
9449
9450 aprint_normal_dev(sc->sc_dev, "");
9451
9452 /* Only 82545 is LX */
9453 if (sc->sc_type == WM_T_82545) {
9454 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
9455 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
9456 } else {
9457 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
9458 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
9459 }
9460 ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
9461 aprint_normal("\n");
9462
9463 #undef ADD
9464
9465 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
9466 }
9467
9468 /*
9469 * wm_tbi_mediachange: [ifmedia interface function]
9470 *
9471 * Set hardware to newly-selected media on a 1000BASE-X device.
9472 */
9473 static int
9474 wm_tbi_mediachange(struct ifnet *ifp)
9475 {
9476 struct wm_softc *sc = ifp->if_softc;
9477 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9478 uint32_t status;
9479 int i;
9480
9481 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9482 /* XXX need some work for >= 82571 and < 82575 */
9483 if (sc->sc_type < WM_T_82575)
9484 return 0;
9485 }
9486
9487 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9488 || (sc->sc_type >= WM_T_82575))
9489 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9490
9491 sc->sc_ctrl &= ~CTRL_LRST;
9492 sc->sc_txcw = TXCW_ANE;
9493 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9494 sc->sc_txcw |= TXCW_FD | TXCW_HD;
9495 else if (ife->ifm_media & IFM_FDX)
9496 sc->sc_txcw |= TXCW_FD;
9497 else
9498 sc->sc_txcw |= TXCW_HD;
9499
9500 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
9501 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
9502
9503 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
9504 device_xname(sc->sc_dev), sc->sc_txcw));
9505 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9506 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9507 CSR_WRITE_FLUSH(sc);
9508 delay(1000);
9509
9510 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
9511 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
9512
9513 	/*
9514 	 * On chips newer than the 82544, CTRL_SWDPIN(1) reads 1 when the optics
9515 	 * detect a signal; the 82544 and earlier invert the sense (0 = signal).
9516 	 */
9517 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
9518 /* Have signal; wait for the link to come up. */
9519 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
9520 delay(10000);
9521 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
9522 break;
9523 }
9524
9525 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
9526 device_xname(sc->sc_dev),i));
9527
9528 status = CSR_READ(sc, WMREG_STATUS);
9529 DPRINTF(WM_DEBUG_LINK,
9530 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
9531 device_xname(sc->sc_dev),status, STATUS_LU));
9532 if (status & STATUS_LU) {
9533 /* Link is up. */
9534 DPRINTF(WM_DEBUG_LINK,
9535 ("%s: LINK: set media -> link up %s\n",
9536 device_xname(sc->sc_dev),
9537 (status & STATUS_FD) ? "FDX" : "HDX"));
9538
9539 /*
9540 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
9541 			 * automatically, so re-read CTRL into sc->sc_ctrl.
9542 */
9543 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9544 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9545 sc->sc_fcrtl &= ~FCRTL_XONE;
9546 if (status & STATUS_FD)
9547 sc->sc_tctl |=
9548 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9549 else
9550 sc->sc_tctl |=
9551 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9552 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
9553 sc->sc_fcrtl |= FCRTL_XONE;
9554 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9555 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9556 WMREG_OLD_FCRTL : WMREG_FCRTL,
9557 sc->sc_fcrtl);
9558 sc->sc_tbi_linkup = 1;
9559 } else {
9560 if (i == WM_LINKUP_TIMEOUT)
9561 wm_check_for_link(sc);
9562 /* Link is down. */
9563 DPRINTF(WM_DEBUG_LINK,
9564 ("%s: LINK: set media -> link down\n",
9565 device_xname(sc->sc_dev)));
9566 sc->sc_tbi_linkup = 0;
9567 }
9568 } else {
9569 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
9570 device_xname(sc->sc_dev)));
9571 sc->sc_tbi_linkup = 0;
9572 }
9573
9574 wm_tbi_serdes_set_linkled(sc);
9575
9576 return 0;
9577 }
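
/*
 * Illustrative sketch (not part of the driver): a hypothetical predicate
 * capturing the signal-detect pin sense described above.  Note that the
 * actual check in wm_tbi_mediachange() additionally treats a zero pin as
 * "have signal" even on the newer chips.
 */
#if 0
static bool
wm_tbi_signal_present(const struct wm_softc *sc, uint32_t swdpin1)
{

	if (sc->sc_type > WM_T_82544)
		return swdpin1 != 0;	/* newer chips: active high */
	return swdpin1 == 0;		/* 82544 and earlier: active low */
}
#endif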
9578
9579 /*
9580 * wm_tbi_mediastatus: [ifmedia interface function]
9581 *
9582 * Get the current interface media status on a 1000BASE-X device.
9583 */
9584 static void
9585 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9586 {
9587 struct wm_softc *sc = ifp->if_softc;
9588 uint32_t ctrl, status;
9589
9590 ifmr->ifm_status = IFM_AVALID;
9591 ifmr->ifm_active = IFM_ETHER;
9592
9593 status = CSR_READ(sc, WMREG_STATUS);
9594 if ((status & STATUS_LU) == 0) {
9595 ifmr->ifm_active |= IFM_NONE;
9596 return;
9597 }
9598
9599 ifmr->ifm_status |= IFM_ACTIVE;
9600 /* Only 82545 is LX */
9601 if (sc->sc_type == WM_T_82545)
9602 ifmr->ifm_active |= IFM_1000_LX;
9603 else
9604 ifmr->ifm_active |= IFM_1000_SX;
9605 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
9606 ifmr->ifm_active |= IFM_FDX;
9607 else
9608 ifmr->ifm_active |= IFM_HDX;
9609 ctrl = CSR_READ(sc, WMREG_CTRL);
9610 if (ctrl & CTRL_RFCE)
9611 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
9612 if (ctrl & CTRL_TFCE)
9613 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
9614 }
9615
9616 /* XXX TBI only */
9617 static int
9618 wm_check_for_link(struct wm_softc *sc)
9619 {
9620 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9621 uint32_t rxcw;
9622 uint32_t ctrl;
9623 uint32_t status;
9624 uint32_t sig;
9625
9626 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9627 /* XXX need some work for >= 82571 */
9628 if (sc->sc_type >= WM_T_82571) {
9629 sc->sc_tbi_linkup = 1;
9630 return 0;
9631 }
9632 }
9633
9634 rxcw = CSR_READ(sc, WMREG_RXCW);
9635 ctrl = CSR_READ(sc, WMREG_CTRL);
9636 status = CSR_READ(sc, WMREG_STATUS);
9637
9638 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
9639
9640 DPRINTF(WM_DEBUG_LINK,
9641 ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
9642 device_xname(sc->sc_dev), __func__,
9643 ((ctrl & CTRL_SWDPIN(1)) == sig),
9644 ((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
9645
9646 /*
9647 * SWDPIN LU RXCW
9648 * 0 0 0
9649 * 0 0 1 (should not happen)
9650 * 0 1 0 (should not happen)
9651 * 0 1 1 (should not happen)
9652 	 *	1	0	0	Disable autonegotiation and force link up
9653 	 *	1	0	1	got /C/ but no link yet
9654 	 *	1	1	0	(link up)
9655 	 *	1	1	1	If IFM_AUTO, restart autonegotiation
9656 *
9657 */
9658 if (((ctrl & CTRL_SWDPIN(1)) == sig)
9659 && ((status & STATUS_LU) == 0)
9660 && ((rxcw & RXCW_C) == 0)) {
9661 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
9662 __func__));
9663 sc->sc_tbi_linkup = 0;
9664 /* Disable auto-negotiation in the TXCW register */
9665 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
9666
9667 /*
9668 * Force link-up and also force full-duplex.
9669 *
9670 		 * NOTE: the hardware has updated TFCE and RFCE in CTRL
9671 		 * automatically, so refresh sc->sc_ctrl from the register.
9672 */
9673 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
9674 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9675 } else if (((status & STATUS_LU) != 0)
9676 && ((rxcw & RXCW_C) != 0)
9677 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
9678 sc->sc_tbi_linkup = 1;
9679 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
9680 __func__));
9681 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9682 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
9683 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
9684 && ((rxcw & RXCW_C) != 0)) {
9685 DPRINTF(WM_DEBUG_LINK, ("/C/"));
9686 } else {
9687 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
9688 status));
9689 }
9690
9691 return 0;
9692 }
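
/*
 * Illustrative sketch (not part of the driver): the decision table above
 * folded into a hypothetical classifier.  Only the "signal present" rows
 * lead to an action; all remaining combinations are left alone.
 */
#if 0
enum wm_tbi_action { WM_TBI_NOOP, WM_TBI_FORCE_LINK, WM_TBI_RESTART_ANEG };

static enum wm_tbi_action
wm_tbi_classify(bool sig, bool lu, bool rxcw_c, bool autonego)
{

	if (sig && !lu && !rxcw_c)
		return WM_TBI_FORCE_LINK;	/* force link up, full duplex */
	if (lu && rxcw_c && autonego)
		return WM_TBI_RESTART_ANEG;	/* drop SLU, restart autoneg */
	return WM_TBI_NOOP;	/* e.g. got /C/ only, or already linked up */
}
#endif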
9693
9694 /*
9695 * wm_tbi_tick:
9696 *
9697 * Check the link on TBI devices.
9698 * This function acts as mii_tick().
9699 */
9700 static void
9701 wm_tbi_tick(struct wm_softc *sc)
9702 {
9703 struct mii_data *mii = &sc->sc_mii;
9704 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9705 uint32_t status;
9706
9707 KASSERT(WM_CORE_LOCKED(sc));
9708
9709 status = CSR_READ(sc, WMREG_STATUS);
9710
9711 /* XXX is this needed? */
9712 (void)CSR_READ(sc, WMREG_RXCW);
9713 (void)CSR_READ(sc, WMREG_CTRL);
9714
9715 /* set link status */
9716 if ((status & STATUS_LU) == 0) {
9717 DPRINTF(WM_DEBUG_LINK,
9718 ("%s: LINK: checklink -> down\n",
9719 device_xname(sc->sc_dev)));
9720 sc->sc_tbi_linkup = 0;
9721 } else if (sc->sc_tbi_linkup == 0) {
9722 DPRINTF(WM_DEBUG_LINK,
9723 ("%s: LINK: checklink -> up %s\n",
9724 device_xname(sc->sc_dev),
9725 (status & STATUS_FD) ? "FDX" : "HDX"));
9726 sc->sc_tbi_linkup = 1;
9727 sc->sc_tbi_serdes_ticks = 0;
9728 }
9729
9730 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
9731 goto setled;
9732
9733 if ((status & STATUS_LU) == 0) {
9734 sc->sc_tbi_linkup = 0;
9735 /* If the timer expired, retry autonegotiation */
9736 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9737 && (++sc->sc_tbi_serdes_ticks
9738 >= sc->sc_tbi_serdes_anegticks)) {
9739 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9740 sc->sc_tbi_serdes_ticks = 0;
9741 /*
9742 * Reset the link, and let autonegotiation do
9743 * its thing
9744 */
9745 sc->sc_ctrl |= CTRL_LRST;
9746 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9747 CSR_WRITE_FLUSH(sc);
9748 delay(1000);
9749 sc->sc_ctrl &= ~CTRL_LRST;
9750 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9751 CSR_WRITE_FLUSH(sc);
9752 delay(1000);
9753 CSR_WRITE(sc, WMREG_TXCW,
9754 sc->sc_txcw & ~TXCW_ANE);
9755 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9756 }
9757 }
9758
9759 setled:
9760 wm_tbi_serdes_set_linkled(sc);
9761 }
9762
9763 /* SERDES related */
9764 static void
9765 wm_serdes_power_up_link_82575(struct wm_softc *sc)
9766 {
9767 uint32_t reg;
9768
9769 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9770 && ((sc->sc_flags & WM_F_SGMII) == 0))
9771 return;
9772
9773 reg = CSR_READ(sc, WMREG_PCS_CFG);
9774 reg |= PCS_CFG_PCS_EN;
9775 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
9776
9777 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9778 reg &= ~CTRL_EXT_SWDPIN(3);
9779 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9780 CSR_WRITE_FLUSH(sc);
9781 }
9782
9783 static int
9784 wm_serdes_mediachange(struct ifnet *ifp)
9785 {
9786 struct wm_softc *sc = ifp->if_softc;
9787 bool pcs_autoneg = true; /* XXX */
9788 uint32_t ctrl_ext, pcs_lctl, reg;
9789
9790 /* XXX Currently, this function is not called on 8257[12] */
9791 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9792 || (sc->sc_type >= WM_T_82575))
9793 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9794
9795 wm_serdes_power_up_link_82575(sc);
9796
9797 sc->sc_ctrl |= CTRL_SLU;
9798
9799 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
9800 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
9801
9802 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9803 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
9804 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
9805 case CTRL_EXT_LINK_MODE_SGMII:
9806 pcs_autoneg = true;
9807 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
9808 break;
9809 case CTRL_EXT_LINK_MODE_1000KX:
9810 pcs_autoneg = false;
9811 /* FALLTHROUGH */
9812 default:
9813 if ((sc->sc_type == WM_T_82575)
9814 || (sc->sc_type == WM_T_82576)) {
9815 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
9816 pcs_autoneg = false;
9817 }
9818 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
9819 | CTRL_FRCFDX;
9820 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
9821 }
9822 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9823
9824 if (pcs_autoneg) {
9825 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
9826 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
9827
9828 reg = CSR_READ(sc, WMREG_PCS_ANADV);
9829 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
9830 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
9831 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
9832 } else
9833 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
9834
9835 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
9836 
9838 return 0;
9839 }
9840
9841 static void
9842 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9843 {
9844 struct wm_softc *sc = ifp->if_softc;
9845 struct mii_data *mii = &sc->sc_mii;
9846 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9847 uint32_t pcs_adv, pcs_lpab, reg;
9848
9849 ifmr->ifm_status = IFM_AVALID;
9850 ifmr->ifm_active = IFM_ETHER;
9851
9852 /* Check PCS */
9853 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9854 if ((reg & PCS_LSTS_LINKOK) == 0) {
9855 ifmr->ifm_active |= IFM_NONE;
9856 sc->sc_tbi_linkup = 0;
9857 goto setled;
9858 }
9859
9860 sc->sc_tbi_linkup = 1;
9861 ifmr->ifm_status |= IFM_ACTIVE;
9862 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
9863 if ((reg & PCS_LSTS_FDX) != 0)
9864 ifmr->ifm_active |= IFM_FDX;
9865 else
9866 ifmr->ifm_active |= IFM_HDX;
9867 mii->mii_media_active &= ~IFM_ETH_FMASK;
9868 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
9869 /* Check flow */
9870 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9871 if ((reg & PCS_LSTS_AN_COMP) == 0) {
9872 DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
9873 goto setled;
9874 }
9875 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
9876 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
9877 DPRINTF(WM_DEBUG_LINK,
9878 ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
9879 if ((pcs_adv & TXCW_SYM_PAUSE)
9880 && (pcs_lpab & TXCW_SYM_PAUSE)) {
9881 mii->mii_media_active |= IFM_FLOW
9882 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
9883 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
9884 && (pcs_adv & TXCW_ASYM_PAUSE)
9885 && (pcs_lpab & TXCW_SYM_PAUSE)
9886 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9887 mii->mii_media_active |= IFM_FLOW
9888 | IFM_ETH_TXPAUSE;
9889 } else if ((pcs_adv & TXCW_SYM_PAUSE)
9890 && (pcs_adv & TXCW_ASYM_PAUSE)
9891 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
9892 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9893 mii->mii_media_active |= IFM_FLOW
9894 | IFM_ETH_RXPAUSE;
9895 } else {
9896 }
9897 }
9898 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
9899 | (mii->mii_media_active & IFM_ETH_FMASK);
9900 setled:
9901 wm_tbi_serdes_set_linkled(sc);
9902 }
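
/*
 * Illustrative sketch (not part of the driver): the chain of pause-bit
 * tests above is the standard IEEE 802.3 Annex 28B flow control
 * resolution.  The same logic as a hypothetical standalone helper:
 */
#if 0
static void
wm_resolve_pause(uint32_t adv, uint32_t lpab, bool *txpause, bool *rxpause)
{

	*txpause = *rxpause = false;
	if ((adv & TXCW_SYM_PAUSE) && (lpab & TXCW_SYM_PAUSE)) {
		/* Both ends advertise symmetric pause */
		*txpause = *rxpause = true;
	} else if (!(adv & TXCW_SYM_PAUSE) && (adv & TXCW_ASYM_PAUSE) &&
	    (lpab & TXCW_SYM_PAUSE) && (lpab & TXCW_ASYM_PAUSE)) {
		/* We only send pause frames; the partner honors them */
		*txpause = true;
	} else if ((adv & TXCW_SYM_PAUSE) && (adv & TXCW_ASYM_PAUSE) &&
	    !(lpab & TXCW_SYM_PAUSE) && (lpab & TXCW_ASYM_PAUSE)) {
		/* The partner only sends pause frames; we honor them */
		*rxpause = true;
	}
}
#endif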
9903
9904 /*
9905 * wm_serdes_tick:
9906 *
9907 * Check the link on serdes devices.
9908 */
9909 static void
9910 wm_serdes_tick(struct wm_softc *sc)
9911 {
9912 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9913 struct mii_data *mii = &sc->sc_mii;
9914 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9915 uint32_t reg;
9916
9917 KASSERT(WM_CORE_LOCKED(sc));
9918
9919 mii->mii_media_status = IFM_AVALID;
9920 mii->mii_media_active = IFM_ETHER;
9921
9922 /* Check PCS */
9923 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9924 if ((reg & PCS_LSTS_LINKOK) != 0) {
9925 mii->mii_media_status |= IFM_ACTIVE;
9926 sc->sc_tbi_linkup = 1;
9927 sc->sc_tbi_serdes_ticks = 0;
9928 mii->mii_media_active |= IFM_1000_SX; /* XXX */
9929 if ((reg & PCS_LSTS_FDX) != 0)
9930 mii->mii_media_active |= IFM_FDX;
9931 else
9932 mii->mii_media_active |= IFM_HDX;
9933 } else {
9934 mii->mii_media_status |= IFM_NONE;
9935 sc->sc_tbi_linkup = 0;
9936 /* If the timer expired, retry autonegotiation */
9937 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9938 && (++sc->sc_tbi_serdes_ticks
9939 >= sc->sc_tbi_serdes_anegticks)) {
9940 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9941 sc->sc_tbi_serdes_ticks = 0;
9942 /* XXX */
9943 wm_serdes_mediachange(ifp);
9944 }
9945 }
9946
9947 wm_tbi_serdes_set_linkled(sc);
9948 }
9949
9950 /* SFP related */
9951
9952 static int
9953 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
9954 {
9955 uint32_t i2ccmd;
9956 int i;
9957
9958 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
9959 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9960
9961 /* Poll the ready bit */
9962 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9963 delay(50);
9964 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9965 if (i2ccmd & I2CCMD_READY)
9966 break;
9967 }
9968 if ((i2ccmd & I2CCMD_READY) == 0)
9969 return -1;
9970 if ((i2ccmd & I2CCMD_ERROR) != 0)
9971 return -1;
9972
9973 *data = i2ccmd & 0x00ff;
9974
9975 return 0;
9976 }
9977
9978 static uint32_t
9979 wm_sfp_get_media_type(struct wm_softc *sc)
9980 {
9981 uint32_t ctrl_ext;
9982 uint8_t val = 0;
9983 int timeout = 3;
9984 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
9985 int rv = -1;
9986
9987 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9988 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
9989 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
9990 CSR_WRITE_FLUSH(sc);
9991
9992 /* Read SFP module data */
9993 while (timeout) {
9994 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
9995 if (rv == 0)
9996 break;
9997 delay(100*1000); /* XXX too big */
9998 timeout--;
9999 }
10000 if (rv != 0)
10001 goto out;
10002 switch (val) {
10003 case SFF_SFP_ID_SFF:
10004 aprint_normal_dev(sc->sc_dev,
10005 "Module/Connector soldered to board\n");
10006 break;
10007 case SFF_SFP_ID_SFP:
10008 aprint_normal_dev(sc->sc_dev, "SFP\n");
10009 break;
10010 case SFF_SFP_ID_UNKNOWN:
10011 goto out;
10012 default:
10013 break;
10014 }
10015
10016 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
10017 if (rv != 0) {
10018 goto out;
10019 }
10020
10021 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
10022 mediatype = WM_MEDIATYPE_SERDES;
10023 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
10024 sc->sc_flags |= WM_F_SGMII;
10025 mediatype = WM_MEDIATYPE_COPPER;
10026 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
10027 sc->sc_flags |= WM_F_SGMII;
10028 mediatype = WM_MEDIATYPE_SERDES;
10029 }
10030
10031 out:
10032 /* Restore I2C interface setting */
10033 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
10034
10035 return mediatype;
10036 }
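
/*
 * Background note (an assumption based on SFF-8472, not taken from this
 * file): SFF_SFP_ID_OFF is the identifier byte at module EEPROM offset 0
 * (0x03 means SFP) and SFF_SFP_ETH_FLAGS_OFF is the Ethernet compliance
 * codes byte at offset 6, whose bits include 1000BASE-SX, 1000BASE-LX,
 * 1000BASE-T and 100BASE-FX as tested above.
 */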
10037 /*
10038 * NVM related.
10039 * Microwire, SPI (w/wo EERD) and Flash.
10040 */
10041
10042 /* Both SPI and Microwire */
10043
10044 /*
10045 * wm_eeprom_sendbits:
10046 *
10047 * Send a series of bits to the EEPROM.
10048 */
10049 static void
10050 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
10051 {
10052 uint32_t reg;
10053 int x;
10054
10055 reg = CSR_READ(sc, WMREG_EECD);
10056
10057 for (x = nbits; x > 0; x--) {
10058 if (bits & (1U << (x - 1)))
10059 reg |= EECD_DI;
10060 else
10061 reg &= ~EECD_DI;
10062 CSR_WRITE(sc, WMREG_EECD, reg);
10063 CSR_WRITE_FLUSH(sc);
10064 delay(2);
10065 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
10066 CSR_WRITE_FLUSH(sc);
10067 delay(2);
10068 CSR_WRITE(sc, WMREG_EECD, reg);
10069 CSR_WRITE_FLUSH(sc);
10070 delay(2);
10071 }
10072 }
10073
10074 /*
10075 * wm_eeprom_recvbits:
10076 *
10077 * Receive a series of bits from the EEPROM.
10078 */
10079 static void
10080 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
10081 {
10082 uint32_t reg, val;
10083 int x;
10084
10085 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
10086
10087 val = 0;
10088 for (x = nbits; x > 0; x--) {
10089 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
10090 CSR_WRITE_FLUSH(sc);
10091 delay(2);
10092 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
10093 val |= (1U << (x - 1));
10094 CSR_WRITE(sc, WMREG_EECD, reg);
10095 CSR_WRITE_FLUSH(sc);
10096 delay(2);
10097 }
10098 *valp = val;
10099 }
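
/*
 * Illustrative sketch (not part of the driver): both helpers above move
 * bits MSB first, one SK pulse per bit.  A hypothetical pin-level model
 * of the receive side, with the pin callbacks standing in for the EECD
 * register accesses:
 */
#if 0
static uint32_t
wm_model_recvbits(bool (*sample_do)(void *), void (*pulse_sk)(void *),
    void *cookie, int nbits)
{
	uint32_t val = 0;
	int x;

	for (x = nbits; x > 0; x--) {
		pulse_sk(cookie);		/* clock the next bit out */
		if (sample_do(cookie))		/* read DO */
			val |= 1U << (x - 1);	/* MSB arrives first */
	}
	return val;
}
#endif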
10100
10101 /* Microwire */
10102
10103 /*
10104 * wm_nvm_read_uwire:
10105 *
10106 * Read a word from the EEPROM using the MicroWire protocol.
10107 */
10108 static int
10109 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10110 {
10111 uint32_t reg, val;
10112 int i;
10113
10114 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10115 device_xname(sc->sc_dev), __func__));
10116
10117 for (i = 0; i < wordcnt; i++) {
10118 /* Clear SK and DI. */
10119 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
10120 CSR_WRITE(sc, WMREG_EECD, reg);
10121
10122 /*
10123 * XXX: workaround for a bug in qemu-0.12.x and prior
10124 * and Xen.
10125 *
10126 		 * We use this workaround only for the 82540 because qemu's
10127 		 * e1000 acts as an 82540.
10128 */
10129 if (sc->sc_type == WM_T_82540) {
10130 reg |= EECD_SK;
10131 CSR_WRITE(sc, WMREG_EECD, reg);
10132 reg &= ~EECD_SK;
10133 CSR_WRITE(sc, WMREG_EECD, reg);
10134 CSR_WRITE_FLUSH(sc);
10135 delay(2);
10136 }
10137 /* XXX: end of workaround */
10138
10139 /* Set CHIP SELECT. */
10140 reg |= EECD_CS;
10141 CSR_WRITE(sc, WMREG_EECD, reg);
10142 CSR_WRITE_FLUSH(sc);
10143 delay(2);
10144
10145 /* Shift in the READ command. */
10146 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
10147
10148 /* Shift in address. */
10149 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
10150
10151 /* Shift out the data. */
10152 wm_eeprom_recvbits(sc, &val, 16);
10153 data[i] = val & 0xffff;
10154
10155 /* Clear CHIP SELECT. */
10156 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
10157 CSR_WRITE(sc, WMREG_EECD, reg);
10158 CSR_WRITE_FLUSH(sc);
10159 delay(2);
10160 }
10161
10162 return 0;
10163 }
10164
10165 /* SPI */
10166
10167 /*
10168 * Set SPI and FLASH related information from the EECD register.
10169 * For 82541 and 82547, the word size is taken from EEPROM.
10170 */
10171 static int
10172 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
10173 {
10174 int size;
10175 uint32_t reg;
10176 uint16_t data;
10177
10178 reg = CSR_READ(sc, WMREG_EECD);
10179 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
10180
10181 /* Read the size of NVM from EECD by default */
10182 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
10183 switch (sc->sc_type) {
10184 case WM_T_82541:
10185 case WM_T_82541_2:
10186 case WM_T_82547:
10187 case WM_T_82547_2:
10188 /* Set dummy value to access EEPROM */
10189 sc->sc_nvm_wordsize = 64;
10190 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
10191 reg = data;
10192 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
10193 if (size == 0)
10194 size = 6; /* 64 word size */
10195 else
10196 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
10197 break;
10198 case WM_T_80003:
10199 case WM_T_82571:
10200 case WM_T_82572:
10201 case WM_T_82573: /* SPI case */
10202 case WM_T_82574: /* SPI case */
10203 case WM_T_82583: /* SPI case */
10204 size += NVM_WORD_SIZE_BASE_SHIFT;
10205 if (size > 14)
10206 size = 14;
10207 break;
10208 case WM_T_82575:
10209 case WM_T_82576:
10210 case WM_T_82580:
10211 case WM_T_I350:
10212 case WM_T_I354:
10213 case WM_T_I210:
10214 case WM_T_I211:
10215 size += NVM_WORD_SIZE_BASE_SHIFT;
10216 if (size > 15)
10217 size = 15;
10218 break;
10219 default:
10220 aprint_error_dev(sc->sc_dev,
10221 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
10222 		return -1;
10224 }
10225
10226 sc->sc_nvm_wordsize = 1 << size;
10227
10228 return 0;
10229 }
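
/*
 * Worked example (illustrative, not part of the driver): the EECD size
 * field is a power-of-two exponent.  Assuming the customary
 * NVM_WORD_SIZE_BASE_SHIFT of 6, a field value of 2 on an 82571 becomes
 * an exponent of 8, so sc_nvm_wordsize = 1 << 8 = 256 words; the exponent
 * is capped at 14 for that family, i.e. at most 16384 words.
 */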
10230
10231 /*
10232 * wm_nvm_ready_spi:
10233 *
10234 * Wait for a SPI EEPROM to be ready for commands.
10235 */
10236 static int
10237 wm_nvm_ready_spi(struct wm_softc *sc)
10238 {
10239 uint32_t val;
10240 int usec;
10241
10242 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10243 device_xname(sc->sc_dev), __func__));
10244
10245 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
10246 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
10247 wm_eeprom_recvbits(sc, &val, 8);
10248 if ((val & SPI_SR_RDY) == 0)
10249 break;
10250 }
10251 if (usec >= SPI_MAX_RETRIES) {
10252 aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
10253 return 1;
10254 }
10255 return 0;
10256 }
10257
10258 /*
10259 * wm_nvm_read_spi:
10260 *
10261  * Read a word from the EEPROM using the SPI protocol.
10262 */
10263 static int
10264 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10265 {
10266 uint32_t reg, val;
10267 int i;
10268 uint8_t opc;
10269
10270 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10271 device_xname(sc->sc_dev), __func__));
10272
10273 /* Clear SK and CS. */
10274 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
10275 CSR_WRITE(sc, WMREG_EECD, reg);
10276 CSR_WRITE_FLUSH(sc);
10277 delay(2);
10278
10279 if (wm_nvm_ready_spi(sc))
10280 return 1;
10281
10282 /* Toggle CS to flush commands. */
10283 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
10284 CSR_WRITE_FLUSH(sc);
10285 delay(2);
10286 CSR_WRITE(sc, WMREG_EECD, reg);
10287 CSR_WRITE_FLUSH(sc);
10288 delay(2);
10289
10290 opc = SPI_OPC_READ;
10291 if (sc->sc_nvm_addrbits == 8 && word >= 128)
10292 opc |= SPI_OPC_A8;
10293
10294 wm_eeprom_sendbits(sc, opc, 8);
10295 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
10296
10297 for (i = 0; i < wordcnt; i++) {
10298 wm_eeprom_recvbits(sc, &val, 16);
10299 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
10300 }
10301
10302 /* Raise CS and clear SK. */
10303 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
10304 CSR_WRITE(sc, WMREG_EECD, reg);
10305 CSR_WRITE_FLUSH(sc);
10306 delay(2);
10307
10308 return 0;
10309 }
10310
10311 /* Reading via the EERD register */
10312
10313 static int
10314 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
10315 {
10316 uint32_t attempts = 100000;
10317 uint32_t i, reg = 0;
10318 int32_t done = -1;
10319
10320 for (i = 0; i < attempts; i++) {
10321 reg = CSR_READ(sc, rw);
10322
10323 if (reg & EERD_DONE) {
10324 done = 0;
10325 break;
10326 }
10327 delay(5);
10328 }
10329
10330 return done;
10331 }
10332
10333 static int
10334 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
10335 uint16_t *data)
10336 {
10337 int i, eerd = 0;
10338 int error = 0;
10339
10340 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10341 device_xname(sc->sc_dev), __func__));
10342
10343 for (i = 0; i < wordcnt; i++) {
10344 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
10345
10346 CSR_WRITE(sc, WMREG_EERD, eerd);
10347 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
10348 if (error != 0)
10349 break;
10350
10351 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
10352 }
10353
10354 return error;
10355 }
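
/*
 * Illustrative sketch (not part of the driver): EERD uses the common
 * "kick, poll, read" register idiom.  A hypothetical single-word variant
 * of the loop above:
 */
#if 0
static int
wm_model_eerd_read_one(struct wm_softc *sc, int addr, uint16_t *data)
{

	CSR_WRITE(sc, WMREG_EERD, (addr << EERD_ADDR_SHIFT) | EERD_START);
	if (wm_poll_eerd_eewr_done(sc, WMREG_EERD) != 0)
		return -1;	/* DONE never set within ~500ms */
	*data = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
	return 0;
}
#endif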
10356
10357 /* Flash */
10358
10359 static int
10360 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
10361 {
10362 uint32_t eecd;
10363 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
10364 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
10365 uint8_t sig_byte = 0;
10366
10367 switch (sc->sc_type) {
10368 case WM_T_PCH_SPT:
10369 /*
10370 * In SPT, read from the CTRL_EXT reg instead of accessing the
10371 * sector valid bits from the NVM.
10372 */
10373 *bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
10374 if ((*bank == 0) || (*bank == 1)) {
10375 aprint_error_dev(sc->sc_dev,
10376 "%s: no valid NVM bank present (%u)\n", __func__,
10377 *bank);
10378 return -1;
10379 } else {
10380 *bank = *bank - 2;
10381 return 0;
10382 }
10383 case WM_T_ICH8:
10384 case WM_T_ICH9:
10385 eecd = CSR_READ(sc, WMREG_EECD);
10386 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
10387 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
10388 return 0;
10389 }
10390 /* FALLTHROUGH */
10391 default:
10392 /* Default to 0 */
10393 *bank = 0;
10394
10395 /* Check bank 0 */
10396 wm_read_ich8_byte(sc, act_offset, &sig_byte);
10397 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10398 *bank = 0;
10399 return 0;
10400 }
10401
10402 /* Check bank 1 */
10403 wm_read_ich8_byte(sc, act_offset + bank1_offset,
10404 &sig_byte);
10405 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10406 *bank = 1;
10407 return 0;
10408 }
10409 }
10410
10411 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
10412 device_xname(sc->sc_dev)));
10413 return -1;
10414 }
10415
10416 /******************************************************************************
10417 * This function does initial flash setup so that a new read/write/erase cycle
10418 * can be started.
10419 *
10420 * sc - The pointer to the hw structure
10421 ****************************************************************************/
10422 static int32_t
10423 wm_ich8_cycle_init(struct wm_softc *sc)
10424 {
10425 uint16_t hsfsts;
10426 int32_t error = 1;
10427 int32_t i = 0;
10428
10429 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10430
10431 	/* Check that the Flash Descriptor Valid bit is set in HW status */
10432 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
10433 return error;
10434 }
10435
10436 	/* Clear FCERR in HW status by writing a 1 */
10437 	/* Clear DAEL in HW status by writing a 1 */
10438 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
10439
10440 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10441
10442 	/*
10443 	 * Either we should have a hardware SPI cycle-in-progress bit to
10444 	 * check against before starting a new cycle, or the FDONE bit
10445 	 * should read as 1 after a hardware reset, so that it can be used
10446 	 * to tell whether a cycle is in progress or has completed.  We
10447 	 * should also have some software semaphore mechanism guarding
10448 	 * FDONE or the cycle-in-progress bit, so that accesses to those
10449 	 * bits by two threads are serialized, and so that two threads
10450 	 * cannot start a cycle at the same time.
10451 	 */
10452
10453 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10454 /*
10455 * There is no cycle running at present, so we can start a
10456 * cycle
10457 */
10458
10459 /* Begin by setting Flash Cycle Done. */
10460 hsfsts |= HSFSTS_DONE;
10461 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10462 error = 0;
10463 } else {
10464 /*
10465 		 * Otherwise, poll for a while so the current cycle has a
10466 * chance to end before giving up.
10467 */
10468 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
10469 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10470 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10471 error = 0;
10472 break;
10473 }
10474 delay(1);
10475 }
10476 if (error == 0) {
10477 /*
10478 			 * The previous cycle completed before our timeout,
10479 			 * so now set the Flash Cycle Done bit.
10480 */
10481 hsfsts |= HSFSTS_DONE;
10482 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10483 }
10484 }
10485 return error;
10486 }
10487
10488 /******************************************************************************
10489 * This function starts a flash cycle and waits for its completion
10490 *
10491 * sc - The pointer to the hw structure
10492 ****************************************************************************/
10493 static int32_t
10494 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
10495 {
10496 uint16_t hsflctl;
10497 uint16_t hsfsts;
10498 int32_t error = 1;
10499 uint32_t i = 0;
10500
10501 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
10502 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10503 hsflctl |= HSFCTL_GO;
10504 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10505
10506 /* Wait till FDONE bit is set to 1 */
10507 do {
10508 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10509 if (hsfsts & HSFSTS_DONE)
10510 break;
10511 delay(1);
10512 i++;
10513 } while (i < timeout);
10514 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
10515 error = 0;
10516
10517 return error;
10518 }
10519
10520 /******************************************************************************
10521 * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
10522 *
10523 * sc - The pointer to the hw structure
10524 * index - The index of the byte or word to read.
10525 * size - Size of data to read, 1=byte 2=word, 4=dword
10526 * data - Pointer to the word to store the value read.
10527 *****************************************************************************/
10528 static int32_t
10529 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
10530 uint32_t size, uint32_t *data)
10531 {
10532 uint16_t hsfsts;
10533 uint16_t hsflctl;
10534 uint32_t flash_linear_address;
10535 uint32_t flash_data = 0;
10536 int32_t error = 1;
10537 int32_t count = 0;
10538
10539 	if (size < 1 || size > 4 || data == NULL ||
10540 index > ICH_FLASH_LINEAR_ADDR_MASK)
10541 return error;
10542
10543 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
10544 sc->sc_ich8_flash_base;
10545
10546 do {
10547 delay(1);
10548 /* Steps */
10549 error = wm_ich8_cycle_init(sc);
10550 if (error)
10551 break;
10552
10553 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10554 		/* The byte count field holds size - 1 (0 = 1 byte, 1 = 2 bytes) */
10555 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
10556 & HSFCTL_BCOUNT_MASK;
10557 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
10558 if (sc->sc_type == WM_T_PCH_SPT) {
10559 /*
10560 			 * In SPT, this register is in LAN memory space, not
10561 			 * flash memory, so only 32-bit access is supported.
10562 */
10563 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
10564 (uint32_t)hsflctl);
10565 } else
10566 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10567
10568 /*
10569 * Write the last 24 bits of index into Flash Linear address
10570 * field in Flash Address
10571 */
10572 /* TODO: TBD maybe check the index against the size of flash */
10573
10574 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
10575
10576 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
10577
10578 		/*
10579 		 * Check whether FCERR is set.  If it is, clear it and retry
10580 		 * the whole sequence up to a few more times; otherwise read
10581 		 * the result out of Flash Data0, least significant byte
10582 		 * first.
10583 		 */
10584 if (error == 0) {
10585 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
10586 if (size == 1)
10587 *data = (uint8_t)(flash_data & 0x000000FF);
10588 else if (size == 2)
10589 *data = (uint16_t)(flash_data & 0x0000FFFF);
10590 else if (size == 4)
10591 *data = (uint32_t)flash_data;
10592 break;
10593 } else {
10594 /*
10595 * If we've gotten here, then things are probably
10596 * completely hosed, but if the error condition is
10597 * detected, it won't hurt to give it another try...
10598 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
10599 */
10600 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10601 if (hsfsts & HSFSTS_ERR) {
10602 /* Repeat for some time before giving up. */
10603 continue;
10604 } else if ((hsfsts & HSFSTS_DONE) == 0)
10605 break;
10606 }
10607 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
10608
10609 return error;
10610 }
10611
10612 /******************************************************************************
10613 * Reads a single byte from the NVM using the ICH8 flash access registers.
10614 *
10615 * sc - pointer to wm_hw structure
10616 * index - The index of the byte to read.
10617 * data - Pointer to a byte to store the value read.
10618 *****************************************************************************/
10619 static int32_t
10620 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
10621 {
10622 int32_t status;
10623 uint32_t word = 0;
10624
10625 status = wm_read_ich8_data(sc, index, 1, &word);
10626 if (status == 0)
10627 *data = (uint8_t)word;
10628 else
10629 *data = 0;
10630
10631 return status;
10632 }
10633
10634 /******************************************************************************
10635 * Reads a word from the NVM using the ICH8 flash access registers.
10636 *
10637 * sc - pointer to wm_hw structure
10638 * index - The starting byte index of the word to read.
10639 * data - Pointer to a word to store the value read.
10640 *****************************************************************************/
10641 static int32_t
10642 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
10643 {
10644 int32_t status;
10645 uint32_t word = 0;
10646
10647 status = wm_read_ich8_data(sc, index, 2, &word);
10648 if (status == 0)
10649 *data = (uint16_t)word;
10650 else
10651 *data = 0;
10652
10653 return status;
10654 }
10655
10656 /******************************************************************************
10657 * Reads a dword from the NVM using the ICH8 flash access registers.
10658 *
10659 * sc - pointer to wm_hw structure
10660  * index - The starting byte index of the dword to read.
10661  * data - Pointer to a dword to store the value read.
10662 *****************************************************************************/
10663 static int32_t
10664 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
10665 {
10666 int32_t status;
10667
10668 status = wm_read_ich8_data(sc, index, 4, data);
10669 return status;
10670 }
10671
10672 /******************************************************************************
10673 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
10674 * register.
10675 *
10676 * sc - Struct containing variables accessed by shared code
10677 * offset - offset of word in the EEPROM to read
10678 * data - word read from the EEPROM
10679 * words - number of words to read
10680 *****************************************************************************/
10681 static int
10682 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
10683 {
10684 int32_t error = 0;
10685 uint32_t flash_bank = 0;
10686 uint32_t act_offset = 0;
10687 uint32_t bank_offset = 0;
10688 uint16_t word = 0;
10689 uint16_t i = 0;
10690
10691 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10692 device_xname(sc->sc_dev), __func__));
10693
10694 /*
10695 * We need to know which is the valid flash bank. In the event
10696 * that we didn't allocate eeprom_shadow_ram, we may not be
10697 * managing flash_bank. So it cannot be trusted and needs
10698 * to be updated with each read.
10699 */
10700 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10701 if (error) {
10702 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10703 device_xname(sc->sc_dev)));
10704 flash_bank = 0;
10705 }
10706
10707 /*
10708 * Adjust offset appropriately if we're on bank 1 - adjust for word
10709 * size
10710 */
10711 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10712
10713 error = wm_get_swfwhw_semaphore(sc);
10714 if (error) {
10715 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10716 __func__);
10717 return error;
10718 }
10719
10720 for (i = 0; i < words; i++) {
10721 /* The NVM part needs a byte offset, hence * 2 */
10722 act_offset = bank_offset + ((offset + i) * 2);
10723 error = wm_read_ich8_word(sc, act_offset, &word);
10724 if (error) {
10725 aprint_error_dev(sc->sc_dev,
10726 "%s: failed to read NVM\n", __func__);
10727 break;
10728 }
10729 data[i] = word;
10730 }
10731
10732 wm_put_swfwhw_semaphore(sc);
10733 return error;
10734 }
10735
10736 /******************************************************************************
10737 * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
10738 * register.
10739 *
10740 * sc - Struct containing variables accessed by shared code
10741 * offset - offset of word in the EEPROM to read
10742 * data - word read from the EEPROM
10743 * words - number of words to read
10744 *****************************************************************************/
10745 static int
10746 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
10747 {
10748 int32_t error = 0;
10749 uint32_t flash_bank = 0;
10750 uint32_t act_offset = 0;
10751 uint32_t bank_offset = 0;
10752 uint32_t dword = 0;
10753 uint16_t i = 0;
10754
10755 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10756 device_xname(sc->sc_dev), __func__));
10757
10758 /*
10759 * We need to know which is the valid flash bank. In the event
10760 * that we didn't allocate eeprom_shadow_ram, we may not be
10761 * managing flash_bank. So it cannot be trusted and needs
10762 * to be updated with each read.
10763 */
10764 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10765 if (error) {
10766 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10767 device_xname(sc->sc_dev)));
10768 flash_bank = 0;
10769 }
10770
10771 /*
10772 * Adjust offset appropriately if we're on bank 1 - adjust for word
10773 * size
10774 */
10775 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10776
10777 error = wm_get_swfwhw_semaphore(sc);
10778 if (error) {
10779 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10780 __func__);
10781 return error;
10782 }
10783
10784 for (i = 0; i < words; i++) {
10785 /* The NVM part needs a byte offset, hence * 2 */
10786 act_offset = bank_offset + ((offset + i) * 2);
10787 /* but we must read dword aligned, so mask ... */
10788 error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
10789 if (error) {
10790 aprint_error_dev(sc->sc_dev,
10791 "%s: failed to read NVM\n", __func__);
10792 break;
10793 }
10794 /* ... and pick out low or high word */
10795 if ((act_offset & 0x2) == 0)
10796 data[i] = (uint16_t)(dword & 0xFFFF);
10797 else
10798 data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
10799 }
10800
10801 wm_put_swfwhw_semaphore(sc);
10802 return error;
10803 }
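
/*
 * Worked example (illustrative, not part of the driver): for word offset
 * 3 in bank 0, act_offset is 6.  The dword is fetched from byte address
 * 6 & ~0x3 = 4, and since bit 1 of act_offset is set, the wanted word is
 * the high half: with dword 0xBBBBAAAA, byte address 4 holds 0xAAAA and
 * byte address 6 holds 0xBBBB.
 */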
10804
10805 /* iNVM */
10806
10807 static int
10808 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
10809 {
10810 int32_t rv = 0;
10811 uint32_t invm_dword;
10812 uint16_t i;
10813 uint8_t record_type, word_address;
10814
10815 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10816 device_xname(sc->sc_dev), __func__));
10817
10818 for (i = 0; i < INVM_SIZE; i++) {
10819 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
10820 /* Get record type */
10821 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
10822 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
10823 break;
10824 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
10825 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
10826 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
10827 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
10828 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
10829 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
10830 if (word_address == address) {
10831 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
10832 rv = 0;
10833 break;
10834 }
10835 }
10836 }
10837
10838 return rv;
10839 }
10840
10841 static int
10842 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
10843 {
10844 int rv = 0;
10845 int i;
10846
10847 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10848 device_xname(sc->sc_dev), __func__));
10849
10850 for (i = 0; i < words; i++) {
10851 switch (offset + i) {
10852 case NVM_OFF_MACADDR:
10853 case NVM_OFF_MACADDR1:
10854 case NVM_OFF_MACADDR2:
10855 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
10856 if (rv != 0) {
10857 data[i] = 0xffff;
10858 rv = -1;
10859 }
10860 break;
10861 case NVM_OFF_CFG2:
10862 rv = wm_nvm_read_word_invm(sc, offset, data);
10863 if (rv != 0) {
10864 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
10865 rv = 0;
10866 }
10867 break;
10868 case NVM_OFF_CFG4:
10869 rv = wm_nvm_read_word_invm(sc, offset, data);
10870 if (rv != 0) {
10871 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
10872 rv = 0;
10873 }
10874 break;
10875 case NVM_OFF_LED_1_CFG:
10876 rv = wm_nvm_read_word_invm(sc, offset, data);
10877 if (rv != 0) {
10878 *data = NVM_LED_1_CFG_DEFAULT_I211;
10879 rv = 0;
10880 }
10881 break;
10882 case NVM_OFF_LED_0_2_CFG:
10883 rv = wm_nvm_read_word_invm(sc, offset, data);
10884 if (rv != 0) {
10885 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
10886 rv = 0;
10887 }
10888 break;
10889 case NVM_OFF_ID_LED_SETTINGS:
10890 rv = wm_nvm_read_word_invm(sc, offset, data);
10891 if (rv != 0) {
10892 *data = ID_LED_RESERVED_FFFF;
10893 rv = 0;
10894 }
10895 break;
10896 default:
10897 DPRINTF(WM_DEBUG_NVM,
10898 ("NVM word 0x%02x is not mapped.\n", offset));
10899 *data = NVM_RESERVED_WORD;
10900 break;
10901 }
10902 }
10903
10904 return rv;
10905 }
10906
10907 /* Lock, detecting NVM type, validate checksum, version and read */
10908
10909 /*
10910 * wm_nvm_acquire:
10911 *
10912 * Perform the EEPROM handshake required on some chips.
10913 */
10914 static int
10915 wm_nvm_acquire(struct wm_softc *sc)
10916 {
10917 uint32_t reg;
10918 int x;
10919 int ret = 0;
10920
10921 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10922 device_xname(sc->sc_dev), __func__));
10923
10924 if (sc->sc_type >= WM_T_ICH8) {
10925 ret = wm_get_nvm_ich8lan(sc);
10926 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
10927 ret = wm_get_swfwhw_semaphore(sc);
10928 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
10929 /* This will also do wm_get_swsm_semaphore() if needed */
10930 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
10931 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
10932 ret = wm_get_swsm_semaphore(sc);
10933 }
10934
10935 if (ret) {
10936 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10937 __func__);
10938 return 1;
10939 }
10940
10941 if (sc->sc_flags & WM_F_LOCK_EECD) {
10942 reg = CSR_READ(sc, WMREG_EECD);
10943
10944 /* Request EEPROM access. */
10945 reg |= EECD_EE_REQ;
10946 CSR_WRITE(sc, WMREG_EECD, reg);
10947
10948 /* ..and wait for it to be granted. */
10949 for (x = 0; x < 1000; x++) {
10950 reg = CSR_READ(sc, WMREG_EECD);
10951 if (reg & EECD_EE_GNT)
10952 break;
10953 delay(5);
10954 }
10955 if ((reg & EECD_EE_GNT) == 0) {
10956 aprint_error_dev(sc->sc_dev,
10957 "could not acquire EEPROM GNT\n");
10958 reg &= ~EECD_EE_REQ;
10959 CSR_WRITE(sc, WMREG_EECD, reg);
10960 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10961 wm_put_swfwhw_semaphore(sc);
10962 if (sc->sc_flags & WM_F_LOCK_SWFW)
10963 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10964 else if (sc->sc_flags & WM_F_LOCK_SWSM)
10965 wm_put_swsm_semaphore(sc);
10966 return 1;
10967 }
10968 }
10969
10970 return 0;
10971 }
10972
10973 /*
10974 * wm_nvm_release:
10975 *
10976 * Release the EEPROM mutex.
10977 */
10978 static void
10979 wm_nvm_release(struct wm_softc *sc)
10980 {
10981 uint32_t reg;
10982
10983 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10984 device_xname(sc->sc_dev), __func__));
10985
10986 if (sc->sc_flags & WM_F_LOCK_EECD) {
10987 reg = CSR_READ(sc, WMREG_EECD);
10988 reg &= ~EECD_EE_REQ;
10989 CSR_WRITE(sc, WMREG_EECD, reg);
10990 }
10991
10992 if (sc->sc_type >= WM_T_ICH8) {
10993 wm_put_nvm_ich8lan(sc);
10994 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10995 wm_put_swfwhw_semaphore(sc);
10996 if (sc->sc_flags & WM_F_LOCK_SWFW)
10997 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10998 else if (sc->sc_flags & WM_F_LOCK_SWSM)
10999 wm_put_swsm_semaphore(sc);
11000 }
11001
11002 static int
11003 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
11004 {
11005 uint32_t eecd = 0;
11006
11007 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
11008 || sc->sc_type == WM_T_82583) {
11009 eecd = CSR_READ(sc, WMREG_EECD);
11010
11011 /* Isolate bits 15 & 16 */
11012 eecd = ((eecd >> 15) & 0x03);
11013
11014 /* If both bits are set, device is Flash type */
11015 if (eecd == 0x03)
11016 return 0;
11017 }
11018 return 1;
11019 }
11020
11021 static int
11022 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
11023 {
11024 uint32_t eec;
11025
11026 eec = CSR_READ(sc, WMREG_EEC);
11027 if ((eec & EEC_FLASH_DETECTED) != 0)
11028 return 1;
11029
11030 return 0;
11031 }
11032
11033 /*
11034 * wm_nvm_validate_checksum
11035 *
11036  * The 16-bit sum of the first 64 words must equal NVM_CHECKSUM (0xBABA).
11037  */
11038 static int
11039 wm_nvm_validate_checksum(struct wm_softc *sc)
11040 {
11041 uint16_t checksum;
11042 uint16_t eeprom_data;
11043 #ifdef WM_DEBUG
11044 uint16_t csum_wordaddr, valid_checksum;
11045 #endif
11046 int i;
11047
11048 checksum = 0;
11049
11050 /* Don't check for I211 */
11051 if (sc->sc_type == WM_T_I211)
11052 return 0;
11053
11054 #ifdef WM_DEBUG
11055 if (sc->sc_type == WM_T_PCH_LPT) {
11056 csum_wordaddr = NVM_OFF_COMPAT;
11057 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
11058 } else {
11059 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
11060 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
11061 }
11062
11063 /* Dump EEPROM image for debug */
11064 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11065 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11066 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
11067 /* XXX PCH_SPT? */
11068 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
11069 if ((eeprom_data & valid_checksum) == 0) {
11070 DPRINTF(WM_DEBUG_NVM,
11071 ("%s: NVM need to be updated (%04x != %04x)\n",
11072 device_xname(sc->sc_dev), eeprom_data,
11073 valid_checksum));
11074 }
11075 }
11076
11077 if ((wm_debug & WM_DEBUG_NVM) != 0) {
11078 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
11079 for (i = 0; i < NVM_SIZE; i++) {
11080 if (wm_nvm_read(sc, i, 1, &eeprom_data))
11081 printf("XXXX ");
11082 else
11083 printf("%04hx ", eeprom_data);
11084 if (i % 8 == 7)
11085 printf("\n");
11086 }
11087 }
11088
11089 #endif /* WM_DEBUG */
11090
11091 for (i = 0; i < NVM_SIZE; i++) {
11092 if (wm_nvm_read(sc, i, 1, &eeprom_data))
11093 return 1;
11094 checksum += eeprom_data;
11095 }
11096
11097 if (checksum != (uint16_t) NVM_CHECKSUM) {
11098 #ifdef WM_DEBUG
11099 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
11100 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
11101 #endif
11102 }
11103
11104 return 0;
11105 }
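
/*
 * Illustrative sketch (not part of the driver): a hypothetical standalone
 * version of the check above, over an in-memory image.  The NVM is
 * provisioned so that the 16-bit sum of words 0x00-0x3F comes out to
 * NVM_CHECKSUM.
 */
#if 0
static bool
wm_model_nvm_checksum_ok(const uint16_t image[NVM_SIZE])
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < NVM_SIZE; i++)
		sum += image[i];	/* 16-bit wraparound is intended */
	return sum == (uint16_t)NVM_CHECKSUM;
}
#endif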
11106
11107 static void
11108 wm_nvm_version_invm(struct wm_softc *sc)
11109 {
11110 uint32_t dword;
11111
11112 /*
11113 	 * Linux's code to decode the version is very strange, so we don't
11114 	 * follow that algorithm and instead just use word 61 as the
11115 	 * documentation describes.  It may not be perfect, though...
11116 *
11117 * Example:
11118 *
11119 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
11120 */
11121 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
11122 dword = __SHIFTOUT(dword, INVM_VER_1);
11123 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
11124 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
11125 }
11126
11127 static void
11128 wm_nvm_version(struct wm_softc *sc)
11129 {
11130 uint16_t major, minor, build, patch;
11131 uint16_t uid0, uid1;
11132 uint16_t nvm_data;
11133 uint16_t off;
11134 bool check_version = false;
11135 bool check_optionrom = false;
11136 bool have_build = false;
11137
11138 /*
11139 * Version format:
11140 *
11141 * XYYZ
11142 * X0YZ
11143 * X0YY
11144 *
11145 * Example:
11146 *
11147 * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10)
11148 * 82571 0x50a6 5.10.6?
11149 * 82572 0x506a 5.6.10?
11150 * 82572EI 0x5069 5.6.9?
11151 * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4)
11152 * 0x2013 2.1.3?
11153 	 * 82583	0x10a0	1.10.0? (document says it's the default value)
11154 */
11155 wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
11156 switch (sc->sc_type) {
11157 case WM_T_82571:
11158 case WM_T_82572:
11159 case WM_T_82574:
11160 case WM_T_82583:
11161 check_version = true;
11162 check_optionrom = true;
11163 have_build = true;
11164 break;
11165 case WM_T_82575:
11166 case WM_T_82576:
11167 case WM_T_82580:
11168 if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
11169 check_version = true;
11170 break;
11171 case WM_T_I211:
11172 wm_nvm_version_invm(sc);
11173 goto printver;
11174 case WM_T_I210:
11175 if (!wm_nvm_get_flash_presence_i210(sc)) {
11176 wm_nvm_version_invm(sc);
11177 goto printver;
11178 }
11179 /* FALLTHROUGH */
11180 case WM_T_I350:
11181 case WM_T_I354:
11182 check_version = true;
11183 check_optionrom = true;
11184 break;
11185 default:
11186 return;
11187 }
11188 if (check_version) {
11189 wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
11190 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
11191 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
11192 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
11193 build = nvm_data & NVM_BUILD_MASK;
11194 have_build = true;
11195 } else
11196 minor = nvm_data & 0x00ff;
11197
11198 /* Decimal */
11199 minor = (minor / 16) * 10 + (minor % 16);
11200 sc->sc_nvm_ver_major = major;
11201 sc->sc_nvm_ver_minor = minor;
11202
11203 printver:
11204 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
11205 sc->sc_nvm_ver_minor);
11206 if (have_build) {
11207 sc->sc_nvm_ver_build = build;
11208 aprint_verbose(".%d", build);
11209 }
11210 }
11211 if (check_optionrom) {
11212 wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
11213 /* Option ROM Version */
11214 if ((off != 0x0000) && (off != 0xffff)) {
11215 off += NVM_COMBO_VER_OFF;
11216 wm_nvm_read(sc, off + 1, 1, &uid1);
11217 wm_nvm_read(sc, off, 1, &uid0);
11218 if ((uid0 != 0) && (uid0 != 0xffff)
11219 && (uid1 != 0) && (uid1 != 0xffff)) {
11220 /* 16bits */
11221 major = uid0 >> 8;
11222 build = (uid0 << 8) | (uid1 >> 8);
11223 patch = uid1 & 0x00ff;
11224 aprint_verbose(", option ROM Version %d.%d.%d",
11225 major, build, patch);
11226 }
11227 }
11228 }
11229
11230 wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
11231 aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
11232 }
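
/*
 * Worked example (illustrative, not part of the driver): for an 82571
 * with version word 0x50a2, and assuming the 4/8/4 major/minor/build mask
 * split implied by the examples above, the major nibble is 5, the raw
 * minor byte is 0x0a and the build nibble is 2.  The minor conversion
 * treats the hex nibbles as decimal digits:
 * (0x0a / 16) * 10 + (0x0a % 16) = 10, giving "5.10.2"; a raw minor of
 * 0x14 would likewise decode to 14, not 20.
 */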
11233
11234 /*
11235 * wm_nvm_read:
11236 *
11237 * Read data from the serial EEPROM.
11238 */
11239 static int
11240 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
11241 {
11242 int rv;
11243
11244 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11245 device_xname(sc->sc_dev), __func__));
11246
11247 if (sc->sc_flags & WM_F_EEPROM_INVALID)
11248 return 1;
11249
11250 if (wm_nvm_acquire(sc))
11251 return 1;
11252
11253 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11254 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11255 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
11256 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
11257 else if (sc->sc_type == WM_T_PCH_SPT)
11258 rv = wm_nvm_read_spt(sc, word, wordcnt, data);
11259 else if (sc->sc_flags & WM_F_EEPROM_INVM)
11260 rv = wm_nvm_read_invm(sc, word, wordcnt, data);
11261 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
11262 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
11263 else if (sc->sc_flags & WM_F_EEPROM_SPI)
11264 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
11265 else
11266 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
11267
11268 wm_nvm_release(sc);
11269 return rv;
11270 }
11271
11272 /*
11273 * Hardware semaphores.
11274  * Very complex...
11275 */
11276
11277 static int
11278 wm_get_null(struct wm_softc *sc)
11279 {
11280
11281 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11282 device_xname(sc->sc_dev), __func__));
11283 return 0;
11284 }
11285
11286 static void
11287 wm_put_null(struct wm_softc *sc)
11288 {
11289
11290 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11291 device_xname(sc->sc_dev), __func__));
11292 return;
11293 }
11294
11295 /*
11296 * Get hardware semaphore.
11297 * Same as e1000_get_hw_semaphore_generic()
11298 */
11299 static int
11300 wm_get_swsm_semaphore(struct wm_softc *sc)
11301 {
11302 int32_t timeout;
11303 uint32_t swsm;
11304
11305 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11306 device_xname(sc->sc_dev), __func__));
11307 KASSERT(sc->sc_nvm_wordsize > 0);
11308
11309 /* Get the SW semaphore. */
11310 timeout = sc->sc_nvm_wordsize + 1;
11311 while (timeout) {
11312 swsm = CSR_READ(sc, WMREG_SWSM);
11313
11314 if ((swsm & SWSM_SMBI) == 0)
11315 break;
11316
11317 delay(50);
11318 timeout--;
11319 }
11320
11321 if (timeout == 0) {
11322 aprint_error_dev(sc->sc_dev,
11323 "could not acquire SWSM SMBI\n");
11324 return 1;
11325 }
11326
11327 /* Get the FW semaphore. */
11328 timeout = sc->sc_nvm_wordsize + 1;
11329 while (timeout) {
11330 swsm = CSR_READ(sc, WMREG_SWSM);
11331 swsm |= SWSM_SWESMBI;
11332 CSR_WRITE(sc, WMREG_SWSM, swsm);
11333 /* If we managed to set the bit we got the semaphore. */
11334 swsm = CSR_READ(sc, WMREG_SWSM);
11335 if (swsm & SWSM_SWESMBI)
11336 break;
11337
11338 delay(50);
11339 timeout--;
11340 }
11341
11342 if (timeout == 0) {
11343 aprint_error_dev(sc->sc_dev,
11344 "could not acquire SWSM SWESMBI\n");
11345 /* Release semaphores */
11346 wm_put_swsm_semaphore(sc);
11347 return 1;
11348 }
11349 return 0;
11350 }
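
/*
 * Illustrative sketch (not part of the driver): SWESMBI is taken with a
 * "set, then read back" handshake, since the hardware only lets one
 * agent keep the bit set.  The generic pattern:
 */
#if 0
static bool
wm_model_set_and_verify(struct wm_softc *sc, uint32_t regoff, uint32_t bit)
{

	CSR_WRITE(sc, regoff, CSR_READ(sc, regoff) | bit);
	/* We own the resource iff the bit stuck */
	return (CSR_READ(sc, regoff) & bit) != 0;
}
#endif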
11351
11352 /*
11353 * Put hardware semaphore.
11354 * Same as e1000_put_hw_semaphore_generic()
11355 */
11356 static void
11357 wm_put_swsm_semaphore(struct wm_softc *sc)
11358 {
11359 uint32_t swsm;
11360
11361 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11362 device_xname(sc->sc_dev), __func__));
11363
11364 swsm = CSR_READ(sc, WMREG_SWSM);
11365 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
11366 CSR_WRITE(sc, WMREG_SWSM, swsm);
11367 }
11368
11369 /*
11370 * Get SW/FW semaphore.
11371 * Same as e1000_acquire_swfw_sync_82575().
11372 */
11373 static int
11374 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
11375 {
11376 uint32_t swfw_sync;
11377 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
11378 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
11379 int timeout = 200;
11380
11381 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11382 device_xname(sc->sc_dev), __func__));
11383 KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
11384
11385 for (timeout = 0; timeout < 200; timeout++) {
11386 if (sc->sc_flags & WM_F_LOCK_SWSM) {
11387 if (wm_get_swsm_semaphore(sc)) {
11388 aprint_error_dev(sc->sc_dev,
11389 "%s: failed to get semaphore\n",
11390 __func__);
11391 return 1;
11392 }
11393 }
11394 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
11395 if ((swfw_sync & (swmask | fwmask)) == 0) {
11396 swfw_sync |= swmask;
11397 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
11398 if (sc->sc_flags & WM_F_LOCK_SWSM)
11399 wm_put_swsm_semaphore(sc);
11400 return 0;
11401 }
11402 if (sc->sc_flags & WM_F_LOCK_SWSM)
11403 wm_put_swsm_semaphore(sc);
11404 delay(5000);
11405 }
11406 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
11407 device_xname(sc->sc_dev), mask, swfw_sync);
11408 return 1;
11409 }
11410
11411 static void
11412 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
11413 {
11414 uint32_t swfw_sync;
11415
11416 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11417 device_xname(sc->sc_dev), __func__));
11418 KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
11419
11420 if (sc->sc_flags & WM_F_LOCK_SWSM) {
11421 while (wm_get_swsm_semaphore(sc) != 0)
11422 continue;
11423 }
11424 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
11425 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
11426 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
11427 if (sc->sc_flags & WM_F_LOCK_SWSM)
11428 wm_put_swsm_semaphore(sc);
11429 }
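/*
 * Typical SW/FW usage, as in wm_get_phy_82575() below (a sketch; the
 * mask selects the resource, here the PHY of this function):
 *
 *	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]) == 0) {
 *		... MDIC access ...
 *		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
 *	}
 */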
11430
11431 static int
11432 wm_get_phy_82575(struct wm_softc *sc)
11433 {
11434
11435 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11436 device_xname(sc->sc_dev), __func__));
11437 return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
11438 }
11439
11440 static void
11441 wm_put_phy_82575(struct wm_softc *sc)
11442 {
11443
11444 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11445 device_xname(sc->sc_dev), __func__));
11446 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
11447 }
11448
11449 static int
11450 wm_get_swfwhw_semaphore(struct wm_softc *sc)
11451 {
11452 uint32_t ext_ctrl;
11453 	int timeout;
11454
11455 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11456 device_xname(sc->sc_dev), __func__));
11457
11458 mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
11459 for (timeout = 0; timeout < 200; timeout++) {
11460 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11461 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
11462 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11463
11464 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11465 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
11466 return 0;
11467 delay(5000);
11468 }
11469 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
11470 device_xname(sc->sc_dev), ext_ctrl);
11471 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
11472 return 1;
11473 }
11474
11475 static void
11476 wm_put_swfwhw_semaphore(struct wm_softc *sc)
11477 {
11478 uint32_t ext_ctrl;
11479
11480 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11481 device_xname(sc->sc_dev), __func__));
11482
11483 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11484 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11485 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11486
11487 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
11488 }
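/*
 * Note: sc_ich_phymtx serializes in-kernel users (it covers both PHY
 * and NVM access, per the comments above), while the
 * EXTCNFCTR_MDIO_SW_OWNERSHIP bit arbitrates against the firmware.
 */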
11489
11490 static int
11491 wm_get_swflag_ich8lan(struct wm_softc *sc)
11492 {
11493 uint32_t ext_ctrl;
11494 int timeout;
11495
11496 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11497 device_xname(sc->sc_dev), __func__));
11498 mutex_enter(sc->sc_ich_phymtx);
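	/*
	 * Two phases: first wait for the current owner (if any) to drop
	 * the MDIO ownership bit, then set it ourselves and read it
	 * back to confirm the grab took effect.
	 */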
11499 for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
11500 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11501 if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
11502 break;
11503 delay(1000);
11504 }
11505 if (timeout >= WM_PHY_CFG_TIMEOUT) {
11506 printf("%s: SW has already locked the resource\n",
11507 device_xname(sc->sc_dev));
11508 goto out;
11509 }
11510
11511 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
11512 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11513 for (timeout = 0; timeout < 1000; timeout++) {
11514 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11515 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
11516 break;
11517 delay(1000);
11518 }
11519 if (timeout >= 1000) {
11520 printf("%s: failed to acquire semaphore\n",
11521 device_xname(sc->sc_dev));
11522 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11523 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11524 goto out;
11525 }
11526 return 0;
11527
11528 out:
11529 mutex_exit(sc->sc_ich_phymtx);
11530 return 1;
11531 }
11532
11533 static void
11534 wm_put_swflag_ich8lan(struct wm_softc *sc)
11535 {
11536 uint32_t ext_ctrl;
11537
11538 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11539 device_xname(sc->sc_dev), __func__));
11540 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11541 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
11542 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11543 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11544 } else {
11545 printf("%s: Semaphore unexpectedly released\n",
11546 device_xname(sc->sc_dev));
11547 }
11548
11549 mutex_exit(sc->sc_ich_phymtx);
11550 }
11551
11552 static int
11553 wm_get_nvm_ich8lan(struct wm_softc *sc)
11554 {
11555
11556 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11557 device_xname(sc->sc_dev), __func__));
11558 mutex_enter(sc->sc_ich_nvmmtx);
11559
11560 return 0;
11561 }
11562
11563 static void
11564 wm_put_nvm_ich8lan(struct wm_softc *sc)
11565 {
11566
11567 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11568 device_xname(sc->sc_dev), __func__));
11569 mutex_exit(sc->sc_ich_nvmmtx);
11570 }
11571
11572 static int
11573 wm_get_hw_semaphore_82573(struct wm_softc *sc)
11574 {
11575 int i = 0;
11576 uint32_t reg;
11577
11578 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11579 device_xname(sc->sc_dev), __func__));
11580
11581 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11582 do {
11583 CSR_WRITE(sc, WMREG_EXTCNFCTR,
11584 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
11585 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11586 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
11587 break;
11588 delay(2*1000);
11589 i++;
11590 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
11591
11592 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
11593 wm_put_hw_semaphore_82573(sc);
11594 log(LOG_ERR, "%s: Driver can't access the PHY\n",
11595 device_xname(sc->sc_dev));
11596 return -1;
11597 }
11598
11599 return 0;
11600 }
11601
11602 static void
11603 wm_put_hw_semaphore_82573(struct wm_softc *sc)
11604 {
11605 uint32_t reg;
11606
11607 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11608 device_xname(sc->sc_dev), __func__));
11609
11610 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11611 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11612 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11613 }
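/*
 * The 82573 variant uses the same EXTCNFCTR ownership bit but takes
 * no kernel mutex; callers presumably serialize at a higher level.
 */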
11614
11615 /*
11616 * Management mode and power management related subroutines.
11617 * BMC, AMT, suspend/resume and EEE.
11618 */
11619
11620 #ifdef WM_WOL
11621 static int
11622 wm_check_mng_mode(struct wm_softc *sc)
11623 {
11624 int rv;
11625
11626 switch (sc->sc_type) {
11627 case WM_T_ICH8:
11628 case WM_T_ICH9:
11629 case WM_T_ICH10:
11630 case WM_T_PCH:
11631 case WM_T_PCH2:
11632 case WM_T_PCH_LPT:
11633 case WM_T_PCH_SPT:
11634 rv = wm_check_mng_mode_ich8lan(sc);
11635 break;
11636 case WM_T_82574:
11637 case WM_T_82583:
11638 rv = wm_check_mng_mode_82574(sc);
11639 break;
11640 case WM_T_82571:
11641 case WM_T_82572:
11642 case WM_T_82573:
11643 case WM_T_80003:
11644 rv = wm_check_mng_mode_generic(sc);
11645 break;
11646 default:
11647 		/* Nothing to do */
11648 rv = 0;
11649 break;
11650 }
11651
11652 return rv;
11653 }
11654
11655 static int
11656 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
11657 {
11658 uint32_t fwsm;
11659
11660 fwsm = CSR_READ(sc, WMREG_FWSM);
11661
11662 if (((fwsm & FWSM_FW_VALID) != 0)
11663 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11664 return 1;
11665
11666 return 0;
11667 }
11668
11669 static int
11670 wm_check_mng_mode_82574(struct wm_softc *sc)
11671 {
11672 uint16_t data;
11673
11674 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11675
11676 if ((data & NVM_CFG2_MNGM_MASK) != 0)
11677 return 1;
11678
11679 return 0;
11680 }
11681
11682 static int
11683 wm_check_mng_mode_generic(struct wm_softc *sc)
11684 {
11685 uint32_t fwsm;
11686
11687 fwsm = CSR_READ(sc, WMREG_FWSM);
11688
11689 if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
11690 return 1;
11691
11692 return 0;
11693 }
11694 #endif /* WM_WOL */
11695
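/*
 * Decide whether management pass-through should be enabled: TCO
 * receive must be on, and then either the ARC subsystem reports iAMT
 * mode, the 82574/82583 NVM selects pass-through mode, or plain SMBus
 * management is enabled without ASF.
 */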
11696 static int
11697 wm_enable_mng_pass_thru(struct wm_softc *sc)
11698 {
11699 uint32_t manc, fwsm, factps;
11700
11701 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
11702 return 0;
11703
11704 manc = CSR_READ(sc, WMREG_MANC);
11705
11706 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
11707 device_xname(sc->sc_dev), manc));
11708 if ((manc & MANC_RECV_TCO_EN) == 0)
11709 return 0;
11710
11711 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
11712 fwsm = CSR_READ(sc, WMREG_FWSM);
11713 factps = CSR_READ(sc, WMREG_FACTPS);
11714 if (((factps & FACTPS_MNGCG) == 0)
11715 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11716 return 1;
11717 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
11718 uint16_t data;
11719
11720 factps = CSR_READ(sc, WMREG_FACTPS);
11721 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11722 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
11723 device_xname(sc->sc_dev), factps, data));
11724 if (((factps & FACTPS_MNGCG) == 0)
11725 && ((data & NVM_CFG2_MNGM_MASK)
11726 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
11727 return 1;
11728 } else if (((manc & MANC_SMBUS_EN) != 0)
11729 && ((manc & MANC_ASF_EN) == 0))
11730 return 1;
11731
11732 return 0;
11733 }
11734
11735 static bool
11736 wm_phy_resetisblocked(struct wm_softc *sc)
11737 {
11738 bool blocked = false;
11739 uint32_t reg;
11740 int i = 0;
11741
11742 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
11743 device_xname(sc->sc_dev), __func__));
11744
11745 switch (sc->sc_type) {
11746 case WM_T_ICH8:
11747 case WM_T_ICH9:
11748 case WM_T_ICH10:
11749 case WM_T_PCH:
11750 case WM_T_PCH2:
11751 case WM_T_PCH_LPT:
11752 case WM_T_PCH_SPT:
11753 do {
11754 reg = CSR_READ(sc, WMREG_FWSM);
11755 if ((reg & FWSM_RSPCIPHY) == 0) {
11756 blocked = true;
11757 delay(10*1000);
11758 continue;
11759 }
11760 blocked = false;
11761 } while (blocked && (i++ < 30));
11762 return blocked;
11764 case WM_T_82571:
11765 case WM_T_82572:
11766 case WM_T_82573:
11767 case WM_T_82574:
11768 case WM_T_82583:
11769 case WM_T_80003:
11770 reg = CSR_READ(sc, WMREG_MANC);
11771 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
11772 return true;
11773 else
11774 return false;
11776 default:
11777 /* no problem */
11778 break;
11779 }
11780
11781 return false;
11782 }
11783
11784 static void
11785 wm_get_hw_control(struct wm_softc *sc)
11786 {
11787 uint32_t reg;
11788
11789 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11790 device_xname(sc->sc_dev), __func__));
11791
11792 switch (sc->sc_type) {
11793 case WM_T_82573:
11794 reg = CSR_READ(sc, WMREG_SWSM);
11795 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
11796 break;
11797 case WM_T_82571:
11798 case WM_T_82572:
11799 case WM_T_82574:
11800 case WM_T_82583:
11801 case WM_T_80003:
11802 case WM_T_ICH8:
11803 case WM_T_ICH9:
11804 case WM_T_ICH10:
11805 case WM_T_PCH:
11806 case WM_T_PCH2:
11807 case WM_T_PCH_LPT:
11808 case WM_T_PCH_SPT:
11809 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11810 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
11811 break;
11812 default:
11813 break;
11814 }
11815 }
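/*
 * DRV_LOAD signals to the management firmware that a host driver has
 * taken ownership of the hardware; wm_release_hw_control() below
 * clears it again so the BMC/AMT firmware can resume control.
 */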
11816
11817 static void
11818 wm_release_hw_control(struct wm_softc *sc)
11819 {
11820 uint32_t reg;
11821
11822 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11823 device_xname(sc->sc_dev), __func__));
11824
11825 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
11826 return;
11827
11828 if (sc->sc_type == WM_T_82573) {
11829 reg = CSR_READ(sc, WMREG_SWSM);
11830 		reg &= ~SWSM_DRV_LOAD;
11831 		CSR_WRITE(sc, WMREG_SWSM, reg);
11832 } else {
11833 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11834 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
11835 }
11836 }
11837
11838 static void
11839 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
11840 {
11841 uint32_t reg;
11842
11843 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
11844 device_xname(sc->sc_dev), __func__));
11845
11846 if (sc->sc_type < WM_T_PCH2)
11847 return;
11848
11849 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11850
11851 if (gate)
11852 reg |= EXTCNFCTR_GATE_PHY_CFG;
11853 else
11854 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
11855
11856 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11857 }
11858
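/*
 * Switch the PHY interface from SMBus back to PCIe/MDIO.  When no
 * valid firmware is present and the PHY reset is not blocked, the
 * LANPHYPC toggle below power-cycles the PHY; on PCH_LPT and newer
 * the FORCE_SMBUS bit is held around the toggle.
 */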
11859 static void
11860 wm_smbustopci(struct wm_softc *sc)
11861 {
11862 uint32_t fwsm, reg;
11863
11864 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
11865 device_xname(sc->sc_dev), __func__));
11866
11867 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
11868 wm_gate_hw_phy_config_ich8lan(sc, true);
11869
11870 /* Acquire PHY semaphore */
11871 sc->phy.acquire(sc);
11872
11873 fwsm = CSR_READ(sc, WMREG_FWSM);
11874 if (((fwsm & FWSM_FW_VALID) == 0)
11875 	    && (wm_phy_resetisblocked(sc) == false)) {
11876 if (sc->sc_type >= WM_T_PCH_LPT) {
11877 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11878 reg |= CTRL_EXT_FORCE_SMBUS;
11879 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11880 CSR_WRITE_FLUSH(sc);
11881 delay(50*1000);
11882 }
11883
11884 /* Toggle LANPHYPC */
11885 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
11886 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
11887 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11888 CSR_WRITE_FLUSH(sc);
11889 delay(1000);
11890 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
11891 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11892 CSR_WRITE_FLUSH(sc);
11893 delay(50*1000);
11894
11895 if (sc->sc_type >= WM_T_PCH_LPT) {
11896 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11897 reg &= ~CTRL_EXT_FORCE_SMBUS;
11898 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11899 }
11900 }
11901
11902 /* Release semaphore */
11903 sc->phy.release(sc);
11904
11905 /*
11906 * Ungate automatic PHY configuration by hardware on non-managed 82579
11907 */
11908 if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0))
11909 wm_gate_hw_phy_config_ich8lan(sc, false);
11910 }
11911
11912 static void
11913 wm_init_manageability(struct wm_softc *sc)
11914 {
11915
11916 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
11917 device_xname(sc->sc_dev), __func__));
11918 if (sc->sc_flags & WM_F_HAS_MANAGE) {
11919 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
11920 uint32_t manc = CSR_READ(sc, WMREG_MANC);
11921
11922 /* Disable hardware interception of ARP */
11923 manc &= ~MANC_ARP_EN;
11924
11925 /* Enable receiving management packets to the host */
11926 if (sc->sc_type >= WM_T_82571) {
11927 manc |= MANC_EN_MNG2HOST;
11928 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
11929 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
11930 }
11931
11932 CSR_WRITE(sc, WMREG_MANC, manc);
11933 }
11934 }
11935
11936 static void
11937 wm_release_manageability(struct wm_softc *sc)
11938 {
11939
11940 if (sc->sc_flags & WM_F_HAS_MANAGE) {
11941 uint32_t manc = CSR_READ(sc, WMREG_MANC);
11942
11943 manc |= MANC_ARP_EN;
11944 if (sc->sc_type >= WM_T_82571)
11945 manc &= ~MANC_EN_MNG2HOST;
11946
11947 CSR_WRITE(sc, WMREG_MANC, manc);
11948 }
11949 }
11950
11951 static void
11952 wm_get_wakeup(struct wm_softc *sc)
11953 {
11954
11955 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
11956 switch (sc->sc_type) {
11957 case WM_T_82573:
11958 case WM_T_82583:
11959 sc->sc_flags |= WM_F_HAS_AMT;
11960 /* FALLTHROUGH */
11961 case WM_T_80003:
11962 case WM_T_82541:
11963 case WM_T_82547:
11964 case WM_T_82571:
11965 case WM_T_82572:
11966 case WM_T_82574:
11967 case WM_T_82575:
11968 case WM_T_82576:
11969 case WM_T_82580:
11970 case WM_T_I350:
11971 case WM_T_I354:
11972 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
11973 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
11974 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11975 break;
11976 case WM_T_ICH8:
11977 case WM_T_ICH9:
11978 case WM_T_ICH10:
11979 case WM_T_PCH:
11980 case WM_T_PCH2:
11981 case WM_T_PCH_LPT:
11982 case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
11983 sc->sc_flags |= WM_F_HAS_AMT;
11984 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11985 break;
11986 default:
11987 break;
11988 }
11989
11990 /* 1: HAS_MANAGE */
11991 if (wm_enable_mng_pass_thru(sc) != 0)
11992 sc->sc_flags |= WM_F_HAS_MANAGE;
11993
11994 #ifdef WM_DEBUG
11995 printf("\n");
11996 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
11997 printf("HAS_AMT,");
11998 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
11999 printf("ARC_SUBSYS_VALID,");
12000 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
12001 printf("ASF_FIRMWARE_PRES,");
12002 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
12003 printf("HAS_MANAGE,");
12004 printf("\n");
12005 #endif
12006 	/*
12007 	 * Note that the WOL flag is set after the EEPROM has been reset.
12008 	 */
12010 }
12011
12012 #ifdef WM_WOL
12013 /* WOL in the newer chipset interfaces (pchlan) */
12014 static void
12015 wm_enable_phy_wakeup(struct wm_softc *sc)
12016 {
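	/* XXX Not implemented yet; the comments below outline the intended flow. */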
12017 #if 0
12018 uint16_t preg;
12019
12020 /* Copy MAC RARs to PHY RARs */
12021
12022 /* Copy MAC MTA to PHY MTA */
12023
12024 /* Configure PHY Rx Control register */
12025
12026 /* Enable PHY wakeup in MAC register */
12027
12028 /* Configure and enable PHY wakeup in PHY registers */
12029
12030 /* Activate PHY wakeup */
12031
12032 /* XXX */
12033 #endif
12034 }
12035
12036 /* Power down workaround on D3 */
12037 static void
12038 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
12039 {
12040 uint32_t reg;
12041 int i;
12042
12043 for (i = 0; i < 2; i++) {
12044 /* Disable link */
12045 reg = CSR_READ(sc, WMREG_PHY_CTRL);
12046 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
12047 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
12048
12049 /*
12050 * Call gig speed drop workaround on Gig disable before
12051 * accessing any PHY registers
12052 */
12053 if (sc->sc_type == WM_T_ICH8)
12054 wm_gig_downshift_workaround_ich8lan(sc);
12055
12056 /* Write VR power-down enable */
12057 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
12058 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
12059 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
12060 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
12061
12062 /* Read it back and test */
12063 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
12064 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
12065 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
12066 break;
12067
12068 /* Issue PHY reset and repeat at most one more time */
12069 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
12070 }
12071 }
12072
12073 static void
12074 wm_enable_wakeup(struct wm_softc *sc)
12075 {
12076 uint32_t reg, pmreg;
12077 pcireg_t pmode;
12078
12079 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12080 device_xname(sc->sc_dev), __func__));
12081
12082 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
12083 &pmreg, NULL) == 0)
12084 return;
12085
12086 /* Advertise the wakeup capability */
12087 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
12088 | CTRL_SWDPIN(3));
12089 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
12090
12091 /* ICH workaround */
12092 switch (sc->sc_type) {
12093 case WM_T_ICH8:
12094 case WM_T_ICH9:
12095 case WM_T_ICH10:
12096 case WM_T_PCH:
12097 case WM_T_PCH2:
12098 case WM_T_PCH_LPT:
12099 case WM_T_PCH_SPT:
12100 /* Disable gig during WOL */
12101 reg = CSR_READ(sc, WMREG_PHY_CTRL);
12102 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
12103 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
12104 if (sc->sc_type == WM_T_PCH)
12105 wm_gmii_reset(sc);
12106
12107 /* Power down workaround */
12108 if (sc->sc_phytype == WMPHY_82577) {
12109 struct mii_softc *child;
12110
12111 /* Assume that the PHY is copper */
12112 child = LIST_FIRST(&sc->sc_mii.mii_phys);
12113 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
12114 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
12115 (768 << 5) | 25, 0x0444); /* magic num */
12116 }
12117 break;
12118 default:
12119 break;
12120 }
12121
12122 /* Keep the laser running on fiber adapters */
12123 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
12124 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
12125 reg = CSR_READ(sc, WMREG_CTRL_EXT);
12126 reg |= CTRL_EXT_SWDPIN(3);
12127 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12128 }
12129
12130 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
12131 #if 0 /* for the multicast packet */
12132 reg |= WUFC_MC;
12133 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
12134 #endif
12135
12136 if (sc->sc_type == WM_T_PCH) {
12137 wm_enable_phy_wakeup(sc);
12138 } else {
12139 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
12140 CSR_WRITE(sc, WMREG_WUFC, reg);
12141 }
12142
12143 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
12144 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
12145 || (sc->sc_type == WM_T_PCH2))
12146 && (sc->sc_phytype == WMPHY_IGP_3))
12147 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
12148
12149 /* Request PME */
12150 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
12151 #if 0
12152 /* Disable WOL */
12153 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
12154 #else
12155 /* For WOL */
12156 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
12157 #endif
12158 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
12159 }
12160 #endif /* WM_WOL */
12161
12162 /* LPLU (Low Power Link Up) */
12163
12164 static void
12165 wm_lplu_d0_disable(struct wm_softc *sc)
12166 {
12167 uint32_t reg;
12168
12169 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12170 device_xname(sc->sc_dev), __func__));
12171
12172 reg = CSR_READ(sc, WMREG_PHY_CTRL);
12173 reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
12174 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
12175 }
12176
12177 static void
12178 wm_lplu_d0_disable_pch(struct wm_softc *sc)
12179 {
12180 uint32_t reg;
12181
12182 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12183 device_xname(sc->sc_dev), __func__));
12184
12185 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
12186 reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
12187 reg |= HV_OEM_BITS_ANEGNOW;
12188 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
12189 }
12190
12191 /* EEE (Energy Efficient Ethernet) */
12192
12193 static void
12194 wm_set_eee_i350(struct wm_softc *sc)
12195 {
12196 uint32_t ipcnfg, eeer;
12197
12198 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
12199 eeer = CSR_READ(sc, WMREG_EEER);
12200
12201 if ((sc->sc_flags & WM_F_EEE) != 0) {
12202 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
12203 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
12204 | EEER_LPI_FC);
12205 } else {
12206 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
12207 ipcnfg &= ~IPCNFG_10BASE_TE;
12208 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
12209 | EEER_LPI_FC);
12210 }
12211
12212 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
12213 CSR_WRITE(sc, WMREG_EEER, eeer);
12214 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
12215 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
12216 }
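/*
 * On the I350, IPCNFG selects which speeds may negotiate EEE (100M
 * and 1G above), while EEER enables the TX/RX low-power-idle states
 * and LPI flow control; the trailing reads presumably flush the
 * writes (see the XXX marks above).
 */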
12217
12218 /*
12219  * Workarounds (mainly PHY related).
12220  * Normally, PHY workarounds live in the PHY drivers.
12221  */
12222
12223 /* Work-around for 82566 Kumeran PCS lock loss */
12224 static void
12225 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
12226 {
12227 #if 0
12228 int miistatus, active, i;
12229 int reg;
12230
12231 miistatus = sc->sc_mii.mii_media_status;
12232
12233 /* If the link is not up, do nothing */
12234 if ((miistatus & IFM_ACTIVE) == 0)
12235 return;
12236
12237 active = sc->sc_mii.mii_media_active;
12238
12239 /* Nothing to do if the link is other than 1Gbps */
12240 if (IFM_SUBTYPE(active) != IFM_1000_T)
12241 return;
12242
12243 for (i = 0; i < 10; i++) {
12244 /* read twice */
12245 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
12246 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
12247 if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
12248 goto out; /* GOOD! */
12249
12250 /* Reset the PHY */
12251 wm_gmii_reset(sc);
12252 delay(5*1000);
12253 }
12254
12255 /* Disable GigE link negotiation */
12256 reg = CSR_READ(sc, WMREG_PHY_CTRL);
12257 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
12258 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
12259
12260 /*
12261 * Call gig speed drop workaround on Gig disable before accessing
12262 * any PHY registers.
12263 */
12264 wm_gig_downshift_workaround_ich8lan(sc);
12265
12266 out:
12267 return;
12268 #endif
12269 }
12270
12271 /* Workaround: WOL from S5 stops working (82566, IGP3 PHY) */
12272 static void
12273 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
12274 {
12275 uint16_t kmrn_reg;
12276
12277 /* Only for igp3 */
12278 if (sc->sc_phytype == WMPHY_IGP_3) {
12279 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
12280 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
12281 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
12282 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
12283 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
12284 }
12285 }
12286
12287 /*
12288  * Workaround for the PCH PHYs.
12289  * XXX should this be moved to a new PHY driver?
12290  */
12291 static void
12292 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
12293 {
12294
12295 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12296 device_xname(sc->sc_dev), __func__));
12297 KASSERT(sc->sc_type == WM_T_PCH);
12298
12299 if (sc->sc_phytype == WMPHY_82577)
12300 wm_set_mdio_slow_mode_hv(sc);
12301
12302 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
12303
12304 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
12305
12306 /* 82578 */
12307 if (sc->sc_phytype == WMPHY_82578) {
12308 struct mii_softc *child;
12309
12310 /*
12311 * Return registers to default by doing a soft reset then
12312 * writing 0x3140 to the control register
12313 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
12314 */
12315 child = LIST_FIRST(&sc->sc_mii.mii_phys);
12316 if ((child != NULL) && (child->mii_mpd_rev < 2)) {
12317 PHY_RESET(child);
12318 sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
12319 0x3140);
12320 }
12321 }
12322
12323 /* Select page 0 */
12324 sc->phy.acquire(sc);
12325 wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
12326 sc->phy.release(sc);
12327
12328 /*
12329 * Configure the K1 Si workaround during phy reset assuming there is
12330 * link so that it disables K1 if link is in 1Gbps.
12331 */
12332 wm_k1_gig_workaround_hv(sc, 1);
12333 }
12334
12335 static void
12336 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
12337 {
12338
12339 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12340 device_xname(sc->sc_dev), __func__));
12341 KASSERT(sc->sc_type == WM_T_PCH2);
12342
12343 wm_set_mdio_slow_mode_hv(sc);
12344 }
12345
12346 static int
12347 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
12348 {
12349 int k1_enable = sc->sc_nvm_k1_enabled;
12350
12351 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12352 device_xname(sc->sc_dev), __func__));
12353
12354 if (sc->phy.acquire(sc) != 0)
12355 return -1;
12356
12357 if (link) {
12358 k1_enable = 0;
12359
12360 /* Link stall fix for link up */
12361 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
12362 } else {
12363 /* Link stall fix for link down */
12364 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
12365 }
12366
12367 wm_configure_k1_ich8lan(sc, k1_enable);
12368 sc->phy.release(sc);
12369
12370 return 0;
12371 }
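/*
 * K1 is a Kumeran power-saving state that is incompatible with 1Gbps
 * operation, hence k1_enable is forced off above whenever the link is
 * (assumed to be) up.
 */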
12372
12373 static void
12374 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
12375 {
12376 uint32_t reg;
12377
12378 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
12379 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
12380 reg | HV_KMRN_MDIO_SLOW);
12381 }
12382
12383 static void
12384 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
12385 {
12386 uint32_t ctrl, ctrl_ext, tmp;
12387 uint16_t kmrn_reg;
12388
12389 kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
12390
12391 if (k1_enable)
12392 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
12393 else
12394 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
12395
12396 wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
12397
12398 delay(20);
12399
12400 ctrl = CSR_READ(sc, WMREG_CTRL);
12401 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
12402
12403 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
12404 tmp |= CTRL_FRCSPD;
12405
12406 CSR_WRITE(sc, WMREG_CTRL, tmp);
12407 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
12408 CSR_WRITE_FLUSH(sc);
12409 delay(20);
12410
12411 CSR_WRITE(sc, WMREG_CTRL, ctrl);
12412 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
12413 CSR_WRITE_FLUSH(sc);
12414 delay(20);
12415 }
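/*
 * The CTRL/CTRL_EXT sequence above briefly forces the MAC speed
 * (FRCSPD with SPD_BYPS) and then restores the original values,
 * apparently so that the new K1 setting is latched by the hardware.
 */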
12416
12417 /* Special case: the 82575 needs manual initialization ... */
12418 static void
12419 wm_reset_init_script_82575(struct wm_softc *sc)
12420 {
12421 	/*
12422 	 * Remark: this is untested code (we have no board without EEPROM);
12423 	 * it is the same setup as mentioned in the FreeBSD driver for
12424 	 * the i82575.
12425 	 */
12425
12426 /* SerDes configuration via SERDESCTRL */
12427 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
12428 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
12429 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
12430 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
12431
12432 /* CCM configuration via CCMCTL register */
12433 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
12434 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
12435
12436 /* PCIe lanes configuration */
12437 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
12438 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
12439 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
12440 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
12441
12442 /* PCIe PLL Configuration */
12443 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
12444 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
12445 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
12446 }
12447
12448 static void
12449 wm_reset_mdicnfg_82580(struct wm_softc *sc)
12450 {
12451 uint32_t reg;
12452 uint16_t nvmword;
12453 int rv;
12454
12455 if ((sc->sc_flags & WM_F_SGMII) == 0)
12456 return;
12457
12458 rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
12459 + NVM_OFF_CFG3_PORTA, 1, &nvmword);
12460 if (rv != 0) {
12461 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
12462 __func__);
12463 return;
12464 }
12465
12466 reg = CSR_READ(sc, WMREG_MDICNFG);
12467 if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
12468 reg |= MDICNFG_DEST;
12469 if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
12470 reg |= MDICNFG_COM_MDIO;
12471 CSR_WRITE(sc, WMREG_MDICNFG, reg);
12472 }
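/*
 * The MDICNFG destination/shared-MDIO bits are re-derived from the
 * per-port CFG3 NVM word here; this is only relevant (and only done)
 * when the port is strapped for SGMII.
 */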
12473
12474 /*
12475 * I210 Errata 25 and I211 Errata 10
12476 * Slow System Clock.
12477 */
12478 static void
12479 wm_pll_workaround_i210(struct wm_softc *sc)
12480 {
12481 uint32_t mdicnfg, wuc;
12482 uint32_t reg;
12483 pcireg_t pcireg;
12484 uint32_t pmreg;
12485 uint16_t nvmword, tmp_nvmword;
12486 int phyval;
12487 bool wa_done = false;
12488 int i;
12489
12490 /* Save WUC and MDICNFG registers */
12491 wuc = CSR_READ(sc, WMREG_WUC);
12492 mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
12493
12494 reg = mdicnfg & ~MDICNFG_DEST;
12495 CSR_WRITE(sc, WMREG_MDICNFG, reg);
12496
12497 if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
12498 nvmword = INVM_DEFAULT_AL;
12499 tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
12500
12501 /* Get Power Management cap offset */
12502 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
12503 &pmreg, NULL) == 0)
12504 return;
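	/*
	 * Retry loop: while the PHY PLL frequency register still reads
	 * as unconfigured, reset the internal PHY, rewrite the iNVM
	 * autoload word with the PLL workaround value, and bounce the
	 * device through D3 to force the PLL to re-lock.
	 */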
12505 for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
12506 phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
12507 GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
12508
12509 if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
12510 break; /* OK */
12511 }
12512
12513 wa_done = true;
12514 /* Directly reset the internal PHY */
12515 reg = CSR_READ(sc, WMREG_CTRL);
12516 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
12517
12518 reg = CSR_READ(sc, WMREG_CTRL_EXT);
12519 reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
12520 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12521
12522 CSR_WRITE(sc, WMREG_WUC, 0);
12523 reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
12524 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
12525
12526 pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
12527 pmreg + PCI_PMCSR);
12528 pcireg |= PCI_PMCSR_STATE_D3;
12529 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
12530 pmreg + PCI_PMCSR, pcireg);
12531 delay(1000);
12532 pcireg &= ~PCI_PMCSR_STATE_D3;
12533 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
12534 pmreg + PCI_PMCSR, pcireg);
12535
12536 reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
12537 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
12538
12539 /* Restore WUC register */
12540 CSR_WRITE(sc, WMREG_WUC, wuc);
12541 }
12542
12543 /* Restore MDICNFG setting */
12544 CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
12545 if (wa_done)
12546 aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
12547 }
12548