1 /*	$NetBSD: if_wm.c,v 1.431 2016/10/28 05:29:11 knakahara Exp $	*/
2
3 /*
4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*******************************************************************************
39
40 Copyright (c) 2001-2005, Intel Corporation
41 All rights reserved.
42
43 Redistribution and use in source and binary forms, with or without
44 modification, are permitted provided that the following conditions are met:
45
46 1. Redistributions of source code must retain the above copyright notice,
47 this list of conditions and the following disclaimer.
48
49 2. Redistributions in binary form must reproduce the above copyright
50 notice, this list of conditions and the following disclaimer in the
51 documentation and/or other materials provided with the distribution.
52
53 3. Neither the name of the Intel Corporation nor the names of its
54 contributors may be used to endorse or promote products derived from
55 this software without specific prior written permission.
56
57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67 POSSIBILITY OF SUCH DAMAGE.
68
69 *******************************************************************************/
70 /*
71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72 *
73 * TODO (in order of importance):
74 *
75 * - Check XXX'ed comments
76 * - Disable D0 LPLU on 8257[12356], 82580 and I350.
77 * - TX Multi queue improvement (refine queue selection logic)
78 * - Advanced Receive Descriptor
79 * - EEE (Energy Efficient Ethernet)
80 * - Virtual Function
81 * - Set LED correctly (based on contents in EEPROM)
82 * - Rework how parameters are loaded from the EEPROM.
83 * - Image Unique ID
84 */
85
86 #include <sys/cdefs.h>
87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.431 2016/10/28 05:29:11 knakahara Exp $");
88
89 #ifdef _KERNEL_OPT
90 #include "opt_net_mpsafe.h"
91 #endif
92
93 #include <sys/param.h>
94 #include <sys/systm.h>
95 #include <sys/callout.h>
96 #include <sys/mbuf.h>
97 #include <sys/malloc.h>
98 #include <sys/kmem.h>
99 #include <sys/kernel.h>
100 #include <sys/socket.h>
101 #include <sys/ioctl.h>
102 #include <sys/errno.h>
103 #include <sys/device.h>
104 #include <sys/queue.h>
105 #include <sys/syslog.h>
106 #include <sys/interrupt.h>
107 #include <sys/cpu.h>
108 #include <sys/pcq.h>
109
110 #include <sys/rndsource.h>
111
112 #include <net/if.h>
113 #include <net/if_dl.h>
114 #include <net/if_media.h>
115 #include <net/if_ether.h>
116
117 #include <net/bpf.h>
118
119 #include <netinet/in.h> /* XXX for struct ip */
120 #include <netinet/in_systm.h> /* XXX for struct ip */
121 #include <netinet/ip.h> /* XXX for struct ip */
122 #include <netinet/ip6.h> /* XXX for struct ip6_hdr */
123 #include <netinet/tcp.h> /* XXX for struct tcphdr */
124
125 #include <sys/bus.h>
126 #include <sys/intr.h>
127 #include <machine/endian.h>
128
129 #include <dev/mii/mii.h>
130 #include <dev/mii/miivar.h>
131 #include <dev/mii/miidevs.h>
132 #include <dev/mii/mii_bitbang.h>
133 #include <dev/mii/ikphyreg.h>
134 #include <dev/mii/igphyreg.h>
135 #include <dev/mii/igphyvar.h>
136 #include <dev/mii/inbmphyreg.h>
137
138 #include <dev/pci/pcireg.h>
139 #include <dev/pci/pcivar.h>
140 #include <dev/pci/pcidevs.h>
141
142 #include <dev/pci/if_wmreg.h>
143 #include <dev/pci/if_wmvar.h>
144
145 #ifdef WM_DEBUG
146 #define WM_DEBUG_LINK __BIT(0)
147 #define WM_DEBUG_TX __BIT(1)
148 #define WM_DEBUG_RX __BIT(2)
149 #define WM_DEBUG_GMII __BIT(3)
150 #define WM_DEBUG_MANAGE __BIT(4)
151 #define WM_DEBUG_NVM __BIT(5)
152 #define WM_DEBUG_INIT __BIT(6)
153 #define WM_DEBUG_LOCK __BIT(7)
154 int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
155 | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
156
157 #define DPRINTF(x, y) do { if (wm_debug & (x)) printf y; } while (0)
158 #else
159 #define DPRINTF(x, y) /* nothing */
160 #endif /* WM_DEBUG */
161
162 #ifdef NET_MPSAFE
163 #define WM_MPSAFE 1
164 #endif
165
166 /*
167  * The maximum number of interrupts that this driver uses.
168  */
169 #define WM_MAX_NQUEUEINTR 16
170 #define WM_MAX_NINTR (WM_MAX_NQUEUEINTR + 1)
171
172 /*
173 * Transmit descriptor list size. Due to errata, we can only have
174 * 256 hardware descriptors in the ring on < 82544, but we use 4096
175 * on >= 82544. We tell the upper layers that they can queue a lot
176 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
177 * of them at a time.
178 *
179 * We allow up to 256 (!) DMA segments per packet. Pathological packet
180 * chains containing many small mbufs have been observed in zero-copy
181 * situations with jumbo frames.
182 */
183 #define WM_NTXSEGS 256
184 #define WM_IFQUEUELEN 256
185 #define WM_TXQUEUELEN_MAX 64
186 #define WM_TXQUEUELEN_MAX_82547 16
187 #define WM_TXQUEUELEN(txq) ((txq)->txq_num)
188 #define WM_TXQUEUELEN_MASK(txq) (WM_TXQUEUELEN(txq) - 1)
189 #define WM_TXQUEUE_GC(txq) (WM_TXQUEUELEN(txq) / 8)
190 #define WM_NTXDESC_82542 256
191 #define WM_NTXDESC_82544 4096
192 #define WM_NTXDESC(txq) ((txq)->txq_ndesc)
193 #define WM_NTXDESC_MASK(txq) (WM_NTXDESC(txq) - 1)
194 #define WM_TXDESCS_SIZE(txq) (WM_NTXDESC(txq) * (txq)->txq_descsize)
195 #define WM_NEXTTX(txq, x) (((x) + 1) & WM_NTXDESC_MASK(txq))
196 #define WM_NEXTTXS(txq, x) (((x) + 1) & WM_TXQUEUELEN_MASK(txq))
197
198 #define WM_MAXTXDMA (2 * round_page(IP_MAXPACKET)) /* for TSO */
199
200 #define WM_TXINTERQSIZE 256
201
202 /*
203 * Receive descriptor list size. We have one Rx buffer for normal
204 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
205 * packet. We allocate 256 receive descriptors, each with a 2k
206 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
207 */
208 #define WM_NRXDESC 256
209 #define WM_NRXDESC_MASK (WM_NRXDESC - 1)
210 #define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK)
211 #define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK)
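
/*
 * Illustration (not compiled): WM_NRXDESC and WM_NTXDESC(txq) are powers
 * of two, so "(x + 1) & (N - 1)" is a cheap modulo and the ring index
 * wraps without a division or a branch.  With WM_NRXDESC == 256:
 *
 *	WM_NEXTRX(254) == 255
 *	WM_NEXTRX(255) == 0		(wraps to the start of the ring)
 *	WM_PREVRX(0)   == 255		(-1 & 0xff)
 */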
212
213 typedef union txdescs {
214 wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
215 nq_txdesc_t sctxu_nq_txdescs[WM_NTXDESC_82544];
216 } txdescs_t;
217
218 #define WM_CDTXOFF(txq, x) ((txq)->txq_descsize * (x))
219 #define WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
220
221 /*
222 * Software state for transmit jobs.
223 */
224 struct wm_txsoft {
225 struct mbuf *txs_mbuf; /* head of our mbuf chain */
226 bus_dmamap_t txs_dmamap; /* our DMA map */
227 int txs_firstdesc; /* first descriptor in packet */
228 int txs_lastdesc; /* last descriptor in packet */
229 int txs_ndesc; /* # of descriptors used */
230 };
231
232 /*
233 * Software state for receive buffers. Each descriptor gets a
234 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
235 * more than one buffer, we chain them together.
236 */
237 struct wm_rxsoft {
238 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
239 bus_dmamap_t rxs_dmamap; /* our DMA map */
240 };
241
242 #define WM_LINKUP_TIMEOUT 50
243
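/*
 * SWFW PHY semaphores, one per LAN function.  Presumably indexed by
 * sc_funcid (0 to 3) when acquiring the PHY semaphore for a given port.
 */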
244 static uint16_t swfwphysem[] = {
245 SWFW_PHY0_SM,
246 SWFW_PHY1_SM,
247 SWFW_PHY2_SM,
248 SWFW_PHY3_SM
249 };
250
251 static const uint32_t wm_82580_rxpbs_table[] = {
252 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
253 };
254
255 struct wm_softc;
256
257 #ifdef WM_EVENT_COUNTERS
258 #define WM_Q_EVCNT_DEFINE(qname, evname) \
259 char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
260 struct evcnt qname##_ev_##evname;
261
262 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype) \
263 	do {							\
264 snprintf((q)->qname##_##evname##_evcnt_name, \
265 sizeof((q)->qname##_##evname##_evcnt_name), \
266 "%s%02d%s", #qname, (qnum), #evname); \
267 evcnt_attach_dynamic(&(q)->qname##_ev_##evname, \
268 (evtype), NULL, (xname), \
269 (q)->qname##_##evname##_evcnt_name); \
270 	} while (0)
271
272 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname) \
273 WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
274
275 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname) \
276 WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
277 #endif /* WM_EVENT_COUNTERS */
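
/*
 * Illustration (not compiled): WM_Q_EVCNT_ATTACH() builds a per-queue
 * counter name at run time via token pasting.  For example,
 * WM_Q_EVCNT_ATTACH(txq, txdw, q, 0, xname, EVCNT_TYPE_INTR) expands
 * roughly to:
 *
 *	snprintf((q)->txq_txdw_evcnt_name,
 *	    sizeof((q)->txq_txdw_evcnt_name), "%s%02d%s", "txq", 0, "txdw");
 *	evcnt_attach_dynamic(&(q)->txq_ev_txdw, EVCNT_TYPE_INTR, NULL,
 *	    (xname), (q)->txq_txdw_evcnt_name);
 *
 * which attaches a counter named "txq00txdw".
 */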
278
279 struct wm_txqueue {
280 kmutex_t *txq_lock; /* lock for tx operations */
281
282 struct wm_softc *txq_sc; /* shortcut (skip struct wm_queue) */
283
284 /* Software state for the transmit descriptors. */
285 int txq_num; /* must be a power of two */
286 struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
287
288 /* TX control data structures. */
289 int txq_ndesc; /* must be a power of two */
290 size_t txq_descsize; /* a tx descriptor size */
291 txdescs_t *txq_descs_u;
292 bus_dmamap_t txq_desc_dmamap; /* control data DMA map */
293 bus_dma_segment_t txq_desc_seg; /* control data segment */
294 	int txq_desc_rseg;		/* actual number of control segments */
295 #define txq_desc_dma txq_desc_dmamap->dm_segs[0].ds_addr
296 #define txq_descs txq_descs_u->sctxu_txdescs
297 #define txq_nq_descs txq_descs_u->sctxu_nq_txdescs
298
299 bus_addr_t txq_tdt_reg; /* offset of TDT register */
300
301 int txq_free; /* number of free Tx descriptors */
302 int txq_next; /* next ready Tx descriptor */
303
304 int txq_sfree; /* number of free Tx jobs */
305 int txq_snext; /* next free Tx job */
306 int txq_sdirty; /* dirty Tx jobs */
307
308 /* These 4 variables are used only on the 82547. */
309 int txq_fifo_size; /* Tx FIFO size */
310 int txq_fifo_head; /* current head of FIFO */
311 uint32_t txq_fifo_addr; /* internal address of start of FIFO */
312 int txq_fifo_stall; /* Tx FIFO is stalled */
313
314 	/*
315 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
316 	 * CPUs.  This queue mediates between them without blocking.
317 	 */
318 pcq_t *txq_interq;
319
320 	/*
321 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
322 	 * to manage the Tx H/W queue's busy flag.
323 	 */
324 int txq_flags; /* flags for H/W queue, see below */
325 #define WM_TXQ_NO_SPACE 0x1
326
327 bool txq_stopping;
328
329 #ifdef WM_EVENT_COUNTERS
330 WM_Q_EVCNT_DEFINE(txq, txsstall) /* Tx stalled due to no txs */
331 WM_Q_EVCNT_DEFINE(txq, txdstall) /* Tx stalled due to no txd */
332 WM_Q_EVCNT_DEFINE(txq, txfifo_stall) /* Tx FIFO stalls (82547) */
333 WM_Q_EVCNT_DEFINE(txq, txdw) /* Tx descriptor interrupts */
334 WM_Q_EVCNT_DEFINE(txq, txqe) /* Tx queue empty interrupts */
335 /* XXX not used? */
336
337 WM_Q_EVCNT_DEFINE(txq, txipsum) /* IP checksums comp. out-bound */
338 	WM_Q_EVCNT_DEFINE(txq, txtusum)	/* TCP/UDP cksums comp. out-bound */
339 WM_Q_EVCNT_DEFINE(txq, txtusum6) /* TCP/UDP v6 cksums comp. out-bound */
340 WM_Q_EVCNT_DEFINE(txq, txtso) /* TCP seg offload out-bound (IPv4) */
341 WM_Q_EVCNT_DEFINE(txq, txtso6) /* TCP seg offload out-bound (IPv6) */
342 WM_Q_EVCNT_DEFINE(txq, txtsopain) /* painful header manip. for TSO */
343
344 	WM_Q_EVCNT_DEFINE(txq, txdrop)	/* Tx packets dropped (too many segs) */
345
346 WM_Q_EVCNT_DEFINE(txq, tu) /* Tx underrun */
347
348 char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
349 struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
350 #endif /* WM_EVENT_COUNTERS */
351 };
352
353 struct wm_rxqueue {
354 kmutex_t *rxq_lock; /* lock for rx operations */
355
356 struct wm_softc *rxq_sc; /* shortcut (skip struct wm_queue) */
357
358 /* Software state for the receive descriptors. */
359 wiseman_rxdesc_t *rxq_descs;
360
361 /* RX control data structures. */
362 struct wm_rxsoft rxq_soft[WM_NRXDESC];
363 bus_dmamap_t rxq_desc_dmamap; /* control data DMA map */
364 bus_dma_segment_t rxq_desc_seg; /* control data segment */
365 	int rxq_desc_rseg;		/* actual number of control segments */
366 size_t rxq_desc_size; /* control data size */
367 #define rxq_desc_dma rxq_desc_dmamap->dm_segs[0].ds_addr
368
369 bus_addr_t rxq_rdt_reg; /* offset of RDT register */
370
371 int rxq_ptr; /* next ready Rx desc/queue ent */
372 int rxq_discard;
373 int rxq_len;
374 struct mbuf *rxq_head;
375 struct mbuf *rxq_tail;
376 struct mbuf **rxq_tailp;
377
378 bool rxq_stopping;
379
380 #ifdef WM_EVENT_COUNTERS
381 WM_Q_EVCNT_DEFINE(rxq, rxintr); /* Rx interrupts */
382
383 WM_Q_EVCNT_DEFINE(rxq, rxipsum); /* IP checksums checked in-bound */
384 WM_Q_EVCNT_DEFINE(rxq, rxtusum); /* TCP/UDP cksums checked in-bound */
385 #endif
386 };
387
388 struct wm_queue {
389 int wmq_id; /* index of transmit and receive queues */
390 int wmq_intr_idx; /* index of MSI-X tables */
391
392 struct wm_txqueue wmq_txq;
393 struct wm_rxqueue wmq_rxq;
394 };
395
396 struct wm_phyop {
397 int (*acquire)(struct wm_softc *);
398 void (*release)(struct wm_softc *);
399 };
400
401 /*
402 * Software state per device.
403 */
404 struct wm_softc {
405 device_t sc_dev; /* generic device information */
406 bus_space_tag_t sc_st; /* bus space tag */
407 bus_space_handle_t sc_sh; /* bus space handle */
408 bus_size_t sc_ss; /* bus space size */
409 bus_space_tag_t sc_iot; /* I/O space tag */
410 bus_space_handle_t sc_ioh; /* I/O space handle */
411 bus_size_t sc_ios; /* I/O space size */
412 bus_space_tag_t sc_flasht; /* flash registers space tag */
413 bus_space_handle_t sc_flashh; /* flash registers space handle */
414 bus_size_t sc_flashs; /* flash registers space size */
415 off_t sc_flashreg_offset; /*
416 * offset to flash registers from
417 * start of BAR
418 */
419 bus_dma_tag_t sc_dmat; /* bus DMA tag */
420
421 struct ethercom sc_ethercom; /* ethernet common data */
422 struct mii_data sc_mii; /* MII/media information */
423
424 pci_chipset_tag_t sc_pc;
425 pcitag_t sc_pcitag;
426 int sc_bus_speed; /* PCI/PCIX bus speed */
427 int sc_pcixe_capoff; /* PCI[Xe] capability reg offset */
428
429 uint16_t sc_pcidevid; /* PCI device ID */
430 wm_chip_type sc_type; /* MAC type */
431 int sc_rev; /* MAC revision */
432 wm_phy_type sc_phytype; /* PHY type */
433 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
434 #define WM_MEDIATYPE_UNKNOWN 0x00
435 #define WM_MEDIATYPE_FIBER 0x01
436 #define WM_MEDIATYPE_COPPER 0x02
437 #define WM_MEDIATYPE_SERDES 0x03 /* Internal SERDES */
438 int sc_funcid; /* unit number of the chip (0 to 3) */
439 int sc_flags; /* flags; see below */
440 int sc_if_flags; /* last if_flags */
441 int sc_flowflags; /* 802.3x flow control flags */
442 int sc_align_tweak;
443
444 	void *sc_ihs[WM_MAX_NINTR];	/*
445 					 * Interrupt cookies.
446 					 * Legacy and MSI use sc_ihs[0].
447 					 */
448 	pci_intr_handle_t *sc_intrs;	/* Legacy and MSI use sc_intrs[0] */
449 int sc_nintrs; /* number of interrupts */
450
451 int sc_link_intr_idx; /* index of MSI-X tables */
452
453 callout_t sc_tick_ch; /* tick callout */
454 bool sc_core_stopping;
455
456 int sc_nvm_ver_major;
457 int sc_nvm_ver_minor;
458 int sc_nvm_ver_build;
459 int sc_nvm_addrbits; /* NVM address bits */
460 unsigned int sc_nvm_wordsize; /* NVM word size */
461 int sc_ich8_flash_base;
462 int sc_ich8_flash_bank_size;
463 int sc_nvm_k1_enabled;
464
465 int sc_nqueues;
466 struct wm_queue *sc_queue;
467
468 int sc_affinity_offset;
469
470 #ifdef WM_EVENT_COUNTERS
471 /* Event counters. */
472 struct evcnt sc_ev_linkintr; /* Link interrupts */
473
474 /* WM_T_82542_2_1 only */
475 struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */
476 struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */
477 struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */
478 struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */
479 struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */
480 #endif /* WM_EVENT_COUNTERS */
481
482 	/* This variable is used only on the 82547. */
483 callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */
484
485 uint32_t sc_ctrl; /* prototype CTRL register */
486 #if 0
487 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */
488 #endif
489 uint32_t sc_icr; /* prototype interrupt bits */
490 uint32_t sc_itr; /* prototype intr throttling reg */
491 uint32_t sc_tctl; /* prototype TCTL register */
492 uint32_t sc_rctl; /* prototype RCTL register */
493 uint32_t sc_txcw; /* prototype TXCW register */
494 uint32_t sc_tipg; /* prototype TIPG register */
495 uint32_t sc_fcrtl; /* prototype FCRTL register */
496 uint32_t sc_pba; /* prototype PBA register */
497
498 int sc_tbi_linkup; /* TBI link status */
499 int sc_tbi_serdes_anegticks; /* autonegotiation ticks */
500 int sc_tbi_serdes_ticks; /* tbi ticks */
501
502 int sc_mchash_type; /* multicast filter offset */
503
504 krndsource_t rnd_source; /* random source */
505
506 struct if_percpuq *sc_ipq; /* softint-based input queues */
507
508 kmutex_t *sc_core_lock; /* lock for softc operations */
509 kmutex_t *sc_ich_phymtx; /*
510 * 82574/82583/ICH/PCH specific PHY
511 * mutex. For 82574/82583, the mutex
512 * is used for both PHY and NVM.
513 */
514 kmutex_t *sc_ich_nvmmtx; /* ICH/PCH specific NVM mutex */
515
516 struct wm_phyop phy;
517 };
518
519 #define WM_CORE_LOCK(_sc) if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
520 #define WM_CORE_UNLOCK(_sc) if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
521 #define WM_CORE_LOCKED(_sc) (!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
522
523 #ifdef WM_MPSAFE
524 #define CALLOUT_FLAGS CALLOUT_MPSAFE
525 #else
526 #define CALLOUT_FLAGS 0
527 #endif
528
529 #define WM_RXCHAIN_RESET(rxq) \
530 do { \
531 (rxq)->rxq_tailp = &(rxq)->rxq_head; \
532 *(rxq)->rxq_tailp = NULL; \
533 (rxq)->rxq_len = 0; \
534 } while (/*CONSTCOND*/0)
535
536 #define WM_RXCHAIN_LINK(rxq, m) \
537 do { \
538 *(rxq)->rxq_tailp = (rxq)->rxq_tail = (m); \
539 (rxq)->rxq_tailp = &(m)->m_next; \
540 } while (/*CONSTCOND*/0)
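
/*
 * Illustration (not compiled): rxq_tailp always points at the m_next slot
 * that the next fragment should fill, so linking is O(1) with no list
 * walk.  Starting from WM_RXCHAIN_RESET(rxq), linking m1 and then m2
 * leaves:
 *
 *	rxq_head -> m1, m1->m_next -> m2
 *	rxq_tail -> m2, rxq_tailp == &m2->m_next
 */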
541
542 #ifdef WM_EVENT_COUNTERS
543 #define WM_EVCNT_INCR(ev) (ev)->ev_count++
544 #define WM_EVCNT_ADD(ev, val) (ev)->ev_count += (val)
545
546 #define WM_Q_EVCNT_INCR(qname, evname) \
547 WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
548 #define WM_Q_EVCNT_ADD(qname, evname, val) \
549 WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
550 #else /* !WM_EVENT_COUNTERS */
551 #define WM_EVCNT_INCR(ev) /* nothing */
552 #define WM_EVCNT_ADD(ev, val) /* nothing */
553
554 #define WM_Q_EVCNT_INCR(qname, evname) /* nothing */
555 #define WM_Q_EVCNT_ADD(qname, evname, val) /* nothing */
556 #endif /* !WM_EVENT_COUNTERS */
557
558 #define CSR_READ(sc, reg) \
559 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
560 #define CSR_WRITE(sc, reg, val) \
561 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
562 #define CSR_WRITE_FLUSH(sc) \
563 (void) CSR_READ((sc), WMREG_STATUS)
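
/*
 * CSR_WRITE_FLUSH() is the usual PCI posted-write flush idiom: the
 * (discarded) read of STATUS cannot complete until all preceding writes
 * have reached the device.  A typical (illustrative) use:
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 *	CSR_WRITE_FLUSH(sc);		// make sure the write has landed
 *	delay(...);
 */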
564
565 #define ICH8_FLASH_READ32(sc, reg) \
566 bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, \
567 (reg) + sc->sc_flashreg_offset)
568 #define ICH8_FLASH_WRITE32(sc, reg, data) \
569 bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, \
570 (reg) + sc->sc_flashreg_offset, (data))
571
572 #define ICH8_FLASH_READ16(sc, reg) \
573 bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, \
574 (reg) + sc->sc_flashreg_offset)
575 #define ICH8_FLASH_WRITE16(sc, reg, data) \
576 bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, \
577 (reg) + sc->sc_flashreg_offset, (data))
578
579 #define WM_CDTXADDR(txq, x) ((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
580 #define WM_CDRXADDR(rxq, x) ((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
581
582 #define WM_CDTXADDR_LO(txq, x) (WM_CDTXADDR((txq), (x)) & 0xffffffffU)
583 #define WM_CDTXADDR_HI(txq, x) \
584 (sizeof(bus_addr_t) == 8 ? \
585 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
586
587 #define WM_CDRXADDR_LO(rxq, x) (WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
588 #define WM_CDRXADDR_HI(rxq, x) \
589 (sizeof(bus_addr_t) == 8 ? \
590 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
591
592 /*
593 * Register read/write functions.
594 * Other than CSR_{READ|WRITE}().
595 */
596 #if 0
597 static inline uint32_t wm_io_read(struct wm_softc *, int);
598 #endif
599 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
600 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
601 uint32_t, uint32_t);
602 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
603
604 /*
605 * Descriptor sync/init functions.
606 */
607 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
608 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
609 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
610
611 /*
612 * Device driver interface functions and commonly used functions.
613 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
614 */
615 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
616 static int wm_match(device_t, cfdata_t, void *);
617 static void wm_attach(device_t, device_t, void *);
618 static int wm_detach(device_t, int);
619 static bool wm_suspend(device_t, const pmf_qual_t *);
620 static bool wm_resume(device_t, const pmf_qual_t *);
621 static void wm_watchdog(struct ifnet *);
622 static void wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
623 static void wm_tick(void *);
624 static int wm_ifflags_cb(struct ethercom *);
625 static int wm_ioctl(struct ifnet *, u_long, void *);
626 /* MAC address related */
627 static uint16_t wm_check_alt_mac_addr(struct wm_softc *);
628 static int wm_read_mac_addr(struct wm_softc *, uint8_t *);
629 static void wm_set_ral(struct wm_softc *, const uint8_t *, int);
630 static uint32_t wm_mchash(struct wm_softc *, const uint8_t *);
631 static void wm_set_filter(struct wm_softc *);
632 /* Reset and init related */
633 static void wm_set_vlan(struct wm_softc *);
634 static void wm_set_pcie_completion_timeout(struct wm_softc *);
635 static void wm_get_auto_rd_done(struct wm_softc *);
636 static void wm_lan_init_done(struct wm_softc *);
637 static void wm_get_cfg_done(struct wm_softc *);
638 static void wm_initialize_hardware_bits(struct wm_softc *);
639 static uint32_t wm_rxpbs_adjust_82580(uint32_t);
640 static void wm_reset(struct wm_softc *);
641 static int wm_add_rxbuf(struct wm_rxqueue *, int);
642 static void wm_rxdrain(struct wm_rxqueue *);
643 static void wm_rss_getkey(uint8_t *);
644 static void wm_init_rss(struct wm_softc *);
645 static void wm_adjust_qnum(struct wm_softc *, int);
646 static int wm_setup_legacy(struct wm_softc *);
647 static int wm_setup_msix(struct wm_softc *);
648 static int wm_init(struct ifnet *);
649 static int wm_init_locked(struct ifnet *);
650 static void wm_turnon(struct wm_softc *);
651 static void wm_turnoff(struct wm_softc *);
652 static void wm_stop(struct ifnet *, int);
653 static void wm_stop_locked(struct ifnet *, int);
654 static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
655 static void wm_82547_txfifo_stall(void *);
656 static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
657 /* DMA related */
658 static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
659 static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
660 static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
661 static void wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
662 struct wm_txqueue *);
663 static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
664 static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
665 static void wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
666 struct wm_rxqueue *);
667 static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
668 static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
669 static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
670 static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
671 static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
672 static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
673 static void wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
674 struct wm_txqueue *);
675 static int wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
676 struct wm_rxqueue *);
677 static int wm_alloc_txrx_queues(struct wm_softc *);
678 static void wm_free_txrx_queues(struct wm_softc *);
679 static int wm_init_txrx_queues(struct wm_softc *);
680 /* Start */
681 static int wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
682 uint32_t *, uint8_t *);
683 static void wm_start(struct ifnet *);
684 static void wm_start_locked(struct ifnet *);
685 static int wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
686 struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
687 static void wm_nq_start(struct ifnet *);
688 static void wm_nq_start_locked(struct ifnet *);
689 static int wm_nq_transmit(struct ifnet *, struct mbuf *);
690 static inline int wm_nq_select_txqueue(struct ifnet *, struct mbuf *);
691 static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
692 static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
693 /* Interrupt */
694 static int wm_txeof(struct wm_softc *, struct wm_txqueue *);
695 static void wm_rxeof(struct wm_rxqueue *);
696 static void wm_linkintr_gmii(struct wm_softc *, uint32_t);
697 static void wm_linkintr_tbi(struct wm_softc *, uint32_t);
698 static void wm_linkintr_serdes(struct wm_softc *, uint32_t);
699 static void wm_linkintr(struct wm_softc *, uint32_t);
700 static int wm_intr_legacy(void *);
701 static int wm_txrxintr_msix(void *);
702 static int wm_linkintr_msix(void *);
703
704 /*
705 * Media related.
706 * GMII, SGMII, TBI, SERDES and SFP.
707 */
708 /* Common */
709 static void wm_tbi_serdes_set_linkled(struct wm_softc *);
710 /* GMII related */
711 static void wm_gmii_reset(struct wm_softc *);
712 static int wm_get_phy_id_82575(struct wm_softc *);
713 static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
714 static int wm_gmii_mediachange(struct ifnet *);
715 static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
716 static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
717 static uint32_t wm_i82543_mii_recvbits(struct wm_softc *);
718 static int wm_gmii_i82543_readreg(device_t, int, int);
719 static void wm_gmii_i82543_writereg(device_t, int, int, int);
720 static int wm_gmii_mdic_readreg(device_t, int, int);
721 static void wm_gmii_mdic_writereg(device_t, int, int, int);
722 static int wm_gmii_i82544_readreg(device_t, int, int);
723 static void wm_gmii_i82544_writereg(device_t, int, int, int);
724 static int wm_gmii_i80003_readreg(device_t, int, int);
725 static void wm_gmii_i80003_writereg(device_t, int, int, int);
726 static int wm_gmii_bm_readreg(device_t, int, int);
727 static void wm_gmii_bm_writereg(device_t, int, int, int);
728 static void wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
729 static int wm_gmii_hv_readreg(device_t, int, int);
730 static int wm_gmii_hv_readreg_locked(device_t, int, int);
731 static void wm_gmii_hv_writereg(device_t, int, int, int);
732 static void wm_gmii_hv_writereg_locked(device_t, int, int, int);
733 static int wm_gmii_82580_readreg(device_t, int, int);
734 static void wm_gmii_82580_writereg(device_t, int, int, int);
735 static int wm_gmii_gs40g_readreg(device_t, int, int);
736 static void wm_gmii_gs40g_writereg(device_t, int, int, int);
737 static void wm_gmii_statchg(struct ifnet *);
738 static int wm_kmrn_readreg(struct wm_softc *, int);
739 static int wm_kmrn_readreg_locked(struct wm_softc *, int);
740 static void wm_kmrn_writereg(struct wm_softc *, int, int);
741 static void wm_kmrn_writereg_locked(struct wm_softc *, int, int);
742 /* SGMII */
743 static bool wm_sgmii_uses_mdio(struct wm_softc *);
744 static int wm_sgmii_readreg(device_t, int, int);
745 static void wm_sgmii_writereg(device_t, int, int, int);
746 /* TBI related */
747 static void wm_tbi_mediainit(struct wm_softc *);
748 static int wm_tbi_mediachange(struct ifnet *);
749 static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
750 static int wm_check_for_link(struct wm_softc *);
751 static void wm_tbi_tick(struct wm_softc *);
752 /* SERDES related */
753 static void wm_serdes_power_up_link_82575(struct wm_softc *);
754 static int wm_serdes_mediachange(struct ifnet *);
755 static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
756 static void wm_serdes_tick(struct wm_softc *);
757 /* SFP related */
758 static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
759 static uint32_t wm_sfp_get_media_type(struct wm_softc *);
760
761 /*
762 * NVM related.
763 * Microwire, SPI (w/wo EERD) and Flash.
764 */
765 /* Misc functions */
766 static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
767 static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
768 static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
769 /* Microwire */
770 static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
771 /* SPI */
772 static int wm_nvm_ready_spi(struct wm_softc *);
773 static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
774 /* For use with EERD */
775 static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
776 static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
777 /* Flash */
778 static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
779 unsigned int *);
780 static int32_t wm_ich8_cycle_init(struct wm_softc *);
781 static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
782 static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
783 uint32_t *);
784 static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
785 static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
786 static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
787 static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
788 static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
789 /* iNVM */
790 static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
791 static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
792 /* Lock, detecting NVM type, validate checksum and read */
793 static int wm_nvm_acquire(struct wm_softc *);
794 static void wm_nvm_release(struct wm_softc *);
795 static int wm_nvm_is_onboard_eeprom(struct wm_softc *);
796 static int wm_nvm_get_flash_presence_i210(struct wm_softc *);
797 static int wm_nvm_validate_checksum(struct wm_softc *);
798 static void wm_nvm_version_invm(struct wm_softc *);
799 static void wm_nvm_version(struct wm_softc *);
800 static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
801
802 /*
803 * Hardware semaphores.
804  * Very complex...
805 */
806 static int wm_get_null(struct wm_softc *);
807 static void wm_put_null(struct wm_softc *);
808 static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
809 static void wm_put_swsm_semaphore(struct wm_softc *);
810 static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
811 static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
812 static int wm_get_phy_82575(struct wm_softc *);
813 static void wm_put_phy_82575(struct wm_softc *);
814 static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
815 static void wm_put_swfwhw_semaphore(struct wm_softc *);
816 static int wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */
817 static void wm_put_swflag_ich8lan(struct wm_softc *);
818 static int wm_get_nvm_ich8lan(struct wm_softc *); /* For NVM */
819 static void wm_put_nvm_ich8lan(struct wm_softc *);
820 static int wm_get_hw_semaphore_82573(struct wm_softc *);
821 static void wm_put_hw_semaphore_82573(struct wm_softc *);
822
823 /*
824 * Management mode and power management related subroutines.
825 * BMC, AMT, suspend/resume and EEE.
826 */
827 #ifdef WM_WOL
828 static int wm_check_mng_mode(struct wm_softc *);
829 static int wm_check_mng_mode_ich8lan(struct wm_softc *);
830 static int wm_check_mng_mode_82574(struct wm_softc *);
831 static int wm_check_mng_mode_generic(struct wm_softc *);
832 #endif
833 static int wm_enable_mng_pass_thru(struct wm_softc *);
834 static bool wm_phy_resetisblocked(struct wm_softc *);
835 static void wm_get_hw_control(struct wm_softc *);
836 static void wm_release_hw_control(struct wm_softc *);
837 static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
838 static void wm_smbustopci(struct wm_softc *);
839 static void wm_init_manageability(struct wm_softc *);
840 static void wm_release_manageability(struct wm_softc *);
841 static void wm_get_wakeup(struct wm_softc *);
842 #ifdef WM_WOL
843 static void wm_enable_phy_wakeup(struct wm_softc *);
844 static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
845 static void wm_enable_wakeup(struct wm_softc *);
846 #endif
847 /* LPLU (Low Power Link Up) */
848 static void wm_lplu_d0_disable(struct wm_softc *);
849 static void wm_lplu_d0_disable_pch(struct wm_softc *);
850 /* EEE */
851 static void wm_set_eee_i350(struct wm_softc *);
852
853 /*
854 * Workarounds (mainly PHY related).
855 * Basically, PHY's workarounds are in the PHY drivers.
856 */
857 static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
858 static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
859 static void wm_hv_phy_workaround_ich8lan(struct wm_softc *);
860 static void wm_lv_phy_workaround_ich8lan(struct wm_softc *);
861 static int wm_k1_gig_workaround_hv(struct wm_softc *, int);
862 static void wm_set_mdio_slow_mode_hv(struct wm_softc *);
863 static void wm_configure_k1_ich8lan(struct wm_softc *, int);
864 static void wm_reset_init_script_82575(struct wm_softc *);
865 static void wm_reset_mdicnfg_82580(struct wm_softc *);
866 static void wm_pll_workaround_i210(struct wm_softc *);
867
868 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
869 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
870
871 /*
872 * Devices supported by this driver.
873 */
874 static const struct wm_product {
875 pci_vendor_id_t wmp_vendor;
876 pci_product_id_t wmp_product;
877 const char *wmp_name;
878 wm_chip_type wmp_type;
879 uint32_t wmp_flags;
880 #define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN
881 #define WMP_F_FIBER WM_MEDIATYPE_FIBER
882 #define WMP_F_COPPER WM_MEDIATYPE_COPPER
883 #define WMP_F_SERDES WM_MEDIATYPE_SERDES
884 #define WMP_MEDIATYPE(x) ((x) & 0x03)
885 } wm_products[] = {
886 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
887 "Intel i82542 1000BASE-X Ethernet",
888 WM_T_82542_2_1, WMP_F_FIBER },
889
890 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
891 "Intel i82543GC 1000BASE-X Ethernet",
892 WM_T_82543, WMP_F_FIBER },
893
894 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
895 "Intel i82543GC 1000BASE-T Ethernet",
896 WM_T_82543, WMP_F_COPPER },
897
898 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
899 "Intel i82544EI 1000BASE-T Ethernet",
900 WM_T_82544, WMP_F_COPPER },
901
902 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
903 "Intel i82544EI 1000BASE-X Ethernet",
904 WM_T_82544, WMP_F_FIBER },
905
906 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
907 "Intel i82544GC 1000BASE-T Ethernet",
908 WM_T_82544, WMP_F_COPPER },
909
910 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
911 "Intel i82544GC (LOM) 1000BASE-T Ethernet",
912 WM_T_82544, WMP_F_COPPER },
913
914 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
915 "Intel i82540EM 1000BASE-T Ethernet",
916 WM_T_82540, WMP_F_COPPER },
917
918 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
919 "Intel i82540EM (LOM) 1000BASE-T Ethernet",
920 WM_T_82540, WMP_F_COPPER },
921
922 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
923 "Intel i82540EP 1000BASE-T Ethernet",
924 WM_T_82540, WMP_F_COPPER },
925
926 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
927 "Intel i82540EP 1000BASE-T Ethernet",
928 WM_T_82540, WMP_F_COPPER },
929
930 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
931 "Intel i82540EP 1000BASE-T Ethernet",
932 WM_T_82540, WMP_F_COPPER },
933
934 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
935 "Intel i82545EM 1000BASE-T Ethernet",
936 WM_T_82545, WMP_F_COPPER },
937
938 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
939 "Intel i82545GM 1000BASE-T Ethernet",
940 WM_T_82545_3, WMP_F_COPPER },
941
942 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
943 "Intel i82545GM 1000BASE-X Ethernet",
944 WM_T_82545_3, WMP_F_FIBER },
945
946 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
947 "Intel i82545GM Gigabit Ethernet (SERDES)",
948 WM_T_82545_3, WMP_F_SERDES },
949
950 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
951 "Intel i82546EB 1000BASE-T Ethernet",
952 WM_T_82546, WMP_F_COPPER },
953
954 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
955 "Intel i82546EB 1000BASE-T Ethernet",
956 WM_T_82546, WMP_F_COPPER },
957
958 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
959 "Intel i82545EM 1000BASE-X Ethernet",
960 WM_T_82545, WMP_F_FIBER },
961
962 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
963 "Intel i82546EB 1000BASE-X Ethernet",
964 WM_T_82546, WMP_F_FIBER },
965
966 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
967 "Intel i82546GB 1000BASE-T Ethernet",
968 WM_T_82546_3, WMP_F_COPPER },
969
970 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
971 "Intel i82546GB 1000BASE-X Ethernet",
972 WM_T_82546_3, WMP_F_FIBER },
973
974 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
975 "Intel i82546GB Gigabit Ethernet (SERDES)",
976 WM_T_82546_3, WMP_F_SERDES },
977
978 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
979 "i82546GB quad-port Gigabit Ethernet",
980 WM_T_82546_3, WMP_F_COPPER },
981
982 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
983 "i82546GB quad-port Gigabit Ethernet (KSP3)",
984 WM_T_82546_3, WMP_F_COPPER },
985
986 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
987 "Intel PRO/1000MT (82546GB)",
988 WM_T_82546_3, WMP_F_COPPER },
989
990 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
991 "Intel i82541EI 1000BASE-T Ethernet",
992 WM_T_82541, WMP_F_COPPER },
993
994 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
995 "Intel i82541ER (LOM) 1000BASE-T Ethernet",
996 WM_T_82541, WMP_F_COPPER },
997
998 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
999 "Intel i82541EI Mobile 1000BASE-T Ethernet",
1000 WM_T_82541, WMP_F_COPPER },
1001
1002 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
1003 "Intel i82541ER 1000BASE-T Ethernet",
1004 WM_T_82541_2, WMP_F_COPPER },
1005
1006 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
1007 "Intel i82541GI 1000BASE-T Ethernet",
1008 WM_T_82541_2, WMP_F_COPPER },
1009
1010 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
1011 "Intel i82541GI Mobile 1000BASE-T Ethernet",
1012 WM_T_82541_2, WMP_F_COPPER },
1013
1014 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
1015 "Intel i82541PI 1000BASE-T Ethernet",
1016 WM_T_82541_2, WMP_F_COPPER },
1017
1018 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
1019 "Intel i82547EI 1000BASE-T Ethernet",
1020 WM_T_82547, WMP_F_COPPER },
1021
1022 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
1023 "Intel i82547EI Mobile 1000BASE-T Ethernet",
1024 WM_T_82547, WMP_F_COPPER },
1025
1026 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
1027 "Intel i82547GI 1000BASE-T Ethernet",
1028 WM_T_82547_2, WMP_F_COPPER },
1029
1030 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
1031 "Intel PRO/1000 PT (82571EB)",
1032 WM_T_82571, WMP_F_COPPER },
1033
1034 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
1035 "Intel PRO/1000 PF (82571EB)",
1036 WM_T_82571, WMP_F_FIBER },
1037
1038 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
1039 "Intel PRO/1000 PB (82571EB)",
1040 WM_T_82571, WMP_F_SERDES },
1041
1042 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
1043 "Intel PRO/1000 QT (82571EB)",
1044 WM_T_82571, WMP_F_COPPER },
1045
1046 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
1047 "Intel PRO/1000 PT Quad Port Server Adapter",
1048 WM_T_82571, WMP_F_COPPER, },
1049
1050 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
1051 "Intel Gigabit PT Quad Port Server ExpressModule",
1052 WM_T_82571, WMP_F_COPPER, },
1053
1054 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
1055 "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
1056 WM_T_82571, WMP_F_SERDES, },
1057
1058 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
1059 "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
1060 WM_T_82571, WMP_F_SERDES, },
1061
1062 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
1063 "Intel 82571EB Quad 1000baseX Ethernet",
1064 WM_T_82571, WMP_F_FIBER, },
1065
1066 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
1067 "Intel i82572EI 1000baseT Ethernet",
1068 WM_T_82572, WMP_F_COPPER },
1069
1070 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
1071 "Intel i82572EI 1000baseX Ethernet",
1072 WM_T_82572, WMP_F_FIBER },
1073
1074 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
1075 "Intel i82572EI Gigabit Ethernet (SERDES)",
1076 WM_T_82572, WMP_F_SERDES },
1077
1078 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
1079 "Intel i82572EI 1000baseT Ethernet",
1080 WM_T_82572, WMP_F_COPPER },
1081
1082 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
1083 "Intel i82573E",
1084 WM_T_82573, WMP_F_COPPER },
1085
1086 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
1087 "Intel i82573E IAMT",
1088 WM_T_82573, WMP_F_COPPER },
1089
1090 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
1091 "Intel i82573L Gigabit Ethernet",
1092 WM_T_82573, WMP_F_COPPER },
1093
1094 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
1095 "Intel i82574L",
1096 WM_T_82574, WMP_F_COPPER },
1097
1098 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA,
1099 "Intel i82574L",
1100 WM_T_82574, WMP_F_COPPER },
1101
1102 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
1103 "Intel i82583V",
1104 WM_T_82583, WMP_F_COPPER },
1105
1106 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
1107 "i80003 dual 1000baseT Ethernet",
1108 WM_T_80003, WMP_F_COPPER },
1109
1110 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
1111 "i80003 dual 1000baseX Ethernet",
1112 WM_T_80003, WMP_F_COPPER },
1113
1114 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
1115 "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
1116 WM_T_80003, WMP_F_SERDES },
1117
1118 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
1119 "Intel i80003 1000baseT Ethernet",
1120 WM_T_80003, WMP_F_COPPER },
1121
1122 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
1123 "Intel i80003 Gigabit Ethernet (SERDES)",
1124 WM_T_80003, WMP_F_SERDES },
1125
1126 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
1127 "Intel i82801H (M_AMT) LAN Controller",
1128 WM_T_ICH8, WMP_F_COPPER },
1129 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
1130 "Intel i82801H (AMT) LAN Controller",
1131 WM_T_ICH8, WMP_F_COPPER },
1132 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
1133 "Intel i82801H LAN Controller",
1134 WM_T_ICH8, WMP_F_COPPER },
1135 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
1136 "Intel i82801H (IFE) LAN Controller",
1137 WM_T_ICH8, WMP_F_COPPER },
1138 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
1139 "Intel i82801H (M) LAN Controller",
1140 WM_T_ICH8, WMP_F_COPPER },
1141 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
1142 "Intel i82801H IFE (GT) LAN Controller",
1143 WM_T_ICH8, WMP_F_COPPER },
1144 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
1145 "Intel i82801H IFE (G) LAN Controller",
1146 WM_T_ICH8, WMP_F_COPPER },
1147 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_82567V_3,
1148 "82567V-3 LAN Controller",
1149 WM_T_ICH8, WMP_F_COPPER },
1150 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
1151 "82801I (AMT) LAN Controller",
1152 WM_T_ICH9, WMP_F_COPPER },
1153 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
1154 "82801I LAN Controller",
1155 WM_T_ICH9, WMP_F_COPPER },
1156 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
1157 "82801I (G) LAN Controller",
1158 WM_T_ICH9, WMP_F_COPPER },
1159 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
1160 "82801I (GT) LAN Controller",
1161 WM_T_ICH9, WMP_F_COPPER },
1162 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
1163 "82801I (C) LAN Controller",
1164 WM_T_ICH9, WMP_F_COPPER },
1165 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
1166 "82801I mobile LAN Controller",
1167 WM_T_ICH9, WMP_F_COPPER },
1168 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V,
1169 "82801I mobile (V) LAN Controller",
1170 WM_T_ICH9, WMP_F_COPPER },
1171 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1172 "82801I mobile (AMT) LAN Controller",
1173 WM_T_ICH9, WMP_F_COPPER },
1174 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
1175 "82567LM-4 LAN Controller",
1176 WM_T_ICH9, WMP_F_COPPER },
1177 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1178 "82567LM-2 LAN Controller",
1179 WM_T_ICH10, WMP_F_COPPER },
1180 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1181 "82567LF-2 LAN Controller",
1182 WM_T_ICH10, WMP_F_COPPER },
1183 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1184 "82567LM-3 LAN Controller",
1185 WM_T_ICH10, WMP_F_COPPER },
1186 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1187 "82567LF-3 LAN Controller",
1188 WM_T_ICH10, WMP_F_COPPER },
1189 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
1190 "82567V-2 LAN Controller",
1191 WM_T_ICH10, WMP_F_COPPER },
1192 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V,
1193 "82567V-3? LAN Controller",
1194 WM_T_ICH10, WMP_F_COPPER },
1195 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE,
1196 "HANKSVILLE LAN Controller",
1197 WM_T_ICH10, WMP_F_COPPER },
1198 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
1199 "PCH LAN (82577LM) Controller",
1200 WM_T_PCH, WMP_F_COPPER },
1201 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
1202 "PCH LAN (82577LC) Controller",
1203 WM_T_PCH, WMP_F_COPPER },
1204 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
1205 "PCH LAN (82578DM) Controller",
1206 WM_T_PCH, WMP_F_COPPER },
1207 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
1208 "PCH LAN (82578DC) Controller",
1209 WM_T_PCH, WMP_F_COPPER },
1210 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
1211 "PCH2 LAN (82579LM) Controller",
1212 WM_T_PCH2, WMP_F_COPPER },
1213 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
1214 "PCH2 LAN (82579V) Controller",
1215 WM_T_PCH2, WMP_F_COPPER },
1216 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
1217 "82575EB dual-1000baseT Ethernet",
1218 WM_T_82575, WMP_F_COPPER },
1219 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1220 "82575EB dual-1000baseX Ethernet (SERDES)",
1221 WM_T_82575, WMP_F_SERDES },
1222 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1223 "82575GB quad-1000baseT Ethernet",
1224 WM_T_82575, WMP_F_COPPER },
1225 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1226 "82575GB quad-1000baseT Ethernet (PM)",
1227 WM_T_82575, WMP_F_COPPER },
1228 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
1229 "82576 1000BaseT Ethernet",
1230 WM_T_82576, WMP_F_COPPER },
1231 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
1232 "82576 1000BaseX Ethernet",
1233 WM_T_82576, WMP_F_FIBER },
1234
1235 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
1236 "82576 gigabit Ethernet (SERDES)",
1237 WM_T_82576, WMP_F_SERDES },
1238
1239 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1240 "82576 quad-1000BaseT Ethernet",
1241 WM_T_82576, WMP_F_COPPER },
1242
1243 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1244 "82576 Gigabit ET2 Quad Port Server Adapter",
1245 WM_T_82576, WMP_F_COPPER },
1246
1247 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
1248 "82576 gigabit Ethernet",
1249 WM_T_82576, WMP_F_COPPER },
1250
1251 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
1252 "82576 gigabit Ethernet (SERDES)",
1253 WM_T_82576, WMP_F_SERDES },
1254 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1255 "82576 quad-gigabit Ethernet (SERDES)",
1256 WM_T_82576, WMP_F_SERDES },
1257
1258 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
1259 "82580 1000BaseT Ethernet",
1260 WM_T_82580, WMP_F_COPPER },
1261 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
1262 "82580 1000BaseX Ethernet",
1263 WM_T_82580, WMP_F_FIBER },
1264
1265 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
1266 "82580 1000BaseT Ethernet (SERDES)",
1267 WM_T_82580, WMP_F_SERDES },
1268
1269 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
1270 "82580 gigabit Ethernet (SGMII)",
1271 WM_T_82580, WMP_F_COPPER },
1272 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1273 "82580 dual-1000BaseT Ethernet",
1274 WM_T_82580, WMP_F_COPPER },
1275
1276 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1277 "82580 quad-1000BaseX Ethernet",
1278 WM_T_82580, WMP_F_FIBER },
1279
1280 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1281 "DH89XXCC Gigabit Ethernet (SGMII)",
1282 WM_T_82580, WMP_F_COPPER },
1283
1284 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1285 "DH89XXCC Gigabit Ethernet (SERDES)",
1286 WM_T_82580, WMP_F_SERDES },
1287
1288 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1289 "DH89XXCC 1000BASE-KX Ethernet",
1290 WM_T_82580, WMP_F_SERDES },
1291
1292 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1293 "DH89XXCC Gigabit Ethernet (SFP)",
1294 WM_T_82580, WMP_F_SERDES },
1295
1296 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
1297 "I350 Gigabit Network Connection",
1298 WM_T_I350, WMP_F_COPPER },
1299
1300 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
1301 "I350 Gigabit Fiber Network Connection",
1302 WM_T_I350, WMP_F_FIBER },
1303
1304 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
1305 "I350 Gigabit Backplane Connection",
1306 WM_T_I350, WMP_F_SERDES },
1307
1308 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_DA4,
1309 "I350 Quad Port Gigabit Ethernet",
1310 WM_T_I350, WMP_F_SERDES },
1311
1312 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
1313 "I350 Gigabit Connection",
1314 WM_T_I350, WMP_F_COPPER },
1315
1316 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_1000KX,
1317 "I354 Gigabit Ethernet (KX)",
1318 WM_T_I354, WMP_F_SERDES },
1319
1320 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
1321 "I354 Gigabit Ethernet (SGMII)",
1322 WM_T_I354, WMP_F_COPPER },
1323
1324 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_25GBE,
1325 "I354 Gigabit Ethernet (2.5G)",
1326 WM_T_I354, WMP_F_COPPER },
1327
1328 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
1329 "I210-T1 Ethernet Server Adapter",
1330 WM_T_I210, WMP_F_COPPER },
1331
1332 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1333 "I210 Ethernet (Copper OEM)",
1334 WM_T_I210, WMP_F_COPPER },
1335
1336 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
1337 "I210 Ethernet (Copper IT)",
1338 WM_T_I210, WMP_F_COPPER },
1339
1340 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1341 "I210 Ethernet (FLASH less)",
1342 WM_T_I210, WMP_F_COPPER },
1343
1344 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
1345 "I210 Gigabit Ethernet (Fiber)",
1346 WM_T_I210, WMP_F_FIBER },
1347
1348 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
1349 "I210 Gigabit Ethernet (SERDES)",
1350 WM_T_I210, WMP_F_SERDES },
1351
1352 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1353 "I210 Gigabit Ethernet (FLASH less)",
1354 WM_T_I210, WMP_F_SERDES },
1355
1356 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
1357 "I210 Gigabit Ethernet (SGMII)",
1358 WM_T_I210, WMP_F_COPPER },
1359
1360 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
1361 "I211 Ethernet (COPPER)",
1362 WM_T_I211, WMP_F_COPPER },
1363 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
1364 "I217 V Ethernet Connection",
1365 WM_T_PCH_LPT, WMP_F_COPPER },
1366 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
1367 "I217 LM Ethernet Connection",
1368 WM_T_PCH_LPT, WMP_F_COPPER },
1369 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
1370 "I218 V Ethernet Connection",
1371 WM_T_PCH_LPT, WMP_F_COPPER },
1372 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2,
1373 "I218 V Ethernet Connection",
1374 WM_T_PCH_LPT, WMP_F_COPPER },
1375 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3,
1376 "I218 V Ethernet Connection",
1377 WM_T_PCH_LPT, WMP_F_COPPER },
1378 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
1379 "I218 LM Ethernet Connection",
1380 WM_T_PCH_LPT, WMP_F_COPPER },
1381 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2,
1382 "I218 LM Ethernet Connection",
1383 WM_T_PCH_LPT, WMP_F_COPPER },
1384 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3,
1385 "I218 LM Ethernet Connection",
1386 WM_T_PCH_LPT, WMP_F_COPPER },
1387 #if 0
1388 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V,
1389 "I219 V Ethernet Connection",
1390 WM_T_PCH_SPT, WMP_F_COPPER },
1391 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2,
1392 "I219 V Ethernet Connection",
1393 WM_T_PCH_SPT, WMP_F_COPPER },
1394 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V4,
1395 "I219 V Ethernet Connection",
1396 WM_T_PCH_SPT, WMP_F_COPPER },
1397 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V5,
1398 "I219 V Ethernet Connection",
1399 WM_T_PCH_SPT, WMP_F_COPPER },
1400 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM,
1401 "I219 LM Ethernet Connection",
1402 WM_T_PCH_SPT, WMP_F_COPPER },
1403 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM2,
1404 "I219 LM Ethernet Connection",
1405 WM_T_PCH_SPT, WMP_F_COPPER },
1406 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM3,
1407 "I219 LM Ethernet Connection",
1408 WM_T_PCH_SPT, WMP_F_COPPER },
1409 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM4,
1410 "I219 LM Ethernet Connection",
1411 WM_T_PCH_SPT, WMP_F_COPPER },
1412 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM5,
1413 "I219 LM Ethernet Connection",
1414 WM_T_PCH_SPT, WMP_F_COPPER },
1415 #endif
1416 { 0, 0,
1417 NULL,
1418 0, 0 },
1419 };
1420
1421 /*
1422 * Register read/write functions.
1423 * Other than CSR_{READ|WRITE}().
1424 */
1425
1426 #if 0 /* Not currently used */
1427 static inline uint32_t
1428 wm_io_read(struct wm_softc *sc, int reg)
1429 {
1430
1431 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1432 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1433 }
1434 #endif
1435
1436 static inline void
1437 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1438 {
1439
1440 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1441 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1442 }
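
/*
 * Both I/O-mapped variants above go through a two-register window: the
 * register offset is written to the dword at BAR offset 0 (IOADDR in
 * Intel's documentation), and the value is then read or written through
 * the dword at offset 4 (IODATA).
 */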
1443
1444 static inline void
1445 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1446 uint32_t data)
1447 {
1448 uint32_t regval;
1449 int i;
1450
1451 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1452
1453 CSR_WRITE(sc, reg, regval);
1454
1455 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1456 delay(5);
1457 if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1458 break;
1459 }
1460 if (i == SCTL_CTL_POLL_TIMEOUT) {
1461 aprint_error("%s: WARNING:"
1462 " i82575 reg 0x%08x setup did not indicate ready\n",
1463 device_xname(sc->sc_dev), reg);
1464 }
1465 }
1466
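/*
 * Split a bus address into the low and high halves of a little-endian
 * descriptor address field.  The high half is meaningful only when
 * bus addresses are 64 bits wide.
 */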
1467 static inline void
1468 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1469 {
1470 wa->wa_low = htole32(v & 0xffffffffU);
1471 if (sizeof(bus_addr_t) == 8)
1472 wa->wa_high = htole32((uint64_t) v >> 32);
1473 else
1474 wa->wa_high = 0;
1475 }
1476
1477 /*
1478 * Descriptor sync/init functions.
1479 */
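/*
 * Sync num transmit descriptors starting at start, splitting the
 * bus_dmamap_sync into two calls when the range wraps past the end
 * of the ring.
 */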
1480 static inline void
1481 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1482 {
1483 struct wm_softc *sc = txq->txq_sc;
1484
1485 /* If it will wrap around, sync to the end of the ring. */
1486 if ((start + num) > WM_NTXDESC(txq)) {
1487 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1488 WM_CDTXOFF(txq, start), txq->txq_descsize *
1489 (WM_NTXDESC(txq) - start), ops);
1490 num -= (WM_NTXDESC(txq) - start);
1491 start = 0;
1492 }
1493
1494 /* Now sync whatever is left. */
1495 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1496 WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1497 }
1498
1499 static inline void
1500 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1501 {
1502 struct wm_softc *sc = rxq->rxq_sc;
1503
1504 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1505 WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
1506 }
1507
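/*
 * Initialize the receive descriptor at index start: point it at the
 * (possibly alignment-tweaked) mbuf buffer, clear its status fields,
 * and advance the receive descriptor tail so the hardware owns it.
 */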
1508 static inline void
1509 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1510 {
1511 struct wm_softc *sc = rxq->rxq_sc;
1512 struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1513 wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1514 struct mbuf *m = rxs->rxs_mbuf;
1515
1516 /*
1517 * Note: We scoot the packet forward 2 bytes in the buffer
1518 * so that the payload after the Ethernet header is aligned
1519 * to a 4-byte boundary.
1520	 *
1521 * XXX BRAINDAMAGE ALERT!
1522 * The stupid chip uses the same size for every buffer, which
1523 * is set in the Receive Control register. We are using the 2K
1524 * size option, but what we REALLY want is (2K - 2)! For this
1525 * reason, we can't "scoot" packets longer than the standard
1526 * Ethernet MTU. On strict-alignment platforms, if the total
1527 * size exceeds (2K - 2) we set align_tweak to 0 and let
1528 * the upper layer copy the headers.
1529 */
1530 m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1531
1532 wm_set_dma_addr(&rxd->wrx_addr,
1533 rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1534 rxd->wrx_len = 0;
1535 rxd->wrx_cksum = 0;
1536 rxd->wrx_status = 0;
1537 rxd->wrx_errors = 0;
1538 rxd->wrx_special = 0;
1539 wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1540
1541 CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1542 }
1543
1544 /*
1545 * Device driver interface functions and commonly used functions.
1546 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1547 */
1548
1549 /* Look up the device in the supported device table */
1550 static const struct wm_product *
1551 wm_lookup(const struct pci_attach_args *pa)
1552 {
1553 const struct wm_product *wmp;
1554
1555 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1556 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1557 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1558 return wmp;
1559 }
1560 return NULL;
1561 }
1562
1563 /* The match function (ca_match) */
1564 static int
1565 wm_match(device_t parent, cfdata_t cf, void *aux)
1566 {
1567 struct pci_attach_args *pa = aux;
1568
1569 if (wm_lookup(pa) != NULL)
1570 return 1;
1571
1572 return 0;
1573 }
1574
1575 /* The attach function (ca_attach) */
1576 static void
1577 wm_attach(device_t parent, device_t self, void *aux)
1578 {
1579 struct wm_softc *sc = device_private(self);
1580 struct pci_attach_args *pa = aux;
1581 prop_dictionary_t dict;
1582 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1583 pci_chipset_tag_t pc = pa->pa_pc;
1584 int counts[PCI_INTR_TYPE_SIZE];
1585 pci_intr_type_t max_type;
1586 const char *eetype, *xname;
1587 bus_space_tag_t memt;
1588 bus_space_handle_t memh;
1589 bus_size_t memsize;
1590 int memh_valid;
1591 int i, error;
1592 const struct wm_product *wmp;
1593 prop_data_t ea;
1594 prop_number_t pn;
1595 uint8_t enaddr[ETHER_ADDR_LEN];
1596 uint16_t cfg1, cfg2, swdpin, nvmword;
1597 pcireg_t preg, memtype;
1598 uint16_t eeprom_data, apme_mask;
1599 bool force_clear_smbi;
1600 uint32_t link_mode;
1601 uint32_t reg;
1602
1603 sc->sc_dev = self;
1604 callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1605 sc->sc_core_stopping = false;
1606
1607 wmp = wm_lookup(pa);
1608 #ifdef DIAGNOSTIC
1609 if (wmp == NULL) {
1610 printf("\n");
1611 panic("wm_attach: impossible");
1612 }
1613 #endif
1614 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1615
1616 sc->sc_pc = pa->pa_pc;
1617 sc->sc_pcitag = pa->pa_tag;
1618
1619 if (pci_dma64_available(pa))
1620 sc->sc_dmat = pa->pa_dmat64;
1621 else
1622 sc->sc_dmat = pa->pa_dmat;
1623
1624 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1625 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1626 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1627
1628 sc->sc_type = wmp->wmp_type;
1629
1630 /* Set default function pointers */
1631 sc->phy.acquire = wm_get_null;
1632 sc->phy.release = wm_put_null;
1633
1634 if (sc->sc_type < WM_T_82543) {
1635 if (sc->sc_rev < 2) {
1636 aprint_error_dev(sc->sc_dev,
1637 "i82542 must be at least rev. 2\n");
1638 return;
1639 }
1640 if (sc->sc_rev < 3)
1641 sc->sc_type = WM_T_82542_2_0;
1642 }
1643
1644 /*
1645 * Disable MSI for Errata:
1646 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1647 *
1648 * 82544: Errata 25
1649 * 82540: Errata 6 (easy to reproduce device timeout)
1650 * 82545: Errata 4 (easy to reproduce device timeout)
1651 * 82546: Errata 26 (easy to reproduce device timeout)
1652 * 82541: Errata 7 (easy to reproduce device timeout)
1653 *
1654 * "Byte Enables 2 and 3 are not set on MSI writes"
1655 *
1656 * 82571 & 82572: Errata 63
1657 */
1658 if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1659 || (sc->sc_type == WM_T_82572))
1660 pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1661
1662 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1663 || (sc->sc_type == WM_T_82580)
1664 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1665 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1666 sc->sc_flags |= WM_F_NEWQUEUE;
1667
1668 /* Set device properties (mactype) */
1669 dict = device_properties(sc->sc_dev);
1670 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1671
1672 /*
1673	 * Map the device.  All devices support memory-mapped access,
1674 * and it is really required for normal operation.
1675 */
1676 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1677 switch (memtype) {
1678 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1679 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1680 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1681 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1682 break;
1683 default:
1684 memh_valid = 0;
1685 break;
1686 }
1687
1688 if (memh_valid) {
1689 sc->sc_st = memt;
1690 sc->sc_sh = memh;
1691 sc->sc_ss = memsize;
1692 } else {
1693 aprint_error_dev(sc->sc_dev,
1694 "unable to map device registers\n");
1695 return;
1696 }
1697
1698 /*
1699 * In addition, i82544 and later support I/O mapped indirect
1700 * register access. It is not desirable (nor supported in
1701 * this driver) to use it for normal operation, though it is
1702 * required to work around bugs in some chip versions.
1703 */
1704 if (sc->sc_type >= WM_T_82544) {
1705 /* First we have to find the I/O BAR. */
1706 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1707 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1708 if (memtype == PCI_MAPREG_TYPE_IO)
1709 break;
1710 if (PCI_MAPREG_MEM_TYPE(memtype) ==
1711 PCI_MAPREG_MEM_TYPE_64BIT)
1712 i += 4; /* skip high bits, too */
1713 }
1714 if (i < PCI_MAPREG_END) {
1715 /*
1716			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
1717			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1718			 * That's fine, because newer chips don't have this
1719			 * bug.
1720			 *
1721			 * The i8254x apparently doesn't respond when the
1722			 * I/O BAR is 0, which looks somewhat like it hasn't
1723			 * been configured.
1724 */
1725 preg = pci_conf_read(pc, pa->pa_tag, i);
1726 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1727 aprint_error_dev(sc->sc_dev,
1728 "WARNING: I/O BAR at zero.\n");
1729 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1730 0, &sc->sc_iot, &sc->sc_ioh,
1731 NULL, &sc->sc_ios) == 0) {
1732 sc->sc_flags |= WM_F_IOH_VALID;
1733 } else {
1734 aprint_error_dev(sc->sc_dev,
1735 "WARNING: unable to map I/O space\n");
1736 }
1737 }
1738
1739 }
1740
1741 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1742 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1743 preg |= PCI_COMMAND_MASTER_ENABLE;
1744 if (sc->sc_type < WM_T_82542_2_1)
1745 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1746 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1747
1748 /* power up chip */
1749 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1750 NULL)) && error != EOPNOTSUPP) {
1751 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1752 return;
1753 }
1754
1755 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1756
1757 /* Allocation settings */
1758 max_type = PCI_INTR_TYPE_MSIX;
1759 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
1760 counts[PCI_INTR_TYPE_MSI] = 1;
1761 counts[PCI_INTR_TYPE_INTX] = 1;
1762
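	/*
	 * Interrupt allocation falls back MSI-X -> MSI -> INTx: if
	 * handler setup fails for one type, release its vectors and
	 * retry with the next, less capable, type.
	 */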
1763 alloc_retry:
1764 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1765 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1766 return;
1767 }
1768
1769 if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1770 error = wm_setup_msix(sc);
1771 if (error) {
1772 pci_intr_release(pc, sc->sc_intrs,
1773 counts[PCI_INTR_TYPE_MSIX]);
1774
1775 /* Setup for MSI: Disable MSI-X */
1776 max_type = PCI_INTR_TYPE_MSI;
1777 counts[PCI_INTR_TYPE_MSI] = 1;
1778 counts[PCI_INTR_TYPE_INTX] = 1;
1779 goto alloc_retry;
1780 }
1781 } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1782 wm_adjust_qnum(sc, 0); /* must not use multiqueue */
1783 error = wm_setup_legacy(sc);
1784 if (error) {
1785 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1786 counts[PCI_INTR_TYPE_MSI]);
1787
1788 /* The next try is for INTx: Disable MSI */
1789 max_type = PCI_INTR_TYPE_INTX;
1790 counts[PCI_INTR_TYPE_INTX] = 1;
1791 goto alloc_retry;
1792 }
1793 } else {
1794 wm_adjust_qnum(sc, 0); /* must not use multiqueue */
1795 error = wm_setup_legacy(sc);
1796 if (error) {
1797 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1798 counts[PCI_INTR_TYPE_INTX]);
1799 return;
1800 }
1801 }
1802
1803 /*
1804 * Check the function ID (unit number of the chip).
1805 */
1806 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1807 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1808 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1809 || (sc->sc_type == WM_T_82580)
1810 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1811 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1812 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1813 else
1814 sc->sc_funcid = 0;
1815
1816 /*
1817 * Determine a few things about the bus we're connected to.
1818 */
1819 if (sc->sc_type < WM_T_82543) {
1820 /* We don't really know the bus characteristics here. */
1821 sc->sc_bus_speed = 33;
1822 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1823 /*
1824		 * CSA (Communication Streaming Architecture) is about as fast
1825		 * as a 32-bit 66MHz PCI bus.
1826 */
1827 sc->sc_flags |= WM_F_CSA;
1828 sc->sc_bus_speed = 66;
1829 aprint_verbose_dev(sc->sc_dev,
1830 "Communication Streaming Architecture\n");
1831 if (sc->sc_type == WM_T_82547) {
1832 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1833 callout_setfunc(&sc->sc_txfifo_ch,
1834 wm_82547_txfifo_stall, sc);
1835 aprint_verbose_dev(sc->sc_dev,
1836 "using 82547 Tx FIFO stall work-around\n");
1837 }
1838 } else if (sc->sc_type >= WM_T_82571) {
1839 sc->sc_flags |= WM_F_PCIE;
1840 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1841 && (sc->sc_type != WM_T_ICH10)
1842 && (sc->sc_type != WM_T_PCH)
1843 && (sc->sc_type != WM_T_PCH2)
1844 && (sc->sc_type != WM_T_PCH_LPT)
1845 && (sc->sc_type != WM_T_PCH_SPT)) {
1846 /* ICH* and PCH* have no PCIe capability registers */
1847 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1848 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1849 NULL) == 0)
1850 aprint_error_dev(sc->sc_dev,
1851 "unable to find PCIe capability\n");
1852 }
1853 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1854 } else {
1855 reg = CSR_READ(sc, WMREG_STATUS);
1856 if (reg & STATUS_BUS64)
1857 sc->sc_flags |= WM_F_BUS64;
1858 if ((reg & STATUS_PCIX_MODE) != 0) {
1859 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1860
1861 sc->sc_flags |= WM_F_PCIX;
1862 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1863 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1864 aprint_error_dev(sc->sc_dev,
1865 "unable to find PCIX capability\n");
1866 else if (sc->sc_type != WM_T_82545_3 &&
1867 sc->sc_type != WM_T_82546_3) {
1868 /*
1869 * Work around a problem caused by the BIOS
1870 * setting the max memory read byte count
1871 * incorrectly.
1872 */
1873 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1874 sc->sc_pcixe_capoff + PCIX_CMD);
1875 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1876 sc->sc_pcixe_capoff + PCIX_STATUS);
1877
1878 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1879 PCIX_CMD_BYTECNT_SHIFT;
1880 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1881 PCIX_STATUS_MAXB_SHIFT;
1882 if (bytecnt > maxb) {
1883 aprint_verbose_dev(sc->sc_dev,
1884 "resetting PCI-X MMRBC: %d -> %d\n",
1885 512 << bytecnt, 512 << maxb);
1886 pcix_cmd = (pcix_cmd &
1887 ~PCIX_CMD_BYTECNT_MASK) |
1888 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1889 pci_conf_write(pa->pa_pc, pa->pa_tag,
1890 sc->sc_pcixe_capoff + PCIX_CMD,
1891 pcix_cmd);
1892 }
1893 }
1894 }
1895 /*
1896 * The quad port adapter is special; it has a PCIX-PCIX
1897 * bridge on the board, and can run the secondary bus at
1898 * a higher speed.
1899 */
1900 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1901 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1902 : 66;
1903 } else if (sc->sc_flags & WM_F_PCIX) {
1904 switch (reg & STATUS_PCIXSPD_MASK) {
1905 case STATUS_PCIXSPD_50_66:
1906 sc->sc_bus_speed = 66;
1907 break;
1908 case STATUS_PCIXSPD_66_100:
1909 sc->sc_bus_speed = 100;
1910 break;
1911 case STATUS_PCIXSPD_100_133:
1912 sc->sc_bus_speed = 133;
1913 break;
1914 default:
1915 aprint_error_dev(sc->sc_dev,
1916 "unknown PCIXSPD %d; assuming 66MHz\n",
1917 reg & STATUS_PCIXSPD_MASK);
1918 sc->sc_bus_speed = 66;
1919 break;
1920 }
1921 } else
1922 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1923 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1924 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1925 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1926 }
1927
1928 /* clear interesting stat counters */
1929 CSR_READ(sc, WMREG_COLC);
1930 CSR_READ(sc, WMREG_RXERRC);
1931
1932 /* get PHY control from SMBus to PCIe */
1933 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1934 || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
1935 wm_smbustopci(sc);
1936
1937 if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
1938 || (sc->sc_type >= WM_T_ICH8))
1939 sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
1940 if (sc->sc_type >= WM_T_ICH8)
1941 sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
1942
1943 /* Set PHY, NVM mutex related stuff */
1944 switch (sc->sc_type) {
1945 case WM_T_82542_2_0:
1946 case WM_T_82542_2_1:
1947 case WM_T_82543:
1948 case WM_T_82544:
1949 /* Microwire */
1950 sc->sc_nvm_wordsize = 64;
1951 sc->sc_nvm_addrbits = 6;
1952 break;
1953 case WM_T_82540:
1954 case WM_T_82545:
1955 case WM_T_82545_3:
1956 case WM_T_82546:
1957 case WM_T_82546_3:
1958 /* Microwire */
1959 reg = CSR_READ(sc, WMREG_EECD);
1960 if (reg & EECD_EE_SIZE) {
1961 sc->sc_nvm_wordsize = 256;
1962 sc->sc_nvm_addrbits = 8;
1963 } else {
1964 sc->sc_nvm_wordsize = 64;
1965 sc->sc_nvm_addrbits = 6;
1966 }
1967 sc->sc_flags |= WM_F_LOCK_EECD;
1968 break;
1969 case WM_T_82541:
1970 case WM_T_82541_2:
1971 case WM_T_82547:
1972 case WM_T_82547_2:
1973 sc->sc_flags |= WM_F_LOCK_EECD;
1974 reg = CSR_READ(sc, WMREG_EECD);
1975 if (reg & EECD_EE_TYPE) {
1976 /* SPI */
1977 sc->sc_flags |= WM_F_EEPROM_SPI;
1978 wm_nvm_set_addrbits_size_eecd(sc);
1979 } else {
1980 /* Microwire */
1981 if ((reg & EECD_EE_ABITS) != 0) {
1982 sc->sc_nvm_wordsize = 256;
1983 sc->sc_nvm_addrbits = 8;
1984 } else {
1985 sc->sc_nvm_wordsize = 64;
1986 sc->sc_nvm_addrbits = 6;
1987 }
1988 }
1989 break;
1990 case WM_T_82571:
1991 case WM_T_82572:
1992 /* SPI */
1993 sc->sc_flags |= WM_F_EEPROM_SPI;
1994 wm_nvm_set_addrbits_size_eecd(sc);
1995 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1996 sc->phy.acquire = wm_get_swsm_semaphore;
1997 sc->phy.release = wm_put_swsm_semaphore;
1998 break;
1999 case WM_T_82573:
2000 case WM_T_82574:
2001 case WM_T_82583:
2002 if (sc->sc_type == WM_T_82573) {
2003 sc->sc_flags |= WM_F_LOCK_SWSM;
2004 sc->phy.acquire = wm_get_swsm_semaphore;
2005 sc->phy.release = wm_put_swsm_semaphore;
2006 } else {
2007 sc->sc_flags |= WM_F_LOCK_EXTCNF;
2008 /* Both PHY and NVM use the same semaphore. */
2009 sc->phy.acquire
2010 = wm_get_swfwhw_semaphore;
2011 sc->phy.release
2012 = wm_put_swfwhw_semaphore;
2013 }
2014 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2015 sc->sc_flags |= WM_F_EEPROM_FLASH;
2016 sc->sc_nvm_wordsize = 2048;
2017 } else {
2018 /* SPI */
2019 sc->sc_flags |= WM_F_EEPROM_SPI;
2020 wm_nvm_set_addrbits_size_eecd(sc);
2021 }
2022 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
2023 break;
2024 case WM_T_82575:
2025 case WM_T_82576:
2026 case WM_T_82580:
2027 case WM_T_I350:
2028 case WM_T_I354:
2029 case WM_T_80003:
2030 /* SPI */
2031 sc->sc_flags |= WM_F_EEPROM_SPI;
2032 wm_nvm_set_addrbits_size_eecd(sc);
2033 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
2034 | WM_F_LOCK_SWSM;
2035 sc->phy.acquire = wm_get_phy_82575;
2036 sc->phy.release = wm_put_phy_82575;
2037 break;
2038 case WM_T_ICH8:
2039 case WM_T_ICH9:
2040 case WM_T_ICH10:
2041 case WM_T_PCH:
2042 case WM_T_PCH2:
2043 case WM_T_PCH_LPT:
2044 /* FLASH */
2045 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
2046 sc->sc_nvm_wordsize = 2048;
2047 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2048 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2049 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2050 aprint_error_dev(sc->sc_dev,
2051 "can't map FLASH registers\n");
2052 goto out;
2053 }
2054 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2055 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2056 ICH_FLASH_SECTOR_SIZE;
2057 sc->sc_ich8_flash_bank_size =
2058 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2059 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2060 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2061 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2062 sc->sc_flashreg_offset = 0;
2063 sc->phy.acquire = wm_get_swflag_ich8lan;
2064 sc->phy.release = wm_put_swflag_ich8lan;
2065 break;
2066 case WM_T_PCH_SPT:
2067 /* SPT has no GFPREG; flash registers mapped through BAR0 */
2068 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
2069 sc->sc_flasht = sc->sc_st;
2070 sc->sc_flashh = sc->sc_sh;
2071 sc->sc_ich8_flash_base = 0;
2072 sc->sc_nvm_wordsize =
2073 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2074 * NVM_SIZE_MULTIPLIER;
2075		/* That is the size in bytes; we want it in words */
2076 sc->sc_nvm_wordsize /= 2;
2077 /* assume 2 banks */
2078 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2079 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2080 sc->phy.acquire = wm_get_swflag_ich8lan;
2081 sc->phy.release = wm_put_swflag_ich8lan;
2082 break;
2083 case WM_T_I210:
2084 case WM_T_I211:
2085 if (wm_nvm_get_flash_presence_i210(sc)) {
2086 wm_nvm_set_addrbits_size_eecd(sc);
2087 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2088 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
2089 } else {
2090 sc->sc_nvm_wordsize = INVM_SIZE;
2091 sc->sc_flags |= WM_F_EEPROM_INVM;
2092 }
2093 sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
2094 sc->phy.acquire = wm_get_phy_82575;
2095 sc->phy.release = wm_put_phy_82575;
2096 break;
2097 default:
2098 break;
2099 }
2100
2101 /* Reset the chip to a known state. */
2102 wm_reset(sc);
2103
2104 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2105 switch (sc->sc_type) {
2106 case WM_T_82571:
2107 case WM_T_82572:
2108 reg = CSR_READ(sc, WMREG_SWSM2);
2109 if ((reg & SWSM2_LOCK) == 0) {
2110 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2111 force_clear_smbi = true;
2112 } else
2113 force_clear_smbi = false;
2114 break;
2115 case WM_T_82573:
2116 case WM_T_82574:
2117 case WM_T_82583:
2118 force_clear_smbi = true;
2119 break;
2120 default:
2121 force_clear_smbi = false;
2122 break;
2123 }
2124 if (force_clear_smbi) {
2125 reg = CSR_READ(sc, WMREG_SWSM);
2126 if ((reg & SWSM_SMBI) != 0)
2127 aprint_error_dev(sc->sc_dev,
2128 "Please update the Bootagent\n");
2129 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2130 }
2131
2132 /*
2133	 * Defer printing the EEPROM type until after verifying the checksum.
2134 * This allows the EEPROM type to be printed correctly in the case
2135 * that no EEPROM is attached.
2136 */
2137 /*
2138 * Validate the EEPROM checksum. If the checksum fails, flag
2139 * this for later, so we can fail future reads from the EEPROM.
2140 */
2141 if (wm_nvm_validate_checksum(sc)) {
2142 /*
2143		 * Retry the read: some PCIe parts fail the
2144		 * first check because the link is in a sleep state.
2145 */
2146 if (wm_nvm_validate_checksum(sc))
2147 sc->sc_flags |= WM_F_EEPROM_INVALID;
2148 }
2149
2150 /* Set device properties (macflags) */
2151 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2152
2153 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2154 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2155 else {
2156 aprint_verbose_dev(sc->sc_dev, "%u words ",
2157 sc->sc_nvm_wordsize);
2158 if (sc->sc_flags & WM_F_EEPROM_INVM)
2159 aprint_verbose("iNVM");
2160 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2161 aprint_verbose("FLASH(HW)");
2162 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2163 aprint_verbose("FLASH");
2164 else {
2165 if (sc->sc_flags & WM_F_EEPROM_SPI)
2166 eetype = "SPI";
2167 else
2168 eetype = "MicroWire";
2169 aprint_verbose("(%d address bits) %s EEPROM",
2170 sc->sc_nvm_addrbits, eetype);
2171 }
2172 }
2173 wm_nvm_version(sc);
2174 aprint_verbose("\n");
2175
2176 /* Check for I21[01] PLL workaround */
2177 if (sc->sc_type == WM_T_I210)
2178 sc->sc_flags |= WM_F_PLL_WA_I210;
2179 if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2180 /* NVM image release 3.25 has a workaround */
2181 if ((sc->sc_nvm_ver_major < 3)
2182 || ((sc->sc_nvm_ver_major == 3)
2183 && (sc->sc_nvm_ver_minor < 25))) {
2184 aprint_verbose_dev(sc->sc_dev,
2185 "ROM image version %d.%d is older than 3.25\n",
2186 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2187 sc->sc_flags |= WM_F_PLL_WA_I210;
2188 }
2189 }
2190 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2191 wm_pll_workaround_i210(sc);
2192
2193 wm_get_wakeup(sc);
2194 switch (sc->sc_type) {
2195 case WM_T_82571:
2196 case WM_T_82572:
2197 case WM_T_82573:
2198 case WM_T_82574:
2199 case WM_T_82583:
2200 case WM_T_80003:
2201 case WM_T_ICH8:
2202 case WM_T_ICH9:
2203 case WM_T_ICH10:
2204 case WM_T_PCH:
2205 case WM_T_PCH2:
2206 case WM_T_PCH_LPT:
2207 case WM_T_PCH_SPT:
2208 /* Non-AMT based hardware can now take control from firmware */
2209 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2210 wm_get_hw_control(sc);
2211 break;
2212 default:
2213 break;
2214 }
2215
2216 /*
2217	 * Read the Ethernet address from the EEPROM, unless it was
2218	 * already found in the device properties.
2219 */
2220 ea = prop_dictionary_get(dict, "mac-address");
2221 if (ea != NULL) {
2222 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2223 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2224 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2225 } else {
2226 if (wm_read_mac_addr(sc, enaddr) != 0) {
2227 aprint_error_dev(sc->sc_dev,
2228 "unable to read Ethernet address\n");
2229 goto out;
2230 }
2231 }
2232
2233 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2234 ether_sprintf(enaddr));
2235
2236 /*
2237 * Read the config info from the EEPROM, and set up various
2238 * bits in the control registers based on their contents.
2239 */
2240 pn = prop_dictionary_get(dict, "i82543-cfg1");
2241 if (pn != NULL) {
2242 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2243 cfg1 = (uint16_t) prop_number_integer_value(pn);
2244 } else {
2245 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2246 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2247 goto out;
2248 }
2249 }
2250
2251 pn = prop_dictionary_get(dict, "i82543-cfg2");
2252 if (pn != NULL) {
2253 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2254 cfg2 = (uint16_t) prop_number_integer_value(pn);
2255 } else {
2256 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2257 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2258 goto out;
2259 }
2260 }
2261
2262 /* check for WM_F_WOL */
2263 switch (sc->sc_type) {
2264 case WM_T_82542_2_0:
2265 case WM_T_82542_2_1:
2266 case WM_T_82543:
2267 /* dummy? */
2268 eeprom_data = 0;
2269 apme_mask = NVM_CFG3_APME;
2270 break;
2271 case WM_T_82544:
2272 apme_mask = NVM_CFG2_82544_APM_EN;
2273 eeprom_data = cfg2;
2274 break;
2275 case WM_T_82546:
2276 case WM_T_82546_3:
2277 case WM_T_82571:
2278 case WM_T_82572:
2279 case WM_T_82573:
2280 case WM_T_82574:
2281 case WM_T_82583:
2282 case WM_T_80003:
2283 default:
2284 apme_mask = NVM_CFG3_APME;
2285 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2286 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2287 break;
2288 case WM_T_82575:
2289 case WM_T_82576:
2290 case WM_T_82580:
2291 case WM_T_I350:
2292 case WM_T_I354: /* XXX ok? */
2293 case WM_T_ICH8:
2294 case WM_T_ICH9:
2295 case WM_T_ICH10:
2296 case WM_T_PCH:
2297 case WM_T_PCH2:
2298 case WM_T_PCH_LPT:
2299 case WM_T_PCH_SPT:
2300 /* XXX The funcid should be checked on some devices */
2301 apme_mask = WUC_APME;
2302 eeprom_data = CSR_READ(sc, WMREG_WUC);
2303 break;
2304 }
2305
2306 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2307 if ((eeprom_data & apme_mask) != 0)
2308 sc->sc_flags |= WM_F_WOL;
2309 #ifdef WM_DEBUG
2310 if ((sc->sc_flags & WM_F_WOL) != 0)
2311 printf("WOL\n");
2312 #endif
2313
2314 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2315 /* Check NVM for autonegotiation */
2316 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2317 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2318 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2319 }
2320 }
2321
2322 /*
2323	 * XXX need special handling for some multiple-port cards
2324	 * to disable a particular port.
2325 */
2326
2327 if (sc->sc_type >= WM_T_82544) {
2328 pn = prop_dictionary_get(dict, "i82543-swdpin");
2329 if (pn != NULL) {
2330 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2331 swdpin = (uint16_t) prop_number_integer_value(pn);
2332 } else {
2333 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2334 aprint_error_dev(sc->sc_dev,
2335 "unable to read SWDPIN\n");
2336 goto out;
2337 }
2338 }
2339 }
2340
2341 if (cfg1 & NVM_CFG1_ILOS)
2342 sc->sc_ctrl |= CTRL_ILOS;
2343
2344 /*
2345 * XXX
2346	 * This code isn't correct because pins 2 and 3 are located
2347	 * at different positions on newer chips.  Check all datasheets.
2348	 *
2349	 * Until that is resolved, only handle chips up to the 82580 here.
2350 */
2351 if (sc->sc_type <= WM_T_82580) {
2352 if (sc->sc_type >= WM_T_82544) {
2353 sc->sc_ctrl |=
2354 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2355 CTRL_SWDPIO_SHIFT;
2356 sc->sc_ctrl |=
2357 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2358 CTRL_SWDPINS_SHIFT;
2359 } else {
2360 sc->sc_ctrl |=
2361 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2362 CTRL_SWDPIO_SHIFT;
2363 }
2364 }
2365
2366 /* XXX For other than 82580? */
2367 if (sc->sc_type == WM_T_82580) {
2368 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2369 if (nvmword & __BIT(13))
2370 sc->sc_ctrl |= CTRL_ILOS;
2371 }
2372
2373 #if 0
2374 if (sc->sc_type >= WM_T_82544) {
2375 if (cfg1 & NVM_CFG1_IPS0)
2376 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2377 if (cfg1 & NVM_CFG1_IPS1)
2378 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2379 sc->sc_ctrl_ext |=
2380 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2381 CTRL_EXT_SWDPIO_SHIFT;
2382 sc->sc_ctrl_ext |=
2383 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2384 CTRL_EXT_SWDPINS_SHIFT;
2385 } else {
2386 sc->sc_ctrl_ext |=
2387 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2388 CTRL_EXT_SWDPIO_SHIFT;
2389 }
2390 #endif
2391
2392 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2393 #if 0
2394 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2395 #endif
2396
2397 if (sc->sc_type == WM_T_PCH) {
2398 uint16_t val;
2399
2400 /* Save the NVM K1 bit setting */
2401 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2402
2403 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2404 sc->sc_nvm_k1_enabled = 1;
2405 else
2406 sc->sc_nvm_k1_enabled = 0;
2407 }
2408
2409 /*
2410	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
2411 * media structures accordingly.
2412 */
2413 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2414 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2415 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2416 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
2417 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2418 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2419 wm_gmii_mediainit(sc, wmp->wmp_product);
2420 } else if (sc->sc_type < WM_T_82543 ||
2421 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2422 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2423 aprint_error_dev(sc->sc_dev,
2424 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2425 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2426 }
2427 wm_tbi_mediainit(sc);
2428 } else {
2429 switch (sc->sc_type) {
2430 case WM_T_82575:
2431 case WM_T_82576:
2432 case WM_T_82580:
2433 case WM_T_I350:
2434 case WM_T_I354:
2435 case WM_T_I210:
2436 case WM_T_I211:
2437 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2438 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2439 switch (link_mode) {
2440 case CTRL_EXT_LINK_MODE_1000KX:
2441 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2442 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2443 break;
2444 case CTRL_EXT_LINK_MODE_SGMII:
2445 if (wm_sgmii_uses_mdio(sc)) {
2446 aprint_verbose_dev(sc->sc_dev,
2447 "SGMII(MDIO)\n");
2448 sc->sc_flags |= WM_F_SGMII;
2449 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2450 break;
2451 }
2452 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2453 /*FALLTHROUGH*/
2454 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2455 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2456 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2457 if (link_mode
2458 == CTRL_EXT_LINK_MODE_SGMII) {
2459 sc->sc_mediatype
2460 = WM_MEDIATYPE_COPPER;
2461 sc->sc_flags |= WM_F_SGMII;
2462 } else {
2463 sc->sc_mediatype
2464 = WM_MEDIATYPE_SERDES;
2465 aprint_verbose_dev(sc->sc_dev,
2466 "SERDES\n");
2467 }
2468 break;
2469 }
2470 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2471 aprint_verbose_dev(sc->sc_dev,
2472 "SERDES\n");
2473
2474 /* Change current link mode setting */
2475 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2476 switch (sc->sc_mediatype) {
2477 case WM_MEDIATYPE_COPPER:
2478 reg |= CTRL_EXT_LINK_MODE_SGMII;
2479 break;
2480 case WM_MEDIATYPE_SERDES:
2481 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2482 break;
2483 default:
2484 break;
2485 }
2486 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2487 break;
2488 case CTRL_EXT_LINK_MODE_GMII:
2489 default:
2490 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2491 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2492 break;
2493 }
2494
2496 if ((sc->sc_flags & WM_F_SGMII) != 0)
2497 reg |= CTRL_EXT_I2C_ENA;
2498 else
2499 reg &= ~CTRL_EXT_I2C_ENA;
2500 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2501
2502 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2503 wm_gmii_mediainit(sc, wmp->wmp_product);
2504 else
2505 wm_tbi_mediainit(sc);
2506 break;
2507 default:
2508 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2509 aprint_error_dev(sc->sc_dev,
2510 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2511 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2512 wm_gmii_mediainit(sc, wmp->wmp_product);
2513 }
2514 }
2515
2516 ifp = &sc->sc_ethercom.ec_if;
2517 xname = device_xname(sc->sc_dev);
2518 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2519 ifp->if_softc = sc;
2520 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2521 ifp->if_extflags = IFEF_START_MPSAFE;
2522 ifp->if_ioctl = wm_ioctl;
2523 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2524 ifp->if_start = wm_nq_start;
2525 if (sc->sc_nqueues > 1)
2526 ifp->if_transmit = wm_nq_transmit;
2527 } else
2528 ifp->if_start = wm_start;
2529 ifp->if_watchdog = wm_watchdog;
2530 ifp->if_init = wm_init;
2531 ifp->if_stop = wm_stop;
2532 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2533 IFQ_SET_READY(&ifp->if_snd);
2534
2535 /* Check for jumbo frame */
2536 switch (sc->sc_type) {
2537 case WM_T_82573:
2538 /* XXX limited to 9234 if ASPM is disabled */
2539 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2540 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2541 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2542 break;
2543 case WM_T_82571:
2544 case WM_T_82572:
2545 case WM_T_82574:
2546 case WM_T_82575:
2547 case WM_T_82576:
2548 case WM_T_82580:
2549 case WM_T_I350:
2550 case WM_T_I354: /* XXXX ok? */
2551 case WM_T_I210:
2552 case WM_T_I211:
2553 case WM_T_80003:
2554 case WM_T_ICH9:
2555 case WM_T_ICH10:
2556 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2557 case WM_T_PCH_LPT:
2558 case WM_T_PCH_SPT:
2559 /* XXX limited to 9234 */
2560 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2561 break;
2562 case WM_T_PCH:
2563 /* XXX limited to 4096 */
2564 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2565 break;
2566 case WM_T_82542_2_0:
2567 case WM_T_82542_2_1:
2568 case WM_T_82583:
2569 case WM_T_ICH8:
2570 /* No support for jumbo frame */
2571 break;
2572 default:
2573 /* ETHER_MAX_LEN_JUMBO */
2574 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2575 break;
2576 }
2577
2578	/* If we're an i82543 or greater, we can support VLANs. */
2579 if (sc->sc_type >= WM_T_82543)
2580 sc->sc_ethercom.ec_capabilities |=
2581 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2582
2583 /*
2584	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
2585 * on i82543 and later.
2586 */
2587 if (sc->sc_type >= WM_T_82543) {
2588 ifp->if_capabilities |=
2589 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2590 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2591 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2592 IFCAP_CSUM_TCPv6_Tx |
2593 IFCAP_CSUM_UDPv6_Tx;
2594 }
2595
2596 /*
2597	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
2598 *
2599 * 82541GI (8086:1076) ... no
2600 * 82572EI (8086:10b9) ... yes
2601 */
2602 if (sc->sc_type >= WM_T_82571) {
2603 ifp->if_capabilities |=
2604 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2605 }
2606
2607 /*
2608	 * If we're an i82544 or greater (except the i82547), we can do
2609 * TCP segmentation offload.
2610 */
2611 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2612 ifp->if_capabilities |= IFCAP_TSOv4;
2613 }
2614
2615 if (sc->sc_type >= WM_T_82571) {
2616 ifp->if_capabilities |= IFCAP_TSOv6;
2617 }
2618
2619 #ifdef WM_MPSAFE
2620 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2621 #else
2622 sc->sc_core_lock = NULL;
2623 #endif
2624
2625 /* Attach the interface. */
2626 if_initialize(ifp);
2627 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
2628 ether_ifattach(ifp, enaddr);
2629 if_register(ifp);
2630 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2631 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2632 RND_FLAG_DEFAULT);
2633
2634 #ifdef WM_EVENT_COUNTERS
2635 /* Attach event counters. */
2636 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2637 NULL, xname, "linkintr");
2638
2639 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2640 NULL, xname, "tx_xoff");
2641 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2642 NULL, xname, "tx_xon");
2643 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2644 NULL, xname, "rx_xoff");
2645 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2646 NULL, xname, "rx_xon");
2647 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2648 NULL, xname, "rx_macctl");
2649 #endif /* WM_EVENT_COUNTERS */
2650
2651 if (pmf_device_register(self, wm_suspend, wm_resume))
2652 pmf_class_network_register(self, ifp);
2653 else
2654 aprint_error_dev(self, "couldn't establish power handler\n");
2655
2656 sc->sc_flags |= WM_F_ATTACHED;
2657 out:
2658 return;
2659 }
2660
2661 /* The detach function (ca_detach) */
2662 static int
2663 wm_detach(device_t self, int flags __unused)
2664 {
2665 struct wm_softc *sc = device_private(self);
2666 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2667 int i;
2668
2669 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2670 return 0;
2671
2672 /* Stop the interface. Callouts are stopped in it. */
2673 wm_stop(ifp, 1);
2674
2675 pmf_device_deregister(self);
2676
2677 /* Tell the firmware about the release */
2678 WM_CORE_LOCK(sc);
2679 wm_release_manageability(sc);
2680 wm_release_hw_control(sc);
2681 WM_CORE_UNLOCK(sc);
2682
2683 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2684
2685 /* Delete all remaining media. */
2686 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2687
2688 ether_ifdetach(ifp);
2689 if_detach(ifp);
2690 if_percpuq_destroy(sc->sc_ipq);
2691
2692 /* Unload RX dmamaps and free mbufs */
2693 for (i = 0; i < sc->sc_nqueues; i++) {
2694 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
2695 mutex_enter(rxq->rxq_lock);
2696 wm_rxdrain(rxq);
2697 mutex_exit(rxq->rxq_lock);
2698 }
2699 /* Must unlock here */
2700
2701 /* Disestablish the interrupt handler */
2702 for (i = 0; i < sc->sc_nintrs; i++) {
2703 if (sc->sc_ihs[i] != NULL) {
2704 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2705 sc->sc_ihs[i] = NULL;
2706 }
2707 }
2708 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2709
2710 wm_free_txrx_queues(sc);
2711
2712 /* Unmap the registers */
2713 if (sc->sc_ss) {
2714 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2715 sc->sc_ss = 0;
2716 }
2717 if (sc->sc_ios) {
2718 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2719 sc->sc_ios = 0;
2720 }
2721 if (sc->sc_flashs) {
2722 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2723 sc->sc_flashs = 0;
2724 }
2725
2726 if (sc->sc_core_lock)
2727 mutex_obj_free(sc->sc_core_lock);
2728 if (sc->sc_ich_phymtx)
2729 mutex_obj_free(sc->sc_ich_phymtx);
2730 if (sc->sc_ich_nvmmtx)
2731 mutex_obj_free(sc->sc_ich_nvmmtx);
2732
2733 return 0;
2734 }
2735
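/*
 * PMF suspend/resume handlers.  On suspend, release manageability and
 * hardware control (and arm wake-on-LAN when WM_WOL is configured);
 * on resume, reinitialize manageability.
 */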
2736 static bool
2737 wm_suspend(device_t self, const pmf_qual_t *qual)
2738 {
2739 struct wm_softc *sc = device_private(self);
2740
2741 wm_release_manageability(sc);
2742 wm_release_hw_control(sc);
2743 #ifdef WM_WOL
2744 wm_enable_wakeup(sc);
2745 #endif
2746
2747 return true;
2748 }
2749
2750 static bool
2751 wm_resume(device_t self, const pmf_qual_t *qual)
2752 {
2753 struct wm_softc *sc = device_private(self);
2754
2755 wm_init_manageability(sc);
2756
2757 return true;
2758 }
2759
2760 /*
2761 * wm_watchdog: [ifnet interface function]
2762 *
2763 * Watchdog timer handler.
2764 */
2765 static void
2766 wm_watchdog(struct ifnet *ifp)
2767 {
2768 int qid;
2769 struct wm_softc *sc = ifp->if_softc;
2770
2771 for (qid = 0; qid < sc->sc_nqueues; qid++) {
2772 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
2773
2774 wm_watchdog_txq(ifp, txq);
2775 }
2776
2777 /* Reset the interface. */
2778 (void) wm_init(ifp);
2779
2780 /*
2781	 * Some upper-layer processing (e.g. ALTQ) still calls
2782	 * ifp->if_start() directly.
2783 */
2784 /* Try to get more packets going. */
2785 ifp->if_start(ifp);
2786 }
2787
2788 static void
2789 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
2790 {
2791 struct wm_softc *sc = ifp->if_softc;
2792
2793 /*
2794 * Since we're using delayed interrupts, sweep up
2795 * before we report an error.
2796 */
2797 mutex_enter(txq->txq_lock);
2798 wm_txeof(sc, txq);
2799 mutex_exit(txq->txq_lock);
2800
2801 if (txq->txq_free != WM_NTXDESC(txq)) {
2802 #ifdef WM_DEBUG
2803 int i, j;
2804 struct wm_txsoft *txs;
2805 #endif
2806 log(LOG_ERR,
2807 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2808 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
2809 txq->txq_next);
2810 ifp->if_oerrors++;
2811 #ifdef WM_DEBUG
2812 for (i = txq->txq_sdirty; i != txq->txq_snext ;
2813 i = WM_NEXTTXS(txq, i)) {
2814 txs = &txq->txq_soft[i];
2815 printf("txs %d tx %d -> %d\n",
2816 i, txs->txs_firstdesc, txs->txs_lastdesc);
2817 for (j = txs->txs_firstdesc; ;
2818 j = WM_NEXTTX(txq, j)) {
2819 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2820 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
2821 printf("\t %#08x%08x\n",
2822 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
2823 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
2824 if (j == txs->txs_lastdesc)
2825 break;
2826 }
2827 }
2828 #endif
2829 }
2830 }
2831
2832 /*
2833 * wm_tick:
2834 *
2835 * One second timer, used to check link status, sweep up
2836 * completed transmit jobs, etc.
2837 */
2838 static void
2839 wm_tick(void *arg)
2840 {
2841 struct wm_softc *sc = arg;
2842 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2843 #ifndef WM_MPSAFE
2844 int s = splnet();
2845 #endif
2846
2847 WM_CORE_LOCK(sc);
2848
2849 if (sc->sc_core_stopping)
2850 goto out;
2851
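	/*
	 * Accumulate the flow-control statistics.  These registers
	 * clear on read, so each one-second tick picks up the events
	 * counted since the previous tick.
	 */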
2852 if (sc->sc_type >= WM_T_82542_2_1) {
2853 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2854 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2855 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2856 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2857 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2858 }
2859
2860 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2861 ifp->if_ierrors += 0ULL + /* ensure quad_t */
2862 + CSR_READ(sc, WMREG_CRCERRS)
2863 + CSR_READ(sc, WMREG_ALGNERRC)
2864 + CSR_READ(sc, WMREG_SYMERRC)
2865 + CSR_READ(sc, WMREG_RXERRC)
2866 + CSR_READ(sc, WMREG_SEC)
2867 + CSR_READ(sc, WMREG_CEXTERR)
2868 + CSR_READ(sc, WMREG_RLEC);
2869 /*
2870	 * WMREG_RNBC is incremented when there are no available buffers
2871	 * in host memory.  It is not a count of dropped packets, because
2872	 * the Ethernet controller can still receive packets in that case
2873	 * as long as there is space in the PHY's FIFO.
2874	 *
2875	 * If you want to track WMREG_RNBC, use a dedicated EVCNT
2876	 * instead of if_iqdrops.
2877 */
2878 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
2879
2880 if (sc->sc_flags & WM_F_HAS_MII)
2881 mii_tick(&sc->sc_mii);
2882 else if ((sc->sc_type >= WM_T_82575)
2883 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2884 wm_serdes_tick(sc);
2885 else
2886 wm_tbi_tick(sc);
2887
2888 out:
2889 WM_CORE_UNLOCK(sc);
2890 #ifndef WM_MPSAFE
2891 splx(s);
2892 #endif
2893
2894 if (!sc->sc_core_stopping)
2895 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2896 }
2897
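/*
 * wm_ifflags_cb:
 *
 *	Callback for interface flag changes: request a full reinit
 *	(ENETRESET) for changes beyond the promiscuous/multicast bits;
 *	otherwise just update the receive filter and VLAN settings.
 */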
2898 static int
2899 wm_ifflags_cb(struct ethercom *ec)
2900 {
2901 struct ifnet *ifp = &ec->ec_if;
2902 struct wm_softc *sc = ifp->if_softc;
2903 int rc = 0;
2904
2905 WM_CORE_LOCK(sc);
2906
2907 int change = ifp->if_flags ^ sc->sc_if_flags;
2908 sc->sc_if_flags = ifp->if_flags;
2909
2910 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
2911 rc = ENETRESET;
2912 goto out;
2913 }
2914
2915 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2916 wm_set_filter(sc);
2917
2918 wm_set_vlan(sc);
2919
2920 out:
2921 WM_CORE_UNLOCK(sc);
2922
2923 return rc;
2924 }
2925
2926 /*
2927 * wm_ioctl: [ifnet interface function]
2928 *
2929 * Handle control requests from the operator.
2930 */
2931 static int
2932 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2933 {
2934 struct wm_softc *sc = ifp->if_softc;
2935 struct ifreq *ifr = (struct ifreq *) data;
2936 struct ifaddr *ifa = (struct ifaddr *)data;
2937 struct sockaddr_dl *sdl;
2938 int s, error;
2939
2940 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
2941 device_xname(sc->sc_dev), __func__));
2942
2943 #ifndef WM_MPSAFE
2944 s = splnet();
2945 #endif
2946 switch (cmd) {
2947 case SIOCSIFMEDIA:
2948 case SIOCGIFMEDIA:
2949 WM_CORE_LOCK(sc);
2950 /* Flow control requires full-duplex mode. */
2951 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2952 (ifr->ifr_media & IFM_FDX) == 0)
2953 ifr->ifr_media &= ~IFM_ETH_FMASK;
2954 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2955 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2956 /* We can do both TXPAUSE and RXPAUSE. */
2957 ifr->ifr_media |=
2958 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2959 }
2960 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2961 }
2962 WM_CORE_UNLOCK(sc);
2963 #ifdef WM_MPSAFE
2964 s = splnet();
2965 #endif
2966 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2967 #ifdef WM_MPSAFE
2968 splx(s);
2969 #endif
2970 break;
2971 case SIOCINITIFADDR:
2972 WM_CORE_LOCK(sc);
2973 if (ifa->ifa_addr->sa_family == AF_LINK) {
2974 sdl = satosdl(ifp->if_dl->ifa_addr);
2975 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2976 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2977 /* unicast address is first multicast entry */
2978 wm_set_filter(sc);
2979 error = 0;
2980 WM_CORE_UNLOCK(sc);
2981 break;
2982 }
2983 WM_CORE_UNLOCK(sc);
2984 /*FALLTHROUGH*/
2985 default:
2986 #ifdef WM_MPSAFE
2987 s = splnet();
2988 #endif
2989		/* ether_ioctl() may call wm_start(), so call it unlocked */
2990 error = ether_ioctl(ifp, cmd, data);
2991 #ifdef WM_MPSAFE
2992 splx(s);
2993 #endif
2994 if (error != ENETRESET)
2995 break;
2996
2997 error = 0;
2998
2999 if (cmd == SIOCSIFCAP) {
3000 error = (*ifp->if_init)(ifp);
3001 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3002 ;
3003 else if (ifp->if_flags & IFF_RUNNING) {
3004 /*
3005 * Multicast list has changed; set the hardware filter
3006 * accordingly.
3007 */
3008 WM_CORE_LOCK(sc);
3009 wm_set_filter(sc);
3010 WM_CORE_UNLOCK(sc);
3011 }
3012 break;
3013 }
3014
3015 #ifndef WM_MPSAFE
3016 splx(s);
3017 #endif
3018 return error;
3019 }
3020
3021 /* MAC address related */
3022
3023 /*
3024  * Get the offset of the MAC address and return it.
3025  * If an error occurs, use offset 0.
3026 */
3027 static uint16_t
3028 wm_check_alt_mac_addr(struct wm_softc *sc)
3029 {
3030 uint16_t myea[ETHER_ADDR_LEN / 2];
3031 uint16_t offset = NVM_OFF_MACADDR;
3032
3033 /* Try to read alternative MAC address pointer */
3034 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3035 return 0;
3036
3037	/* Check whether the pointer is valid. */
3038 if ((offset == 0x0000) || (offset == 0xffff))
3039 return 0;
3040
3041 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3042 /*
3043	 * Check whether the alternative MAC address is valid.  Some
3044	 * cards have a non-0xffff pointer but don't actually use an
3045	 * alternative MAC address.
3046	 *
3047	 * Do that by checking whether the broadcast bit is set.
3048 */
3049 if (wm_nvm_read(sc, offset, 1, myea) == 0)
3050 if (((myea[0] & 0xff) & 0x01) == 0)
3051 return offset; /* Found */
3052
3053 /* Not found */
3054 return 0;
3055 }
3056
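/*
 * Read the MAC address from the NVM.  The word offset depends on the
 * chip: some parts partition the EEPROM per LAN function, some provide
 * an alternative MAC address pointer, and on the remaining multi-port
 * parts the second function's address is derived by flipping the LSB.
 */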
3057 static int
3058 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3059 {
3060 uint16_t myea[ETHER_ADDR_LEN / 2];
3061 uint16_t offset = NVM_OFF_MACADDR;
3062 int do_invert = 0;
3063
3064 switch (sc->sc_type) {
3065 case WM_T_82580:
3066 case WM_T_I350:
3067 case WM_T_I354:
3068 /* EEPROM Top Level Partitioning */
3069 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3070 break;
3071 case WM_T_82571:
3072 case WM_T_82575:
3073 case WM_T_82576:
3074 case WM_T_80003:
3075 case WM_T_I210:
3076 case WM_T_I211:
3077 offset = wm_check_alt_mac_addr(sc);
3078 if (offset == 0)
3079 if ((sc->sc_funcid & 0x01) == 1)
3080 do_invert = 1;
3081 break;
3082 default:
3083 if ((sc->sc_funcid & 0x01) == 1)
3084 do_invert = 1;
3085 break;
3086 }
3087
3088 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
3089 goto bad;
3090
3091 enaddr[0] = myea[0] & 0xff;
3092 enaddr[1] = myea[0] >> 8;
3093 enaddr[2] = myea[1] & 0xff;
3094 enaddr[3] = myea[1] >> 8;
3095 enaddr[4] = myea[2] & 0xff;
3096 enaddr[5] = myea[2] >> 8;
3097
3098 /*
3099 * Toggle the LSB of the MAC address on the second port
3100 * of some dual port cards.
3101 */
3102 if (do_invert != 0)
3103 enaddr[5] ^= 1;
3104
3105 return 0;
3106
3107 bad:
3108 return -1;
3109 }
3110
3111 /*
3112 * wm_set_ral:
3113 *
3114 *	Set an entry in the receive address list.
3115 */
3116 static void
3117 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3118 {
3119 uint32_t ral_lo, ral_hi;
3120
3121 if (enaddr != NULL) {
3122 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3123 (enaddr[3] << 24);
3124 ral_hi = enaddr[4] | (enaddr[5] << 8);
3125 ral_hi |= RAL_AV;
3126 } else {
3127 ral_lo = 0;
3128 ral_hi = 0;
3129 }
3130
3131 if (sc->sc_type >= WM_T_82544) {
3132 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3133 ral_lo);
3134 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3135 ral_hi);
3136 } else {
3137 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3138 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3139 }
3140 }
3141
3142 /*
3143 * wm_mchash:
3144 *
3145 *	Compute the hash of the multicast address for the multicast
3146 *	filter: 1024 bits on ICH/PCH variants, 4096 bits elsewhere.
3147 */
3148 static uint32_t
3149 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3150 {
3151 static const int lo_shift[4] = { 4, 3, 2, 0 };
3152 static const int hi_shift[4] = { 4, 5, 6, 8 };
3153 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3154 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3155 uint32_t hash;
3156
3157 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3158 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3159 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3160 || (sc->sc_type == WM_T_PCH_SPT)) {
3161 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3162 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3163 return (hash & 0x3ff);
3164 }
3165 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3166 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3167
3168 return (hash & 0xfff);
3169 }
3170
3171 /*
3172 * wm_set_filter:
3173 *
3174 * Set up the receive filter.
3175 */
3176 static void
3177 wm_set_filter(struct wm_softc *sc)
3178 {
3179 struct ethercom *ec = &sc->sc_ethercom;
3180 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3181 struct ether_multi *enm;
3182 struct ether_multistep step;
3183 bus_addr_t mta_reg;
3184 uint32_t hash, reg, bit;
3185 int i, size, ralmax;
3186
3187 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3188 device_xname(sc->sc_dev), __func__));
3189
3190 if (sc->sc_type >= WM_T_82544)
3191 mta_reg = WMREG_CORDOVA_MTA;
3192 else
3193 mta_reg = WMREG_MTA;
3194
3195 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3196
3197 if (ifp->if_flags & IFF_BROADCAST)
3198 sc->sc_rctl |= RCTL_BAM;
3199 if (ifp->if_flags & IFF_PROMISC) {
3200 sc->sc_rctl |= RCTL_UPE;
3201 goto allmulti;
3202 }
3203
3204 /*
3205 * Set the station address in the first RAL slot, and
3206 * clear the remaining slots.
3207 */
3208 if (sc->sc_type == WM_T_ICH8)
3209		size = WM_RAL_TABSIZE_ICH8 - 1;
3210 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3211 || (sc->sc_type == WM_T_PCH))
3212 size = WM_RAL_TABSIZE_ICH8;
3213 else if (sc->sc_type == WM_T_PCH2)
3214 size = WM_RAL_TABSIZE_PCH2;
3215	else if ((sc->sc_type == WM_T_PCH_LPT)
	    || (sc->sc_type == WM_T_PCH_SPT))
3216 size = WM_RAL_TABSIZE_PCH_LPT;
3217 else if (sc->sc_type == WM_T_82575)
3218 size = WM_RAL_TABSIZE_82575;
3219 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3220 size = WM_RAL_TABSIZE_82576;
3221 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3222 size = WM_RAL_TABSIZE_I350;
3223 else
3224 size = WM_RAL_TABSIZE;
3225 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3226
3227 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
3228 i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3229 switch (i) {
3230 case 0:
3231 /* We can use all entries */
3232 ralmax = size;
3233 break;
3234 case 1:
3235 /* Only RAR[0] */
3236 ralmax = 1;
3237 break;
3238 default:
3239 /* available SHRA + RAR[0] */
3240 ralmax = i + 1;
3241 }
3242 } else
3243 ralmax = size;
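	/*
	 * Clear the remaining slots.  Entries at or above ralmax are
	 * owned by firmware (see the FWSM_WLOCK_MAC check above) and
	 * are left untouched.
	 */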
3244 for (i = 1; i < size; i++) {
3245 if (i < ralmax)
3246 wm_set_ral(sc, NULL, i);
3247 }
3248
3249 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3250 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3251 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3252 || (sc->sc_type == WM_T_PCH_SPT))
3253 size = WM_ICH8_MC_TABSIZE;
3254 else
3255 size = WM_MC_TABSIZE;
3256 /* Clear out the multicast table. */
3257 for (i = 0; i < size; i++)
3258 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3259
3260 ETHER_FIRST_MULTI(step, ec, enm);
3261 while (enm != NULL) {
3262 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3263 /*
3264 * We must listen to a range of multicast addresses.
3265 * For now, just accept all multicasts, rather than
3266 * trying to set only those filter bits needed to match
3267 * the range. (At this time, the only use of address
3268 * ranges is for IP multicast routing, for which the
3269 * range is big enough to require all bits set.)
3270 */
3271 goto allmulti;
3272 }
3273
3274 hash = wm_mchash(sc, enm->enm_addrlo);
3275
3276 reg = (hash >> 5);
3277 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3278 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3279 || (sc->sc_type == WM_T_PCH2)
3280 || (sc->sc_type == WM_T_PCH_LPT)
3281 || (sc->sc_type == WM_T_PCH_SPT))
3282 reg &= 0x1f;
3283 else
3284 reg &= 0x7f;
3285 bit = hash & 0x1f;
3286
3287 hash = CSR_READ(sc, mta_reg + (reg << 2));
3288 hash |= 1U << bit;
3289
3290 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3291 /*
3292 * 82544 Errata 9: Certain register cannot be written
3293 * with particular alignments in PCI-X bus operation
3294 * (FCAH, MTA and VFTA).
3295 */
3296 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3297 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3298 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3299 } else
3300 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3301
3302 ETHER_NEXT_MULTI(step, enm);
3303 }
3304
3305 ifp->if_flags &= ~IFF_ALLMULTI;
3306 goto setit;
3307
3308 allmulti:
3309 ifp->if_flags |= IFF_ALLMULTI;
3310 sc->sc_rctl |= RCTL_MPE;
3311
3312 setit:
3313 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3314 }
3315
3316 /* Reset and init related */
3317
3318 static void
3319 wm_set_vlan(struct wm_softc *sc)
3320 {
3321
3322 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3323 device_xname(sc->sc_dev), __func__));
3324
3325 /* Deal with VLAN enables. */
3326 if (VLAN_ATTACHED(&sc->sc_ethercom))
3327 sc->sc_ctrl |= CTRL_VME;
3328 else
3329 sc->sc_ctrl &= ~CTRL_VME;
3330
3331 /* Write the control registers. */
3332 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3333 }
3334
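/*
 * wm_set_pcie_completion_timeout:
 *
 *	If the PCIe completion timeout is at its default of 0, program
 *	a non-zero timeout: 16ms via the Device Control 2 register on
 *	parts that advertise capability version 2, or 10ms via GCR
 *	otherwise.  Completion timeout resend is disabled either way.
 */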
3335 static void
3336 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3337 {
3338 uint32_t gcr;
3339 pcireg_t ctrl2;
3340
3341 gcr = CSR_READ(sc, WMREG_GCR);
3342
3343 /* Only take action if timeout value is defaulted to 0 */
3344 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3345 goto out;
3346
3347 if ((gcr & GCR_CAP_VER2) == 0) {
3348 gcr |= GCR_CMPL_TMOUT_10MS;
3349 goto out;
3350 }
3351
3352 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3353 sc->sc_pcixe_capoff + PCIE_DCSR2);
3354 ctrl2 |= WM_PCIE_DCSR2_16MS;
3355 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3356 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3357
3358 out:
3359 /* Disable completion timeout resend */
3360 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3361
3362 CSR_WRITE(sc, WMREG_GCR, gcr);
3363 }
3364
3365 void
3366 wm_get_auto_rd_done(struct wm_softc *sc)
3367 {
3368 int i;
3369
3370 /* wait for eeprom to reload */
3371 switch (sc->sc_type) {
3372 case WM_T_82571:
3373 case WM_T_82572:
3374 case WM_T_82573:
3375 case WM_T_82574:
3376 case WM_T_82583:
3377 case WM_T_82575:
3378 case WM_T_82576:
3379 case WM_T_82580:
3380 case WM_T_I350:
3381 case WM_T_I354:
3382 case WM_T_I210:
3383 case WM_T_I211:
3384 case WM_T_80003:
3385 case WM_T_ICH8:
3386 case WM_T_ICH9:
3387 for (i = 0; i < 10; i++) {
3388 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3389 break;
3390 delay(1000);
3391 }
3392 if (i == 10) {
3393 log(LOG_ERR, "%s: auto read from eeprom failed to "
3394 "complete\n", device_xname(sc->sc_dev));
3395 }
3396 break;
3397 default:
3398 break;
3399 }
3400 }
3401
3402 void
3403 wm_lan_init_done(struct wm_softc *sc)
3404 {
3405 uint32_t reg = 0;
3406 int i;
3407
3408 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3409 device_xname(sc->sc_dev), __func__));
3410
3411 /* Wait for eeprom to reload */
3412 switch (sc->sc_type) {
3413 case WM_T_ICH10:
3414 case WM_T_PCH:
3415 case WM_T_PCH2:
3416 case WM_T_PCH_LPT:
3417 case WM_T_PCH_SPT:
3418 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3419 reg = CSR_READ(sc, WMREG_STATUS);
3420 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3421 break;
3422 delay(100);
3423 }
3424 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3425 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3426 "complete\n", device_xname(sc->sc_dev), __func__);
3427 }
3428 break;
3429 default:
3430 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3431 __func__);
3432 break;
3433 }
3434
3435 reg &= ~STATUS_LAN_INIT_DONE;
3436 CSR_WRITE(sc, WMREG_STATUS, reg);
3437 }
3438
3439 void
3440 wm_get_cfg_done(struct wm_softc *sc)
3441 {
3442 int mask;
3443 uint32_t reg;
3444 int i;
3445
3446 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3447 device_xname(sc->sc_dev), __func__));
3448
3449 /* Wait for eeprom to reload */
3450 switch (sc->sc_type) {
3451 case WM_T_82542_2_0:
3452 case WM_T_82542_2_1:
3453 /* null */
3454 break;
3455 case WM_T_82543:
3456 case WM_T_82544:
3457 case WM_T_82540:
3458 case WM_T_82545:
3459 case WM_T_82545_3:
3460 case WM_T_82546:
3461 case WM_T_82546_3:
3462 case WM_T_82541:
3463 case WM_T_82541_2:
3464 case WM_T_82547:
3465 case WM_T_82547_2:
3466 case WM_T_82573:
3467 case WM_T_82574:
3468 case WM_T_82583:
3469 /* generic */
3470 delay(10*1000);
3471 break;
3472 case WM_T_80003:
3473 case WM_T_82571:
3474 case WM_T_82572:
3475 case WM_T_82575:
3476 case WM_T_82576:
3477 case WM_T_82580:
3478 case WM_T_I350:
3479 case WM_T_I354:
3480 case WM_T_I210:
3481 case WM_T_I211:
3482 if (sc->sc_type == WM_T_82571) {
3483 			/* On the 82571, all functions use port 0's CFGDONE bit */
3484 mask = EEMNGCTL_CFGDONE_0;
3485 } else
3486 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3487 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3488 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3489 break;
3490 delay(1000);
3491 }
3492 if (i >= WM_PHY_CFG_TIMEOUT) {
3493 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3494 device_xname(sc->sc_dev), __func__));
3495 }
3496 break;
3497 case WM_T_ICH8:
3498 case WM_T_ICH9:
3499 case WM_T_ICH10:
3500 case WM_T_PCH:
3501 case WM_T_PCH2:
3502 case WM_T_PCH_LPT:
3503 case WM_T_PCH_SPT:
3504 delay(10*1000);
3505 if (sc->sc_type >= WM_T_ICH10)
3506 wm_lan_init_done(sc);
3507 else
3508 wm_get_auto_rd_done(sc);
3509
3510 reg = CSR_READ(sc, WMREG_STATUS);
3511 if ((reg & STATUS_PHYRA) != 0)
3512 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3513 break;
3514 default:
3515 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3516 __func__);
3517 break;
3518 }
3519 }
3520
3521 /* Init hardware bits */
3522 void
3523 wm_initialize_hardware_bits(struct wm_softc *sc)
3524 {
3525 uint32_t tarc0, tarc1, reg;
3526
3527 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3528 device_xname(sc->sc_dev), __func__));
3529
3530 /* For 82571 variant, 80003 and ICHs */
3531 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3532 || (sc->sc_type >= WM_T_80003)) {
3533
3534 /* Transmit Descriptor Control 0 */
3535 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3536 reg |= TXDCTL_COUNT_DESC;
3537 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3538
3539 /* Transmit Descriptor Control 1 */
3540 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3541 reg |= TXDCTL_COUNT_DESC;
3542 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3543
3544 /* TARC0 */
3545 tarc0 = CSR_READ(sc, WMREG_TARC0);
3546 switch (sc->sc_type) {
3547 case WM_T_82571:
3548 case WM_T_82572:
3549 case WM_T_82573:
3550 case WM_T_82574:
3551 case WM_T_82583:
3552 case WM_T_80003:
3553 /* Clear bits 30..27 */
3554 tarc0 &= ~__BITS(30, 27);
3555 break;
3556 default:
3557 break;
3558 }
3559
3560 switch (sc->sc_type) {
3561 case WM_T_82571:
3562 case WM_T_82572:
3563 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3564
3565 tarc1 = CSR_READ(sc, WMREG_TARC1);
3566 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3567 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3568 /* 8257[12] Errata No.7 */
3569 			tarc1 |= __BIT(22);	/* TARC1 bit 22 */
3570
3571 /* TARC1 bit 28 */
3572 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3573 tarc1 &= ~__BIT(28);
3574 else
3575 tarc1 |= __BIT(28);
3576 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3577
3578 /*
3579 * 8257[12] Errata No.13
3580 			 * Disable Dynamic Clock Gating.
3581 */
3582 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3583 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3584 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3585 break;
3586 case WM_T_82573:
3587 case WM_T_82574:
3588 case WM_T_82583:
3589 if ((sc->sc_type == WM_T_82574)
3590 || (sc->sc_type == WM_T_82583))
3591 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3592
3593 /* Extended Device Control */
3594 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3595 reg &= ~__BIT(23); /* Clear bit 23 */
3596 reg |= __BIT(22); /* Set bit 22 */
3597 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3598
3599 /* Device Control */
3600 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3601 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3602
3603 /* PCIe Control Register */
3604 /*
3605 * 82573 Errata (unknown).
3606 *
3607 * 82574 Errata 25 and 82583 Errata 12
3608 * "Dropped Rx Packets":
3609 			 * NVM image version 2.1.4 and newer does not have this bug.
3610 */
3611 reg = CSR_READ(sc, WMREG_GCR);
3612 reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
3613 CSR_WRITE(sc, WMREG_GCR, reg);
3614
3615 if ((sc->sc_type == WM_T_82574)
3616 || (sc->sc_type == WM_T_82583)) {
3617 /*
3618 * Document says this bit must be set for
3619 * proper operation.
3620 */
3621 reg = CSR_READ(sc, WMREG_GCR);
3622 reg |= __BIT(22);
3623 CSR_WRITE(sc, WMREG_GCR, reg);
3624
3625 /*
3626 				 * Apply a workaround for the hardware
3627 				 * errata documented in the errata docs.
3628 				 * It fixes an issue where erroneous or
3629 				 * unreliable PCIe completions occur,
3630 				 * particularly with ASPM enabled. Without
3631 				 * the fix, the issue can cause Tx timeouts.
3632 */
3633 reg = CSR_READ(sc, WMREG_GCR2);
3634 reg |= __BIT(0);
3635 CSR_WRITE(sc, WMREG_GCR2, reg);
3636 }
3637 break;
3638 case WM_T_80003:
3639 /* TARC0 */
3640 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3641 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3642 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3643
3644 /* TARC1 bit 28 */
3645 tarc1 = CSR_READ(sc, WMREG_TARC1);
3646 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3647 tarc1 &= ~__BIT(28);
3648 else
3649 tarc1 |= __BIT(28);
3650 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3651 break;
3652 case WM_T_ICH8:
3653 case WM_T_ICH9:
3654 case WM_T_ICH10:
3655 case WM_T_PCH:
3656 case WM_T_PCH2:
3657 case WM_T_PCH_LPT:
3658 case WM_T_PCH_SPT:
3659 /* TARC0 */
3660 if ((sc->sc_type == WM_T_ICH8)
3661 || (sc->sc_type == WM_T_PCH_SPT)) {
3662 /* Set TARC0 bits 29 and 28 */
3663 tarc0 |= __BITS(29, 28);
3664 }
3665 /* Set TARC0 bits 23,24,26,27 */
3666 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3667
3668 /* CTRL_EXT */
3669 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3670 reg |= __BIT(22); /* Set bit 22 */
3671 /*
3672 * Enable PHY low-power state when MAC is at D3
3673 * w/o WoL
3674 */
3675 if (sc->sc_type >= WM_T_PCH)
3676 reg |= CTRL_EXT_PHYPDEN;
3677 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3678
3679 /* TARC1 */
3680 tarc1 = CSR_READ(sc, WMREG_TARC1);
3681 /* bit 28 */
3682 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3683 tarc1 &= ~__BIT(28);
3684 else
3685 tarc1 |= __BIT(28);
3686 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3687 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3688
3689 /* Device Status */
3690 if (sc->sc_type == WM_T_ICH8) {
3691 reg = CSR_READ(sc, WMREG_STATUS);
3692 reg &= ~__BIT(31);
3693 CSR_WRITE(sc, WMREG_STATUS, reg);
3694
3695 }
3696
3697 /* IOSFPC */
3698 if (sc->sc_type == WM_T_PCH_SPT) {
3699 reg = CSR_READ(sc, WMREG_IOSFPC);
3700 reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
3701 CSR_WRITE(sc, WMREG_IOSFPC, reg);
3702 }
3703 /*
3704 			 * To work around a descriptor data corruption issue
3705 			 * seen with NFS v2 UDP traffic, simply disable the
3706 			 * NFS filtering capability.
3707 */
3708 reg = CSR_READ(sc, WMREG_RFCTL);
3709 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3710 CSR_WRITE(sc, WMREG_RFCTL, reg);
3711 break;
3712 default:
3713 break;
3714 }
3715 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3716
3717 /*
3718 * 8257[12] Errata No.52 and some others.
3719 * Avoid RSS Hash Value bug.
3720 */
3721 switch (sc->sc_type) {
3722 case WM_T_82571:
3723 case WM_T_82572:
3724 case WM_T_82573:
3725 case WM_T_80003:
3726 case WM_T_ICH8:
3727 reg = CSR_READ(sc, WMREG_RFCTL);
3728 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
3729 CSR_WRITE(sc, WMREG_RFCTL, reg);
3730 break;
3731 default:
3732 break;
3733 }
3734 }
3735 }
3736
3737 static uint32_t
3738 wm_rxpbs_adjust_82580(uint32_t val)
3739 {
3740 uint32_t rv = 0;
3741
3742 if (val < __arraycount(wm_82580_rxpbs_table))
3743 rv = wm_82580_rxpbs_table[val];
3744
3745 return rv;
3746 }
3747
3748 /*
3749 * wm_reset:
3750 *
3751 * Reset the i82542 chip.
3752 */
3753 static void
3754 wm_reset(struct wm_softc *sc)
3755 {
3756 int phy_reset = 0;
3757 int i, error = 0;
3758 uint32_t reg;
3759
3760 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3761 device_xname(sc->sc_dev), __func__));
3762 KASSERT(sc->sc_type != 0);
3763
3764 /*
3765 * Allocate on-chip memory according to the MTU size.
3766 * The Packet Buffer Allocation register must be written
3767 * before the chip is reset.
3768 */
3769 switch (sc->sc_type) {
3770 case WM_T_82547:
3771 case WM_T_82547_2:
3772 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3773 PBA_22K : PBA_30K;
3774 for (i = 0; i < sc->sc_nqueues; i++) {
3775 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3776 txq->txq_fifo_head = 0;
3777 txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3778 txq->txq_fifo_size =
3779 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3780 txq->txq_fifo_stall = 0;
3781 }
3782 break;
3783 case WM_T_82571:
3784 case WM_T_82572:
3785 case WM_T_82575: /* XXX need special handing for jumbo frames */
3786 case WM_T_80003:
3787 sc->sc_pba = PBA_32K;
3788 break;
3789 case WM_T_82573:
3790 sc->sc_pba = PBA_12K;
3791 break;
3792 case WM_T_82574:
3793 case WM_T_82583:
3794 sc->sc_pba = PBA_20K;
3795 break;
3796 case WM_T_82576:
3797 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3798 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3799 break;
3800 case WM_T_82580:
3801 case WM_T_I350:
3802 case WM_T_I354:
3803 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3804 break;
3805 case WM_T_I210:
3806 case WM_T_I211:
3807 sc->sc_pba = PBA_34K;
3808 break;
3809 case WM_T_ICH8:
3810 /* Workaround for a bit corruption issue in FIFO memory */
3811 sc->sc_pba = PBA_8K;
3812 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3813 break;
3814 case WM_T_ICH9:
3815 case WM_T_ICH10:
3816 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3817 PBA_14K : PBA_10K;
3818 break;
3819 case WM_T_PCH:
3820 case WM_T_PCH2:
3821 case WM_T_PCH_LPT:
3822 case WM_T_PCH_SPT:
3823 sc->sc_pba = PBA_26K;
3824 break;
3825 default:
3826 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3827 PBA_40K : PBA_48K;
3828 break;
3829 }
3830 /*
3831 * Only old or non-multiqueue devices have the PBA register
3832 * XXX Need special handling for 82575.
3833 */
3834 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3835 || (sc->sc_type == WM_T_82575))
3836 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3837
3838 /* Prevent the PCI-E bus from sticking */
3839 if (sc->sc_flags & WM_F_PCIE) {
3840 int timeout = 800;
3841
3842 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3843 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3844
3845 while (timeout--) {
3846 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3847 == 0)
3848 break;
3849 delay(100);
3850 }
3851 }
3852
3853 /* Set the completion timeout for interface */
3854 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3855 || (sc->sc_type == WM_T_82580)
3856 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3857 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3858 wm_set_pcie_completion_timeout(sc);
3859
3860 /* Clear interrupt */
3861 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3862 if (sc->sc_nintrs > 1) {
3863 if (sc->sc_type != WM_T_82574) {
3864 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3865 CSR_WRITE(sc, WMREG_EIAC, 0);
3866 } else {
3867 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3868 }
3869 }
3870
3871 /* Stop the transmit and receive processes. */
3872 CSR_WRITE(sc, WMREG_RCTL, 0);
3873 sc->sc_rctl &= ~RCTL_EN;
3874 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3875 CSR_WRITE_FLUSH(sc);
3876
3877 /* XXX set_tbi_sbp_82543() */
3878
3879 delay(10*1000);
3880
3881 /* Must acquire the MDIO ownership before MAC reset */
3882 switch (sc->sc_type) {
3883 case WM_T_82573:
3884 case WM_T_82574:
3885 case WM_T_82583:
3886 error = wm_get_hw_semaphore_82573(sc);
3887 break;
3888 default:
3889 break;
3890 }
3891
3892 /*
3893 * 82541 Errata 29? & 82547 Errata 28?
3894 * See also the description about PHY_RST bit in CTRL register
3895 * in 8254x_GBe_SDM.pdf.
3896 */
3897 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3898 CSR_WRITE(sc, WMREG_CTRL,
3899 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3900 CSR_WRITE_FLUSH(sc);
3901 delay(5000);
3902 }
3903
3904 switch (sc->sc_type) {
3905 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3906 case WM_T_82541:
3907 case WM_T_82541_2:
3908 case WM_T_82547:
3909 case WM_T_82547_2:
3910 /*
3911 * On some chipsets, a reset through a memory-mapped write
3912 * cycle can cause the chip to reset before completing the
3913 		 * write cycle. This causes a major headache that can be
3914 * avoided by issuing the reset via indirect register writes
3915 * through I/O space.
3916 *
3917 * So, if we successfully mapped the I/O BAR at attach time,
3918 * use that. Otherwise, try our luck with a memory-mapped
3919 * reset.
3920 */
3921 if (sc->sc_flags & WM_F_IOH_VALID)
3922 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3923 else
3924 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3925 break;
3926 case WM_T_82545_3:
3927 case WM_T_82546_3:
3928 /* Use the shadow control register on these chips. */
3929 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3930 break;
3931 case WM_T_80003:
3932 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3933 sc->phy.acquire(sc);
3934 CSR_WRITE(sc, WMREG_CTRL, reg);
3935 sc->phy.release(sc);
3936 break;
3937 case WM_T_ICH8:
3938 case WM_T_ICH9:
3939 case WM_T_ICH10:
3940 case WM_T_PCH:
3941 case WM_T_PCH2:
3942 case WM_T_PCH_LPT:
3943 case WM_T_PCH_SPT:
3944 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3945 if (wm_phy_resetisblocked(sc) == false) {
3946 /*
3947 * Gate automatic PHY configuration by hardware on
3948 * non-managed 82579
3949 */
3950 if ((sc->sc_type == WM_T_PCH2)
3951 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3952 == 0))
3953 wm_gate_hw_phy_config_ich8lan(sc, true);
3954
3955 reg |= CTRL_PHY_RESET;
3956 phy_reset = 1;
3957 } else
3958 printf("XXX reset is blocked!!!\n");
3959 sc->phy.acquire(sc);
3960 CSR_WRITE(sc, WMREG_CTRL, reg);
3961 		/* Don't insert a completion barrier during reset */
3962 delay(20*1000);
3963 mutex_exit(sc->sc_ich_phymtx);
3964 break;
3965 case WM_T_82580:
3966 case WM_T_I350:
3967 case WM_T_I354:
3968 case WM_T_I210:
3969 case WM_T_I211:
3970 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3971 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3972 CSR_WRITE_FLUSH(sc);
3973 delay(5000);
3974 break;
3975 case WM_T_82542_2_0:
3976 case WM_T_82542_2_1:
3977 case WM_T_82543:
3978 case WM_T_82540:
3979 case WM_T_82545:
3980 case WM_T_82546:
3981 case WM_T_82571:
3982 case WM_T_82572:
3983 case WM_T_82573:
3984 case WM_T_82574:
3985 case WM_T_82575:
3986 case WM_T_82576:
3987 case WM_T_82583:
3988 default:
3989 /* Everything else can safely use the documented method. */
3990 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3991 break;
3992 }
3993
3994 /* Must release the MDIO ownership after MAC reset */
3995 switch (sc->sc_type) {
3996 case WM_T_82573:
3997 case WM_T_82574:
3998 case WM_T_82583:
3999 if (error == 0)
4000 wm_put_hw_semaphore_82573(sc);
4001 break;
4002 default:
4003 break;
4004 }
4005
4006 if (phy_reset != 0) {
4007 wm_get_cfg_done(sc);
4008 delay(10 * 1000);
4009 if (sc->sc_type >= WM_T_PCH) {
4010 reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
4011 BM_PORT_GEN_CFG);
4012 reg &= ~BM_WUC_HOST_WU_BIT;
4013 wm_gmii_hv_writereg(sc->sc_dev, 2,
4014 BM_PORT_GEN_CFG, reg);
4015 }
4016 }
4017
4018 /* reload EEPROM */
4019 switch (sc->sc_type) {
4020 case WM_T_82542_2_0:
4021 case WM_T_82542_2_1:
4022 case WM_T_82543:
4023 case WM_T_82544:
4024 delay(10);
4025 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4026 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4027 CSR_WRITE_FLUSH(sc);
4028 delay(2000);
4029 break;
4030 case WM_T_82540:
4031 case WM_T_82545:
4032 case WM_T_82545_3:
4033 case WM_T_82546:
4034 case WM_T_82546_3:
4035 delay(5*1000);
4036 /* XXX Disable HW ARPs on ASF enabled adapters */
4037 break;
4038 case WM_T_82541:
4039 case WM_T_82541_2:
4040 case WM_T_82547:
4041 case WM_T_82547_2:
4042 delay(20000);
4043 /* XXX Disable HW ARPs on ASF enabled adapters */
4044 break;
4045 case WM_T_82571:
4046 case WM_T_82572:
4047 case WM_T_82573:
4048 case WM_T_82574:
4049 case WM_T_82583:
4050 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4051 delay(10);
4052 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4053 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4054 CSR_WRITE_FLUSH(sc);
4055 }
4056 /* check EECD_EE_AUTORD */
4057 wm_get_auto_rd_done(sc);
4058 /*
4059 		 * PHY configuration from the NVM starts only after
4060 		 * EECD_AUTO_RD is set.
4061 */
4062 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4063 || (sc->sc_type == WM_T_82583))
4064 delay(25*1000);
4065 break;
4066 case WM_T_82575:
4067 case WM_T_82576:
4068 case WM_T_82580:
4069 case WM_T_I350:
4070 case WM_T_I354:
4071 case WM_T_I210:
4072 case WM_T_I211:
4073 case WM_T_80003:
4074 /* check EECD_EE_AUTORD */
4075 wm_get_auto_rd_done(sc);
4076 break;
4077 case WM_T_ICH8:
4078 case WM_T_ICH9:
4079 case WM_T_ICH10:
4080 case WM_T_PCH:
4081 case WM_T_PCH2:
4082 case WM_T_PCH_LPT:
4083 case WM_T_PCH_SPT:
4084 break;
4085 default:
4086 panic("%s: unknown type\n", __func__);
4087 }
4088
4089 /* Check whether EEPROM is present or not */
4090 switch (sc->sc_type) {
4091 case WM_T_82575:
4092 case WM_T_82576:
4093 case WM_T_82580:
4094 case WM_T_I350:
4095 case WM_T_I354:
4096 case WM_T_ICH8:
4097 case WM_T_ICH9:
4098 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4099 /* Not found */
4100 sc->sc_flags |= WM_F_EEPROM_INVALID;
4101 if (sc->sc_type == WM_T_82575)
4102 wm_reset_init_script_82575(sc);
4103 }
4104 break;
4105 default:
4106 break;
4107 }
4108
4109 if ((sc->sc_type == WM_T_82580)
4110 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4111 /* clear global device reset status bit */
4112 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4113 }
4114
4115 /* Clear any pending interrupt events. */
4116 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4117 reg = CSR_READ(sc, WMREG_ICR);
4118 if (sc->sc_nintrs > 1) {
4119 if (sc->sc_type != WM_T_82574) {
4120 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4121 CSR_WRITE(sc, WMREG_EIAC, 0);
4122 } else
4123 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4124 }
4125
4126 /* reload sc_ctrl */
4127 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4128
4129 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
4130 wm_set_eee_i350(sc);
4131
4132 /* dummy read from WUC */
4133 if (sc->sc_type == WM_T_PCH)
4134 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4135 /*
4136 * For PCH, this write will make sure that any noise will be detected
4137 * as a CRC error and be dropped rather than show up as a bad packet
4138 * to the DMA engine
4139 */
4140 if (sc->sc_type == WM_T_PCH)
4141 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4142
4143 if (sc->sc_type >= WM_T_82544)
4144 CSR_WRITE(sc, WMREG_WUC, 0);
4145
4146 wm_reset_mdicnfg_82580(sc);
4147
4148 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
4149 wm_pll_workaround_i210(sc);
4150 }
4151
4152 /*
4153 * wm_add_rxbuf:
4154 *
4155  *	Add a receive buffer to the indicated descriptor.
4156 */
4157 static int
4158 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
4159 {
4160 struct wm_softc *sc = rxq->rxq_sc;
4161 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
4162 struct mbuf *m;
4163 int error;
4164
4165 KASSERT(mutex_owned(rxq->rxq_lock));
4166
4167 MGETHDR(m, M_DONTWAIT, MT_DATA);
4168 if (m == NULL)
4169 return ENOBUFS;
4170
4171 MCLGET(m, M_DONTWAIT);
4172 if ((m->m_flags & M_EXT) == 0) {
4173 m_freem(m);
4174 return ENOBUFS;
4175 }
4176
4177 if (rxs->rxs_mbuf != NULL)
4178 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4179
4180 rxs->rxs_mbuf = m;
4181
4182 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4183 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4184 BUS_DMA_READ | BUS_DMA_NOWAIT);
4185 if (error) {
4186 /* XXX XXX XXX */
4187 aprint_error_dev(sc->sc_dev,
4188 "unable to load rx DMA map %d, error = %d\n",
4189 idx, error);
4190 panic("wm_add_rxbuf");
4191 }
4192
4193 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4194 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4195
4196 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4197 if ((sc->sc_rctl & RCTL_EN) != 0)
4198 wm_init_rxdesc(rxq, idx);
4199 } else
4200 wm_init_rxdesc(rxq, idx);
4201
4202 return 0;
4203 }
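
/*
 * Note on the conditional wm_init_rxdesc() at the end of wm_add_rxbuf():
 * on WM_F_NEWQUEUE parts the RX descriptor must only be (re)armed while
 * the receiver is enabled (see the matching "set RDT only if RX enabled"
 * handling in wm_init_locked()), hence the RCTL_EN check.
 */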
4204
4205 /*
4206 * wm_rxdrain:
4207 *
4208 * Drain the receive queue.
4209 */
4210 static void
4211 wm_rxdrain(struct wm_rxqueue *rxq)
4212 {
4213 struct wm_softc *sc = rxq->rxq_sc;
4214 struct wm_rxsoft *rxs;
4215 int i;
4216
4217 KASSERT(mutex_owned(rxq->rxq_lock));
4218
4219 for (i = 0; i < WM_NRXDESC; i++) {
4220 rxs = &rxq->rxq_soft[i];
4221 if (rxs->rxs_mbuf != NULL) {
4222 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4223 m_freem(rxs->rxs_mbuf);
4224 rxs->rxs_mbuf = NULL;
4225 }
4226 }
4227 }
4228
4229
4230 /*
4231 * XXX copy from FreeBSD's sys/net/rss_config.c
4232 */
4233 /*
4234 * RSS secret key, intended to prevent attacks on load-balancing. Its
4235 * effectiveness may be limited by algorithm choice and available entropy
4236 * during the boot.
4237 *
4238 * XXXRW: And that we don't randomize it yet!
4239 *
4240 * This is the default Microsoft RSS specification key which is also
4241 * the Chelsio T5 firmware default key.
4242 */
4243 #define RSS_KEYSIZE 40
4244 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
4245 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
4246 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
4247 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
4248 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
4249 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
4250 };
4251
4252 /*
4253 * Caller must pass an array of size sizeof(rss_key).
4254 *
4255 * XXX
4256 * As if_ixgbe may use this function, this function should not be
4257 * if_wm specific function.
4258 */
4259 static void
4260 wm_rss_getkey(uint8_t *key)
4261 {
4262
4263 memcpy(key, wm_rss_key, sizeof(wm_rss_key));
4264 }
4265
4266 /*
4267 * Setup registers for RSS.
4268 *
4269 * XXX not yet VMDq support
4270 */
4271 static void
4272 wm_init_rss(struct wm_softc *sc)
4273 {
4274 uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4275 int i;
4276
4277 CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
4278
4279 for (i = 0; i < RETA_NUM_ENTRIES; i++) {
4280 int qid, reta_ent;
4281
4282 qid = i % sc->sc_nqueues;
4283 		switch (sc->sc_type) {
4284 case WM_T_82574:
4285 reta_ent = __SHIFTIN(qid,
4286 RETA_ENT_QINDEX_MASK_82574);
4287 break;
4288 case WM_T_82575:
4289 reta_ent = __SHIFTIN(qid,
4290 RETA_ENT_QINDEX1_MASK_82575);
4291 break;
4292 default:
4293 reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
4294 break;
4295 }
4296
4297 reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
4298 reta_reg &= ~RETA_ENTRY_MASK_Q(i);
4299 reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
4300 CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
4301 }
4302
4303 wm_rss_getkey((uint8_t *)rss_key);
4304 for (i = 0; i < RSSRK_NUM_REGS; i++)
4305 CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
4306
4307 if (sc->sc_type == WM_T_82574)
4308 mrqc = MRQC_ENABLE_RSS_MQ_82574;
4309 else
4310 mrqc = MRQC_ENABLE_RSS_MQ;
4311
4312 /* XXXX
4313 * The same as FreeBSD igb.
4314 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
4315 */
4316 mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
4317 mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
4318 mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
4319 mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
4320
4321 CSR_WRITE(sc, WMREG_MRQC, mrqc);
4322 }
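
/*
 * Sketch (illustrative, not driver code): the RETA programming above
 * round-robins the redirection-table entries across the active queues,
 * so entry i maps to queue (i % sc_nqueues). Assuming a 128-entry table
 * indexed by the low 7 bits of the 32-bit Toeplitz hash (as on the 82576
 * and later), the queue a given hash lands on can be predicted like so.
 */
static inline int
wm_rss_hash_to_queue_example(uint32_t toeplitz_hash, int nqueues)
{

	/* The low 7 bits of the hash index the 128-entry RETA. */
	return (toeplitz_hash & 0x7f) % nqueues;
}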
4323
4324 /*
4325  * Adjust the numbers of TX and RX queues which the system actually uses.
4326  *
4327  * The numbers are affected by the parameters below:
4328  * - The number of hardware queues
4329 * - The number of MSI-X vectors (= "nvectors" argument)
4330 * - ncpu
4331 */
4332 static void
4333 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
4334 {
4335 int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
4336
4337 if (nvectors < 2) {
4338 sc->sc_nqueues = 1;
4339 return;
4340 }
4341
4342 	switch (sc->sc_type) {
4343 case WM_T_82572:
4344 hw_ntxqueues = 2;
4345 hw_nrxqueues = 2;
4346 break;
4347 case WM_T_82574:
4348 hw_ntxqueues = 2;
4349 hw_nrxqueues = 2;
4350 break;
4351 case WM_T_82575:
4352 hw_ntxqueues = 4;
4353 hw_nrxqueues = 4;
4354 break;
4355 case WM_T_82576:
4356 hw_ntxqueues = 16;
4357 hw_nrxqueues = 16;
4358 break;
4359 case WM_T_82580:
4360 case WM_T_I350:
4361 case WM_T_I354:
4362 hw_ntxqueues = 8;
4363 hw_nrxqueues = 8;
4364 break;
4365 case WM_T_I210:
4366 hw_ntxqueues = 4;
4367 hw_nrxqueues = 4;
4368 break;
4369 case WM_T_I211:
4370 hw_ntxqueues = 2;
4371 hw_nrxqueues = 2;
4372 break;
4373 /*
4374 	 * Since the Ethernet controllers below do not support MSI-X,
4375 	 * this driver does not use multiple queues on them:
4376 * - WM_T_80003
4377 * - WM_T_ICH8
4378 * - WM_T_ICH9
4379 * - WM_T_ICH10
4380 * - WM_T_PCH
4381 * - WM_T_PCH2
4382 * - WM_T_PCH_LPT
4383 */
4384 default:
4385 hw_ntxqueues = 1;
4386 hw_nrxqueues = 1;
4387 break;
4388 }
4389
4390 hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
4391
4392 /*
4393 	 * Since more queues than MSI-X vectors cannot improve scaling,
4394 	 * we limit the number of queues actually used.
4395 */
4396 if (nvectors < hw_nqueues + 1) {
4397 sc->sc_nqueues = nvectors - 1;
4398 } else {
4399 sc->sc_nqueues = hw_nqueues;
4400 }
4401
4402 /*
4403 	 * Since more queues than CPUs cannot improve scaling, we limit
4404 	 * the number of queues actually used.
4405 */
4406 if (ncpu < sc->sc_nqueues)
4407 sc->sc_nqueues = ncpu;
4408 }
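
/*
 * Worked example of the clamping above (illustrative): an I350 exposes
 * 8 TX and 8 RX hardware queues, so with nvectors == 5 and ncpu == 16
 * the result is min(8, 5 - 1, 16) == 4 queues, one MSI-X vector being
 * reserved for the link interrupt.
 */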
4409
4410 /*
4411 * Both single interrupt MSI and INTx can use this function.
4412 */
4413 static int
4414 wm_setup_legacy(struct wm_softc *sc)
4415 {
4416 pci_chipset_tag_t pc = sc->sc_pc;
4417 const char *intrstr = NULL;
4418 char intrbuf[PCI_INTRSTR_LEN];
4419 int error;
4420
4421 error = wm_alloc_txrx_queues(sc);
4422 if (error) {
4423 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4424 error);
4425 return ENOMEM;
4426 }
4427 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
4428 sizeof(intrbuf));
4429 #ifdef WM_MPSAFE
4430 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
4431 #endif
4432 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
4433 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
4434 if (sc->sc_ihs[0] == NULL) {
4435 aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
4436 (pci_intr_type(pc, sc->sc_intrs[0])
4437 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
4438 return ENOMEM;
4439 }
4440
4441 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4442 sc->sc_nintrs = 1;
4443 return 0;
4444 }
4445
4446 static int
4447 wm_setup_msix(struct wm_softc *sc)
4448 {
4449 void *vih;
4450 kcpuset_t *affinity;
4451 int qidx, error, intr_idx, txrx_established;
4452 pci_chipset_tag_t pc = sc->sc_pc;
4453 const char *intrstr = NULL;
4454 char intrbuf[PCI_INTRSTR_LEN];
4455 char intr_xname[INTRDEVNAMEBUF];
4456
4457 if (sc->sc_nqueues < ncpu) {
4458 /*
4459 		 * To avoid other devices' interrupts, the affinity of the
4460 		 * Tx/Rx interrupts starts at CPU#1.
4461 */
4462 sc->sc_affinity_offset = 1;
4463 } else {
4464 /*
4465 		 * In this case, this device uses all CPUs, so for readability
4466 		 * we make the affinity cpu_index equal to the MSI-X vector number.
4467 */
4468 sc->sc_affinity_offset = 0;
4469 }
4470
4471 error = wm_alloc_txrx_queues(sc);
4472 if (error) {
4473 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4474 error);
4475 return ENOMEM;
4476 }
4477
4478 kcpuset_create(&affinity, false);
4479 intr_idx = 0;
4480
4481 /*
4482 * TX and RX
4483 */
4484 txrx_established = 0;
4485 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
4486 struct wm_queue *wmq = &sc->sc_queue[qidx];
4487 int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
4488
4489 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4490 sizeof(intrbuf));
4491 #ifdef WM_MPSAFE
4492 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4493 PCI_INTR_MPSAFE, true);
4494 #endif
4495 memset(intr_xname, 0, sizeof(intr_xname));
4496 snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
4497 device_xname(sc->sc_dev), qidx);
4498 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4499 IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
4500 if (vih == NULL) {
4501 aprint_error_dev(sc->sc_dev,
4502 "unable to establish MSI-X(for TX and RX)%s%s\n",
4503 intrstr ? " at " : "",
4504 intrstr ? intrstr : "");
4505
4506 goto fail;
4507 }
4508 kcpuset_zero(affinity);
4509 /* Round-robin affinity */
4510 kcpuset_set(affinity, affinity_to);
4511 error = interrupt_distribute(vih, affinity, NULL);
4512 if (error == 0) {
4513 aprint_normal_dev(sc->sc_dev,
4514 "for TX and RX interrupting at %s affinity to %u\n",
4515 intrstr, affinity_to);
4516 } else {
4517 aprint_normal_dev(sc->sc_dev,
4518 "for TX and RX interrupting at %s\n", intrstr);
4519 }
4520 sc->sc_ihs[intr_idx] = vih;
4521 		wmq->wmq_id = qidx;
4522 wmq->wmq_intr_idx = intr_idx;
4523
4524 txrx_established++;
4525 intr_idx++;
4526 }
4527
4528 /*
4529 * LINK
4530 */
4531 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4532 sizeof(intrbuf));
4533 #ifdef WM_MPSAFE
4534 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
4535 #endif
4536 memset(intr_xname, 0, sizeof(intr_xname));
4537 snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
4538 device_xname(sc->sc_dev));
4539 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4540 IPL_NET, wm_linkintr_msix, sc, intr_xname);
4541 if (vih == NULL) {
4542 aprint_error_dev(sc->sc_dev,
4543 "unable to establish MSI-X(for LINK)%s%s\n",
4544 intrstr ? " at " : "",
4545 intrstr ? intrstr : "");
4546
4547 goto fail;
4548 }
4549 /* keep default affinity to LINK interrupt */
4550 aprint_normal_dev(sc->sc_dev,
4551 "for LINK interrupting at %s\n", intrstr);
4552 sc->sc_ihs[intr_idx] = vih;
4553 sc->sc_link_intr_idx = intr_idx;
4554
4555 sc->sc_nintrs = sc->sc_nqueues + 1;
4556 kcpuset_destroy(affinity);
4557 return 0;
4558
4559 fail:
4560 for (qidx = 0; qidx < txrx_established; qidx++) {
4561 struct wm_queue *wmq = &sc->sc_queue[qidx];
4562 pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
4563 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
4564 }
4565
4566 kcpuset_destroy(affinity);
4567 return ENOMEM;
4568 }
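
/*
 * Resulting vector layout (illustrative): with sc_nqueues == 4, vectors
 * 0..3 service TXRX0..TXRX3 and vector 4 services LINK, so sc_nintrs is
 * 5. When sc_affinity_offset is 1, the queue vectors are pinned
 * round-robin starting at CPU#1 while the LINK vector keeps the default
 * affinity.
 */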
4569
4570 static void
4571 wm_turnon(struct wm_softc *sc)
4572 {
4573 int i;
4574
4575 	for (i = 0; i < sc->sc_nqueues; i++) {
4576 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4577 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
4578
4579 mutex_enter(txq->txq_lock);
4580 txq->txq_stopping = false;
4581 mutex_exit(txq->txq_lock);
4582
4583 mutex_enter(rxq->rxq_lock);
4584 rxq->rxq_stopping = false;
4585 mutex_exit(rxq->rxq_lock);
4586 }
4587
4588 WM_CORE_LOCK(sc);
4589 sc->sc_core_stopping = false;
4590 WM_CORE_UNLOCK(sc);
4591 }
4592
4593 static void
4594 wm_turnoff(struct wm_softc *sc)
4595 {
4596 int i;
4597
4598 WM_CORE_LOCK(sc);
4599 sc->sc_core_stopping = true;
4600 WM_CORE_UNLOCK(sc);
4601
4602 	for (i = 0; i < sc->sc_nqueues; i++) {
4603 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
4604 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4605
4606 mutex_enter(rxq->rxq_lock);
4607 rxq->rxq_stopping = true;
4608 mutex_exit(rxq->rxq_lock);
4609
4610 mutex_enter(txq->txq_lock);
4611 txq->txq_stopping = true;
4612 mutex_exit(txq->txq_lock);
4613 }
4614 }
4615
4616 /*
4617 * wm_init: [ifnet interface function]
4618 *
4619 * Initialize the interface.
4620 */
4621 static int
4622 wm_init(struct ifnet *ifp)
4623 {
4624 struct wm_softc *sc = ifp->if_softc;
4625 int ret;
4626
4627 WM_CORE_LOCK(sc);
4628 ret = wm_init_locked(ifp);
4629 WM_CORE_UNLOCK(sc);
4630
4631 return ret;
4632 }
4633
4634 static int
4635 wm_init_locked(struct ifnet *ifp)
4636 {
4637 struct wm_softc *sc = ifp->if_softc;
4638 int i, j, trynum, error = 0;
4639 uint32_t reg;
4640
4641 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4642 device_xname(sc->sc_dev), __func__));
4643 KASSERT(WM_CORE_LOCKED(sc));
4644
4645 /*
4646 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4647 	 * On such platforms, for a normal MTU, there is a small but
4648 	 * measurable benefit to avoiding the adjustment of the descriptor
4649 	 * so that the headers are aligned. One possibility is that the
4650 	 * DMA itself is slightly more efficient if the front of the entire
4651 	 * packet (instead of the front of the headers) is aligned.
4652 *
4653 * Note we must always set align_tweak to 0 if we are using
4654 * jumbo frames.
4655 */
4656 #ifdef __NO_STRICT_ALIGNMENT
4657 sc->sc_align_tweak = 0;
4658 #else
4659 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4660 sc->sc_align_tweak = 0;
4661 else
4662 sc->sc_align_tweak = 2;
4663 #endif /* __NO_STRICT_ALIGNMENT */
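
	/*
	 * Why a tweak of 2: an Ethernet header is 14 bytes, so starting
	 * the frame 2 bytes into the buffer puts the IP header on a
	 * 4-byte boundary on strict-alignment platforms. Frames close to
	 * MCLBYTES must use 0 so the tweak cannot push the packet past
	 * the end of the mbuf cluster.
	 */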
4664
4665 /* Cancel any pending I/O. */
4666 wm_stop_locked(ifp, 0);
4667
4668 /* update statistics before reset */
4669 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4670 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4671
4672 /* Reset the chip to a known state. */
4673 wm_reset(sc);
4674
4675 switch (sc->sc_type) {
4676 case WM_T_82571:
4677 case WM_T_82572:
4678 case WM_T_82573:
4679 case WM_T_82574:
4680 case WM_T_82583:
4681 case WM_T_80003:
4682 case WM_T_ICH8:
4683 case WM_T_ICH9:
4684 case WM_T_ICH10:
4685 case WM_T_PCH:
4686 case WM_T_PCH2:
4687 case WM_T_PCH_LPT:
4688 case WM_T_PCH_SPT:
4689 /* AMT based hardware can now take control from firmware */
4690 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
4691 wm_get_hw_control(sc);
4692 break;
4693 default:
4694 break;
4695 }
4696
4697 /* Init hardware bits */
4698 wm_initialize_hardware_bits(sc);
4699
4700 /* Reset the PHY. */
4701 if (sc->sc_flags & WM_F_HAS_MII)
4702 wm_gmii_reset(sc);
4703
4704 /* Calculate (E)ITR value */
4705 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4706 sc->sc_itr = 450; /* For EITR */
4707 } else if (sc->sc_type >= WM_T_82543) {
4708 /*
4709 * Set up the interrupt throttling register (units of 256ns)
4710 * Note that a footnote in Intel's documentation says this
4711 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4712 		 * or 10Mbit mode. Empirically, this also appears to be
4713 		 * true for the 1024ns units of the other
4714 * interrupt-related timer registers -- so, really, we ought
4715 * to divide this value by 4 when the link speed is low.
4716 *
4717 * XXX implement this division at link speed change!
4718 */
4719
4720 /*
4721 * For N interrupts/sec, set this value to:
4722 * 1000000000 / (N * 256). Note that we set the
4723 * absolute and packet timer values to this value
4724 * divided by 4 to get "simple timer" behavior.
4725 */
4726
4727 sc->sc_itr = 1500; /* 2604 ints/sec */
4728 }
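
	/*
	 * Worked example of the formula above (illustrative): sc_itr ==
	 * 1500 gives 1000000000 / (1500 * 256) ~= 2604 interrupts/sec,
	 * matching the comment, and the "simple timer" registers would
	 * then be programmed with 1500 / 4 = 375 in their 1024ns units.
	 */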
4729
4730 error = wm_init_txrx_queues(sc);
4731 if (error)
4732 goto out;
4733
4734 /*
4735 * Clear out the VLAN table -- we don't use it (yet).
4736 */
4737 CSR_WRITE(sc, WMREG_VET, 0);
4738 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4739 trynum = 10; /* Due to hw errata */
4740 else
4741 trynum = 1;
4742 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4743 for (j = 0; j < trynum; j++)
4744 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4745
4746 /*
4747 * Set up flow-control parameters.
4748 *
4749 * XXX Values could probably stand some tuning.
4750 */
4751 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4752 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4753 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
4754 && (sc->sc_type != WM_T_PCH_SPT)) {
4755 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4756 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4757 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4758 }
4759
4760 sc->sc_fcrtl = FCRTL_DFLT;
4761 if (sc->sc_type < WM_T_82543) {
4762 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4763 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4764 } else {
4765 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4766 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4767 }
4768
4769 if (sc->sc_type == WM_T_80003)
4770 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4771 else
4772 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4773
4774 /* Writes the control register. */
4775 wm_set_vlan(sc);
4776
4777 if (sc->sc_flags & WM_F_HAS_MII) {
4778 int val;
4779
4780 switch (sc->sc_type) {
4781 case WM_T_80003:
4782 case WM_T_ICH8:
4783 case WM_T_ICH9:
4784 case WM_T_ICH10:
4785 case WM_T_PCH:
4786 case WM_T_PCH2:
4787 case WM_T_PCH_LPT:
4788 case WM_T_PCH_SPT:
4789 /*
4790 			 * Set the MAC to wait the maximum time between each
4791 			 * iteration and increase the maximum iterations when
4792 			 * polling the PHY; this fixes erroneous timeouts at
4793 * 10Mbps.
4794 */
4795 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4796 0xFFFF);
4797 val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
4798 val |= 0x3F;
4799 wm_kmrn_writereg(sc,
4800 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4801 break;
4802 default:
4803 break;
4804 }
4805
4806 if (sc->sc_type == WM_T_80003) {
4807 val = CSR_READ(sc, WMREG_CTRL_EXT);
4808 val &= ~CTRL_EXT_LINK_MODE_MASK;
4809 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4810
4811 /* Bypass RX and TX FIFO's */
4812 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4813 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4814 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4815 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4816 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4817 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4818 }
4819 }
4820 #if 0
4821 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4822 #endif
4823
4824 /* Set up checksum offload parameters. */
4825 reg = CSR_READ(sc, WMREG_RXCSUM);
4826 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4827 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4828 reg |= RXCSUM_IPOFL;
4829 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4830 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4831 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4832 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4833 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4834
4835 /* Set up MSI-X */
4836 if (sc->sc_nintrs > 1) {
4837 uint32_t ivar;
4838 struct wm_queue *wmq;
4839 int qid, qintr_idx;
4840
4841 if (sc->sc_type == WM_T_82575) {
4842 /* Interrupt control */
4843 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4844 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4845 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4846
4847 /* TX and RX */
4848 for (i = 0; i < sc->sc_nqueues; i++) {
4849 wmq = &sc->sc_queue[i];
4850 CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
4851 EITR_TX_QUEUE(wmq->wmq_id)
4852 | EITR_RX_QUEUE(wmq->wmq_id));
4853 }
4854 /* Link status */
4855 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
4856 EITR_OTHER);
4857 } else if (sc->sc_type == WM_T_82574) {
4858 /* Interrupt control */
4859 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4860 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4861 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4862
4863 ivar = 0;
4864 /* TX and RX */
4865 for (i = 0; i < sc->sc_nqueues; i++) {
4866 wmq = &sc->sc_queue[i];
4867 qid = wmq->wmq_id;
4868 qintr_idx = wmq->wmq_intr_idx;
4869
4870 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
4871 IVAR_TX_MASK_Q_82574(qid));
4872 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
4873 IVAR_RX_MASK_Q_82574(qid));
4874 }
4875 /* Link status */
4876 ivar |= __SHIFTIN((IVAR_VALID_82574
4877 | sc->sc_link_intr_idx), IVAR_OTHER_MASK);
4878 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4879 } else {
4880 /* Interrupt control */
4881 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
4882 | GPIE_EIAME | GPIE_PBA);
4883
4884 switch (sc->sc_type) {
4885 case WM_T_82580:
4886 case WM_T_I350:
4887 case WM_T_I354:
4888 case WM_T_I210:
4889 case WM_T_I211:
4890 /* TX and RX */
4891 for (i = 0; i < sc->sc_nqueues; i++) {
4892 wmq = &sc->sc_queue[i];
4893 qid = wmq->wmq_id;
4894 qintr_idx = wmq->wmq_intr_idx;
4895
4896 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4897 ivar &= ~IVAR_TX_MASK_Q(qid);
4898 ivar |= __SHIFTIN((qintr_idx
4899 | IVAR_VALID),
4900 IVAR_TX_MASK_Q(qid));
4901 ivar &= ~IVAR_RX_MASK_Q(qid);
4902 ivar |= __SHIFTIN((qintr_idx
4903 | IVAR_VALID),
4904 IVAR_RX_MASK_Q(qid));
4905 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
4906 }
4907 break;
4908 case WM_T_82576:
4909 /* TX and RX */
4910 for (i = 0; i < sc->sc_nqueues; i++) {
4911 wmq = &sc->sc_queue[i];
4912 qid = wmq->wmq_id;
4913 qintr_idx = wmq->wmq_intr_idx;
4914
4915 ivar = CSR_READ(sc,
4916 WMREG_IVAR_Q_82576(qid));
4917 ivar &= ~IVAR_TX_MASK_Q_82576(qid);
4918 ivar |= __SHIFTIN((qintr_idx
4919 | IVAR_VALID),
4920 IVAR_TX_MASK_Q_82576(qid));
4921 ivar &= ~IVAR_RX_MASK_Q_82576(qid);
4922 ivar |= __SHIFTIN((qintr_idx
4923 | IVAR_VALID),
4924 IVAR_RX_MASK_Q_82576(qid));
4925 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
4926 ivar);
4927 }
4928 break;
4929 default:
4930 break;
4931 }
4932
4933 /* Link status */
4934 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
4935 IVAR_MISC_OTHER);
4936 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
4937 }
4938
4939 if (sc->sc_nqueues > 1) {
4940 wm_init_rss(sc);
4941
4942 /*
4943 			 * NOTE: Receive Full-Packet Checksum Offload is
4944 			 * mutually exclusive with Multiqueue. However, this
4945 			 * is not the same as TCP/IP checksum offload, which
4946 			 * still works.
4947 */
4948 reg = CSR_READ(sc, WMREG_RXCSUM);
4949 reg |= RXCSUM_PCSD;
4950 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4951 }
4952 }
4953
4954 /* Set up the interrupt registers. */
4955 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4956 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4957 ICR_RXO | ICR_RXT0;
4958 if (sc->sc_nintrs > 1) {
4959 uint32_t mask;
4960 struct wm_queue *wmq;
4961
4962 switch (sc->sc_type) {
4963 case WM_T_82574:
4964 CSR_WRITE(sc, WMREG_EIAC_82574,
4965 WMREG_EIAC_82574_MSIX_MASK);
4966 sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
4967 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4968 break;
4969 default:
4970 if (sc->sc_type == WM_T_82575) {
4971 mask = 0;
4972 for (i = 0; i < sc->sc_nqueues; i++) {
4973 wmq = &sc->sc_queue[i];
4974 mask |= EITR_TX_QUEUE(wmq->wmq_id);
4975 mask |= EITR_RX_QUEUE(wmq->wmq_id);
4976 }
4977 mask |= EITR_OTHER;
4978 } else {
4979 mask = 0;
4980 for (i = 0; i < sc->sc_nqueues; i++) {
4981 wmq = &sc->sc_queue[i];
4982 mask |= 1 << wmq->wmq_intr_idx;
4983 }
4984 mask |= 1 << sc->sc_link_intr_idx;
4985 }
4986 CSR_WRITE(sc, WMREG_EIAC, mask);
4987 CSR_WRITE(sc, WMREG_EIAM, mask);
4988 CSR_WRITE(sc, WMREG_EIMS, mask);
4989 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
4990 break;
4991 }
4992 } else
4993 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4994
4995 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4996 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4997 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4998 || (sc->sc_type == WM_T_PCH_SPT)) {
4999 reg = CSR_READ(sc, WMREG_KABGTXD);
5000 reg |= KABGTXD_BGSQLBIAS;
5001 CSR_WRITE(sc, WMREG_KABGTXD, reg);
5002 }
5003
5004 /* Set up the inter-packet gap. */
5005 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
5006
5007 if (sc->sc_type >= WM_T_82543) {
5008 /*
5009 		 * XXX 82574 has both ITR and EITR. Set EITR when we use
5010 		 * the multiqueue function with MSI-X.
5011 */
5012 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5013 int qidx;
5014 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5015 struct wm_queue *wmq = &sc->sc_queue[qidx];
5016 CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
5017 sc->sc_itr);
5018 }
5019 /*
5020 			 * Link interrupts occur much less frequently than
5021 			 * TX and RX interrupts, so we don't tune the
5022 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
5023 			 * FreeBSD's if_igb does.
5024 */
5025 } else
5026 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
5027 }
5028
5029 /* Set the VLAN ethernetype. */
5030 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
5031
5032 /*
5033 * Set up the transmit control register; we start out with
5034 	 * a collision distance suitable for FDX, but update it when
5035 * we resolve the media type.
5036 */
5037 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
5038 | TCTL_CT(TX_COLLISION_THRESHOLD)
5039 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5040 if (sc->sc_type >= WM_T_82571)
5041 sc->sc_tctl |= TCTL_MULR;
5042 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5043
5044 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5045 /* Write TDT after TCTL.EN is set. See the document. */
5046 CSR_WRITE(sc, WMREG_TDT(0), 0);
5047 }
5048
5049 if (sc->sc_type == WM_T_80003) {
5050 reg = CSR_READ(sc, WMREG_TCTL_EXT);
5051 reg &= ~TCTL_EXT_GCEX_MASK;
5052 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
5053 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
5054 }
5055
5056 /* Set the media. */
5057 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
5058 goto out;
5059
5060 /* Configure for OS presence */
5061 wm_init_manageability(sc);
5062
5063 /*
5064 * Set up the receive control register; we actually program
5065 * the register when we set the receive filter. Use multicast
5066 * address offset type 0.
5067 *
5068 * Only the i82544 has the ability to strip the incoming
5069 * CRC, so we don't enable that feature.
5070 */
5071 sc->sc_mchash_type = 0;
5072 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
5073 | RCTL_MO(sc->sc_mchash_type);
5074
5075 /*
5076 	 * The I350 has a bug where it always strips the CRC whether
5077 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
5078 */
5079 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5080 || (sc->sc_type == WM_T_I210))
5081 sc->sc_rctl |= RCTL_SECRC;
5082
5083 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
5084 && (ifp->if_mtu > ETHERMTU)) {
5085 sc->sc_rctl |= RCTL_LPE;
5086 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5087 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
5088 }
5089
5090 if (MCLBYTES == 2048) {
5091 sc->sc_rctl |= RCTL_2k;
5092 } else {
5093 if (sc->sc_type >= WM_T_82543) {
5094 switch (MCLBYTES) {
5095 case 4096:
5096 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
5097 break;
5098 case 8192:
5099 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
5100 break;
5101 case 16384:
5102 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
5103 break;
5104 default:
5105 panic("wm_init: MCLBYTES %d unsupported",
5106 MCLBYTES);
5107 break;
5108 }
5109 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
5110 }
5111
5112 /* Set the receive filter. */
5113 wm_set_filter(sc);
5114
5115 /* Enable ECC */
5116 switch (sc->sc_type) {
5117 case WM_T_82571:
5118 reg = CSR_READ(sc, WMREG_PBA_ECC);
5119 reg |= PBA_ECC_CORR_EN;
5120 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
5121 break;
5122 case WM_T_PCH_LPT:
5123 case WM_T_PCH_SPT:
5124 reg = CSR_READ(sc, WMREG_PBECCSTS);
5125 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
5126 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
5127
5128 reg = CSR_READ(sc, WMREG_CTRL);
5129 reg |= CTRL_MEHE;
5130 CSR_WRITE(sc, WMREG_CTRL, reg);
5131 break;
5132 default:
5133 break;
5134 }
5135
5136 /* On 575 and later set RDT only if RX enabled */
5137 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5138 int qidx;
5139 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5140 struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
5141 for (i = 0; i < WM_NRXDESC; i++) {
5142 mutex_enter(rxq->rxq_lock);
5143 wm_init_rxdesc(rxq, i);
5144 mutex_exit(rxq->rxq_lock);
5145
5146 }
5147 }
5148 }
5149
5150 wm_turnon(sc);
5151
5152 /* Start the one second link check clock. */
5153 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
5154
5155 /* ...all done! */
5156 ifp->if_flags |= IFF_RUNNING;
5157 ifp->if_flags &= ~IFF_OACTIVE;
5158
5159 out:
5160 sc->sc_if_flags = ifp->if_flags;
5161 if (error)
5162 log(LOG_ERR, "%s: interface not running\n",
5163 device_xname(sc->sc_dev));
5164 return error;
5165 }
5166
5167 /*
5168 * wm_stop: [ifnet interface function]
5169 *
5170 * Stop transmission on the interface.
5171 */
5172 static void
5173 wm_stop(struct ifnet *ifp, int disable)
5174 {
5175 struct wm_softc *sc = ifp->if_softc;
5176
5177 WM_CORE_LOCK(sc);
5178 wm_stop_locked(ifp, disable);
5179 WM_CORE_UNLOCK(sc);
5180 }
5181
5182 static void
5183 wm_stop_locked(struct ifnet *ifp, int disable)
5184 {
5185 struct wm_softc *sc = ifp->if_softc;
5186 struct wm_txsoft *txs;
5187 int i, qidx;
5188
5189 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5190 device_xname(sc->sc_dev), __func__));
5191 KASSERT(WM_CORE_LOCKED(sc));
5192
5193 wm_turnoff(sc);
5194
5195 /* Stop the one second clock. */
5196 callout_stop(&sc->sc_tick_ch);
5197
5198 /* Stop the 82547 Tx FIFO stall check timer. */
5199 if (sc->sc_type == WM_T_82547)
5200 callout_stop(&sc->sc_txfifo_ch);
5201
5202 if (sc->sc_flags & WM_F_HAS_MII) {
5203 /* Down the MII. */
5204 mii_down(&sc->sc_mii);
5205 } else {
5206 #if 0
5207 /* Should we clear PHY's status properly? */
5208 wm_reset(sc);
5209 #endif
5210 }
5211
5212 /* Stop the transmit and receive processes. */
5213 CSR_WRITE(sc, WMREG_TCTL, 0);
5214 CSR_WRITE(sc, WMREG_RCTL, 0);
5215 sc->sc_rctl &= ~RCTL_EN;
5216
5217 /*
5218 * Clear the interrupt mask to ensure the device cannot assert its
5219 * interrupt line.
5220 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
5221 * service any currently pending or shared interrupt.
5222 */
5223 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5224 sc->sc_icr = 0;
5225 if (sc->sc_nintrs > 1) {
5226 if (sc->sc_type != WM_T_82574) {
5227 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5228 CSR_WRITE(sc, WMREG_EIAC, 0);
5229 } else
5230 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5231 }
5232
5233 /* Release any queued transmit buffers. */
5234 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5235 struct wm_queue *wmq = &sc->sc_queue[qidx];
5236 struct wm_txqueue *txq = &wmq->wmq_txq;
5237 mutex_enter(txq->txq_lock);
5238 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5239 txs = &txq->txq_soft[i];
5240 if (txs->txs_mbuf != NULL) {
5241 bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
5242 m_freem(txs->txs_mbuf);
5243 txs->txs_mbuf = NULL;
5244 }
5245 }
5246 if (sc->sc_type == WM_T_PCH_SPT) {
5247 pcireg_t preg;
5248 uint32_t reg;
5249 int nexttx;
5250
5251 /* First, disable MULR fix in FEXTNVM11 */
5252 reg = CSR_READ(sc, WMREG_FEXTNVM11);
5253 reg |= FEXTNVM11_DIS_MULRFIX;
5254 CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
5255
5256 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
5257 WM_PCI_DESCRING_STATUS);
5258 reg = CSR_READ(sc, WMREG_TDLEN(0));
5259 printf("XXX RST: FLUSH = %08x, len = %u\n",
5260 (uint32_t)(preg & DESCRING_STATUS_FLUSH_REQ), reg);
5261 if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0)
5262 && (reg != 0)) {
5263 /* TX */
5264 printf("XXX need TX flush (reg = %08x)\n",
5265 preg);
5266 wm_init_tx_descs(sc, txq);
5267 wm_init_tx_regs(sc, wmq, txq);
5268 nexttx = txq->txq_next;
5269 wm_set_dma_addr(
5270 &txq->txq_descs[nexttx].wtx_addr,
5271 WM_CDTXADDR(txq, nexttx));
5272 txq->txq_descs[nexttx].wtx_cmdlen
5273 = htole32(WTX_CMD_IFCS | 512);
5274 wm_cdtxsync(txq, nexttx, 1,
5275 				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5276 CSR_WRITE(sc, WMREG_TCTL, TCTL_EN);
5277 CSR_WRITE(sc, WMREG_TDT(0), nexttx);
5278 CSR_WRITE_FLUSH(sc);
5279 delay(250);
5280 CSR_WRITE(sc, WMREG_TCTL, 0);
5281 }
5282 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
5283 WM_PCI_DESCRING_STATUS);
5284 if (preg & DESCRING_STATUS_FLUSH_REQ) {
5285 /* RX */
5286 printf("XXX need RX flush\n");
5287 }
5288 }
5289 mutex_exit(txq->txq_lock);
5290 }
5291
5292 /* Mark the interface as down and cancel the watchdog timer. */
5293 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5294 ifp->if_timer = 0;
5295
5296 if (disable) {
5297 for (i = 0; i < sc->sc_nqueues; i++) {
5298 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5299 mutex_enter(rxq->rxq_lock);
5300 wm_rxdrain(rxq);
5301 mutex_exit(rxq->rxq_lock);
5302 }
5303 }
5304
5305 #if 0 /* notyet */
5306 if (sc->sc_type >= WM_T_82544)
5307 CSR_WRITE(sc, WMREG_WUC, 0);
5308 #endif
5309 }
5310
5311 static void
5312 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5313 {
5314 struct mbuf *m;
5315 int i;
5316
5317 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5318 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5319 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5320 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5321 m->m_data, m->m_len, m->m_flags);
5322 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5323 i, i == 1 ? "" : "s");
5324 }
5325
5326 /*
5327 * wm_82547_txfifo_stall:
5328 *
5329 * Callout used to wait for the 82547 Tx FIFO to drain,
5330 * reset the FIFO pointers, and restart packet transmission.
5331 */
5332 static void
5333 wm_82547_txfifo_stall(void *arg)
5334 {
5335 struct wm_softc *sc = arg;
5336 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5337
5338 mutex_enter(txq->txq_lock);
5339
5340 if (txq->txq_stopping)
5341 goto out;
5342
5343 if (txq->txq_fifo_stall) {
5344 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
5345 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5346 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5347 /*
5348 * Packets have drained. Stop transmitter, reset
5349 * FIFO pointers, restart transmitter, and kick
5350 * the packet queue.
5351 */
5352 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5353 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5354 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
5355 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
5356 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
5357 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
5358 CSR_WRITE(sc, WMREG_TCTL, tctl);
5359 CSR_WRITE_FLUSH(sc);
5360
5361 txq->txq_fifo_head = 0;
5362 txq->txq_fifo_stall = 0;
5363 wm_start_locked(&sc->sc_ethercom.ec_if);
5364 } else {
5365 /*
5366 * Still waiting for packets to drain; try again in
5367 * another tick.
5368 */
5369 callout_schedule(&sc->sc_txfifo_ch, 1);
5370 }
5371 }
5372
5373 out:
5374 mutex_exit(txq->txq_lock);
5375 }
5376
5377 /*
5378 * wm_82547_txfifo_bugchk:
5379 *
5380 * Check for bug condition in the 82547 Tx FIFO. We need to
5381 * prevent enqueueing a packet that would wrap around the end
5382  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
5383 *
5384 * We do this by checking the amount of space before the end
5385 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
5386 * the Tx FIFO, wait for all remaining packets to drain, reset
5387 * the internal FIFO pointers to the beginning, and restart
5388 * transmission on the interface.
5389 */
5390 #define WM_FIFO_HDR 0x10
5391 #define WM_82547_PAD_LEN 0x3e0
5392 static int
5393 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5394 {
5395 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5396 int space = txq->txq_fifo_size - txq->txq_fifo_head;
5397 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
5398
5399 /* Just return if already stalled. */
5400 if (txq->txq_fifo_stall)
5401 return 1;
5402
5403 if (sc->sc_mii.mii_media_active & IFM_FDX) {
5404 /* Stall only occurs in half-duplex mode. */
5405 goto send_packet;
5406 }
5407
5408 if (len >= WM_82547_PAD_LEN + space) {
5409 txq->txq_fifo_stall = 1;
5410 callout_schedule(&sc->sc_txfifo_ch, 1);
5411 return 1;
5412 }
5413
5414 send_packet:
5415 txq->txq_fifo_head += len;
5416 if (txq->txq_fifo_head >= txq->txq_fifo_size)
5417 txq->txq_fifo_head -= txq->txq_fifo_size;
5418
5419 return 0;
5420 }
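
/*
 * Worked example for the check above (illustrative): in the MTU <= 8192
 * case wm_reset() sizes the 82547 Tx FIFO to PBA_40K - PBA_30K, i.e.
 * 10KB (0x2800 bytes). With txq_fifo_head at 0x2000, space is 0x800, so
 * in half-duplex mode any packet whose padded length reaches 0x800 +
 * WM_82547_PAD_LEN (0x3e0) = 0xbe0 bytes takes the stall path instead of
 * being sent.
 */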
5421
5422 static int
5423 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5424 {
5425 int error;
5426
5427 /*
5428 * Allocate the control data structures, and create and load the
5429 * DMA map for it.
5430 *
5431 * NOTE: All Tx descriptors must be in the same 4G segment of
5432 * memory. So must Rx descriptors. We simplify by allocating
5433 * both sets within the same 4G segment.
5434 */
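	/*
	 * The (bus_size_t)0x100000000ULL boundary argument passed to
	 * bus_dmamem_alloc() below is what enforces this: the allocated
	 * memory may not cross a 4GB boundary, so the upper 32 bits of
	 * every descriptor address in the ring are identical.
	 */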
5435 if (sc->sc_type < WM_T_82544)
5436 WM_NTXDESC(txq) = WM_NTXDESC_82542;
5437 else
5438 WM_NTXDESC(txq) = WM_NTXDESC_82544;
5439 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5440 txq->txq_descsize = sizeof(nq_txdesc_t);
5441 else
5442 txq->txq_descsize = sizeof(wiseman_txdesc_t);
5443
5444 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
5445 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
5446 1, &txq->txq_desc_rseg, 0)) != 0) {
5447 aprint_error_dev(sc->sc_dev,
5448 "unable to allocate TX control data, error = %d\n",
5449 error);
5450 goto fail_0;
5451 }
5452
5453 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
5454 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
5455 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
5456 aprint_error_dev(sc->sc_dev,
5457 "unable to map TX control data, error = %d\n", error);
5458 goto fail_1;
5459 }
5460
5461 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
5462 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
5463 aprint_error_dev(sc->sc_dev,
5464 "unable to create TX control data DMA map, error = %d\n",
5465 error);
5466 goto fail_2;
5467 }
5468
5469 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
5470 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
5471 aprint_error_dev(sc->sc_dev,
5472 "unable to load TX control data DMA map, error = %d\n",
5473 error);
5474 goto fail_3;
5475 }
5476
5477 return 0;
5478
5479 fail_3:
5480 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5481 fail_2:
5482 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5483 WM_TXDESCS_SIZE(txq));
5484 fail_1:
5485 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5486 fail_0:
5487 return error;
5488 }
5489
5490 static void
5491 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5492 {
5493
5494 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
5495 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5496 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5497 WM_TXDESCS_SIZE(txq));
5498 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5499 }
5500
5501 static int
5502 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5503 {
5504 int error;
5505
5506 /*
5507 * Allocate the control data structures, and create and load the
5508 * DMA map for it.
5509 *
5510 * NOTE: All Tx descriptors must be in the same 4G segment of
5511 * memory. So must Rx descriptors. We simplify by allocating
5512 * both sets within the same 4G segment.
5513 */
5514 rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
5515 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
5516 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
5517 1, &rxq->rxq_desc_rseg, 0)) != 0) {
5518 aprint_error_dev(sc->sc_dev,
5519 "unable to allocate RX control data, error = %d\n",
5520 error);
5521 goto fail_0;
5522 }
5523
5524 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
5525 rxq->rxq_desc_rseg, rxq->rxq_desc_size,
5526 (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
5527 aprint_error_dev(sc->sc_dev,
5528 "unable to map RX control data, error = %d\n", error);
5529 goto fail_1;
5530 }
5531
5532 if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
5533 rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
5534 aprint_error_dev(sc->sc_dev,
5535 "unable to create RX control data DMA map, error = %d\n",
5536 error);
5537 goto fail_2;
5538 }
5539
5540 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
5541 rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
5542 aprint_error_dev(sc->sc_dev,
5543 "unable to load RX control data DMA map, error = %d\n",
5544 error);
5545 goto fail_3;
5546 }
5547
5548 return 0;
5549
5550 fail_3:
5551 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5552 fail_2:
5553 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5554 rxq->rxq_desc_size);
5555 fail_1:
5556 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5557 fail_0:
5558 return error;
5559 }
5560
5561 static void
5562 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5563 {
5564
5565 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
5566 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5567 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5568 rxq->rxq_desc_size);
5569 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5570 }
5571
5572
5573 static int
5574 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5575 {
5576 int i, error;
5577
5578 /* Create the transmit buffer DMA maps. */
5579 WM_TXQUEUELEN(txq) =
5580 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
5581 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
5582 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5583 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
5584 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
5585 &txq->txq_soft[i].txs_dmamap)) != 0) {
5586 aprint_error_dev(sc->sc_dev,
5587 "unable to create Tx DMA map %d, error = %d\n",
5588 i, error);
5589 goto fail;
5590 }
5591 }
5592
5593 return 0;
5594
5595 fail:
5596 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5597 if (txq->txq_soft[i].txs_dmamap != NULL)
5598 bus_dmamap_destroy(sc->sc_dmat,
5599 txq->txq_soft[i].txs_dmamap);
5600 }
5601 return error;
5602 }
5603
5604 static void
5605 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5606 {
5607 int i;
5608
5609 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5610 if (txq->txq_soft[i].txs_dmamap != NULL)
5611 bus_dmamap_destroy(sc->sc_dmat,
5612 txq->txq_soft[i].txs_dmamap);
5613 }
5614 }
5615
5616 static int
5617 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5618 {
5619 int i, error;
5620
5621 /* Create the receive buffer DMA maps. */
5622 for (i = 0; i < WM_NRXDESC; i++) {
5623 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
5624 MCLBYTES, 0, 0,
5625 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
5626 aprint_error_dev(sc->sc_dev,
5627 "unable to create Rx DMA map %d error = %d\n",
5628 i, error);
5629 goto fail;
5630 }
5631 rxq->rxq_soft[i].rxs_mbuf = NULL;
5632 }
5633
5634 return 0;
5635
5636 fail:
5637 for (i = 0; i < WM_NRXDESC; i++) {
5638 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5639 bus_dmamap_destroy(sc->sc_dmat,
5640 rxq->rxq_soft[i].rxs_dmamap);
5641 }
5642 return error;
5643 }
5644
5645 static void
5646 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5647 {
5648 int i;
5649
5650 for (i = 0; i < WM_NRXDESC; i++) {
5651 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5652 bus_dmamap_destroy(sc->sc_dmat,
5653 rxq->rxq_soft[i].rxs_dmamap);
5654 }
5655 }
5656
5657 /*
5658  * wm_alloc_txrx_queues:
5659  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
5660 */
5661 static int
5662 wm_alloc_txrx_queues(struct wm_softc *sc)
5663 {
5664 int i, error, tx_done, rx_done;
5665
5666 sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
5667 KM_SLEEP);
5668 if (sc->sc_queue == NULL) {
5669 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
5670 error = ENOMEM;
5671 goto fail_0;
5672 }
5673
5674 /*
5675 * For transmission
5676 */
5677 error = 0;
5678 tx_done = 0;
5679 for (i = 0; i < sc->sc_nqueues; i++) {
5680 #ifdef WM_EVENT_COUNTERS
5681 int j;
5682 const char *xname;
5683 #endif
5684 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5685 txq->txq_sc = sc;
5686 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5687
5688 error = wm_alloc_tx_descs(sc, txq);
5689 if (error)
5690 break;
5691 error = wm_alloc_tx_buffer(sc, txq);
5692 if (error) {
5693 wm_free_tx_descs(sc, txq);
5694 break;
5695 }
5696 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
5697 if (txq->txq_interq == NULL) {
5698 wm_free_tx_descs(sc, txq);
5699 wm_free_tx_buffer(sc, txq);
5700 error = ENOMEM;
5701 break;
5702 }
5703
5704 #ifdef WM_EVENT_COUNTERS
5705 xname = device_xname(sc->sc_dev);
5706
5707 WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
5708 WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
5709 WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
5710 WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
5711 WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
5712
5713 WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
5714 WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
5715 WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
5716 WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
5717 WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
5718 WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
5719
5720 for (j = 0; j < WM_NTXSEGS; j++) {
5721 snprintf(txq->txq_txseg_evcnt_names[j],
5722 sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
5723 evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
5724 NULL, xname, txq->txq_txseg_evcnt_names[j]);
5725 }
5726
5727 WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
5728
5729 WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
5730 #endif /* WM_EVENT_COUNTERS */
5731
5732 tx_done++;
5733 }
5734 if (error)
5735 goto fail_1;
5736
5737 /*
5738 	 * For receive
5739 */
5740 error = 0;
5741 rx_done = 0;
5742 for (i = 0; i < sc->sc_nqueues; i++) {
5743 #ifdef WM_EVENT_COUNTERS
5744 const char *xname;
5745 #endif
5746 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5747 rxq->rxq_sc = sc;
5748 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5749
5750 error = wm_alloc_rx_descs(sc, rxq);
5751 if (error)
5752 break;
5753
5754 error = wm_alloc_rx_buffer(sc, rxq);
5755 if (error) {
5756 wm_free_rx_descs(sc, rxq);
5757 break;
5758 }
5759
5760 #ifdef WM_EVENT_COUNTERS
5761 xname = device_xname(sc->sc_dev);
5762
5763 WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
5764
5765 WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
5766 WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
5767 #endif /* WM_EVENT_COUNTERS */
5768
5769 rx_done++;
5770 }
5771 if (error)
5772 goto fail_2;
5773
5774 return 0;
5775
5776 fail_2:
5777 for (i = 0; i < rx_done; i++) {
5778 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5779 wm_free_rx_buffer(sc, rxq);
5780 wm_free_rx_descs(sc, rxq);
5781 if (rxq->rxq_lock)
5782 mutex_obj_free(rxq->rxq_lock);
5783 }
5784 fail_1:
5785 for (i = 0; i < tx_done; i++) {
5786 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5787 pcq_destroy(txq->txq_interq);
5788 wm_free_tx_buffer(sc, txq);
5789 wm_free_tx_descs(sc, txq);
5790 if (txq->txq_lock)
5791 mutex_obj_free(txq->txq_lock);
5792 }
5793
5794 kmem_free(sc->sc_queue,
5795 sizeof(struct wm_queue) * sc->sc_nqueues);
5796 fail_0:
5797 return error;
5798 }
5799
5800 /*
5801  * wm_free_txrx_queues:
5802  *	Free {tx,rx} descriptors and {tx,rx} buffers
5803 */
5804 static void
5805 wm_free_txrx_queues(struct wm_softc *sc)
5806 {
5807 int i;
5808
5809 for (i = 0; i < sc->sc_nqueues; i++) {
5810 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5811 wm_free_rx_buffer(sc, rxq);
5812 wm_free_rx_descs(sc, rxq);
5813 if (rxq->rxq_lock)
5814 mutex_obj_free(rxq->rxq_lock);
5815 }
5816
5817 for (i = 0; i < sc->sc_nqueues; i++) {
5818 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5819 wm_free_tx_buffer(sc, txq);
5820 wm_free_tx_descs(sc, txq);
5821 if (txq->txq_lock)
5822 mutex_obj_free(txq->txq_lock);
5823 }
5824
5825 kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
5826 }
5827
5828 static void
5829 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5830 {
5831
5832 KASSERT(mutex_owned(txq->txq_lock));
5833
5834 /* Initialize the transmit descriptor ring. */
5835 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
5836 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5837 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5838 txq->txq_free = WM_NTXDESC(txq);
5839 txq->txq_next = 0;
5840 }
5841
5842 static void
5843 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
5844 struct wm_txqueue *txq)
5845 {
5846
5847 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5848 device_xname(sc->sc_dev), __func__));
5849 KASSERT(mutex_owned(txq->txq_lock));
5850
5851 if (sc->sc_type < WM_T_82543) {
5852 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
5853 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
5854 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
5855 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
5856 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
5857 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
5858 } else {
5859 int qid = wmq->wmq_id;
5860
5861 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
5862 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
5863 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
5864 CSR_WRITE(sc, WMREG_TDH(qid), 0);
5865
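		/*
		 * In the TXDCTL writes below, PTHRESH, HTHRESH and
		 * WTHRESH are the descriptor prefetch, host and
		 * write-back thresholds (per Intel's naming; zero leaves
		 * descriptor write-back uncoalesced).
		 */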
5866 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5867 /*
5868 * Don't write TDT before TCTL.EN is set.
5869 			 * See the documentation.
5870 */
5871 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
5872 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
5873 | TXDCTL_WTHRESH(0));
5874 else {
5875 /* ITR / 4 */
5876 CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
5877 if (sc->sc_type >= WM_T_82540) {
5878 				/* Should be the same value as TIDV */
5879 CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
5880 }
5881
5882 CSR_WRITE(sc, WMREG_TDT(qid), 0);
5883 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
5884 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
5885 }
5886 }
5887 }
5888
5889 static void
5890 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5891 {
5892 int i;
5893
5894 KASSERT(mutex_owned(txq->txq_lock));
5895
5896 /* Initialize the transmit job descriptors. */
5897 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
5898 txq->txq_soft[i].txs_mbuf = NULL;
5899 txq->txq_sfree = WM_TXQUEUELEN(txq);
5900 txq->txq_snext = 0;
5901 txq->txq_sdirty = 0;
5902 }
5903
5904 static void
5905 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
5906 struct wm_txqueue *txq)
5907 {
5908
5909 KASSERT(mutex_owned(txq->txq_lock));
5910
5911 /*
5912 * Set up some register offsets that are different between
5913 * the i82542 and the i82543 and later chips.
5914 */
5915 if (sc->sc_type < WM_T_82543)
5916 txq->txq_tdt_reg = WMREG_OLD_TDT;
5917 else
5918 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
5919
5920 wm_init_tx_descs(sc, txq);
5921 wm_init_tx_regs(sc, wmq, txq);
5922 wm_init_tx_buffer(sc, txq);
5923 }
5924
5925 static void
5926 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
5927 struct wm_rxqueue *rxq)
5928 {
5929
5930 KASSERT(mutex_owned(rxq->rxq_lock));
5931
5932 /*
5933 * Initialize the receive descriptor and receive job
5934 * descriptor rings.
5935 */
5936 if (sc->sc_type < WM_T_82543) {
5937 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
5938 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
5939 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
5940 sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
5941 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
5942 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
5943 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
5944
5945 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
5946 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
5947 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
5948 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
5949 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
5950 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
5951 } else {
5952 int qid = wmq->wmq_id;
5953
5954 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
5955 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
5956 CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
5957
5958 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5959 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
5960 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
5961 CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
5962 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
5963 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
5964 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
5965 | RXDCTL_WTHRESH(1));
5966 CSR_WRITE(sc, WMREG_RDH(qid), 0);
5967 CSR_WRITE(sc, WMREG_RDT(qid), 0);
5968 } else {
5969 CSR_WRITE(sc, WMREG_RDH(qid), 0);
5970 CSR_WRITE(sc, WMREG_RDT(qid), 0);
5971 /* ITR / 4 */
5972 CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
5973 			/* MUST be the same value as RDTR */
5974 CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
5975 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
5976 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
5977 }
5978 }
5979 }
5980
5981 static int
5982 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5983 {
5984 struct wm_rxsoft *rxs;
5985 int error, i;
5986
5987 KASSERT(mutex_owned(rxq->rxq_lock));
5988
5989 for (i = 0; i < WM_NRXDESC; i++) {
5990 rxs = &rxq->rxq_soft[i];
5991 if (rxs->rxs_mbuf == NULL) {
5992 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
5993 log(LOG_ERR, "%s: unable to allocate or map "
5994 "rx buffer %d, error = %d\n",
5995 device_xname(sc->sc_dev), i, error);
5996 /*
5997 * XXX Should attempt to run with fewer receive
5998 * XXX buffers instead of just failing.
5999 */
6000 wm_rxdrain(rxq);
6001 return ENOMEM;
6002 }
6003 } else {
6004 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
6005 wm_init_rxdesc(rxq, i);
6006 /*
6007 			 * For 82575 and newer devices, the Rx descriptors
6008 			 * must be initialized after RCTL.EN is set in
6009 			 * wm_set_filter().
6010 */
6011 }
6012 }
6013 rxq->rxq_ptr = 0;
6014 rxq->rxq_discard = 0;
6015 WM_RXCHAIN_RESET(rxq);
6016
6017 return 0;
6018 }
6019
6020 static int
6021 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
6022 struct wm_rxqueue *rxq)
6023 {
6024
6025 KASSERT(mutex_owned(rxq->rxq_lock));
6026
6027 /*
6028 * Set up some register offsets that are different between
6029 * the i82542 and the i82543 and later chips.
6030 */
6031 if (sc->sc_type < WM_T_82543)
6032 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
6033 else
6034 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
6035
6036 wm_init_rx_regs(sc, wmq, rxq);
6037 return wm_init_rx_buffer(sc, rxq);
6038 }
6039
6040 /*
6041  * wm_init_txrx_queues:
6042  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
6043 */
6044 static int
6045 wm_init_txrx_queues(struct wm_softc *sc)
6046 {
6047 int i, error = 0;
6048
6049 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6050 device_xname(sc->sc_dev), __func__));
6051
6052 for (i = 0; i < sc->sc_nqueues; i++) {
6053 struct wm_queue *wmq = &sc->sc_queue[i];
6054 struct wm_txqueue *txq = &wmq->wmq_txq;
6055 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6056
6057 mutex_enter(txq->txq_lock);
6058 wm_init_tx_queue(sc, wmq, txq);
6059 mutex_exit(txq->txq_lock);
6060
6061 mutex_enter(rxq->rxq_lock);
6062 error = wm_init_rx_queue(sc, wmq, rxq);
6063 mutex_exit(rxq->rxq_lock);
6064 if (error)
6065 break;
6066 }
6067
6068 return error;
6069 }
6070
6071 /*
6072 * wm_tx_offload:
6073 *
6074 * Set up TCP/IP checksumming parameters for the
6075 * specified packet.
6076 */
6077 static int
6078 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
6079 uint8_t *fieldsp)
6080 {
6081 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6082 struct mbuf *m0 = txs->txs_mbuf;
6083 struct livengood_tcpip_ctxdesc *t;
6084 uint32_t ipcs, tucs, cmd, cmdlen, seg;
6085 uint32_t ipcse;
6086 struct ether_header *eh;
6087 int offset, iphl;
6088 uint8_t fields;
6089
6090 /*
6091 * XXX It would be nice if the mbuf pkthdr had offset
6092 * fields for the protocol headers.
6093 */
6094
6095 eh = mtod(m0, struct ether_header *);
6096 switch (htons(eh->ether_type)) {
6097 case ETHERTYPE_IP:
6098 case ETHERTYPE_IPV6:
6099 offset = ETHER_HDR_LEN;
6100 break;
6101
6102 case ETHERTYPE_VLAN:
6103 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6104 break;
6105
6106 default:
6107 /*
6108 * Don't support this protocol or encapsulation.
6109 */
6110 *fieldsp = 0;
6111 *cmdp = 0;
6112 return 0;
6113 }
6114
6115 if ((m0->m_pkthdr.csum_flags &
6116 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
6117 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6118 } else {
6119 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6120 }
6121 ipcse = offset + iphl - 1;
6122
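	/*
	 * WTX_CMD_DEXT marks the descriptor as an extended (non-legacy)
	 * descriptor, WTX_DTYP_C/WTX_DTYP_D select the context and data
	 * descriptor types, and WTX_CMD_IDE enables the interrupt delay
	 * timer.
	 */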
6123 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
6124 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
6125 seg = 0;
6126 fields = 0;
6127
6128 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6129 int hlen = offset + iphl;
6130 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6131
6132 if (__predict_false(m0->m_len <
6133 (hlen + sizeof(struct tcphdr)))) {
6134 /*
6135 * TCP/IP headers are not in the first mbuf; we need
6136 * to do this the slow and painful way. Let's just
6137 * hope this doesn't happen very often.
6138 */
6139 struct tcphdr th;
6140
6141 WM_Q_EVCNT_INCR(txq, txtsopain);
6142
6143 m_copydata(m0, hlen, sizeof(th), &th);
6144 if (v4) {
6145 struct ip ip;
6146
6147 m_copydata(m0, offset, sizeof(ip), &ip);
6148 ip.ip_len = 0;
6149 m_copyback(m0,
6150 offset + offsetof(struct ip, ip_len),
6151 sizeof(ip.ip_len), &ip.ip_len);
6152 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6153 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6154 } else {
6155 struct ip6_hdr ip6;
6156
6157 m_copydata(m0, offset, sizeof(ip6), &ip6);
6158 ip6.ip6_plen = 0;
6159 m_copyback(m0,
6160 offset + offsetof(struct ip6_hdr, ip6_plen),
6161 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6162 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6163 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6164 }
6165 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6166 sizeof(th.th_sum), &th.th_sum);
6167
6168 hlen += th.th_off << 2;
6169 } else {
6170 /*
6171 * TCP/IP headers are in the first mbuf; we can do
6172 * this the easy way.
6173 */
6174 struct tcphdr *th;
6175
6176 if (v4) {
6177 struct ip *ip =
6178 (void *)(mtod(m0, char *) + offset);
6179 th = (void *)(mtod(m0, char *) + hlen);
6180
6181 ip->ip_len = 0;
6182 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6183 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6184 } else {
6185 struct ip6_hdr *ip6 =
6186 (void *)(mtod(m0, char *) + offset);
6187 th = (void *)(mtod(m0, char *) + hlen);
6188
6189 ip6->ip6_plen = 0;
6190 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6191 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6192 }
6193 hlen += th->th_off << 2;
6194 }
6195
6196 if (v4) {
6197 WM_Q_EVCNT_INCR(txq, txtso);
6198 cmdlen |= WTX_TCPIP_CMD_IP;
6199 } else {
6200 WM_Q_EVCNT_INCR(txq, txtso6);
6201 ipcse = 0;
6202 }
6203 cmd |= WTX_TCPIP_CMD_TSE;
6204 cmdlen |= WTX_TCPIP_CMD_TSE |
6205 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
6206 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
6207 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
6208 }
6209
6210 /*
6211 * NOTE: Even if we're not using the IP or TCP/UDP checksum
6212 * offload feature, if we load the context descriptor, we
6213 * MUST provide valid values for IPCSS and TUCSS fields.
6214 */
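	/*
	 * IPCSS/TUCSS are the offsets where the IP and TCP/UDP headers
	 * start, IPCSO/TUCSO where the computed checksum is stored, and
	 * IPCSE/TUCSE the last byte covered (0 meaning "to the end of
	 * the packet").
	 */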
6215
6216 ipcs = WTX_TCPIP_IPCSS(offset) |
6217 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
6218 WTX_TCPIP_IPCSE(ipcse);
6219 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
6220 WM_Q_EVCNT_INCR(txq, txipsum);
6221 fields |= WTX_IXSM;
6222 }
6223
6224 offset += iphl;
6225
6226 if (m0->m_pkthdr.csum_flags &
6227 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
6228 WM_Q_EVCNT_INCR(txq, txtusum);
6229 fields |= WTX_TXSM;
6230 tucs = WTX_TCPIP_TUCSS(offset) |
6231 WTX_TCPIP_TUCSO(offset +
6232 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
6233 WTX_TCPIP_TUCSE(0) /* rest of packet */;
6234 } else if ((m0->m_pkthdr.csum_flags &
6235 (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
6236 WM_Q_EVCNT_INCR(txq, txtusum6);
6237 fields |= WTX_TXSM;
6238 tucs = WTX_TCPIP_TUCSS(offset) |
6239 WTX_TCPIP_TUCSO(offset +
6240 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
6241 WTX_TCPIP_TUCSE(0) /* rest of packet */;
6242 } else {
6243 /* Just initialize it to a valid TCP context. */
6244 tucs = WTX_TCPIP_TUCSS(offset) |
6245 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
6246 WTX_TCPIP_TUCSE(0) /* rest of packet */;
6247 }
6248
6249 /* Fill in the context descriptor. */
6250 t = (struct livengood_tcpip_ctxdesc *)
6251 &txq->txq_descs[txq->txq_next];
6252 t->tcpip_ipcs = htole32(ipcs);
6253 t->tcpip_tucs = htole32(tucs);
6254 t->tcpip_cmdlen = htole32(cmdlen);
6255 t->tcpip_seg = htole32(seg);
6256 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6257
6258 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6259 txs->txs_ndesc++;
6260
6261 *cmdp = cmd;
6262 *fieldsp = fields;
6263
6264 return 0;
6265 }
6266
6267 /*
6268 * wm_start: [ifnet interface function]
6269 *
6270 * Start packet transmission on the interface.
6271 */
6272 static void
6273 wm_start(struct ifnet *ifp)
6274 {
6275 struct wm_softc *sc = ifp->if_softc;
6276 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6277
6278 KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
6279
6280 mutex_enter(txq->txq_lock);
6281 if (!txq->txq_stopping)
6282 wm_start_locked(ifp);
6283 mutex_exit(txq->txq_lock);
6284 }
6285
6286 static void
6287 wm_start_locked(struct ifnet *ifp)
6288 {
6289 struct wm_softc *sc = ifp->if_softc;
6290 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6291 struct mbuf *m0;
6292 struct m_tag *mtag;
6293 struct wm_txsoft *txs;
6294 bus_dmamap_t dmamap;
6295 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
6296 bus_addr_t curaddr;
6297 bus_size_t seglen, curlen;
6298 uint32_t cksumcmd;
6299 uint8_t cksumfields;
6300
6301 KASSERT(mutex_owned(txq->txq_lock));
6302
6303 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6304 return;
6305
6306 /* Remember the previous number of free descriptors. */
6307 ofree = txq->txq_free;
6308
6309 /*
6310 * Loop through the send queue, setting up transmit descriptors
6311 * until we drain the queue, or use up all available transmit
6312 * descriptors.
6313 */
6314 for (;;) {
6315 m0 = NULL;
6316
6317 /* Get a work queue entry. */
6318 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6319 wm_txeof(sc, txq);
6320 if (txq->txq_sfree == 0) {
6321 DPRINTF(WM_DEBUG_TX,
6322 ("%s: TX: no free job descriptors\n",
6323 device_xname(sc->sc_dev)));
6324 WM_Q_EVCNT_INCR(txq, txsstall);
6325 break;
6326 }
6327 }
6328
6329 /* Grab a packet off the queue. */
6330 IFQ_DEQUEUE(&ifp->if_snd, m0);
6331 if (m0 == NULL)
6332 break;
6333
6334 DPRINTF(WM_DEBUG_TX,
6335 ("%s: TX: have packet to transmit: %p\n",
6336 device_xname(sc->sc_dev), m0));
6337
6338 txs = &txq->txq_soft[txq->txq_snext];
6339 dmamap = txs->txs_dmamap;
6340
6341 use_tso = (m0->m_pkthdr.csum_flags &
6342 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
6343
6344 /*
6345 * So says the Linux driver:
6346 * The controller does a simple calculation to make sure
6347 * there is enough room in the FIFO before initiating the
6348 * DMA for each buffer. The calc is:
6349 * 4 = ceil(buffer len / MSS)
6350 * To make sure we don't overrun the FIFO, adjust the max
6351 * buffer len if the MSS drops.
6352 */
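		/*
		 * e.g. an MSS of 1460 gives 4 * MSS == 5840, so
		 * dm_maxsegsz becomes 5840 when that is still below
		 * WTX_MAX_LEN; otherwise WTX_MAX_LEN is used.
		 */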
6353 dmamap->dm_maxsegsz =
6354 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
6355 ? m0->m_pkthdr.segsz << 2
6356 : WTX_MAX_LEN;
6357
6358 /*
6359 * Load the DMA map. If this fails, the packet either
6360 * didn't fit in the allotted number of segments, or we
6361 * were short on resources. For the too-many-segments
6362 * case, we simply report an error and drop the packet,
6363 * since we can't sanely copy a jumbo packet to a single
6364 * buffer.
6365 */
6366 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6367 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6368 if (error) {
6369 if (error == EFBIG) {
6370 WM_Q_EVCNT_INCR(txq, txdrop);
6371 log(LOG_ERR, "%s: Tx packet consumes too many "
6372 "DMA segments, dropping...\n",
6373 device_xname(sc->sc_dev));
6374 wm_dump_mbuf_chain(sc, m0);
6375 m_freem(m0);
6376 continue;
6377 }
6378 /* Short on resources, just stop for now. */
6379 DPRINTF(WM_DEBUG_TX,
6380 ("%s: TX: dmamap load failed: %d\n",
6381 device_xname(sc->sc_dev), error));
6382 break;
6383 }
6384
6385 segs_needed = dmamap->dm_nsegs;
6386 if (use_tso) {
6387 /* For sentinel descriptor; see below. */
6388 segs_needed++;
6389 }
6390
6391 /*
6392 * Ensure we have enough descriptors free to describe
6393 * the packet. Note, we always reserve one descriptor
6394 * at the end of the ring due to the semantics of the
6395 * TDT register, plus one more in the event we need
6396 * to load offload context.
6397 */
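		/*
		 * (TDT == TDH means "ring empty" to the chip, so one
		 * slot must stay unused to keep a full ring
		 * distinguishable from an empty one.)
		 */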
6398 if (segs_needed > txq->txq_free - 2) {
6399 /*
6400 * Not enough free descriptors to transmit this
6401 * packet. We haven't committed anything yet,
6402 * so just unload the DMA map, put the packet
6403 			 * back on the queue, and punt. Notify the upper
6404 * layer that there are no more slots left.
6405 */
6406 DPRINTF(WM_DEBUG_TX,
6407 ("%s: TX: need %d (%d) descriptors, have %d\n",
6408 device_xname(sc->sc_dev), dmamap->dm_nsegs,
6409 segs_needed, txq->txq_free - 1));
6410 ifp->if_flags |= IFF_OACTIVE;
6411 bus_dmamap_unload(sc->sc_dmat, dmamap);
6412 WM_Q_EVCNT_INCR(txq, txdstall);
6413 break;
6414 }
6415
6416 /*
6417 * Check for 82547 Tx FIFO bug. We need to do this
6418 * once we know we can transmit the packet, since we
6419 * do some internal FIFO space accounting here.
6420 */
6421 if (sc->sc_type == WM_T_82547 &&
6422 wm_82547_txfifo_bugchk(sc, m0)) {
6423 DPRINTF(WM_DEBUG_TX,
6424 ("%s: TX: 82547 Tx FIFO bug detected\n",
6425 device_xname(sc->sc_dev)));
6426 ifp->if_flags |= IFF_OACTIVE;
6427 bus_dmamap_unload(sc->sc_dmat, dmamap);
6428 WM_Q_EVCNT_INCR(txq, txfifo_stall);
6429 break;
6430 }
6431
6432 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6433
6434 DPRINTF(WM_DEBUG_TX,
6435 ("%s: TX: packet has %d (%d) DMA segments\n",
6436 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6437
6438 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
6439
6440 /*
6441 * Store a pointer to the packet so that we can free it
6442 * later.
6443 *
6444 * Initially, we consider the number of descriptors the
6445 		 * packet uses to be the number of DMA segments. This may be
6446 * incremented by 1 if we do checksum offload (a descriptor
6447 * is used to set the checksum context).
6448 */
6449 txs->txs_mbuf = m0;
6450 txs->txs_firstdesc = txq->txq_next;
6451 txs->txs_ndesc = segs_needed;
6452
6453 /* Set up offload parameters for this packet. */
6454 if (m0->m_pkthdr.csum_flags &
6455 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6456 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6457 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6458 if (wm_tx_offload(sc, txs, &cksumcmd,
6459 &cksumfields) != 0) {
6460 /* Error message already displayed. */
6461 bus_dmamap_unload(sc->sc_dmat, dmamap);
6462 continue;
6463 }
6464 } else {
6465 cksumcmd = 0;
6466 cksumfields = 0;
6467 }
6468
6469 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
6470
6471 /* Sync the DMA map. */
6472 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6473 BUS_DMASYNC_PREWRITE);
6474
6475 /* Initialize the transmit descriptor. */
6476 for (nexttx = txq->txq_next, seg = 0;
6477 seg < dmamap->dm_nsegs; seg++) {
6478 for (seglen = dmamap->dm_segs[seg].ds_len,
6479 curaddr = dmamap->dm_segs[seg].ds_addr;
6480 seglen != 0;
6481 curaddr += curlen, seglen -= curlen,
6482 nexttx = WM_NEXTTX(txq, nexttx)) {
6483 curlen = seglen;
6484
6485 /*
6486 * So says the Linux driver:
6487 * Work around for premature descriptor
6488 * write-backs in TSO mode. Append a
6489 * 4-byte sentinel descriptor.
6490 */
6491 if (use_tso && seg == dmamap->dm_nsegs - 1 &&
6492 curlen > 8)
6493 curlen -= 4;
6494
6495 wm_set_dma_addr(
6496 &txq->txq_descs[nexttx].wtx_addr, curaddr);
6497 txq->txq_descs[nexttx].wtx_cmdlen
6498 = htole32(cksumcmd | curlen);
6499 txq->txq_descs[nexttx].wtx_fields.wtxu_status
6500 = 0;
6501 txq->txq_descs[nexttx].wtx_fields.wtxu_options
6502 = cksumfields;
6503 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6504 lasttx = nexttx;
6505
6506 DPRINTF(WM_DEBUG_TX,
6507 ("%s: TX: desc %d: low %#" PRIx64 ", "
6508 "len %#04zx\n",
6509 device_xname(sc->sc_dev), nexttx,
6510 (uint64_t)curaddr, curlen));
6511 }
6512 }
6513
6514 KASSERT(lasttx != -1);
6515
6516 /*
6517 * Set up the command byte on the last descriptor of
6518 * the packet. If we're in the interrupt delay window,
6519 * delay the interrupt.
6520 */
6521 txq->txq_descs[lasttx].wtx_cmdlen |=
6522 htole32(WTX_CMD_EOP | WTX_CMD_RS);
6523
6524 /*
6525 * If VLANs are enabled and the packet has a VLAN tag, set
6526 * up the descriptor to encapsulate the packet for us.
6527 *
6528 * This is only valid on the last descriptor of the packet.
6529 */
6530 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6531 txq->txq_descs[lasttx].wtx_cmdlen |=
6532 htole32(WTX_CMD_VLE);
6533 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
6534 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6535 }
6536
6537 txs->txs_lastdesc = lasttx;
6538
6539 DPRINTF(WM_DEBUG_TX,
6540 ("%s: TX: desc %d: cmdlen 0x%08x\n",
6541 device_xname(sc->sc_dev),
6542 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6543
6544 /* Sync the descriptors we're using. */
6545 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6546 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6547
6548 /* Give the packet to the chip. */
6549 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6550
6551 DPRINTF(WM_DEBUG_TX,
6552 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6553
6554 DPRINTF(WM_DEBUG_TX,
6555 ("%s: TX: finished transmitting packet, job %d\n",
6556 device_xname(sc->sc_dev), txq->txq_snext));
6557
6558 /* Advance the tx pointer. */
6559 txq->txq_free -= txs->txs_ndesc;
6560 txq->txq_next = nexttx;
6561
6562 txq->txq_sfree--;
6563 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6564
6565 /* Pass the packet to any BPF listeners. */
6566 bpf_mtap(ifp, m0);
6567 }
6568
6569 if (m0 != NULL) {
6570 ifp->if_flags |= IFF_OACTIVE;
6571 WM_Q_EVCNT_INCR(txq, txdrop);
6572 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
6573 __func__));
6574 m_freem(m0);
6575 }
6576
6577 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6578 /* No more slots; notify upper layer. */
6579 ifp->if_flags |= IFF_OACTIVE;
6580 }
6581
6582 if (txq->txq_free != ofree) {
6583 /* Set a watchdog timer in case the chip flakes out. */
6584 ifp->if_timer = 5;
6585 }
6586 }
6587
6588 /*
6589 * wm_nq_tx_offload:
6590 *
6591 * Set up TCP/IP checksumming parameters for the
6592 * specified packet, for NEWQUEUE devices
6593 */
6594 static int
6595 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
6596 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
6597 {
6598 struct mbuf *m0 = txs->txs_mbuf;
6599 struct m_tag *mtag;
6600 uint32_t vl_len, mssidx, cmdc;
6601 struct ether_header *eh;
6602 int offset, iphl;
6603
6604 /*
6605 * XXX It would be nice if the mbuf pkthdr had offset
6606 * fields for the protocol headers.
6607 */
6608 *cmdlenp = 0;
6609 *fieldsp = 0;
6610
6611 eh = mtod(m0, struct ether_header *);
6612 switch (htons(eh->ether_type)) {
6613 case ETHERTYPE_IP:
6614 case ETHERTYPE_IPV6:
6615 offset = ETHER_HDR_LEN;
6616 break;
6617
6618 case ETHERTYPE_VLAN:
6619 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6620 break;
6621
6622 default:
6623 /* Don't support this protocol or encapsulation. */
6624 *do_csum = false;
6625 return 0;
6626 }
6627 *do_csum = true;
6628 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
6629 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
6630
6631 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
6632 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
6633
6634 if ((m0->m_pkthdr.csum_flags &
6635 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
6636 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6637 } else {
6638 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6639 }
6640 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
6641 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
6642
6643 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6644 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
6645 << NQTXC_VLLEN_VLAN_SHIFT);
6646 *cmdlenp |= NQTX_CMD_VLE;
6647 }
6648
6649 mssidx = 0;
6650
6651 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6652 int hlen = offset + iphl;
6653 int tcp_hlen;
6654 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6655
6656 if (__predict_false(m0->m_len <
6657 (hlen + sizeof(struct tcphdr)))) {
6658 /*
6659 * TCP/IP headers are not in the first mbuf; we need
6660 * to do this the slow and painful way. Let's just
6661 * hope this doesn't happen very often.
6662 */
6663 struct tcphdr th;
6664
6665 WM_Q_EVCNT_INCR(txq, txtsopain);
6666
6667 m_copydata(m0, hlen, sizeof(th), &th);
6668 if (v4) {
6669 struct ip ip;
6670
6671 m_copydata(m0, offset, sizeof(ip), &ip);
6672 ip.ip_len = 0;
6673 m_copyback(m0,
6674 offset + offsetof(struct ip, ip_len),
6675 sizeof(ip.ip_len), &ip.ip_len);
6676 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6677 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6678 } else {
6679 struct ip6_hdr ip6;
6680
6681 m_copydata(m0, offset, sizeof(ip6), &ip6);
6682 ip6.ip6_plen = 0;
6683 m_copyback(m0,
6684 offset + offsetof(struct ip6_hdr, ip6_plen),
6685 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6686 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6687 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6688 }
6689 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6690 sizeof(th.th_sum), &th.th_sum);
6691
6692 tcp_hlen = th.th_off << 2;
6693 } else {
6694 /*
6695 * TCP/IP headers are in the first mbuf; we can do
6696 * this the easy way.
6697 */
6698 struct tcphdr *th;
6699
6700 if (v4) {
6701 struct ip *ip =
6702 (void *)(mtod(m0, char *) + offset);
6703 th = (void *)(mtod(m0, char *) + hlen);
6704
6705 ip->ip_len = 0;
6706 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6707 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6708 } else {
6709 struct ip6_hdr *ip6 =
6710 (void *)(mtod(m0, char *) + offset);
6711 th = (void *)(mtod(m0, char *) + hlen);
6712
6713 ip6->ip6_plen = 0;
6714 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6715 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6716 }
6717 tcp_hlen = th->th_off << 2;
6718 }
6719 hlen += tcp_hlen;
6720 *cmdlenp |= NQTX_CMD_TSE;
6721
6722 if (v4) {
6723 WM_Q_EVCNT_INCR(txq, txtso);
6724 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
6725 } else {
6726 WM_Q_EVCNT_INCR(txq, txtso6);
6727 *fieldsp |= NQTXD_FIELDS_TUXSM;
6728 }
6729 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
6730 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6731 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
6732 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
6733 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
6734 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
6735 } else {
6736 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
6737 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6738 }
6739
6740 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
6741 *fieldsp |= NQTXD_FIELDS_IXSM;
6742 cmdc |= NQTXC_CMD_IP4;
6743 }
6744
6745 if (m0->m_pkthdr.csum_flags &
6746 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6747 WM_Q_EVCNT_INCR(txq, txtusum);
6748 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6749 cmdc |= NQTXC_CMD_TCP;
6750 } else {
6751 cmdc |= NQTXC_CMD_UDP;
6752 }
6753 cmdc |= NQTXC_CMD_IP4;
6754 *fieldsp |= NQTXD_FIELDS_TUXSM;
6755 }
6756 if (m0->m_pkthdr.csum_flags &
6757 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6758 WM_Q_EVCNT_INCR(txq, txtusum6);
6759 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6760 cmdc |= NQTXC_CMD_TCP;
6761 } else {
6762 cmdc |= NQTXC_CMD_UDP;
6763 }
6764 cmdc |= NQTXC_CMD_IP6;
6765 *fieldsp |= NQTXD_FIELDS_TUXSM;
6766 }
6767
6768 /* Fill in the context descriptor. */
6769 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
6770 htole32(vl_len);
6771 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
6772 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
6773 htole32(cmdc);
6774 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
6775 htole32(mssidx);
6776 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6777 DPRINTF(WM_DEBUG_TX,
6778 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
6779 txq->txq_next, 0, vl_len));
6780 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
6781 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6782 txs->txs_ndesc++;
6783 return 0;
6784 }
6785
6786 /*
6787 * wm_nq_start: [ifnet interface function]
6788 *
6789 * Start packet transmission on the interface for NEWQUEUE devices
6790 */
6791 static void
6792 wm_nq_start(struct ifnet *ifp)
6793 {
6794 struct wm_softc *sc = ifp->if_softc;
6795 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6796
6797 KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
6798
6799 mutex_enter(txq->txq_lock);
6800 if (!txq->txq_stopping)
6801 wm_nq_start_locked(ifp);
6802 mutex_exit(txq->txq_lock);
6803 }
6804
6805 static void
6806 wm_nq_start_locked(struct ifnet *ifp)
6807 {
6808 struct wm_softc *sc = ifp->if_softc;
6809 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6810
6811 wm_nq_send_common_locked(ifp, txq, false);
6812 }
6813
6814 static inline int
6815 wm_nq_select_txqueue(struct ifnet *ifp, struct mbuf *m)
6816 {
6817 struct wm_softc *sc = ifp->if_softc;
6818 u_int cpuid = cpu_index(curcpu());
6819
6820 	/*
6821 	 * Currently, a simple CPU-ID-based distribution strategy.
6822 	 * TODO:
6823 	 * Distribute by flowid (RSS hash value).
6824 	 */
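	/*
	 * e.g. with sc_nqueues == 4 and sc_affinity_offset == 0,
	 * CPU 5 maps to queue 5 % 4 == 1.
	 */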
6825 return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
6826 }
6827
6828 static int
6829 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
6830 {
6831 int qid;
6832 struct wm_softc *sc = ifp->if_softc;
6833 struct wm_txqueue *txq;
6834
6835 qid = wm_nq_select_txqueue(ifp, m);
6836 txq = &sc->sc_queue[qid].wmq_txq;
6837
6838 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
6839 m_freem(m);
6840 WM_Q_EVCNT_INCR(txq, txdrop);
6841 return ENOBUFS;
6842 }
6843
6844 if (mutex_tryenter(txq->txq_lock)) {
6845 /* XXXX should be per TX queue */
6846 ifp->if_obytes += m->m_pkthdr.len;
6847 if (m->m_flags & M_MCAST)
6848 ifp->if_omcasts++;
6849
6850 if (!txq->txq_stopping)
6851 wm_nq_transmit_locked(ifp, txq);
6852 mutex_exit(txq->txq_lock);
6853 }
6854
6855 return 0;
6856 }
6857
6858 static void
6859 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
6860 {
6861
6862 wm_nq_send_common_locked(ifp, txq, true);
6863 }
6864
6865 static void
6866 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
6867 bool is_transmit)
6868 {
6869 struct wm_softc *sc = ifp->if_softc;
6870 struct mbuf *m0;
6871 struct m_tag *mtag;
6872 struct wm_txsoft *txs;
6873 bus_dmamap_t dmamap;
6874 int error, nexttx, lasttx = -1, seg, segs_needed;
6875 bool do_csum, sent;
6876
6877 KASSERT(mutex_owned(txq->txq_lock));
6878
6879 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6880 return;
6881 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
6882 return;
6883
6884 sent = false;
6885
6886 /*
6887 * Loop through the send queue, setting up transmit descriptors
6888 * until we drain the queue, or use up all available transmit
6889 * descriptors.
6890 */
6891 for (;;) {
6892 m0 = NULL;
6893
6894 /* Get a work queue entry. */
6895 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6896 wm_txeof(sc, txq);
6897 if (txq->txq_sfree == 0) {
6898 DPRINTF(WM_DEBUG_TX,
6899 ("%s: TX: no free job descriptors\n",
6900 device_xname(sc->sc_dev)));
6901 WM_Q_EVCNT_INCR(txq, txsstall);
6902 break;
6903 }
6904 }
6905
6906 /* Grab a packet off the queue. */
6907 if (is_transmit)
6908 m0 = pcq_get(txq->txq_interq);
6909 else
6910 IFQ_DEQUEUE(&ifp->if_snd, m0);
6911 if (m0 == NULL)
6912 break;
6913
6914 DPRINTF(WM_DEBUG_TX,
6915 ("%s: TX: have packet to transmit: %p\n",
6916 device_xname(sc->sc_dev), m0));
6917
6918 txs = &txq->txq_soft[txq->txq_snext];
6919 dmamap = txs->txs_dmamap;
6920
6921 /*
6922 * Load the DMA map. If this fails, the packet either
6923 * didn't fit in the allotted number of segments, or we
6924 * were short on resources. For the too-many-segments
6925 * case, we simply report an error and drop the packet,
6926 * since we can't sanely copy a jumbo packet to a single
6927 * buffer.
6928 */
6929 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6930 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6931 if (error) {
6932 if (error == EFBIG) {
6933 WM_Q_EVCNT_INCR(txq, txdrop);
6934 log(LOG_ERR, "%s: Tx packet consumes too many "
6935 "DMA segments, dropping...\n",
6936 device_xname(sc->sc_dev));
6937 wm_dump_mbuf_chain(sc, m0);
6938 m_freem(m0);
6939 continue;
6940 }
6941 /* Short on resources, just stop for now. */
6942 DPRINTF(WM_DEBUG_TX,
6943 ("%s: TX: dmamap load failed: %d\n",
6944 device_xname(sc->sc_dev), error));
6945 break;
6946 }
6947
6948 segs_needed = dmamap->dm_nsegs;
6949
6950 /*
6951 * Ensure we have enough descriptors free to describe
6952 * the packet. Note, we always reserve one descriptor
6953 * at the end of the ring due to the semantics of the
6954 * TDT register, plus one more in the event we need
6955 * to load offload context.
6956 */
6957 if (segs_needed > txq->txq_free - 2) {
6958 /*
6959 * Not enough free descriptors to transmit this
6960 * packet. We haven't committed anything yet,
6961 * so just unload the DMA map, put the packet
6962 			 * back on the queue, and punt. Notify the upper
6963 * layer that there are no more slots left.
6964 */
6965 DPRINTF(WM_DEBUG_TX,
6966 ("%s: TX: need %d (%d) descriptors, have %d\n",
6967 device_xname(sc->sc_dev), dmamap->dm_nsegs,
6968 segs_needed, txq->txq_free - 1));
6969 txq->txq_flags |= WM_TXQ_NO_SPACE;
6970 bus_dmamap_unload(sc->sc_dmat, dmamap);
6971 WM_Q_EVCNT_INCR(txq, txdstall);
6972 break;
6973 }
6974
6975 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6976
6977 DPRINTF(WM_DEBUG_TX,
6978 ("%s: TX: packet has %d (%d) DMA segments\n",
6979 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6980
6981 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
6982
6983 /*
6984 * Store a pointer to the packet so that we can free it
6985 * later.
6986 *
6987 * Initially, we consider the number of descriptors the
6988 		 * packet uses to be the number of DMA segments. This may be
6989 * incremented by 1 if we do checksum offload (a descriptor
6990 * is used to set the checksum context).
6991 */
6992 txs->txs_mbuf = m0;
6993 txs->txs_firstdesc = txq->txq_next;
6994 txs->txs_ndesc = segs_needed;
6995
6996 /* Set up offload parameters for this packet. */
6997 uint32_t cmdlen, fields, dcmdlen;
6998 if (m0->m_pkthdr.csum_flags &
6999 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
7000 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7001 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
7002 if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
7003 &do_csum) != 0) {
7004 /* Error message already displayed. */
7005 bus_dmamap_unload(sc->sc_dmat, dmamap);
7006 continue;
7007 }
7008 } else {
7009 do_csum = false;
7010 cmdlen = 0;
7011 fields = 0;
7012 }
7013
7014 /* Sync the DMA map. */
7015 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
7016 BUS_DMASYNC_PREWRITE);
7017
7018 /* Initialize the first transmit descriptor. */
7019 nexttx = txq->txq_next;
7020 if (!do_csum) {
7021 /* setup a legacy descriptor */
7022 wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
7023 dmamap->dm_segs[0].ds_addr);
7024 txq->txq_descs[nexttx].wtx_cmdlen =
7025 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
7026 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
7027 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
7028 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
7029 NULL) {
7030 txq->txq_descs[nexttx].wtx_cmdlen |=
7031 htole32(WTX_CMD_VLE);
7032 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
7033 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
7034 } else {
7035 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
7036 }
7037 dcmdlen = 0;
7038 } else {
7039 /* setup an advanced data descriptor */
7040 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7041 htole64(dmamap->dm_segs[0].ds_addr);
7042 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
7043 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7044 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
7045 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
7046 htole32(fields);
7047 DPRINTF(WM_DEBUG_TX,
7048 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
7049 device_xname(sc->sc_dev), nexttx,
7050 (uint64_t)dmamap->dm_segs[0].ds_addr));
7051 DPRINTF(WM_DEBUG_TX,
7052 ("\t 0x%08x%08x\n", fields,
7053 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
7054 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
7055 }
7056
7057 lasttx = nexttx;
7058 nexttx = WM_NEXTTX(txq, nexttx);
7059 		/*
7060 		 * Fill in the next descriptors. The legacy and advanced
7061 		 * formats are the same here.
7062 		 */
7063 for (seg = 1; seg < dmamap->dm_nsegs;
7064 seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
7065 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7066 htole64(dmamap->dm_segs[seg].ds_addr);
7067 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7068 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
7069 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
7070 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
7071 lasttx = nexttx;
7072
7073 DPRINTF(WM_DEBUG_TX,
7074 ("%s: TX: desc %d: %#" PRIx64 ", "
7075 "len %#04zx\n",
7076 device_xname(sc->sc_dev), nexttx,
7077 (uint64_t)dmamap->dm_segs[seg].ds_addr,
7078 dmamap->dm_segs[seg].ds_len));
7079 }
7080
7081 KASSERT(lasttx != -1);
7082
7083 /*
7084 * Set up the command byte on the last descriptor of
7085 * the packet. If we're in the interrupt delay window,
7086 * delay the interrupt.
7087 */
7088 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
7089 (NQTX_CMD_EOP | NQTX_CMD_RS));
7090 txq->txq_descs[lasttx].wtx_cmdlen |=
7091 htole32(WTX_CMD_EOP | WTX_CMD_RS);
7092
7093 txs->txs_lastdesc = lasttx;
7094
7095 DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
7096 device_xname(sc->sc_dev),
7097 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
7098
7099 /* Sync the descriptors we're using. */
7100 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
7101 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7102
7103 /* Give the packet to the chip. */
7104 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
7105 sent = true;
7106
7107 DPRINTF(WM_DEBUG_TX,
7108 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
7109
7110 DPRINTF(WM_DEBUG_TX,
7111 ("%s: TX: finished transmitting packet, job %d\n",
7112 device_xname(sc->sc_dev), txq->txq_snext));
7113
7114 /* Advance the tx pointer. */
7115 txq->txq_free -= txs->txs_ndesc;
7116 txq->txq_next = nexttx;
7117
7118 txq->txq_sfree--;
7119 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
7120
7121 /* Pass the packet to any BPF listeners. */
7122 bpf_mtap(ifp, m0);
7123 }
7124
7125 if (m0 != NULL) {
7126 txq->txq_flags |= WM_TXQ_NO_SPACE;
7127 WM_Q_EVCNT_INCR(txq, txdrop);
7128 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
7129 __func__));
7130 m_freem(m0);
7131 }
7132
7133 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
7134 /* No more slots; notify upper layer. */
7135 txq->txq_flags |= WM_TXQ_NO_SPACE;
7136 }
7137
7138 if (sent) {
7139 /* Set a watchdog timer in case the chip flakes out. */
7140 ifp->if_timer = 5;
7141 }
7142 }
7143
7144 /* Interrupt */
7145
7146 /*
7147 * wm_txeof:
7148 *
7149 * Helper; handle transmit interrupts.
7150 */
7151 static int
7152 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
7153 {
7154 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7155 struct wm_txsoft *txs;
7156 bool processed = false;
7157 int count = 0;
7158 int i;
7159 uint8_t status;
7160
7161 KASSERT(mutex_owned(txq->txq_lock));
7162
7163 if (txq->txq_stopping)
7164 return 0;
7165
7166 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7167 txq->txq_flags &= ~WM_TXQ_NO_SPACE;
7168 else
7169 ifp->if_flags &= ~IFF_OACTIVE;
7170
7171 /*
7172 * Go through the Tx list and free mbufs for those
7173 * frames which have been transmitted.
7174 */
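	/*
	 * The chip sets WTX_ST_DD ("descriptor done") in the status
	 * byte of a job's last descriptor once it has processed it,
	 * so we stop at the first job whose DD bit is still clear.
	 */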
7175 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
7176 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
7177 txs = &txq->txq_soft[i];
7178
7179 DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
7180 device_xname(sc->sc_dev), i));
7181
7182 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
7183 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
7184
7185 status =
7186 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
7187 if ((status & WTX_ST_DD) == 0) {
7188 wm_cdtxsync(txq, txs->txs_lastdesc, 1,
7189 BUS_DMASYNC_PREREAD);
7190 break;
7191 }
7192
7193 processed = true;
7194 count++;
7195 DPRINTF(WM_DEBUG_TX,
7196 ("%s: TX: job %d done: descs %d..%d\n",
7197 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
7198 txs->txs_lastdesc));
7199
7200 /*
7201 * XXX We should probably be using the statistics
7202 * XXX registers, but I don't know if they exist
7203 * XXX on chips before the i82544.
7204 */
7205
7206 #ifdef WM_EVENT_COUNTERS
7207 if (status & WTX_ST_TU)
7208 WM_Q_EVCNT_INCR(txq, tu);
7209 #endif /* WM_EVENT_COUNTERS */
7210
7211 if (status & (WTX_ST_EC | WTX_ST_LC)) {
7212 ifp->if_oerrors++;
7213 if (status & WTX_ST_LC)
7214 log(LOG_WARNING, "%s: late collision\n",
7215 device_xname(sc->sc_dev));
7216 else if (status & WTX_ST_EC) {
7217 ifp->if_collisions += 16;
7218 log(LOG_WARNING, "%s: excessive collisions\n",
7219 device_xname(sc->sc_dev));
7220 }
7221 } else
7222 ifp->if_opackets++;
7223
7224 txq->txq_free += txs->txs_ndesc;
7225 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
7226 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
7227 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
7228 m_freem(txs->txs_mbuf);
7229 txs->txs_mbuf = NULL;
7230 }
7231
7232 /* Update the dirty transmit buffer pointer. */
7233 txq->txq_sdirty = i;
7234 DPRINTF(WM_DEBUG_TX,
7235 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
7236
7237 if (count != 0)
7238 rnd_add_uint32(&sc->rnd_source, count);
7239
7240 /*
7241 * If there are no more pending transmissions, cancel the watchdog
7242 * timer.
7243 */
7244 if (txq->txq_sfree == WM_TXQUEUELEN(txq))
7245 ifp->if_timer = 0;
7246
7247 return processed;
7248 }
7249
7250 /*
7251 * wm_rxeof:
7252 *
7253 * Helper; handle receive interrupts.
7254 */
7255 static void
7256 wm_rxeof(struct wm_rxqueue *rxq)
7257 {
7258 struct wm_softc *sc = rxq->rxq_sc;
7259 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7260 struct wm_rxsoft *rxs;
7261 struct mbuf *m;
7262 int i, len;
7263 int count = 0;
7264 uint8_t status, errors;
7265 uint16_t vlantag;
7266
7267 KASSERT(mutex_owned(rxq->rxq_lock));
7268
7269 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
7270 rxs = &rxq->rxq_soft[i];
7271
7272 DPRINTF(WM_DEBUG_RX,
7273 ("%s: RX: checking descriptor %d\n",
7274 device_xname(sc->sc_dev), i));
7275
7276 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
7277
7278 status = rxq->rxq_descs[i].wrx_status;
7279 errors = rxq->rxq_descs[i].wrx_errors;
7280 len = le16toh(rxq->rxq_descs[i].wrx_len);
7281 vlantag = rxq->rxq_descs[i].wrx_special;
7282
7283 if ((status & WRX_ST_DD) == 0) {
7284 /* We have processed all of the receive descriptors. */
7285 wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
7286 break;
7287 }
7288
7289 count++;
7290 if (__predict_false(rxq->rxq_discard)) {
7291 DPRINTF(WM_DEBUG_RX,
7292 ("%s: RX: discarding contents of descriptor %d\n",
7293 device_xname(sc->sc_dev), i));
7294 wm_init_rxdesc(rxq, i);
7295 if (status & WRX_ST_EOP) {
7296 /* Reset our state. */
7297 DPRINTF(WM_DEBUG_RX,
7298 ("%s: RX: resetting rxdiscard -> 0\n",
7299 device_xname(sc->sc_dev)));
7300 rxq->rxq_discard = 0;
7301 }
7302 continue;
7303 }
7304
7305 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7306 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
7307
7308 m = rxs->rxs_mbuf;
7309
7310 /*
7311 * Add a new receive buffer to the ring, unless of
7312 * course the length is zero. Treat the latter as a
7313 * failed mapping.
7314 */
7315 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
7316 /*
7317 * Failed, throw away what we've done so
7318 * far, and discard the rest of the packet.
7319 */
7320 ifp->if_ierrors++;
7321 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7322 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
7323 wm_init_rxdesc(rxq, i);
7324 if ((status & WRX_ST_EOP) == 0)
7325 rxq->rxq_discard = 1;
7326 if (rxq->rxq_head != NULL)
7327 m_freem(rxq->rxq_head);
7328 WM_RXCHAIN_RESET(rxq);
7329 DPRINTF(WM_DEBUG_RX,
7330 ("%s: RX: Rx buffer allocation failed, "
7331 "dropping packet%s\n", device_xname(sc->sc_dev),
7332 rxq->rxq_discard ? " (discard)" : ""));
7333 continue;
7334 }
7335
7336 m->m_len = len;
7337 rxq->rxq_len += len;
7338 DPRINTF(WM_DEBUG_RX,
7339 ("%s: RX: buffer at %p len %d\n",
7340 device_xname(sc->sc_dev), m->m_data, len));
7341
7342 /* If this is not the end of the packet, keep looking. */
7343 if ((status & WRX_ST_EOP) == 0) {
7344 WM_RXCHAIN_LINK(rxq, m);
7345 DPRINTF(WM_DEBUG_RX,
7346 ("%s: RX: not yet EOP, rxlen -> %d\n",
7347 device_xname(sc->sc_dev), rxq->rxq_len));
7348 continue;
7349 }
7350
7351 /*
7352 		 * Okay, we have the entire packet now. The chip is
7353 		 * configured to include the FCS except on I350 and
7354 		 * I21[01] (not all chips can be configured to strip
7355 		 * it), so we need to trim it. We may also need to
7356 		 * adjust the length of the previous mbuf in the
7357 		 * chain if the current mbuf is too short.
7358 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
7359 		 * register is always set on I350, so we don't trim there.
7360 */
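		/*
		 * e.g. if the final mbuf holds only 2 bytes, those 2
		 * bytes plus the last 2 bytes of the previous mbuf are
		 * the FCS: the previous mbuf is shortened by 2 and the
		 * final mbuf's length set to zero.
		 */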
7361 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
7362 && (sc->sc_type != WM_T_I210)
7363 && (sc->sc_type != WM_T_I211)) {
7364 if (m->m_len < ETHER_CRC_LEN) {
7365 rxq->rxq_tail->m_len
7366 -= (ETHER_CRC_LEN - m->m_len);
7367 m->m_len = 0;
7368 } else
7369 m->m_len -= ETHER_CRC_LEN;
7370 len = rxq->rxq_len - ETHER_CRC_LEN;
7371 } else
7372 len = rxq->rxq_len;
7373
7374 WM_RXCHAIN_LINK(rxq, m);
7375
7376 *rxq->rxq_tailp = NULL;
7377 m = rxq->rxq_head;
7378
7379 WM_RXCHAIN_RESET(rxq);
7380
7381 DPRINTF(WM_DEBUG_RX,
7382 ("%s: RX: have entire packet, len -> %d\n",
7383 device_xname(sc->sc_dev), len));
7384
7385 /* If an error occurred, update stats and drop the packet. */
7386 if (errors &
7387 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
7388 if (errors & WRX_ER_SE)
7389 log(LOG_WARNING, "%s: symbol error\n",
7390 device_xname(sc->sc_dev));
7391 else if (errors & WRX_ER_SEQ)
7392 log(LOG_WARNING, "%s: receive sequence error\n",
7393 device_xname(sc->sc_dev));
7394 else if (errors & WRX_ER_CE)
7395 log(LOG_WARNING, "%s: CRC error\n",
7396 device_xname(sc->sc_dev));
7397 m_freem(m);
7398 continue;
7399 }
7400
7401 /* No errors. Receive the packet. */
7402 m_set_rcvif(m, ifp);
7403 m->m_pkthdr.len = len;
7404
7405 /*
7406 * If VLANs are enabled, VLAN packets have been unwrapped
7407 * for us. Associate the tag with the packet.
7408 */
		/* XXX should check for I350 and I354 */
7410 if ((status & WRX_ST_VP) != 0) {
7411 VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
7412 }
7413
7414 /* Set up checksum info for this packet. */
7415 if ((status & WRX_ST_IXSM) == 0) {
7416 if (status & WRX_ST_IPCS) {
7417 WM_Q_EVCNT_INCR(rxq, rxipsum);
7418 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
7419 if (errors & WRX_ER_IPE)
7420 m->m_pkthdr.csum_flags |=
7421 M_CSUM_IPv4_BAD;
7422 }
7423 if (status & WRX_ST_TCPCS) {
7424 /*
7425 * Note: we don't know if this was TCP or UDP,
7426 * so we just set both bits, and expect the
7427 * upper layers to deal.
7428 */
7429 WM_Q_EVCNT_INCR(rxq, rxtusum);
7430 m->m_pkthdr.csum_flags |=
7431 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7432 M_CSUM_TCPv6 | M_CSUM_UDPv6;
7433 if (errors & WRX_ER_TCPE)
7434 m->m_pkthdr.csum_flags |=
7435 M_CSUM_TCP_UDP_BAD;
7436 }
7437 }
7438
7439 ifp->if_ipackets++;
7440
7441 mutex_exit(rxq->rxq_lock);
7442
7443 /* Pass this up to any BPF listeners. */
7444 bpf_mtap(ifp, m);
7445
7446 /* Pass it on. */
7447 if_percpuq_enqueue(sc->sc_ipq, m);
7448
7449 mutex_enter(rxq->rxq_lock);
7450
7451 if (rxq->rxq_stopping)
7452 break;
7453 }
7454
7455 /* Update the receive pointer. */
7456 rxq->rxq_ptr = i;
7457 if (count != 0)
7458 rnd_add_uint32(&sc->rnd_source, count);
7459
7460 DPRINTF(WM_DEBUG_RX,
7461 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
7462 }
7463
7464 /*
7465 * wm_linkintr_gmii:
7466 *
7467 * Helper; handle link interrupts for GMII.
7468 */
7469 static void
7470 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
7471 {
7472
7473 KASSERT(WM_CORE_LOCKED(sc));
7474
7475 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7476 __func__));
7477
7478 if (icr & ICR_LSC) {
7479 uint32_t status = CSR_READ(sc, WMREG_STATUS);
7480
7481 if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
7482 wm_gig_downshift_workaround_ich8lan(sc);
7483
7484 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
7485 device_xname(sc->sc_dev)));
7486 mii_pollstat(&sc->sc_mii);
7487 if (sc->sc_type == WM_T_82543) {
7488 int miistatus, active;
7489
			/*
			 * With the 82543, we need to force the MAC's
			 * speed and duplex to match the PHY's
			 * configuration.
			 */
7495 miistatus = sc->sc_mii.mii_media_status;
7496
7497 if (miistatus & IFM_ACTIVE) {
7498 active = sc->sc_mii.mii_media_active;
7499 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7500 switch (IFM_SUBTYPE(active)) {
7501 case IFM_10_T:
7502 sc->sc_ctrl |= CTRL_SPEED_10;
7503 break;
7504 case IFM_100_TX:
7505 sc->sc_ctrl |= CTRL_SPEED_100;
7506 break;
7507 case IFM_1000_T:
7508 sc->sc_ctrl |= CTRL_SPEED_1000;
7509 break;
7510 default:
7511 /*
					 * Fiber?
					 * Should not reach here.
7514 */
7515 printf("unknown media (%x)\n", active);
7516 break;
7517 }
7518 if (active & IFM_FDX)
7519 sc->sc_ctrl |= CTRL_FD;
7520 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7521 }
7522 } else if ((sc->sc_type == WM_T_ICH8)
7523 && (sc->sc_phytype == WMPHY_IGP_3)) {
7524 wm_kmrn_lock_loss_workaround_ich8lan(sc);
7525 } else if (sc->sc_type == WM_T_PCH) {
7526 wm_k1_gig_workaround_hv(sc,
7527 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
7528 }
7529
7530 if ((sc->sc_phytype == WMPHY_82578)
7531 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
7532 == IFM_1000_T)) {
7533
7534 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
7535 delay(200*1000); /* XXX too big */
7536
7537 /* Link stall fix for link up */
7538 wm_gmii_hv_writereg(sc->sc_dev, 1,
7539 HV_MUX_DATA_CTRL,
7540 HV_MUX_DATA_CTRL_GEN_TO_MAC
7541 | HV_MUX_DATA_CTRL_FORCE_SPEED);
7542 wm_gmii_hv_writereg(sc->sc_dev, 1,
7543 HV_MUX_DATA_CTRL,
7544 HV_MUX_DATA_CTRL_GEN_TO_MAC);
7545 }
7546 }
7547 } else if (icr & ICR_RXSEQ) {
7548 DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
7549 device_xname(sc->sc_dev)));
7550 }
7551 }
7552
7553 /*
7554 * wm_linkintr_tbi:
7555 *
7556 * Helper; handle link interrupts for TBI mode.
7557 */
7558 static void
7559 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
7560 {
7561 uint32_t status;
7562
7563 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7564 __func__));
7565
7566 status = CSR_READ(sc, WMREG_STATUS);
7567 if (icr & ICR_LSC) {
7568 if (status & STATUS_LU) {
7569 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
7570 device_xname(sc->sc_dev),
7571 (status & STATUS_FD) ? "FDX" : "HDX"));
			/*
			 * NOTE: the hardware updates TFCE and RFCE in
			 * CTRL automatically, so refresh our cached
			 * sc->sc_ctrl from the register.
			 */
7576
7577 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7578 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7579 sc->sc_fcrtl &= ~FCRTL_XONE;
7580 if (status & STATUS_FD)
7581 sc->sc_tctl |=
7582 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7583 else
7584 sc->sc_tctl |=
7585 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7586 if (sc->sc_ctrl & CTRL_TFCE)
7587 sc->sc_fcrtl |= FCRTL_XONE;
7588 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7589 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7590 WMREG_OLD_FCRTL : WMREG_FCRTL,
7591 sc->sc_fcrtl);
7592 sc->sc_tbi_linkup = 1;
7593 } else {
7594 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
7595 device_xname(sc->sc_dev)));
7596 sc->sc_tbi_linkup = 0;
7597 }
7598 /* Update LED */
7599 wm_tbi_serdes_set_linkled(sc);
7600 } else if (icr & ICR_RXSEQ) {
7601 DPRINTF(WM_DEBUG_LINK,
7602 ("%s: LINK: Receive sequence error\n",
7603 device_xname(sc->sc_dev)));
7604 }
7605 }
7606
/*
 * wm_linkintr_serdes:
 *
 *	Helper; handle link interrupts for SERDES mode.
 */
7612 static void
7613 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
7614 {
7615 struct mii_data *mii = &sc->sc_mii;
7616 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7617 uint32_t pcs_adv, pcs_lpab, reg;
7618
7619 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7620 __func__));
7621
7622 if (icr & ICR_LSC) {
7623 /* Check PCS */
7624 reg = CSR_READ(sc, WMREG_PCS_LSTS);
7625 if ((reg & PCS_LSTS_LINKOK) != 0) {
7626 mii->mii_media_status |= IFM_ACTIVE;
7627 sc->sc_tbi_linkup = 1;
7628 } else {
			mii->mii_media_active |= IFM_NONE;
7630 sc->sc_tbi_linkup = 0;
7631 wm_tbi_serdes_set_linkled(sc);
7632 return;
7633 }
7634 mii->mii_media_active |= IFM_1000_SX;
7635 if ((reg & PCS_LSTS_FDX) != 0)
7636 mii->mii_media_active |= IFM_FDX;
7637 else
7638 mii->mii_media_active |= IFM_HDX;
7639 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7640 /* Check flow */
7641 reg = CSR_READ(sc, WMREG_PCS_LSTS);
7642 if ((reg & PCS_LSTS_AN_COMP) == 0) {
7643 DPRINTF(WM_DEBUG_LINK,
7644 ("XXX LINKOK but not ACOMP\n"));
7645 return;
7646 }
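			/*
			 * Resolve pause ability as in IEEE 802.3 Annex 28B:
			 * symmetric pause advertised on both sides enables
			 * pause in both directions; otherwise a matching
			 * asymmetric advertisement enables one direction:
			 *
			 *	local		partner		result
			 *	SYM		SYM		TX+RX pause
			 *	ASYM only	SYM+ASYM	TX pause
			 *	SYM+ASYM	ASYM only	RX pause
			 */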
7647 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
7648 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
7649 DPRINTF(WM_DEBUG_LINK,
7650 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
7651 if ((pcs_adv & TXCW_SYM_PAUSE)
7652 && (pcs_lpab & TXCW_SYM_PAUSE)) {
7653 mii->mii_media_active |= IFM_FLOW
7654 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
7655 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
7656 && (pcs_adv & TXCW_ASYM_PAUSE)
7657 && (pcs_lpab & TXCW_SYM_PAUSE)
7658 && (pcs_lpab & TXCW_ASYM_PAUSE))
7659 mii->mii_media_active |= IFM_FLOW
7660 | IFM_ETH_TXPAUSE;
7661 else if ((pcs_adv & TXCW_SYM_PAUSE)
7662 && (pcs_adv & TXCW_ASYM_PAUSE)
7663 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
7664 && (pcs_lpab & TXCW_ASYM_PAUSE))
7665 mii->mii_media_active |= IFM_FLOW
7666 | IFM_ETH_RXPAUSE;
7667 }
7668 /* Update LED */
7669 wm_tbi_serdes_set_linkled(sc);
7670 } else {
7671 DPRINTF(WM_DEBUG_LINK,
7672 ("%s: LINK: Receive sequence error\n",
7673 device_xname(sc->sc_dev)));
7674 }
7675 }
7676
7677 /*
7678 * wm_linkintr:
7679 *
7680 * Helper; handle link interrupts.
7681 */
7682 static void
7683 wm_linkintr(struct wm_softc *sc, uint32_t icr)
7684 {
7685
7686 KASSERT(WM_CORE_LOCKED(sc));
7687
7688 if (sc->sc_flags & WM_F_HAS_MII)
7689 wm_linkintr_gmii(sc, icr);
7690 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7691 && (sc->sc_type >= WM_T_82575))
7692 wm_linkintr_serdes(sc, icr);
7693 else
7694 wm_linkintr_tbi(sc, icr);
7695 }
7696
7697 /*
7698 * wm_intr_legacy:
7699 *
7700 * Interrupt service routine for INTx and MSI.
7701 */
7702 static int
7703 wm_intr_legacy(void *arg)
7704 {
7705 struct wm_softc *sc = arg;
7706 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7707 struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
7708 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7709 uint32_t icr, rndval = 0;
7710 int handled = 0;
7711
7712 DPRINTF(WM_DEBUG_TX,
7713 ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
7714 while (1 /* CONSTCOND */) {
7715 icr = CSR_READ(sc, WMREG_ICR);
7716 if ((icr & sc->sc_icr) == 0)
7717 break;
7718 if (rndval == 0)
7719 rndval = icr;
7720
7721 mutex_enter(rxq->rxq_lock);
7722
7723 if (rxq->rxq_stopping) {
7724 mutex_exit(rxq->rxq_lock);
7725 break;
7726 }
7727
7728 handled = 1;
7729
7730 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7731 if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
7732 DPRINTF(WM_DEBUG_RX,
7733 ("%s: RX: got Rx intr 0x%08x\n",
7734 device_xname(sc->sc_dev),
7735 icr & (ICR_RXDMT0 | ICR_RXT0)));
7736 WM_Q_EVCNT_INCR(rxq, rxintr);
7737 }
7738 #endif
7739 wm_rxeof(rxq);
7740
7741 mutex_exit(rxq->rxq_lock);
7742 mutex_enter(txq->txq_lock);
7743
7744 if (txq->txq_stopping) {
7745 mutex_exit(txq->txq_lock);
7746 break;
7747 }
7748
7749 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7750 if (icr & ICR_TXDW) {
7751 DPRINTF(WM_DEBUG_TX,
7752 ("%s: TX: got TXDW interrupt\n",
7753 device_xname(sc->sc_dev)));
7754 WM_Q_EVCNT_INCR(txq, txdw);
7755 }
7756 #endif
7757 wm_txeof(sc, txq);
7758
7759 mutex_exit(txq->txq_lock);
7760 WM_CORE_LOCK(sc);
7761
7762 if (sc->sc_core_stopping) {
7763 WM_CORE_UNLOCK(sc);
7764 break;
7765 }
7766
7767 if (icr & (ICR_LSC | ICR_RXSEQ)) {
7768 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7769 wm_linkintr(sc, icr);
7770 }
7771
7772 WM_CORE_UNLOCK(sc);
7773
7774 if (icr & ICR_RXO) {
7775 #if defined(WM_DEBUG)
7776 log(LOG_WARNING, "%s: Receive overrun\n",
7777 device_xname(sc->sc_dev));
7778 #endif /* defined(WM_DEBUG) */
7779 }
7780 }
7781
7782 rnd_add_uint32(&sc->rnd_source, rndval);
7783
7784 if (handled) {
7785 /* Try to get more packets going. */
7786 ifp->if_start(ifp);
7787 }
7788
7789 return handled;
7790 }
7791
7792 static int
7793 wm_txrxintr_msix(void *arg)
7794 {
7795 struct wm_queue *wmq = arg;
7796 struct wm_txqueue *txq = &wmq->wmq_txq;
7797 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
7798 struct wm_softc *sc = txq->txq_sc;
7799 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7800
7801 KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
7802
7803 DPRINTF(WM_DEBUG_TX,
7804 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
7805
7806 if (sc->sc_type == WM_T_82574)
7807 CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
7808 else if (sc->sc_type == WM_T_82575)
7809 CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
7810 else
7811 CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
7812
7813 mutex_enter(txq->txq_lock);
7814
7815 if (txq->txq_stopping) {
7816 mutex_exit(txq->txq_lock);
7817 return 0;
7818 }
7819
7820 WM_Q_EVCNT_INCR(txq, txdw);
7821 wm_txeof(sc, txq);
7822
7823 /* Try to get more packets going. */
7824 if (pcq_peek(txq->txq_interq) != NULL)
7825 wm_nq_transmit_locked(ifp, txq);
	/*
	 * Some upper-layer code (e.g. ALTQ) still calls
	 * ifp->if_start() directly, so service the legacy send
	 * queue on queue 0 as well.
	 */
7830 if (wmq->wmq_id == 0) {
7831 if (!IFQ_IS_EMPTY(&ifp->if_snd))
7832 wm_nq_start_locked(ifp);
7833 }
7834
7835 mutex_exit(txq->txq_lock);
7836
7837 DPRINTF(WM_DEBUG_RX,
7838 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
7839 mutex_enter(rxq->rxq_lock);
7840
7841 if (rxq->rxq_stopping) {
7842 mutex_exit(rxq->rxq_lock);
7843 return 0;
7844 }
7845
7846 WM_Q_EVCNT_INCR(rxq, rxintr);
7847 wm_rxeof(rxq);
7848 mutex_exit(rxq->rxq_lock);
7849
7850 if (sc->sc_type == WM_T_82574)
7851 CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
7852 else if (sc->sc_type == WM_T_82575)
7853 CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
7854 else
7855 CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
7856
7857 return 1;
7858 }
7859
7860 /*
7861 * wm_linkintr_msix:
7862 *
7863 * Interrupt service routine for link status change for MSI-X.
7864 */
7865 static int
7866 wm_linkintr_msix(void *arg)
7867 {
7868 struct wm_softc *sc = arg;
7869 uint32_t reg;
7870
7871 DPRINTF(WM_DEBUG_LINK,
7872 ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
7873
7874 reg = CSR_READ(sc, WMREG_ICR);
7875 WM_CORE_LOCK(sc);
7876 if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
7877 goto out;
7878
7879 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7880 wm_linkintr(sc, ICR_LSC);
7881
7882 out:
7883 WM_CORE_UNLOCK(sc);
7884
7885 if (sc->sc_type == WM_T_82574)
7886 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
7887 else if (sc->sc_type == WM_T_82575)
7888 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
7889 else
7890 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
7891
7892 return 1;
7893 }
7894
7895 /*
7896 * Media related.
7897 * GMII, SGMII, TBI (and SERDES)
7898 */
7899
7900 /* Common */
7901
7902 /*
7903 * wm_tbi_serdes_set_linkled:
7904 *
7905 * Update the link LED on TBI and SERDES devices.
7906 */
7907 static void
7908 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
7909 {
7910
7911 if (sc->sc_tbi_linkup)
7912 sc->sc_ctrl |= CTRL_SWDPIN(0);
7913 else
7914 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7915
	/* The LED is active-low on 82540 and newer devices. */
7917 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
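	/*
	 * The XOR above inverts the value just computed, so "link up"
	 * drives the pin low on those active-low parts.
	 */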
7918
7919 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7920 }
7921
7922 /* GMII related */
7923
7924 /*
7925 * wm_gmii_reset:
7926 *
7927 * Reset the PHY.
7928 */
7929 static void
7930 wm_gmii_reset(struct wm_softc *sc)
7931 {
7932 uint32_t reg;
7933 int rv;
7934
7935 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
7936 device_xname(sc->sc_dev), __func__));
7937
7938 rv = sc->phy.acquire(sc);
7939 if (rv != 0) {
7940 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7941 __func__);
7942 return;
7943 }
7944
7945 switch (sc->sc_type) {
7946 case WM_T_82542_2_0:
7947 case WM_T_82542_2_1:
7948 /* null */
7949 break;
7950 case WM_T_82543:
		/*
		 * With the 82543, we need to force the MAC's speed and
		 * duplex to match the PHY's configuration. In addition,
		 * we need to perform a hardware reset on the PHY to take
		 * it out of reset.
		 */
7957 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
7958 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7959
7960 /* The PHY reset pin is active-low. */
7961 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7962 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
7963 CTRL_EXT_SWDPIN(4));
7964 reg |= CTRL_EXT_SWDPIO(4);
7965
7966 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7967 CSR_WRITE_FLUSH(sc);
7968 delay(10*1000);
7969
7970 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
7971 CSR_WRITE_FLUSH(sc);
7972 delay(150);
7973 #if 0
7974 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
7975 #endif
7976 delay(20*1000); /* XXX extra delay to get PHY ID? */
7977 break;
7978 case WM_T_82544: /* reset 10000us */
7979 case WM_T_82540:
7980 case WM_T_82545:
7981 case WM_T_82545_3:
7982 case WM_T_82546:
7983 case WM_T_82546_3:
7984 case WM_T_82541:
7985 case WM_T_82541_2:
7986 case WM_T_82547:
7987 case WM_T_82547_2:
7988 case WM_T_82571: /* reset 100us */
7989 case WM_T_82572:
7990 case WM_T_82573:
7991 case WM_T_82574:
7992 case WM_T_82575:
7993 case WM_T_82576:
7994 case WM_T_82580:
7995 case WM_T_I350:
7996 case WM_T_I354:
7997 case WM_T_I210:
7998 case WM_T_I211:
7999 case WM_T_82583:
8000 case WM_T_80003:
8001 /* generic reset */
8002 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8003 CSR_WRITE_FLUSH(sc);
8004 delay(20000);
8005 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8006 CSR_WRITE_FLUSH(sc);
8007 delay(20000);
8008
8009 if ((sc->sc_type == WM_T_82541)
8010 || (sc->sc_type == WM_T_82541_2)
8011 || (sc->sc_type == WM_T_82547)
8012 || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset(). */
8014 /* XXX add code to set LED after phy reset */
8015 }
8016 break;
8017 case WM_T_ICH8:
8018 case WM_T_ICH9:
8019 case WM_T_ICH10:
8020 case WM_T_PCH:
8021 case WM_T_PCH2:
8022 case WM_T_PCH_LPT:
8023 case WM_T_PCH_SPT:
8024 /* generic reset */
8025 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8026 CSR_WRITE_FLUSH(sc);
8027 delay(100);
8028 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8029 CSR_WRITE_FLUSH(sc);
8030 delay(150);
8031 break;
8032 default:
8033 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
8034 __func__);
8035 break;
8036 }
8037
8038 sc->phy.release(sc);
8039
8040 /* get_cfg_done */
8041 wm_get_cfg_done(sc);
8042
8043 /* extra setup */
8044 switch (sc->sc_type) {
8045 case WM_T_82542_2_0:
8046 case WM_T_82542_2_1:
8047 case WM_T_82543:
8048 case WM_T_82544:
8049 case WM_T_82540:
8050 case WM_T_82545:
8051 case WM_T_82545_3:
8052 case WM_T_82546:
8053 case WM_T_82546_3:
8054 case WM_T_82541_2:
8055 case WM_T_82547_2:
8056 case WM_T_82571:
8057 case WM_T_82572:
8058 case WM_T_82573:
8059 case WM_T_82575:
8060 case WM_T_82576:
8061 case WM_T_82580:
8062 case WM_T_I350:
8063 case WM_T_I354:
8064 case WM_T_I210:
8065 case WM_T_I211:
8066 case WM_T_80003:
8067 /* null */
8068 break;
8069 case WM_T_82574:
8070 case WM_T_82583:
8071 wm_lplu_d0_disable(sc);
8072 break;
8073 case WM_T_82541:
8074 case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
8076 break;
8077 case WM_T_ICH8:
8078 case WM_T_ICH9:
8079 case WM_T_ICH10:
8080 case WM_T_PCH:
8081 case WM_T_PCH2:
8082 case WM_T_PCH_LPT:
8083 case WM_T_PCH_SPT:
		/* Allow time for h/w to get to a quiescent state after reset */
8085 delay(10*1000);
8086
8087 if (sc->sc_type == WM_T_PCH)
8088 wm_hv_phy_workaround_ich8lan(sc);
8089
8090 if (sc->sc_type == WM_T_PCH2)
8091 wm_lv_phy_workaround_ich8lan(sc);
8092
8093 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
			/*
			 * Dummy read to clear the PHY wakeup bit after
			 * LCD reset.
			 */
8098 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
8099 }
8100
		/*
		 * XXX Configure the LCD with the extended configuration
		 * region in NVM
		 */
8105
8106 /* Disable D0 LPLU. */
8107 if (sc->sc_type >= WM_T_PCH) /* PCH* */
8108 wm_lplu_d0_disable_pch(sc);
8109 else
8110 wm_lplu_d0_disable(sc); /* ICH* */
8111 break;
8112 default:
8113 panic("%s: unknown type\n", __func__);
8114 break;
8115 }
8116 }
8117
/*
 * wm_get_phy_id_82575:
 *
 *	Return the PHY ID, or -1 on failure.
 */
8123 static int
8124 wm_get_phy_id_82575(struct wm_softc *sc)
8125 {
8126 uint32_t reg;
8127 int phyid = -1;
8128
8129 /* XXX */
8130 if ((sc->sc_flags & WM_F_SGMII) == 0)
8131 return -1;
8132
8133 if (wm_sgmii_uses_mdio(sc)) {
8134 switch (sc->sc_type) {
8135 case WM_T_82575:
8136 case WM_T_82576:
8137 reg = CSR_READ(sc, WMREG_MDIC);
8138 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
8139 break;
8140 case WM_T_82580:
8141 case WM_T_I350:
8142 case WM_T_I354:
8143 case WM_T_I210:
8144 case WM_T_I211:
8145 reg = CSR_READ(sc, WMREG_MDICNFG);
8146 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
8147 break;
8148 default:
8149 return -1;
8150 }
8151 }
8152
8153 return phyid;
8154 }
8155
8156
8157 /*
8158 * wm_gmii_mediainit:
8159 *
8160 * Initialize media for use on 1000BASE-T devices.
8161 */
8162 static void
8163 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
8164 {
8165 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8166 struct mii_data *mii = &sc->sc_mii;
8167 uint32_t reg;
8168
8169 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
8170 device_xname(sc->sc_dev), __func__));
8171
8172 /* We have GMII. */
8173 sc->sc_flags |= WM_F_HAS_MII;
8174
8175 if (sc->sc_type == WM_T_80003)
8176 sc->sc_tipg = TIPG_1000T_80003_DFLT;
8177 else
8178 sc->sc_tipg = TIPG_1000T_DFLT;
8179
8180 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
8181 if ((sc->sc_type == WM_T_82580)
8182 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
8183 || (sc->sc_type == WM_T_I211)) {
8184 reg = CSR_READ(sc, WMREG_PHPM);
8185 reg &= ~PHPM_GO_LINK_D;
8186 CSR_WRITE(sc, WMREG_PHPM, reg);
8187 }
8188
8189 /*
8190 * Let the chip set speed/duplex on its own based on
8191 * signals from the PHY.
8192 * XXXbouyer - I'm not sure this is right for the 80003,
8193 * the em driver only sets CTRL_SLU here - but it seems to work.
8194 */
8195 sc->sc_ctrl |= CTRL_SLU;
8196 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8197
8198 /* Initialize our media structures and probe the GMII. */
8199 mii->mii_ifp = ifp;
8200
8201 /*
8202 * Determine the PHY access method.
8203 *
8204 * For SGMII, use SGMII specific method.
8205 *
8206 * For some devices, we can determine the PHY access method
8207 * from sc_type.
8208 *
8209 * For ICH and PCH variants, it's difficult to determine the PHY
8210 * access method by sc_type, so use the PCI product ID for some
8211 * devices.
 * For other ICH8 variants, try igp's method first. If the PHY
 * can't be detected, fall back to bm's method.
8214 */
8215 switch (prodid) {
8216 case PCI_PRODUCT_INTEL_PCH_M_LM:
8217 case PCI_PRODUCT_INTEL_PCH_M_LC:
8218 /* 82577 */
8219 sc->sc_phytype = WMPHY_82577;
8220 break;
8221 case PCI_PRODUCT_INTEL_PCH_D_DM:
8222 case PCI_PRODUCT_INTEL_PCH_D_DC:
8223 /* 82578 */
8224 sc->sc_phytype = WMPHY_82578;
8225 break;
8226 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
8227 case PCI_PRODUCT_INTEL_PCH2_LV_V:
8228 /* 82579 */
8229 sc->sc_phytype = WMPHY_82579;
8230 break;
8231 case PCI_PRODUCT_INTEL_82801H_82567V_3:
8232 case PCI_PRODUCT_INTEL_82801I_BM:
8233 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
8234 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
8235 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
8236 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
8237 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
8238 /* ICH8, 9, 10 with 82567 */
8239 sc->sc_phytype = WMPHY_BM;
8240 mii->mii_readreg = wm_gmii_bm_readreg;
8241 mii->mii_writereg = wm_gmii_bm_writereg;
8242 break;
8243 default:
8244 if (((sc->sc_flags & WM_F_SGMII) != 0)
8245 && !wm_sgmii_uses_mdio(sc)){
8246 /* SGMII */
8247 mii->mii_readreg = wm_sgmii_readreg;
8248 mii->mii_writereg = wm_sgmii_writereg;
8249 } else if (sc->sc_type >= WM_T_ICH8) {
8250 /* non-82567 ICH8, 9 and 10 */
8251 mii->mii_readreg = wm_gmii_i82544_readreg;
8252 mii->mii_writereg = wm_gmii_i82544_writereg;
8253 } else if (sc->sc_type >= WM_T_80003) {
8254 /* 80003 */
8255 mii->mii_readreg = wm_gmii_i80003_readreg;
8256 mii->mii_writereg = wm_gmii_i80003_writereg;
8257 } else if (sc->sc_type >= WM_T_I210) {
8258 /* I210 and I211 */
8259 mii->mii_readreg = wm_gmii_gs40g_readreg;
8260 mii->mii_writereg = wm_gmii_gs40g_writereg;
8261 } else if (sc->sc_type >= WM_T_82580) {
8262 /* 82580, I350 and I354 */
8263 sc->sc_phytype = WMPHY_82580;
8264 mii->mii_readreg = wm_gmii_82580_readreg;
8265 mii->mii_writereg = wm_gmii_82580_writereg;
8266 } else if (sc->sc_type >= WM_T_82544) {
			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
8268 mii->mii_readreg = wm_gmii_i82544_readreg;
8269 mii->mii_writereg = wm_gmii_i82544_writereg;
8270 } else {
8271 mii->mii_readreg = wm_gmii_i82543_readreg;
8272 mii->mii_writereg = wm_gmii_i82543_writereg;
8273 }
8274 break;
8275 }
8276 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
8277 /* All PCH* use _hv_ */
8278 mii->mii_readreg = wm_gmii_hv_readreg;
8279 mii->mii_writereg = wm_gmii_hv_writereg;
8280 }
8281 mii->mii_statchg = wm_gmii_statchg;
8282
8283 wm_gmii_reset(sc);
8284
8285 sc->sc_ethercom.ec_mii = &sc->sc_mii;
8286 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
8287 wm_gmii_mediastatus);
8288
8289 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
8290 || (sc->sc_type == WM_T_82580)
8291 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
8292 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
8293 if ((sc->sc_flags & WM_F_SGMII) == 0) {
8294 /* Attach only one port */
8295 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
8296 MII_OFFSET_ANY, MIIF_DOPAUSE);
8297 } else {
8298 int i, id;
8299 uint32_t ctrl_ext;
8300
8301 id = wm_get_phy_id_82575(sc);
8302 if (id != -1) {
8303 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
8304 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
8305 }
8306 if ((id == -1)
8307 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
8308 /* Power on sgmii phy if it is disabled */
8309 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8310 CSR_WRITE(sc, WMREG_CTRL_EXT,
8311 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
8312 CSR_WRITE_FLUSH(sc);
8313 delay(300*1000); /* XXX too long */
8314
8315 /* from 1 to 8 */
8316 for (i = 1; i < 8; i++)
8317 mii_attach(sc->sc_dev, &sc->sc_mii,
8318 0xffffffff, i, MII_OFFSET_ANY,
8319 MIIF_DOPAUSE);
8320
8321 /* restore previous sfp cage power state */
8322 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8323 }
8324 }
8325 } else {
8326 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8327 MII_OFFSET_ANY, MIIF_DOPAUSE);
8328 }
8329
8330 /*
8331 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
8332 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
8333 */
8334 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
8335 (LIST_FIRST(&mii->mii_phys) == NULL)) {
8336 wm_set_mdio_slow_mode_hv(sc);
8337 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8338 MII_OFFSET_ANY, MIIF_DOPAUSE);
8339 }
8340
8341 /*
8342 * (For ICH8 variants)
8343 * If PHY detection failed, use BM's r/w function and retry.
8344 */
8345 if (LIST_FIRST(&mii->mii_phys) == NULL) {
8346 /* if failed, retry with *_bm_* */
8347 mii->mii_readreg = wm_gmii_bm_readreg;
8348 mii->mii_writereg = wm_gmii_bm_writereg;
8349
8350 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8351 MII_OFFSET_ANY, MIIF_DOPAUSE);
8352 }
8353
8354 if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
8356 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
8357 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
8358 sc->sc_phytype = WMPHY_NONE;
8359 } else {
8360 /*
8361 * PHY Found!
8362 * Check PHY type.
8363 */
8364 uint32_t model;
8365 struct mii_softc *child;
8366
8367 child = LIST_FIRST(&mii->mii_phys);
8368 model = child->mii_mpd_model;
8369 if (model == MII_MODEL_yyINTEL_I82566)
8370 sc->sc_phytype = WMPHY_IGP_3;
8371
8372 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
8373 }
8374 }
8375
8376 /*
8377 * wm_gmii_mediachange: [ifmedia interface function]
8378 *
8379 * Set hardware to newly-selected media on a 1000BASE-T device.
8380 */
8381 static int
8382 wm_gmii_mediachange(struct ifnet *ifp)
8383 {
8384 struct wm_softc *sc = ifp->if_softc;
8385 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8386 int rc;
8387
8388 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
8389 device_xname(sc->sc_dev), __func__));
8390 if ((ifp->if_flags & IFF_UP) == 0)
8391 return 0;
8392
8393 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
8394 sc->sc_ctrl |= CTRL_SLU;
8395 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8396 || (sc->sc_type > WM_T_82543)) {
8397 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
8398 } else {
8399 sc->sc_ctrl &= ~CTRL_ASDE;
8400 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
8401 if (ife->ifm_media & IFM_FDX)
8402 sc->sc_ctrl |= CTRL_FD;
8403 switch (IFM_SUBTYPE(ife->ifm_media)) {
8404 case IFM_10_T:
8405 sc->sc_ctrl |= CTRL_SPEED_10;
8406 break;
8407 case IFM_100_TX:
8408 sc->sc_ctrl |= CTRL_SPEED_100;
8409 break;
8410 case IFM_1000_T:
8411 sc->sc_ctrl |= CTRL_SPEED_1000;
8412 break;
8413 default:
8414 panic("wm_gmii_mediachange: bad media 0x%x",
8415 ife->ifm_media);
8416 }
8417 }
8418 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8419 if (sc->sc_type <= WM_T_82543)
8420 wm_gmii_reset(sc);
8421
8422 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
8423 return 0;
8424 return rc;
8425 }
8426
8427 /*
8428 * wm_gmii_mediastatus: [ifmedia interface function]
8429 *
8430 * Get the current interface media status on a 1000BASE-T device.
8431 */
8432 static void
8433 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8434 {
8435 struct wm_softc *sc = ifp->if_softc;
8436
8437 ether_mediastatus(ifp, ifmr);
8438 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8439 | sc->sc_flowflags;
8440 }
8441
8442 #define MDI_IO CTRL_SWDPIN(2)
8443 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
8444 #define MDI_CLK CTRL_SWDPIN(3)
8445
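/*
 * wm_i82543_mii_sendbits:
 *
 *	Bit-bang `nbits' bits of `data' out on the software-defined MDI
 *	pins, MSB first: each bit is driven onto MDI_IO, then MDI_CLK is
 *	pulsed high and low again, holding every phase for 10us. The PHY
 *	samples MDI_IO on the rising clock edge.
 */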
8446 static void
8447 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
8448 {
8449 uint32_t i, v;
8450
8451 v = CSR_READ(sc, WMREG_CTRL);
8452 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8453 v |= MDI_DIR | CTRL_SWDPIO(3);
8454
8455 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
8456 if (data & i)
8457 v |= MDI_IO;
8458 else
8459 v &= ~MDI_IO;
8460 CSR_WRITE(sc, WMREG_CTRL, v);
8461 CSR_WRITE_FLUSH(sc);
8462 delay(10);
8463 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8464 CSR_WRITE_FLUSH(sc);
8465 delay(10);
8466 CSR_WRITE(sc, WMREG_CTRL, v);
8467 CSR_WRITE_FLUSH(sc);
8468 delay(10);
8469 }
8470 }
8471
8472 static uint32_t
8473 wm_i82543_mii_recvbits(struct wm_softc *sc)
8474 {
8475 uint32_t v, i, data = 0;
8476
8477 v = CSR_READ(sc, WMREG_CTRL);
8478 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8479 v |= CTRL_SWDPIO(3);
8480
8481 CSR_WRITE(sc, WMREG_CTRL, v);
8482 CSR_WRITE_FLUSH(sc);
8483 delay(10);
8484 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8485 CSR_WRITE_FLUSH(sc);
8486 delay(10);
8487 CSR_WRITE(sc, WMREG_CTRL, v);
8488 CSR_WRITE_FLUSH(sc);
8489 delay(10);
8490
8491 for (i = 0; i < 16; i++) {
8492 data <<= 1;
8493 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8494 CSR_WRITE_FLUSH(sc);
8495 delay(10);
8496 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
8497 data |= 1;
8498 CSR_WRITE(sc, WMREG_CTRL, v);
8499 CSR_WRITE_FLUSH(sc);
8500 delay(10);
8501 }
8502
8503 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8504 CSR_WRITE_FLUSH(sc);
8505 delay(10);
8506 CSR_WRITE(sc, WMREG_CTRL, v);
8507 CSR_WRITE_FLUSH(sc);
8508 delay(10);
8509
8510 return data;
8511 }
8512
8513 #undef MDI_IO
8514 #undef MDI_DIR
8515 #undef MDI_CLK
8516
8517 /*
8518 * wm_gmii_i82543_readreg: [mii interface function]
8519 *
8520 * Read a PHY register on the GMII (i82543 version).
8521 */
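/*
 * This is a standard IEEE 802.3 clause 22 management frame: a 32-bit
 * preamble of all ones, then 14 bits carrying the start delimiter, the
 * read opcode and the 5-bit PHY and register addresses, after which 16
 * data bits are clocked back in by wm_i82543_mii_recvbits().
 */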
8522 static int
8523 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
8524 {
8525 struct wm_softc *sc = device_private(self);
8526 int rv;
8527
8528 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8529 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
8530 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
8531 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
8532
8533 DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
8534 device_xname(sc->sc_dev), phy, reg, rv));
8535
8536 return rv;
8537 }
8538
8539 /*
8540 * wm_gmii_i82543_writereg: [mii interface function]
8541 *
8542 * Write a PHY register on the GMII (i82543 version).
8543 */
8544 static void
8545 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
8546 {
8547 struct wm_softc *sc = device_private(self);
8548
8549 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8550 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
8551 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
8552 (MII_COMMAND_START << 30), 32);
8553 }
8554
8555 /*
8556 * wm_gmii_mdic_readreg: [mii interface function]
8557 *
8558 * Read a PHY register on the GMII.
8559 */
8560 static int
8561 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
8562 {
8563 struct wm_softc *sc = device_private(self);
8564 uint32_t mdic = 0;
8565 int i, rv;
8566
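	/*
	 * A register read is started by writing the opcode, PHY address
	 * and register number to MDIC; the MAC sets MDIC_READY when the
	 * transaction completes and MDIC_E on error.
	 */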
8567 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
8568 MDIC_REGADD(reg));
8569
8570 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8571 mdic = CSR_READ(sc, WMREG_MDIC);
8572 if (mdic & MDIC_READY)
8573 break;
8574 delay(50);
8575 }
8576
8577 if ((mdic & MDIC_READY) == 0) {
8578 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
8579 device_xname(sc->sc_dev), phy, reg);
8580 rv = 0;
8581 } else if (mdic & MDIC_E) {
8582 #if 0 /* This is normal if no PHY is present. */
8583 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
8584 device_xname(sc->sc_dev), phy, reg);
8585 #endif
8586 rv = 0;
8587 } else {
8588 rv = MDIC_DATA(mdic);
8589 if (rv == 0xffff)
8590 rv = 0;
8591 }
8592
8593 return rv;
8594 }
8595
8596 /*
8597 * wm_gmii_mdic_writereg: [mii interface function]
8598 *
8599 * Write a PHY register on the GMII.
8600 */
8601 static void
8602 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
8603 {
8604 struct wm_softc *sc = device_private(self);
8605 uint32_t mdic = 0;
8606 int i;
8607
8608 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
8609 MDIC_REGADD(reg) | MDIC_DATA(val));
8610
8611 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8612 mdic = CSR_READ(sc, WMREG_MDIC);
8613 if (mdic & MDIC_READY)
8614 break;
8615 delay(50);
8616 }
8617
8618 if ((mdic & MDIC_READY) == 0)
8619 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
8620 device_xname(sc->sc_dev), phy, reg);
8621 else if (mdic & MDIC_E)
8622 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
8623 device_xname(sc->sc_dev), phy, reg);
8624 }
8625
8626 /*
8627 * wm_gmii_i82544_readreg: [mii interface function]
8628 *
8629 * Read a PHY register on the GMII.
8630 */
8631 static int
8632 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
8633 {
8634 struct wm_softc *sc = device_private(self);
8635 int rv;
8636
8637 if (sc->phy.acquire(sc)) {
8638 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8639 __func__);
8640 return 0;
8641 }
8642 rv = wm_gmii_mdic_readreg(self, phy, reg);
8643 sc->phy.release(sc);
8644
8645 return rv;
8646 }
8647
8648 /*
8649 * wm_gmii_i82544_writereg: [mii interface function]
8650 *
8651 * Write a PHY register on the GMII.
8652 */
8653 static void
8654 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
8655 {
8656 struct wm_softc *sc = device_private(self);
8657
	if (sc->phy.acquire(sc)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}
8662 wm_gmii_mdic_writereg(self, phy, reg, val);
8663 sc->phy.release(sc);
8664 }
8665
8666 /*
8667 * wm_gmii_i80003_readreg: [mii interface function]
8668 *
 * Read a PHY register on the Kumeran bus (80003).
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
8672 */
8673 static int
8674 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
8675 {
8676 struct wm_softc *sc = device_private(self);
8677 int rv;
8678
8679 if (phy != 1) /* only one PHY on kumeran bus */
8680 return 0;
8681
8682 if (sc->phy.acquire(sc)) {
8683 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8684 __func__);
8685 return 0;
8686 }
8687
8688 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8689 wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8690 reg >> GG82563_PAGE_SHIFT);
8691 } else {
8692 wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8693 reg >> GG82563_PAGE_SHIFT);
8694 }
	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
8696 delay(200);
8697 rv = wm_gmii_mdic_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8698 delay(200);
8699 sc->phy.release(sc);
8700
8701 return rv;
8702 }
8703
8704 /*
8705 * wm_gmii_i80003_writereg: [mii interface function]
8706 *
 * Write a PHY register on the Kumeran bus (80003).
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
8710 */
8711 static void
8712 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
8713 {
8714 struct wm_softc *sc = device_private(self);
8715
8716 if (phy != 1) /* only one PHY on kumeran bus */
8717 return;
8718
8719 if (sc->phy.acquire(sc)) {
8720 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8721 __func__);
8722 return;
8723 }
8724
8725 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8726 wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8727 reg >> GG82563_PAGE_SHIFT);
8728 } else {
8729 wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8730 reg >> GG82563_PAGE_SHIFT);
8731 }
	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
8733 delay(200);
8734 wm_gmii_mdic_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8735 delay(200);
8736
8737 sc->phy.release(sc);
8738 }
8739
8740 /*
8741 * wm_gmii_bm_readreg: [mii interface function]
8742 *
 * Read a PHY register on the BM PHY (ICH8/9/10 with 82567).
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
8746 */
8747 static int
8748 wm_gmii_bm_readreg(device_t self, int phy, int reg)
8749 {
8750 struct wm_softc *sc = device_private(self);
8751 int rv;
8752
8753 if (sc->phy.acquire(sc)) {
8754 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8755 __func__);
8756 return 0;
8757 }
8758
8759 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8760 if (phy == 1)
8761 wm_gmii_mdic_writereg(self, phy,
8762 MII_IGPHY_PAGE_SELECT, reg);
8763 else
8764 wm_gmii_mdic_writereg(self, phy,
8765 GG82563_PHY_PAGE_SELECT,
8766 reg >> GG82563_PAGE_SHIFT);
8767 }
8768
8769 rv = wm_gmii_mdic_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8770 sc->phy.release(sc);
8771 return rv;
8772 }
8773
8774 /*
8775 * wm_gmii_bm_writereg: [mii interface function]
8776 *
 * Write a PHY register on the BM PHY (ICH8/9/10 with 82567).
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
8780 */
8781 static void
8782 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
8783 {
8784 struct wm_softc *sc = device_private(self);
8785
8786 if (sc->phy.acquire(sc)) {
8787 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8788 __func__);
8789 return;
8790 }
8791
8792 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8793 if (phy == 1)
8794 wm_gmii_mdic_writereg(self, phy,
8795 MII_IGPHY_PAGE_SELECT, reg);
8796 else
8797 wm_gmii_mdic_writereg(self, phy,
8798 GG82563_PHY_PAGE_SELECT,
8799 reg >> GG82563_PAGE_SHIFT);
8800 }
8801
8802 wm_gmii_mdic_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8803 sc->phy.release(sc);
8804 }
8805
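/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write a BM PHY wakeup register (page 800).
 *	Page 800 can't be reached with a plain page select; instead,
 *	host access is enabled through BM_WUC_ENABLE_REG on page 769,
 *	the register number is written via the address opcode on page
 *	800, the data moves through the data opcode, and the original
 *	page-769 enable bits are restored afterwards.
 */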
8806 static void
8807 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
8808 {
8809 struct wm_softc *sc = device_private(self);
8810 uint16_t regnum = BM_PHY_REG_NUM(offset);
8811 uint16_t wuce;
8812
8813 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
8814 device_xname(sc->sc_dev), __func__));
8815 /* XXX Gig must be disabled for MDIO accesses to page 800 */
8816 if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
8818 }
8819
8820 /* Set page 769 */
8821 wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8822 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8823
8824 wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
8825
8826 wuce &= ~BM_WUC_HOST_WU_BIT;
8827 wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG,
8828 wuce | BM_WUC_ENABLE_BIT);
8829
8830 /* Select page 800 */
8831 wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8832 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
8833
8834 /* Write page 800 */
8835 wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
8836
8837 if (rd)
8838 *val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
8839 else
8840 wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
8841
8842 /* Set page 769 */
8843 wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8844 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8845
8846 wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
8847 }
8848
8849 /*
8850 * wm_gmii_hv_readreg: [mii interface function]
8851 *
 * Read a PHY register on the HV PHY (PCH and newer).
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
8855 */
8856 static int
8857 wm_gmii_hv_readreg(device_t self, int phy, int reg)
8858 {
8859 struct wm_softc *sc = device_private(self);
8860 int rv;
8861
8862 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
8863 device_xname(sc->sc_dev), __func__));
8864 if (sc->phy.acquire(sc)) {
8865 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8866 __func__);
8867 return 0;
8868 }
8869
8870 rv = wm_gmii_hv_readreg_locked(self, phy, reg);
8871 sc->phy.release(sc);
8872 return rv;
8873 }
8874
8875 static int
8876 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
8877 {
8878 struct wm_softc *sc = device_private(self);
8879 uint16_t page = BM_PHY_REG_PAGE(reg);
8880 uint16_t regnum = BM_PHY_REG_NUM(reg);
8881 uint16_t val;
8882 int rv;
8883
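	/*
	 * The register argument encodes both the page (upper bits) and
	 * the register number (lower bits); BM_PHY_REG_PAGE() and
	 * BM_PHY_REG_NUM() extract the two fields.
	 */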
8884 /* XXX Workaround failure in MDIO access while cable is disconnected */
8885 if (sc->sc_phytype == WMPHY_82577) {
8886 /* XXX must write */
8887 }
8888
	/* Page 800 works differently from the rest, so it has its own function */
8890 if (page == BM_WUC_PAGE) {
8891 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
8892 return val;
8893 }
8894
	/*
	 * Pages lower than 768 work differently from the rest and
	 * would need their own handling, which is not implemented yet.
	 */
8899 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8900 printf("gmii_hv_readreg!!!\n");
8901 return 0;
8902 }
8903
8904 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8905 wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8906 page << BME1000_PAGE_SHIFT);
8907 }
8908
8909 rv = wm_gmii_mdic_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
8910 return rv;
8911 }
8912
8913 /*
8914 * wm_gmii_hv_writereg: [mii interface function]
8915 *
 * Write a PHY register on the HV PHY (PCH and newer).
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
8919 */
8920 static void
8921 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
8922 {
8923 struct wm_softc *sc = device_private(self);
8924
8925 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
8926 device_xname(sc->sc_dev), __func__));
8927
8928 if (sc->phy.acquire(sc)) {
8929 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8930 __func__);
8931 return;
8932 }
8933
8934 wm_gmii_hv_writereg_locked(self, phy, reg, val);
8935 sc->phy.release(sc);
8936 }
8937
8938 static void
8939 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
8940 {
8941 uint16_t page = BM_PHY_REG_PAGE(reg);
8942 uint16_t regnum = BM_PHY_REG_NUM(reg);
8943
8944 /* XXX Workaround failure in MDIO access while cable is disconnected */
8945
	/* Page 800 works differently from the rest, so it has its own function */
8947 if (page == BM_WUC_PAGE) {
8948 uint16_t tmp;
8949
8950 tmp = val;
8951 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
8952 return;
8953 }
8954
	/*
	 * Pages lower than 768 work differently from the rest and
	 * would need their own handling, which is not implemented yet.
	 */
8959 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8960 printf("gmii_hv_writereg!!!\n");
8961 return;
8962 }
8963
8964 /*
8965 * XXX Workaround MDIO accesses being disabled after entering IEEE
8966 * Power Down (whenever bit 11 of the PHY control register is set)
8967 */
8968
8969 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8970 wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8971 page << BME1000_PAGE_SHIFT);
8972 }
8973
8974 wm_gmii_mdic_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
8975 }
8976
8977 /*
8978 * wm_gmii_82580_readreg: [mii interface function]
8979 *
8980 * Read a PHY register on the 82580 and I350.
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
8983 */
8984 static int
8985 wm_gmii_82580_readreg(device_t self, int phy, int reg)
8986 {
8987 struct wm_softc *sc = device_private(self);
8988 int rv;
8989
8990 if (sc->phy.acquire(sc) != 0) {
8991 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8992 __func__);
8993 return 0;
8994 }
8995
8996 rv = wm_gmii_mdic_readreg(self, phy, reg);
8997
8998 sc->phy.release(sc);
8999 return rv;
9000 }
9001
9002 /*
9003 * wm_gmii_82580_writereg: [mii interface function]
9004 *
9005 * Write a PHY register on the 82580 and I350.
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
9008 */
9009 static void
9010 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
9011 {
9012 struct wm_softc *sc = device_private(self);
9013
9014 if (sc->phy.acquire(sc) != 0) {
9015 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9016 __func__);
9017 return;
9018 }
9019
9020 wm_gmii_mdic_writereg(self, phy, reg, val);
9021
9022 sc->phy.release(sc);
9023 }
9024
9025 /*
9026 * wm_gmii_gs40g_readreg: [mii interface function]
9027 *
 * Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
9031 */
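/*
 * The register argument packs the page above GS40G_PAGE_SHIFT and the
 * register offset under GS40G_OFFSET_MASK; for example, register 16 on
 * page 2 is encoded as (2 << GS40G_PAGE_SHIFT) | 16.
 */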
9032 static int
9033 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
9034 {
9035 struct wm_softc *sc = device_private(self);
9036 int page, offset;
9037 int rv;
9038
9039 /* Acquire semaphore */
9040 if (sc->phy.acquire(sc)) {
9041 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9042 __func__);
9043 return 0;
9044 }
9045
9046 /* Page select */
9047 page = reg >> GS40G_PAGE_SHIFT;
9048 wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
9049
9050 /* Read reg */
9051 offset = reg & GS40G_OFFSET_MASK;
9052 rv = wm_gmii_mdic_readreg(self, phy, offset);
9053
9054 sc->phy.release(sc);
9055 return rv;
9056 }
9057
9058 /*
9059 * wm_gmii_gs40g_writereg: [mii interface function]
9060 *
9061 * Write a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
9064 */
9065 static void
9066 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
9067 {
9068 struct wm_softc *sc = device_private(self);
9069 int page, offset;
9070
9071 /* Acquire semaphore */
9072 if (sc->phy.acquire(sc)) {
9073 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9074 __func__);
9075 return;
9076 }
9077
9078 /* Page select */
9079 page = reg >> GS40G_PAGE_SHIFT;
9080 wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
9081
9082 /* Write reg */
9083 offset = reg & GS40G_OFFSET_MASK;
9084 wm_gmii_mdic_writereg(self, phy, offset, val);
9085
9086 /* Release semaphore */
9087 sc->phy.release(sc);
9088 }
9089
9090 /*
9091 * wm_gmii_statchg: [mii interface function]
9092 *
9093 * Callback from MII layer when media changes.
9094 */
9095 static void
9096 wm_gmii_statchg(struct ifnet *ifp)
9097 {
9098 struct wm_softc *sc = ifp->if_softc;
9099 struct mii_data *mii = &sc->sc_mii;
9100
9101 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
9102 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9103 sc->sc_fcrtl &= ~FCRTL_XONE;
9104
9105 /*
9106 * Get flow control negotiation result.
9107 */
9108 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
9109 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
9110 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
9111 mii->mii_media_active &= ~IFM_ETH_FMASK;
9112 }
9113
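	/*
	 * CTRL_TFCE enables transmission of PAUSE frames and CTRL_RFCE
	 * makes the MAC honor received PAUSE frames; FCRTL_XONE
	 * additionally enables transmission of XON frames.
	 */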
9114 if (sc->sc_flowflags & IFM_FLOW) {
9115 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
9116 sc->sc_ctrl |= CTRL_TFCE;
9117 sc->sc_fcrtl |= FCRTL_XONE;
9118 }
9119 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
9120 sc->sc_ctrl |= CTRL_RFCE;
9121 }
9122
9123 if (sc->sc_mii.mii_media_active & IFM_FDX) {
9124 DPRINTF(WM_DEBUG_LINK,
9125 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
9126 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9127 } else {
9128 DPRINTF(WM_DEBUG_LINK,
9129 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
9130 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9131 }
9132
9133 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9134 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9135 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
9136 : WMREG_FCRTL, sc->sc_fcrtl);
9137 if (sc->sc_type == WM_T_80003) {
9138 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
9139 case IFM_1000_T:
9140 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
9141 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
9142 sc->sc_tipg = TIPG_1000T_80003_DFLT;
9143 break;
9144 default:
9145 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
9146 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
9147 sc->sc_tipg = TIPG_10_100_80003_DFLT;
9148 break;
9149 }
9150 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
9151 }
9152 }
9153
9154 /*
9155 * wm_kmrn_readreg:
9156 *
9157 * Read a kumeran register
9158 */
9159 static int
9160 wm_kmrn_readreg(struct wm_softc *sc, int reg)
9161 {
9162 int rv;
9163
9164 if (sc->sc_type == WM_T_80003)
9165 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9166 else
9167 rv = sc->phy.acquire(sc);
9168 if (rv != 0) {
9169 aprint_error_dev(sc->sc_dev,
9170 "%s: failed to get semaphore\n", __func__);
9171 return 0;
9172 }
9173
9174 rv = wm_kmrn_readreg_locked(sc, reg);
9175
9176 if (sc->sc_type == WM_T_80003)
9177 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9178 else
9179 sc->phy.release(sc);
9180
9181 return rv;
9182 }
9183
9184 static int
9185 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
9186 {
9187 int rv;
9188
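	/*
	 * A Kumeran read is started by writing the register offset with
	 * the read-enable bit (KUMCTRLSTA_REN) set; after a short delay
	 * the data can be read from the low 16 bits of KUMCTRLSTA.
	 */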
9189 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
9190 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
9191 KUMCTRLSTA_REN);
9192 CSR_WRITE_FLUSH(sc);
9193 delay(2);
9194
9195 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
9196
9197 return rv;
9198 }
9199
9200 /*
9201 * wm_kmrn_writereg:
9202 *
9203 * Write a kumeran register
9204 */
9205 static void
9206 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
9207 {
9208 int rv;
9209
9210 if (sc->sc_type == WM_T_80003)
9211 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9212 else
9213 rv = sc->phy.acquire(sc);
9214 if (rv != 0) {
9215 aprint_error_dev(sc->sc_dev,
9216 "%s: failed to get semaphore\n", __func__);
9217 return;
9218 }
9219
9220 wm_kmrn_writereg_locked(sc, reg, val);
9221
9222 if (sc->sc_type == WM_T_80003)
9223 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9224 else
9225 sc->phy.release(sc);
9226 }
9227
9228 static void
9229 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
9230 {
9231
9232 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
9233 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
9234 (val & KUMCTRLSTA_MASK));
9235 }
9236
9237 /* SGMII related */
9238
9239 /*
9240 * wm_sgmii_uses_mdio
9241 *
9242 * Check whether the transaction is to the internal PHY or the external
9243 * MDIO interface. Return true if it's MDIO.
9244 */
9245 static bool
9246 wm_sgmii_uses_mdio(struct wm_softc *sc)
9247 {
9248 uint32_t reg;
9249 bool ismdio = false;
9250
9251 switch (sc->sc_type) {
9252 case WM_T_82575:
9253 case WM_T_82576:
9254 reg = CSR_READ(sc, WMREG_MDIC);
9255 ismdio = ((reg & MDIC_DEST) != 0);
9256 break;
9257 case WM_T_82580:
9258 case WM_T_I350:
9259 case WM_T_I354:
9260 case WM_T_I210:
9261 case WM_T_I211:
9262 reg = CSR_READ(sc, WMREG_MDICNFG);
9263 ismdio = ((reg & MDICNFG_DEST) != 0);
9264 break;
9265 default:
9266 break;
9267 }
9268
9269 return ismdio;
9270 }
9271
9272 /*
9273 * wm_sgmii_readreg: [mii interface function]
9274 *
9275 * Read a PHY register on the SGMII
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
9278 */
9279 static int
9280 wm_sgmii_readreg(device_t self, int phy, int reg)
9281 {
9282 struct wm_softc *sc = device_private(self);
9283 uint32_t i2ccmd;
9284 int i, rv;
9285
9286 if (sc->phy.acquire(sc)) {
9287 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9288 __func__);
9289 return 0;
9290 }
9291
9292 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9293 | (phy << I2CCMD_PHY_ADDR_SHIFT)
9294 | I2CCMD_OPCODE_READ;
9295 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9296
9297 /* Poll the ready bit */
9298 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9299 delay(50);
9300 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9301 if (i2ccmd & I2CCMD_READY)
9302 break;
9303 }
9304 if ((i2ccmd & I2CCMD_READY) == 0)
9305 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
9306 if ((i2ccmd & I2CCMD_ERROR) != 0)
9307 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9308
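	/* The data arrives byte-swapped from the I2C interface; swap it back. */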
9309 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
9310
9311 sc->phy.release(sc);
9312 return rv;
9313 }
9314
9315 /*
9316 * wm_sgmii_writereg: [mii interface function]
9317 *
9318 * Write a PHY register on the SGMII.
 * This could be handled by the PHY layer if we didn't have to lock
 * the resource ...
9321 */
9322 static void
9323 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
9324 {
9325 struct wm_softc *sc = device_private(self);
9326 uint32_t i2ccmd;
9327 int i;
9328 int val_swapped;
9329
9330 if (sc->phy.acquire(sc) != 0) {
9331 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9332 __func__);
9333 return;
9334 }
9335 /* Swap the data bytes for the I2C interface */
9336 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
9337 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9338 | (phy << I2CCMD_PHY_ADDR_SHIFT)
9339 | I2CCMD_OPCODE_WRITE | val_swapped;
9340 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9341
9342 /* Poll the ready bit */
9343 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9344 delay(50);
9345 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9346 if (i2ccmd & I2CCMD_READY)
9347 break;
9348 }
9349 if ((i2ccmd & I2CCMD_READY) == 0)
9350 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
9351 if ((i2ccmd & I2CCMD_ERROR) != 0)
9352 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9353
9354 sc->phy.release(sc);
9355 }
9356
9357 /* TBI related */
9358
9359 /*
9360 * wm_tbi_mediainit:
9361 *
9362 * Initialize media for use on 1000BASE-X devices.
9363 */
9364 static void
9365 wm_tbi_mediainit(struct wm_softc *sc)
9366 {
9367 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9368 const char *sep = "";
9369
9370 if (sc->sc_type < WM_T_82543)
9371 sc->sc_tipg = TIPG_WM_DFLT;
9372 else
9373 sc->sc_tipg = TIPG_LG_DFLT;
9374
9375 sc->sc_tbi_serdes_anegticks = 5;
9376
9377 /* Initialize our media structures */
9378 sc->sc_mii.mii_ifp = ifp;
9379 sc->sc_ethercom.ec_mii = &sc->sc_mii;
9380
9381 if ((sc->sc_type >= WM_T_82575)
9382 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
9383 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9384 wm_serdes_mediachange, wm_serdes_mediastatus);
9385 else
9386 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9387 wm_tbi_mediachange, wm_tbi_mediastatus);
9388
9389 /*
9390 * SWD Pins:
9391 *
9392 * 0 = Link LED (output)
9393 * 1 = Loss Of Signal (input)
9394 */
9395 sc->sc_ctrl |= CTRL_SWDPIO(0);
9396
9397 /* XXX Perhaps this is only for TBI */
9398 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9399 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
9400
9401 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9402 sc->sc_ctrl &= ~CTRL_LRST;
9403
9404 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9405
9406 #define ADD(ss, mm, dd) \
9407 do { \
9408 aprint_normal("%s%s", sep, ss); \
9409 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
9410 sep = ", "; \
9411 } while (/*CONSTCOND*/0)
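/*
 * ADD() both prints the media name (comma-separated on a single
 * autoconf line) and registers the entry with ifmedia.
 */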
9412
9413 aprint_normal_dev(sc->sc_dev, "");
9414
9415 /* Only 82545 is LX */
9416 if (sc->sc_type == WM_T_82545) {
9417 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
9418 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
9419 } else {
9420 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
9421 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
9422 }
9423 ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
9424 aprint_normal("\n");
9425
9426 #undef ADD
9427
9428 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
9429 }
9430
9431 /*
9432 * wm_tbi_mediachange: [ifmedia interface function]
9433 *
9434 * Set hardware to newly-selected media on a 1000BASE-X device.
9435 */
9436 static int
9437 wm_tbi_mediachange(struct ifnet *ifp)
9438 {
9439 struct wm_softc *sc = ifp->if_softc;
9440 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9441 uint32_t status;
9442 int i;
9443
9444 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9445 /* XXX need some work for >= 82571 and < 82575 */
9446 if (sc->sc_type < WM_T_82575)
9447 return 0;
9448 }
9449
9450 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9451 || (sc->sc_type >= WM_T_82575))
9452 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9453
9454 sc->sc_ctrl &= ~CTRL_LRST;
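	/*
	 * TXCW is the 802.3z auto-negotiation advertisement word:
	 * TXCW_ANE enables autonegotiation, TXCW_FD/TXCW_HD advertise
	 * duplex, and the PAUSE bits advertise flow control.
	 */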
9455 sc->sc_txcw = TXCW_ANE;
9456 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9457 sc->sc_txcw |= TXCW_FD | TXCW_HD;
9458 else if (ife->ifm_media & IFM_FDX)
9459 sc->sc_txcw |= TXCW_FD;
9460 else
9461 sc->sc_txcw |= TXCW_HD;
9462
9463 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
9464 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
9465
9466 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
9467 device_xname(sc->sc_dev), sc->sc_txcw));
9468 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9469 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9470 CSR_WRITE_FLUSH(sc);
9471 delay(1000);
9472
9473 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
9474 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
9475
9476 /*
9477 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
9478 * optics detect a signal, 0 if they don't.
9479 */
9480 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
9481 /* Have signal; wait for the link to come up. */
9482 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
9483 delay(10000);
9484 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
9485 break;
9486 }
9487
9488 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
9489 device_xname(sc->sc_dev),i));
9490
9491 status = CSR_READ(sc, WMREG_STATUS);
9492 DPRINTF(WM_DEBUG_LINK,
9493 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
9494 device_xname(sc->sc_dev),status, STATUS_LU));
9495 if (status & STATUS_LU) {
9496 /* Link is up. */
9497 DPRINTF(WM_DEBUG_LINK,
9498 ("%s: LINK: set media -> link up %s\n",
9499 device_xname(sc->sc_dev),
9500 (status & STATUS_FD) ? "FDX" : "HDX"));
9501
9502 /*
9503			 * NOTE: the hardware will update TFCE and RFCE in
9504			 * CTRL automatically, so we should update sc->sc_ctrl
9505 */
9506 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9507 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9508 sc->sc_fcrtl &= ~FCRTL_XONE;
9509 if (status & STATUS_FD)
9510 sc->sc_tctl |=
9511 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9512 else
9513 sc->sc_tctl |=
9514 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9515 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
9516 sc->sc_fcrtl |= FCRTL_XONE;
9517 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9518 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9519 WMREG_OLD_FCRTL : WMREG_FCRTL,
9520 sc->sc_fcrtl);
9521 sc->sc_tbi_linkup = 1;
9522 } else {
9523 if (i == WM_LINKUP_TIMEOUT)
9524 wm_check_for_link(sc);
9525 /* Link is down. */
9526 DPRINTF(WM_DEBUG_LINK,
9527 ("%s: LINK: set media -> link down\n",
9528 device_xname(sc->sc_dev)));
9529 sc->sc_tbi_linkup = 0;
9530 }
9531 } else {
9532 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
9533 device_xname(sc->sc_dev)));
9534 sc->sc_tbi_linkup = 0;
9535 }
9536
9537 wm_tbi_serdes_set_linkled(sc);
9538
9539 return 0;
9540 }
9541
9542 /*
9543 * wm_tbi_mediastatus: [ifmedia interface function]
9544 *
9545 * Get the current interface media status on a 1000BASE-X device.
9546 */
9547 static void
9548 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9549 {
9550 struct wm_softc *sc = ifp->if_softc;
9551 uint32_t ctrl, status;
9552
9553 ifmr->ifm_status = IFM_AVALID;
9554 ifmr->ifm_active = IFM_ETHER;
9555
9556 status = CSR_READ(sc, WMREG_STATUS);
9557 if ((status & STATUS_LU) == 0) {
9558 ifmr->ifm_active |= IFM_NONE;
9559 return;
9560 }
9561
9562 ifmr->ifm_status |= IFM_ACTIVE;
9563 /* Only 82545 is LX */
9564 if (sc->sc_type == WM_T_82545)
9565 ifmr->ifm_active |= IFM_1000_LX;
9566 else
9567 ifmr->ifm_active |= IFM_1000_SX;
9568 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
9569 ifmr->ifm_active |= IFM_FDX;
9570 else
9571 ifmr->ifm_active |= IFM_HDX;
9572 ctrl = CSR_READ(sc, WMREG_CTRL);
9573 if (ctrl & CTRL_RFCE)
9574 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
9575 if (ctrl & CTRL_TFCE)
9576 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
9577 }
9578
9579 /* XXX TBI only */
9580 static int
9581 wm_check_for_link(struct wm_softc *sc)
9582 {
9583 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9584 uint32_t rxcw;
9585 uint32_t ctrl;
9586 uint32_t status;
9587 uint32_t sig;
9588
9589 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9590 /* XXX need some work for >= 82571 */
9591 if (sc->sc_type >= WM_T_82571) {
9592 sc->sc_tbi_linkup = 1;
9593 return 0;
9594 }
9595 }
9596
9597 rxcw = CSR_READ(sc, WMREG_RXCW);
9598 ctrl = CSR_READ(sc, WMREG_CTRL);
9599 status = CSR_READ(sc, WMREG_STATUS);
9600
9601 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
9602
9603 DPRINTF(WM_DEBUG_LINK,
9604 ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
9605 device_xname(sc->sc_dev), __func__,
9606 ((ctrl & CTRL_SWDPIN(1)) == sig),
9607 ((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
9608
9609 /*
9610 * SWDPIN LU RXCW
9611 * 0 0 0
9612 * 0 0 1 (should not happen)
9613 * 0 1 0 (should not happen)
9614 * 0 1 1 (should not happen)
9615 * 1 0 0 Disable autonego and force linkup
9616 * 1 0 1 got /C/ but not linkup yet
9617 * 1 1 0 (linkup)
9618 * 1 1 1 If IFM_AUTO, back to autonego
9619 *
9620 */
9621 if (((ctrl & CTRL_SWDPIN(1)) == sig)
9622 && ((status & STATUS_LU) == 0)
9623 && ((rxcw & RXCW_C) == 0)) {
9624 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
9625 __func__));
9626 sc->sc_tbi_linkup = 0;
9627 /* Disable auto-negotiation in the TXCW register */
9628 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
9629
9630 /*
9631 * Force link-up and also force full-duplex.
9632 *
9633	 * NOTE: the hardware updated TFCE and RFCE in CTRL
9634	 * automatically, so we should update sc->sc_ctrl
9635 */
9636 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
9637 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9638 } else if (((status & STATUS_LU) != 0)
9639 && ((rxcw & RXCW_C) != 0)
9640 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
9641 sc->sc_tbi_linkup = 1;
9642 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
9643 __func__));
9644 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9645 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
9646 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
9647 && ((rxcw & RXCW_C) != 0)) {
9648 DPRINTF(WM_DEBUG_LINK, ("/C/"));
9649 } else {
9650 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
9651 status));
9652 }
9653
9654 return 0;
9655 }
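
/*
 * The truth table above can be reduced to a small pure function; the
 * sketch below (not compiled, names hypothetical) encodes the same
 * decisions from the three sampled conditions plus the IFM_AUTO flag.
 */
#if 0
#include <stdbool.h>

enum tbi_action {
	TBI_FORCE_LINKUP,	/* SWDPIN=1, LU=0, RXCW_C=0 */
	TBI_RESTART_ANEG,	/* LU=1, RXCW_C=1, media is IFM_AUTO */
	TBI_SAW_C,		/* SWDPIN=1, RXCW_C=1: /C/ but no link yet */
	TBI_NOTHING
};

static enum tbi_action
tbi_link_action(bool sig, bool lu, bool rxcw_c, bool ifm_auto)
{

	if (sig && !lu && !rxcw_c)
		return TBI_FORCE_LINKUP;
	if (lu && rxcw_c && ifm_auto)
		return TBI_RESTART_ANEG;
	if (sig && rxcw_c)
		return TBI_SAW_C;
	return TBI_NOTHING;
}
#endif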
9656
9657 /*
9658 * wm_tbi_tick:
9659 *
9660 * Check the link on TBI devices.
9661 * This function acts as mii_tick().
9662 */
9663 static void
9664 wm_tbi_tick(struct wm_softc *sc)
9665 {
9666 struct mii_data *mii = &sc->sc_mii;
9667 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9668 uint32_t status;
9669
9670 KASSERT(WM_CORE_LOCKED(sc));
9671
9672 status = CSR_READ(sc, WMREG_STATUS);
9673
9674 /* XXX is this needed? */
9675 (void)CSR_READ(sc, WMREG_RXCW);
9676 (void)CSR_READ(sc, WMREG_CTRL);
9677
9678 /* set link status */
9679 if ((status & STATUS_LU) == 0) {
9680 DPRINTF(WM_DEBUG_LINK,
9681 ("%s: LINK: checklink -> down\n",
9682 device_xname(sc->sc_dev)));
9683 sc->sc_tbi_linkup = 0;
9684 } else if (sc->sc_tbi_linkup == 0) {
9685 DPRINTF(WM_DEBUG_LINK,
9686 ("%s: LINK: checklink -> up %s\n",
9687 device_xname(sc->sc_dev),
9688 (status & STATUS_FD) ? "FDX" : "HDX"));
9689 sc->sc_tbi_linkup = 1;
9690 sc->sc_tbi_serdes_ticks = 0;
9691 }
9692
9693 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
9694 goto setled;
9695
9696 if ((status & STATUS_LU) == 0) {
9697 sc->sc_tbi_linkup = 0;
9698 /* If the timer expired, retry autonegotiation */
9699 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9700 && (++sc->sc_tbi_serdes_ticks
9701 >= sc->sc_tbi_serdes_anegticks)) {
9702 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9703 sc->sc_tbi_serdes_ticks = 0;
9704 /*
9705 * Reset the link, and let autonegotiation do
9706 * its thing
9707 */
9708 sc->sc_ctrl |= CTRL_LRST;
9709 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9710 CSR_WRITE_FLUSH(sc);
9711 delay(1000);
9712 sc->sc_ctrl &= ~CTRL_LRST;
9713 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9714 CSR_WRITE_FLUSH(sc);
9715 delay(1000);
9716 CSR_WRITE(sc, WMREG_TXCW,
9717 sc->sc_txcw & ~TXCW_ANE);
9718 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9719 }
9720 }
9721
9722 setled:
9723 wm_tbi_serdes_set_linkled(sc);
9724 }
9725
9726 /* SERDES related */
9727 static void
9728 wm_serdes_power_up_link_82575(struct wm_softc *sc)
9729 {
9730 uint32_t reg;
9731
9732 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9733 && ((sc->sc_flags & WM_F_SGMII) == 0))
9734 return;
9735
9736 reg = CSR_READ(sc, WMREG_PCS_CFG);
9737 reg |= PCS_CFG_PCS_EN;
9738 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
9739
9740 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9741 reg &= ~CTRL_EXT_SWDPIN(3);
9742 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9743 CSR_WRITE_FLUSH(sc);
9744 }
9745
9746 static int
9747 wm_serdes_mediachange(struct ifnet *ifp)
9748 {
9749 struct wm_softc *sc = ifp->if_softc;
9750 bool pcs_autoneg = true; /* XXX */
9751 uint32_t ctrl_ext, pcs_lctl, reg;
9752
9753 /* XXX Currently, this function is not called on 8257[12] */
9754 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9755 || (sc->sc_type >= WM_T_82575))
9756 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9757
9758 wm_serdes_power_up_link_82575(sc);
9759
9760 sc->sc_ctrl |= CTRL_SLU;
9761
9762 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
9763 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
9764
9765 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9766 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
9767 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
9768 case CTRL_EXT_LINK_MODE_SGMII:
9769 pcs_autoneg = true;
9770 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
9771 break;
9772 case CTRL_EXT_LINK_MODE_1000KX:
9773 pcs_autoneg = false;
9774 /* FALLTHROUGH */
9775 default:
9776 if ((sc->sc_type == WM_T_82575)
9777 || (sc->sc_type == WM_T_82576)) {
9778 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
9779 pcs_autoneg = false;
9780 }
9781 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
9782 | CTRL_FRCFDX;
9783 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
9784 }
9785 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9786
9787 if (pcs_autoneg) {
9788 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
9789 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
9790
9791 reg = CSR_READ(sc, WMREG_PCS_ANADV);
9792 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
9793 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
9794 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
9795 } else
9796 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
9797
9798 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
9799
9800
9801 return 0;
9802 }
9803
9804 static void
9805 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9806 {
9807 struct wm_softc *sc = ifp->if_softc;
9808 struct mii_data *mii = &sc->sc_mii;
9809 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9810 uint32_t pcs_adv, pcs_lpab, reg;
9811
9812 ifmr->ifm_status = IFM_AVALID;
9813 ifmr->ifm_active = IFM_ETHER;
9814
9815 /* Check PCS */
9816 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9817 if ((reg & PCS_LSTS_LINKOK) == 0) {
9818 ifmr->ifm_active |= IFM_NONE;
9819 sc->sc_tbi_linkup = 0;
9820 goto setled;
9821 }
9822
9823 sc->sc_tbi_linkup = 1;
9824 ifmr->ifm_status |= IFM_ACTIVE;
9825 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
9826 if ((reg & PCS_LSTS_FDX) != 0)
9827 ifmr->ifm_active |= IFM_FDX;
9828 else
9829 ifmr->ifm_active |= IFM_HDX;
9830 mii->mii_media_active &= ~IFM_ETH_FMASK;
9831 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
9832 /* Check flow */
9833 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9834 if ((reg & PCS_LSTS_AN_COMP) == 0) {
9835 DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
9836 goto setled;
9837 }
9838 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
9839 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
9840 DPRINTF(WM_DEBUG_LINK,
9841 ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
9842 if ((pcs_adv & TXCW_SYM_PAUSE)
9843 && (pcs_lpab & TXCW_SYM_PAUSE)) {
9844 mii->mii_media_active |= IFM_FLOW
9845 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
9846 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
9847 && (pcs_adv & TXCW_ASYM_PAUSE)
9848 && (pcs_lpab & TXCW_SYM_PAUSE)
9849 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9850 mii->mii_media_active |= IFM_FLOW
9851 | IFM_ETH_TXPAUSE;
9852 } else if ((pcs_adv & TXCW_SYM_PAUSE)
9853 && (pcs_adv & TXCW_ASYM_PAUSE)
9854 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
9855 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9856 mii->mii_media_active |= IFM_FLOW
9857 | IFM_ETH_RXPAUSE;
9858 } else {
9859 }
9860 }
9861 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
9862 | (mii->mii_media_active & IFM_ETH_FMASK);
9863 setled:
9864 wm_tbi_serdes_set_linkled(sc);
9865 }
9866
9867 /*
9868 * wm_serdes_tick:
9869 *
9870 * Check the link on serdes devices.
9871 */
9872 static void
9873 wm_serdes_tick(struct wm_softc *sc)
9874 {
9875 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9876 struct mii_data *mii = &sc->sc_mii;
9877 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9878 uint32_t reg;
9879
9880 KASSERT(WM_CORE_LOCKED(sc));
9881
9882 mii->mii_media_status = IFM_AVALID;
9883 mii->mii_media_active = IFM_ETHER;
9884
9885 /* Check PCS */
9886 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9887 if ((reg & PCS_LSTS_LINKOK) != 0) {
9888 mii->mii_media_status |= IFM_ACTIVE;
9889 sc->sc_tbi_linkup = 1;
9890 sc->sc_tbi_serdes_ticks = 0;
9891 mii->mii_media_active |= IFM_1000_SX; /* XXX */
9892 if ((reg & PCS_LSTS_FDX) != 0)
9893 mii->mii_media_active |= IFM_FDX;
9894 else
9895 mii->mii_media_active |= IFM_HDX;
9896 } else {
9897 mii->mii_media_status |= IFM_NONE;
9898 sc->sc_tbi_linkup = 0;
9899 /* If the timer expired, retry autonegotiation */
9900 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9901 && (++sc->sc_tbi_serdes_ticks
9902 >= sc->sc_tbi_serdes_anegticks)) {
9903 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9904 sc->sc_tbi_serdes_ticks = 0;
9905 /* XXX */
9906 wm_serdes_mediachange(ifp);
9907 }
9908 }
9909
9910 wm_tbi_serdes_set_linkled(sc);
9911 }
9912
9913 /* SFP related */
9914
9915 static int
9916 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
9917 {
9918 uint32_t i2ccmd;
9919 int i;
9920
9921 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
9922 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9923
9924 /* Poll the ready bit */
9925 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9926 delay(50);
9927 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9928 if (i2ccmd & I2CCMD_READY)
9929 break;
9930 }
9931 if ((i2ccmd & I2CCMD_READY) == 0)
9932 return -1;
9933 if ((i2ccmd & I2CCMD_ERROR) != 0)
9934 return -1;
9935
9936 *data = i2ccmd & 0x00ff;
9937
9938 return 0;
9939 }
9940
9941 static uint32_t
9942 wm_sfp_get_media_type(struct wm_softc *sc)
9943 {
9944 uint32_t ctrl_ext;
9945 uint8_t val = 0;
9946 int timeout = 3;
9947 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
9948 int rv = -1;
9949
9950 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9951 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
9952 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
9953 CSR_WRITE_FLUSH(sc);
9954
9955 /* Read SFP module data */
9956 while (timeout) {
9957 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
9958 if (rv == 0)
9959 break;
9960 delay(100*1000); /* XXX too big */
9961 timeout--;
9962 }
9963 if (rv != 0)
9964 goto out;
9965 switch (val) {
9966 case SFF_SFP_ID_SFF:
9967 aprint_normal_dev(sc->sc_dev,
9968 "Module/Connector soldered to board\n");
9969 break;
9970 case SFF_SFP_ID_SFP:
9971 aprint_normal_dev(sc->sc_dev, "SFP\n");
9972 break;
9973 case SFF_SFP_ID_UNKNOWN:
9974 goto out;
9975 default:
9976 break;
9977 }
9978
9979 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
9980 if (rv != 0) {
9981 goto out;
9982 }
9983
9984 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
9985 mediatype = WM_MEDIATYPE_SERDES;
9986 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
9987 sc->sc_flags |= WM_F_SGMII;
9988 mediatype = WM_MEDIATYPE_COPPER;
9989 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
9990 sc->sc_flags |= WM_F_SGMII;
9991 mediatype = WM_MEDIATYPE_SERDES;
9992 }
9993
9994 out:
9995 /* Restore I2C interface setting */
9996 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9997
9998 return mediatype;
9999 }
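
/*
 * A minimal sketch of the flag-to-media-type reduction above (not
 * compiled; the helper and its bool parameters are hypothetical):
 * 1000BASE-SX/LX modules run as SERDES, 1000BASE-T runs as copper over
 * SGMII, and 100BASE-FX also uses SGMII but stays on the SERDES path.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static uint32_t
sfp_flags_to_mediatype(bool is_1000x, bool is_1000t, bool is_100fx,
    bool *want_sgmii)
{

	*want_sgmii = false;
	if (is_1000x)
		return WM_MEDIATYPE_SERDES;
	if (is_1000t) {
		*want_sgmii = true;
		return WM_MEDIATYPE_COPPER;
	}
	if (is_100fx) {
		*want_sgmii = true;
		return WM_MEDIATYPE_SERDES;
	}
	return WM_MEDIATYPE_UNKNOWN;
}
#endif
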
10000 /*
10001 * NVM related.
10002	 * Microwire, SPI (with or without EERD) and Flash.
10003 */
10004
10005 /* Both spi and uwire */
10006
10007 /*
10008 * wm_eeprom_sendbits:
10009 *
10010 * Send a series of bits to the EEPROM.
10011 */
10012 static void
10013 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
10014 {
10015 uint32_t reg;
10016 int x;
10017
10018 reg = CSR_READ(sc, WMREG_EECD);
10019
10020 for (x = nbits; x > 0; x--) {
10021 if (bits & (1U << (x - 1)))
10022 reg |= EECD_DI;
10023 else
10024 reg &= ~EECD_DI;
10025 CSR_WRITE(sc, WMREG_EECD, reg);
10026 CSR_WRITE_FLUSH(sc);
10027 delay(2);
10028 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
10029 CSR_WRITE_FLUSH(sc);
10030 delay(2);
10031 CSR_WRITE(sc, WMREG_EECD, reg);
10032 CSR_WRITE_FLUSH(sc);
10033 delay(2);
10034 }
10035 }
10036
10037 /*
10038 * wm_eeprom_recvbits:
10039 *
10040 * Receive a series of bits from the EEPROM.
10041 */
10042 static void
10043 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
10044 {
10045 uint32_t reg, val;
10046 int x;
10047
10048 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
10049
10050 val = 0;
10051 for (x = nbits; x > 0; x--) {
10052 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
10053 CSR_WRITE_FLUSH(sc);
10054 delay(2);
10055 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
10056 val |= (1U << (x - 1));
10057 CSR_WRITE(sc, WMREG_EECD, reg);
10058 CSR_WRITE_FLUSH(sc);
10059 delay(2);
10060 }
10061 *valp = val;
10062 }
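
/*
 * Host-side model (hypothetical, not compiled) of the MSB-first
 * shifting performed by wm_eeprom_sendbits()/wm_eeprom_recvbits(),
 * with the EECD pin wiggling replaced by per-bit callbacks.
 */
#if 0
#include <stdint.h>

static void
model_sendbits(void (*clock_out)(int di), uint32_t bits, int nbits)
{
	int x;

	/* Present each data bit, most significant first. */
	for (x = nbits; x > 0; x--)
		clock_out((bits >> (x - 1)) & 1);
}

static uint32_t
model_recvbits(int (*clock_in)(void), int nbits)
{
	uint32_t val = 0;
	int x;

	/* Sample each data bit, most significant first. */
	for (x = nbits; x > 0; x--)
		if (clock_in())
			val |= 1U << (x - 1);
	return val;
}
#endif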
10063
10064 /* Microwire */
10065
10066 /*
10067 * wm_nvm_read_uwire:
10068 *
10069 * Read a word from the EEPROM using the MicroWire protocol.
10070 */
10071 static int
10072 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10073 {
10074 uint32_t reg, val;
10075 int i;
10076
10077 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10078 device_xname(sc->sc_dev), __func__));
10079
10080 for (i = 0; i < wordcnt; i++) {
10081 /* Clear SK and DI. */
10082 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
10083 CSR_WRITE(sc, WMREG_EECD, reg);
10084
10085 /*
10086 * XXX: workaround for a bug in qemu-0.12.x and prior
10087 * and Xen.
10088 *
10089 * We use this workaround only for 82540 because qemu's
10090		 * e1000 acts as an 82540.
10091 */
10092 if (sc->sc_type == WM_T_82540) {
10093 reg |= EECD_SK;
10094 CSR_WRITE(sc, WMREG_EECD, reg);
10095 reg &= ~EECD_SK;
10096 CSR_WRITE(sc, WMREG_EECD, reg);
10097 CSR_WRITE_FLUSH(sc);
10098 delay(2);
10099 }
10100 /* XXX: end of workaround */
10101
10102 /* Set CHIP SELECT. */
10103 reg |= EECD_CS;
10104 CSR_WRITE(sc, WMREG_EECD, reg);
10105 CSR_WRITE_FLUSH(sc);
10106 delay(2);
10107
10108 /* Shift in the READ command. */
10109 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
10110
10111 /* Shift in address. */
10112 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
10113
10114 /* Shift out the data. */
10115 wm_eeprom_recvbits(sc, &val, 16);
10116 data[i] = val & 0xffff;
10117
10118 /* Clear CHIP SELECT. */
10119 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
10120 CSR_WRITE(sc, WMREG_EECD, reg);
10121 CSR_WRITE_FLUSH(sc);
10122 delay(2);
10123 }
10124
10125 return 0;
10126 }
10127
10128 /* SPI */
10129
10130 /*
10131 * Set SPI and FLASH related information from the EECD register.
10132 * For 82541 and 82547, the word size is taken from EEPROM.
10133 */
10134 static int
10135 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
10136 {
10137 int size;
10138 uint32_t reg;
10139 uint16_t data;
10140
10141 reg = CSR_READ(sc, WMREG_EECD);
10142 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
10143
10144 /* Read the size of NVM from EECD by default */
10145 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
10146 switch (sc->sc_type) {
10147 case WM_T_82541:
10148 case WM_T_82541_2:
10149 case WM_T_82547:
10150 case WM_T_82547_2:
10151 /* Set dummy value to access EEPROM */
10152 sc->sc_nvm_wordsize = 64;
10153 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
10154 reg = data;
10155 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
10156 if (size == 0)
10157 size = 6; /* 64 word size */
10158 else
10159 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
10160 break;
10161 case WM_T_80003:
10162 case WM_T_82571:
10163 case WM_T_82572:
10164 case WM_T_82573: /* SPI case */
10165 case WM_T_82574: /* SPI case */
10166 case WM_T_82583: /* SPI case */
10167 size += NVM_WORD_SIZE_BASE_SHIFT;
10168 if (size > 14)
10169 size = 14;
10170 break;
10171 case WM_T_82575:
10172 case WM_T_82576:
10173 case WM_T_82580:
10174 case WM_T_I350:
10175 case WM_T_I354:
10176 case WM_T_I210:
10177 case WM_T_I211:
10178 size += NVM_WORD_SIZE_BASE_SHIFT;
10179 if (size > 15)
10180 size = 15;
10181 break;
10182 default:
10183 aprint_error_dev(sc->sc_dev,
10184 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
10185 return -1;
10186 break;
10187 }
10188
10189 sc->sc_nvm_wordsize = 1 << size;
10190
10191 return 0;
10192 }
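
/*
 * The size field decoded above is an exponent, not a word count; a
 * hypothetical one-liner makes the relationship explicit.
 */
#if 0
static int
nvm_wordsize_from_field(int size)
{

	/* e.g. 6 -> 64 words, 14 -> 16384 words, 15 -> 32768 words */
	return 1 << size;
}
#endif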
10193
10194 /*
10195 * wm_nvm_ready_spi:
10196 *
10197 * Wait for a SPI EEPROM to be ready for commands.
10198 */
10199 static int
10200 wm_nvm_ready_spi(struct wm_softc *sc)
10201 {
10202 uint32_t val;
10203 int usec;
10204
10205 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10206 device_xname(sc->sc_dev), __func__));
10207
10208 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
10209 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
10210 wm_eeprom_recvbits(sc, &val, 8);
10211 if ((val & SPI_SR_RDY) == 0)
10212 break;
10213 }
10214 if (usec >= SPI_MAX_RETRIES) {
10215 aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
10216 return 1;
10217 }
10218 return 0;
10219 }
10220
10221 /*
10222 * wm_nvm_read_spi:
10223 *
10224	 *	Read a word from the EEPROM using the SPI protocol.
10225 */
10226 static int
10227 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10228 {
10229 uint32_t reg, val;
10230 int i;
10231 uint8_t opc;
10232
10233 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10234 device_xname(sc->sc_dev), __func__));
10235
10236 /* Clear SK and CS. */
10237 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
10238 CSR_WRITE(sc, WMREG_EECD, reg);
10239 CSR_WRITE_FLUSH(sc);
10240 delay(2);
10241
10242 if (wm_nvm_ready_spi(sc))
10243 return 1;
10244
10245 /* Toggle CS to flush commands. */
10246 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
10247 CSR_WRITE_FLUSH(sc);
10248 delay(2);
10249 CSR_WRITE(sc, WMREG_EECD, reg);
10250 CSR_WRITE_FLUSH(sc);
10251 delay(2);
10252
10253 opc = SPI_OPC_READ;
10254 if (sc->sc_nvm_addrbits == 8 && word >= 128)
10255 opc |= SPI_OPC_A8;
10256
10257 wm_eeprom_sendbits(sc, opc, 8);
10258 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
10259
10260 for (i = 0; i < wordcnt; i++) {
10261 wm_eeprom_recvbits(sc, &val, 16);
10262 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
10263 }
10264
10265 /* Raise CS and clear SK. */
10266 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
10267 CSR_WRITE(sc, WMREG_EECD, reg);
10268 CSR_WRITE_FLUSH(sc);
10269 delay(2);
10270
10271 return 0;
10272 }
10273
10274 /* Using with EERD */
10275
10276 static int
10277 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
10278 {
10279 uint32_t attempts = 100000;
10280 uint32_t i, reg = 0;
10281 int32_t done = -1;
10282
10283 for (i = 0; i < attempts; i++) {
10284 reg = CSR_READ(sc, rw);
10285
10286 if (reg & EERD_DONE) {
10287 done = 0;
10288 break;
10289 }
10290 delay(5);
10291 }
10292
10293 return done;
10294 }
10295
10296 static int
10297 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
10298 uint16_t *data)
10299 {
10300 int i, eerd = 0;
10301 int error = 0;
10302
10303 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10304 device_xname(sc->sc_dev), __func__));
10305
10306 for (i = 0; i < wordcnt; i++) {
10307 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
10308
10309 CSR_WRITE(sc, WMREG_EERD, eerd);
10310 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
10311 if (error != 0)
10312 break;
10313
10314 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
10315 }
10316
10317 return error;
10318 }
10319
10320 /* Flash */
10321
10322 static int
10323 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
10324 {
10325 uint32_t eecd;
10326 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
10327 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
10328 uint8_t sig_byte = 0;
10329
10330 switch (sc->sc_type) {
10331 case WM_T_PCH_SPT:
10332 /*
10333 * In SPT, read from the CTRL_EXT reg instead of accessing the
10334 * sector valid bits from the NVM.
10335 */
10336 *bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
10337 if ((*bank == 0) || (*bank == 1)) {
10338 aprint_error_dev(sc->sc_dev,
10339 "%s: no valid NVM bank present (%u)\n", __func__,
10340 *bank);
10341 return -1;
10342 } else {
10343 *bank = *bank - 2;
10344 return 0;
10345 }
10346 case WM_T_ICH8:
10347 case WM_T_ICH9:
10348 eecd = CSR_READ(sc, WMREG_EECD);
10349 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
10350 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
10351 return 0;
10352 }
10353 /* FALLTHROUGH */
10354 default:
10355 /* Default to 0 */
10356 *bank = 0;
10357
10358 /* Check bank 0 */
10359 wm_read_ich8_byte(sc, act_offset, &sig_byte);
10360 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10361 *bank = 0;
10362 return 0;
10363 }
10364
10365 /* Check bank 1 */
10366 wm_read_ich8_byte(sc, act_offset + bank1_offset,
10367 &sig_byte);
10368 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10369 *bank = 1;
10370 return 0;
10371 }
10372 }
10373
10374 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
10375 device_xname(sc->sc_dev)));
10376 return -1;
10377 }
10378
10379 /******************************************************************************
10380 * This function does initial flash setup so that a new read/write/erase cycle
10381 * can be started.
10382 *
10383 * sc - The pointer to the hw structure
10384 ****************************************************************************/
10385 static int32_t
10386 wm_ich8_cycle_init(struct wm_softc *sc)
10387 {
10388 uint16_t hsfsts;
10389 int32_t error = 1;
10390 int32_t i = 0;
10391
10392 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10393
10394		/* Maybe check the Flash Descriptor Valid bit in HW status */
10395 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
10396 return error;
10397 }
10398
10399 /* Clear FCERR in Hw status by writing 1 */
10400 /* Clear DAEL in Hw status by writing a 1 */
10401 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
10402
10403 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10404
10405		/*
10406		 * Either we should have a hardware SPI cycle-in-progress bit to
10407		 * check against in order to start a new cycle, or the FDONE bit
10408		 * should be changed in the hardware so that it is 1 after a
10409		 * hardware reset, which could then be used to tell whether a
10410		 * cycle is in progress or has completed.  We should also have a
10411		 * software semaphore mechanism to guard FDONE or the
10412		 * cycle-in-progress bit, so that access by two threads is
10413		 * serialized or two threads can't start a cycle at the same time.
10414		 */
10415
10416 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10417 /*
10418 * There is no cycle running at present, so we can start a
10419 * cycle
10420 */
10421
10422 /* Begin by setting Flash Cycle Done. */
10423 hsfsts |= HSFSTS_DONE;
10424 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10425 error = 0;
10426 } else {
10427 /*
10428		 * Otherwise poll for some time so the current cycle has a
10429		 * chance to end before giving up.
10430 */
10431 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
10432 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10433 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10434 error = 0;
10435 break;
10436 }
10437 delay(1);
10438 }
10439 if (error == 0) {
10440 /*
10441			 * The previous cycle ended within the timeout;
10442			 * now set the Flash Cycle Done.
10443 */
10444 hsfsts |= HSFSTS_DONE;
10445 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10446 }
10447 }
10448 return error;
10449 }
10450
10451 /******************************************************************************
10452 * This function starts a flash cycle and waits for its completion
10453 *
10454 * sc - The pointer to the hw structure
10455 ****************************************************************************/
10456 static int32_t
10457 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
10458 {
10459 uint16_t hsflctl;
10460 uint16_t hsfsts;
10461 int32_t error = 1;
10462 uint32_t i = 0;
10463
10464 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
10465 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10466 hsflctl |= HSFCTL_GO;
10467 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10468
10469 /* Wait till FDONE bit is set to 1 */
10470 do {
10471 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10472 if (hsfsts & HSFSTS_DONE)
10473 break;
10474 delay(1);
10475 i++;
10476 } while (i < timeout);
10477	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
10478 error = 0;
10479
10480 return error;
10481 }
10482
10483 /******************************************************************************
10484 * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
10485 *
10486 * sc - The pointer to the hw structure
10487 * index - The index of the byte or word to read.
10488 * size - Size of data to read, 1=byte 2=word, 4=dword
10489 * data - Pointer to the word to store the value read.
10490 *****************************************************************************/
10491 static int32_t
10492 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
10493 uint32_t size, uint32_t *data)
10494 {
10495 uint16_t hsfsts;
10496 uint16_t hsflctl;
10497 uint32_t flash_linear_address;
10498 uint32_t flash_data = 0;
10499 int32_t error = 1;
10500 int32_t count = 0;
10501
10502	if (size < 1 || size > 4 || data == NULL ||
10503 index > ICH_FLASH_LINEAR_ADDR_MASK)
10504 return error;
10505
10506 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
10507 sc->sc_ich8_flash_base;
10508
10509 do {
10510 delay(1);
10511 /* Steps */
10512 error = wm_ich8_cycle_init(sc);
10513 if (error)
10514 break;
10515
10516 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10517		/* The byte-count field holds the transfer size minus one. */
10518 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
10519 & HSFCTL_BCOUNT_MASK;
10520 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
10521 if (sc->sc_type == WM_T_PCH_SPT) {
10522 /*
10523 * In SPT, This register is in Lan memory space, not
10524 * flash. Therefore, only 32 bit access is supported.
10525 */
10526 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
10527 (uint32_t)hsflctl);
10528 } else
10529 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10530
10531 /*
10532 * Write the last 24 bits of index into Flash Linear address
10533 * field in Flash Address
10534 */
10535 /* TODO: TBD maybe check the index against the size of flash */
10536
10537 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
10538
10539 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
10540
10541 /*
10542		 * If FCERR is set to 1, clear it and try the whole
10543		 * sequence a few more times; otherwise read the result
10544		 * from Flash Data0, in least-significant-byte-first
10545		 * order.
10546 */
10547 if (error == 0) {
10548 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
10549 if (size == 1)
10550 *data = (uint8_t)(flash_data & 0x000000FF);
10551 else if (size == 2)
10552 *data = (uint16_t)(flash_data & 0x0000FFFF);
10553 else if (size == 4)
10554 *data = (uint32_t)flash_data;
10555 break;
10556 } else {
10557 /*
10558 * If we've gotten here, then things are probably
10559 * completely hosed, but if the error condition is
10560 * detected, it won't hurt to give it another try...
10561 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
10562 */
10563 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10564 if (hsfsts & HSFSTS_ERR) {
10565 /* Repeat for some time before giving up. */
10566 continue;
10567 } else if ((hsfsts & HSFSTS_DONE) == 0)
10568 break;
10569 }
10570 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
10571
10572 return error;
10573 }
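
/*
 * Illustrative reduction (not compiled, name hypothetical) of the
 * size-based extraction from FDATA0 above.
 */
#if 0
#include <stdint.h>

static uint32_t
ich8_extract_fdata(uint32_t flash_data, uint32_t size)
{

	switch (size) {
	case 1:
		return flash_data & 0x000000FF;	/* single byte */
	case 2:
		return flash_data & 0x0000FFFF;	/* 16-bit word */
	default:
		return flash_data;		/* full dword */
	}
}
#endif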
10574
10575 /******************************************************************************
10576 * Reads a single byte from the NVM using the ICH8 flash access registers.
10577 *
10578 * sc - pointer to wm_hw structure
10579 * index - The index of the byte to read.
10580 * data - Pointer to a byte to store the value read.
10581 *****************************************************************************/
10582 static int32_t
10583 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
10584 {
10585 int32_t status;
10586 uint32_t word = 0;
10587
10588 status = wm_read_ich8_data(sc, index, 1, &word);
10589 if (status == 0)
10590 *data = (uint8_t)word;
10591 else
10592 *data = 0;
10593
10594 return status;
10595 }
10596
10597 /******************************************************************************
10598 * Reads a word from the NVM using the ICH8 flash access registers.
10599 *
10600 * sc - pointer to wm_hw structure
10601 * index - The starting byte index of the word to read.
10602 * data - Pointer to a word to store the value read.
10603 *****************************************************************************/
10604 static int32_t
10605 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
10606 {
10607 int32_t status;
10608 uint32_t word = 0;
10609
10610 status = wm_read_ich8_data(sc, index, 2, &word);
10611 if (status == 0)
10612 *data = (uint16_t)word;
10613 else
10614 *data = 0;
10615
10616 return status;
10617 }
10618
10619 /******************************************************************************
10620 * Reads a dword from the NVM using the ICH8 flash access registers.
10621 *
10622 * sc - pointer to wm_hw structure
10623 * index - The starting byte index of the word to read.
10624 * data - Pointer to a word to store the value read.
10625 *****************************************************************************/
10626 static int32_t
10627 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
10628 {
10629 int32_t status;
10630
10631 status = wm_read_ich8_data(sc, index, 4, data);
10632 return status;
10633 }
10634
10635 /******************************************************************************
10636 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
10637 * register.
10638 *
10639 * sc - Struct containing variables accessed by shared code
10640 * offset - offset of word in the EEPROM to read
10641 * data - word read from the EEPROM
10642 * words - number of words to read
10643 *****************************************************************************/
10644 static int
10645 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
10646 {
10647 int32_t error = 0;
10648 uint32_t flash_bank = 0;
10649 uint32_t act_offset = 0;
10650 uint32_t bank_offset = 0;
10651 uint16_t word = 0;
10652 uint16_t i = 0;
10653
10654 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10655 device_xname(sc->sc_dev), __func__));
10656
10657 /*
10658 * We need to know which is the valid flash bank. In the event
10659 * that we didn't allocate eeprom_shadow_ram, we may not be
10660 * managing flash_bank. So it cannot be trusted and needs
10661 * to be updated with each read.
10662 */
10663 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10664 if (error) {
10665 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10666 device_xname(sc->sc_dev)));
10667 flash_bank = 0;
10668 }
10669
10670 /*
10671 * Adjust offset appropriately if we're on bank 1 - adjust for word
10672 * size
10673 */
10674 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10675
10676 error = wm_get_swfwhw_semaphore(sc);
10677 if (error) {
10678 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10679 __func__);
10680 return error;
10681 }
10682
10683 for (i = 0; i < words; i++) {
10684 /* The NVM part needs a byte offset, hence * 2 */
10685 act_offset = bank_offset + ((offset + i) * 2);
10686 error = wm_read_ich8_word(sc, act_offset, &word);
10687 if (error) {
10688 aprint_error_dev(sc->sc_dev,
10689 "%s: failed to read NVM\n", __func__);
10690 break;
10691 }
10692 data[i] = word;
10693 }
10694
10695 wm_put_swfwhw_semaphore(sc);
10696 return error;
10697 }
10698
10699 /******************************************************************************
10700 * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
10701 * register.
10702 *
10703 * sc - Struct containing variables accessed by shared code
10704 * offset - offset of word in the EEPROM to read
10705 * data - word read from the EEPROM
10706 * words - number of words to read
10707 *****************************************************************************/
10708 static int
10709 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
10710 {
10711 int32_t error = 0;
10712 uint32_t flash_bank = 0;
10713 uint32_t act_offset = 0;
10714 uint32_t bank_offset = 0;
10715 uint32_t dword = 0;
10716 uint16_t i = 0;
10717
10718 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10719 device_xname(sc->sc_dev), __func__));
10720
10721 /*
10722 * We need to know which is the valid flash bank. In the event
10723 * that we didn't allocate eeprom_shadow_ram, we may not be
10724 * managing flash_bank. So it cannot be trusted and needs
10725 * to be updated with each read.
10726 */
10727 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10728 if (error) {
10729 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10730 device_xname(sc->sc_dev)));
10731 flash_bank = 0;
10732 }
10733
10734 /*
10735 * Adjust offset appropriately if we're on bank 1 - adjust for word
10736 * size
10737 */
10738 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10739
10740 error = wm_get_swfwhw_semaphore(sc);
10741 if (error) {
10742 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10743 __func__);
10744 return error;
10745 }
10746
10747 for (i = 0; i < words; i++) {
10748 /* The NVM part needs a byte offset, hence * 2 */
10749 act_offset = bank_offset + ((offset + i) * 2);
10750 /* but we must read dword aligned, so mask ... */
10751 error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
10752 if (error) {
10753 aprint_error_dev(sc->sc_dev,
10754 "%s: failed to read NVM\n", __func__);
10755 break;
10756 }
10757 /* ... and pick out low or high word */
10758 if ((act_offset & 0x2) == 0)
10759 data[i] = (uint16_t)(dword & 0xFFFF);
10760 else
10761 data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
10762 }
10763
10764 wm_put_swfwhw_semaphore(sc);
10765 return error;
10766 }
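
/*
 * A standalone sketch (hypothetical name, not compiled) of the word
 * selection above: SPT flash accesses are dword aligned, and bit 1 of
 * the byte offset picks the low or high half of the dword read.
 */
#if 0
#include <stdint.h>

static uint16_t
spt_pick_word(uint32_t dword, uint32_t act_offset)
{

	if ((act_offset & 0x2) == 0)
		return (uint16_t)(dword & 0xFFFF);	/* low word */
	return (uint16_t)((dword >> 16) & 0xFFFF);	/* high word */
}
#endif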
10767
10768 /* iNVM */
10769
10770 static int
10771 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
10772 {
10773 int32_t rv = 0;
10774 uint32_t invm_dword;
10775 uint16_t i;
10776 uint8_t record_type, word_address;
10777
10778 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10779 device_xname(sc->sc_dev), __func__));
10780
10781 for (i = 0; i < INVM_SIZE; i++) {
10782 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
10783 /* Get record type */
10784 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
10785 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
10786 break;
10787 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
10788 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
10789 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
10790 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
10791 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
10792 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
10793 if (word_address == address) {
10794 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
10795 rv = 0;
10796 break;
10797 }
10798 }
10799 }
10800
10801 return rv;
10802 }
10803
10804 static int
10805 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
10806 {
10807 int rv = 0;
10808 int i;
10809
10810 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10811 device_xname(sc->sc_dev), __func__));
10812
10813 for (i = 0; i < words; i++) {
10814 switch (offset + i) {
10815 case NVM_OFF_MACADDR:
10816 case NVM_OFF_MACADDR1:
10817 case NVM_OFF_MACADDR2:
10818 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
10819 if (rv != 0) {
10820 data[i] = 0xffff;
10821 rv = -1;
10822 }
10823 break;
10824 case NVM_OFF_CFG2:
10825 rv = wm_nvm_read_word_invm(sc, offset, data);
10826 if (rv != 0) {
10827 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
10828 rv = 0;
10829 }
10830 break;
10831 case NVM_OFF_CFG4:
10832 rv = wm_nvm_read_word_invm(sc, offset, data);
10833 if (rv != 0) {
10834 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
10835 rv = 0;
10836 }
10837 break;
10838 case NVM_OFF_LED_1_CFG:
10839 rv = wm_nvm_read_word_invm(sc, offset, data);
10840 if (rv != 0) {
10841 *data = NVM_LED_1_CFG_DEFAULT_I211;
10842 rv = 0;
10843 }
10844 break;
10845 case NVM_OFF_LED_0_2_CFG:
10846 rv = wm_nvm_read_word_invm(sc, offset, data);
10847 if (rv != 0) {
10848 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
10849 rv = 0;
10850 }
10851 break;
10852 case NVM_OFF_ID_LED_SETTINGS:
10853 rv = wm_nvm_read_word_invm(sc, offset, data);
10854 if (rv != 0) {
10855 *data = ID_LED_RESERVED_FFFF;
10856 rv = 0;
10857 }
10858 break;
10859 default:
10860 DPRINTF(WM_DEBUG_NVM,
10861 ("NVM word 0x%02x is not mapped.\n", offset));
10862 *data = NVM_RESERVED_WORD;
10863 break;
10864 }
10865 }
10866
10867 return rv;
10868 }
10869
10870 /* Lock, detecting NVM type, validate checksum, version and read */
10871
10872 /*
10873 * wm_nvm_acquire:
10874 *
10875 * Perform the EEPROM handshake required on some chips.
10876 */
10877 static int
10878 wm_nvm_acquire(struct wm_softc *sc)
10879 {
10880 uint32_t reg;
10881 int x;
10882 int ret = 0;
10883
10884 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10885 device_xname(sc->sc_dev), __func__));
10886
10887 if (sc->sc_type >= WM_T_ICH8) {
10888 ret = wm_get_nvm_ich8lan(sc);
10889 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
10890 ret = wm_get_swfwhw_semaphore(sc);
10891 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
10892 /* This will also do wm_get_swsm_semaphore() if needed */
10893 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
10894 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
10895 ret = wm_get_swsm_semaphore(sc);
10896 }
10897
10898 if (ret) {
10899 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10900 __func__);
10901 return 1;
10902 }
10903
10904 if (sc->sc_flags & WM_F_LOCK_EECD) {
10905 reg = CSR_READ(sc, WMREG_EECD);
10906
10907 /* Request EEPROM access. */
10908 reg |= EECD_EE_REQ;
10909 CSR_WRITE(sc, WMREG_EECD, reg);
10910
10911 /* ..and wait for it to be granted. */
10912 for (x = 0; x < 1000; x++) {
10913 reg = CSR_READ(sc, WMREG_EECD);
10914 if (reg & EECD_EE_GNT)
10915 break;
10916 delay(5);
10917 }
10918 if ((reg & EECD_EE_GNT) == 0) {
10919 aprint_error_dev(sc->sc_dev,
10920 "could not acquire EEPROM GNT\n");
10921 reg &= ~EECD_EE_REQ;
10922 CSR_WRITE(sc, WMREG_EECD, reg);
10923 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10924 wm_put_swfwhw_semaphore(sc);
10925 if (sc->sc_flags & WM_F_LOCK_SWFW)
10926 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10927 else if (sc->sc_flags & WM_F_LOCK_SWSM)
10928 wm_put_swsm_semaphore(sc);
10929 return 1;
10930 }
10931 }
10932
10933 return 0;
10934 }
10935
10936 /*
10937 * wm_nvm_release:
10938 *
10939 * Release the EEPROM mutex.
10940 */
10941 static void
10942 wm_nvm_release(struct wm_softc *sc)
10943 {
10944 uint32_t reg;
10945
10946 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10947 device_xname(sc->sc_dev), __func__));
10948
10949 if (sc->sc_flags & WM_F_LOCK_EECD) {
10950 reg = CSR_READ(sc, WMREG_EECD);
10951 reg &= ~EECD_EE_REQ;
10952 CSR_WRITE(sc, WMREG_EECD, reg);
10953 }
10954
10955 if (sc->sc_type >= WM_T_ICH8) {
10956 wm_put_nvm_ich8lan(sc);
10957 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10958 wm_put_swfwhw_semaphore(sc);
10959 if (sc->sc_flags & WM_F_LOCK_SWFW)
10960 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10961 else if (sc->sc_flags & WM_F_LOCK_SWSM)
10962 wm_put_swsm_semaphore(sc);
10963 }
10964
10965 static int
10966 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
10967 {
10968 uint32_t eecd = 0;
10969
10970 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
10971 || sc->sc_type == WM_T_82583) {
10972 eecd = CSR_READ(sc, WMREG_EECD);
10973
10974 /* Isolate bits 15 & 16 */
10975 eecd = ((eecd >> 15) & 0x03);
10976
10977 /* If both bits are set, device is Flash type */
10978 if (eecd == 0x03)
10979 return 0;
10980 }
10981 return 1;
10982 }
10983
10984 static int
10985 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
10986 {
10987 uint32_t eec;
10988
10989 eec = CSR_READ(sc, WMREG_EEC);
10990 if ((eec & EEC_FLASH_DETECTED) != 0)
10991 return 1;
10992
10993 return 0;
10994 }
10995
10996 /*
10997 * wm_nvm_validate_checksum
10998 *
10999 * The checksum is defined as the sum of the first 64 (16 bit) words.
11000 */
11001 static int
11002 wm_nvm_validate_checksum(struct wm_softc *sc)
11003 {
11004 uint16_t checksum;
11005 uint16_t eeprom_data;
11006 #ifdef WM_DEBUG
11007 uint16_t csum_wordaddr, valid_checksum;
11008 #endif
11009 int i;
11010
11011 checksum = 0;
11012
11013 /* Don't check for I211 */
11014 if (sc->sc_type == WM_T_I211)
11015 return 0;
11016
11017 #ifdef WM_DEBUG
11018 if (sc->sc_type == WM_T_PCH_LPT) {
11019 csum_wordaddr = NVM_OFF_COMPAT;
11020 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
11021 } else {
11022 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
11023 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
11024 }
11025
11026 /* Dump EEPROM image for debug */
11027 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11028 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11029 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
11030 /* XXX PCH_SPT? */
11031 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
11032 if ((eeprom_data & valid_checksum) == 0) {
11033 DPRINTF(WM_DEBUG_NVM,
11034 ("%s: NVM need to be updated (%04x != %04x)\n",
11035 device_xname(sc->sc_dev), eeprom_data,
11036 valid_checksum));
11037 }
11038 }
11039
11040 if ((wm_debug & WM_DEBUG_NVM) != 0) {
11041 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
11042 for (i = 0; i < NVM_SIZE; i++) {
11043 if (wm_nvm_read(sc, i, 1, &eeprom_data))
11044 printf("XXXX ");
11045 else
11046 printf("%04hx ", eeprom_data);
11047 if (i % 8 == 7)
11048 printf("\n");
11049 }
11050 }
11051
11052 #endif /* WM_DEBUG */
11053
11054 for (i = 0; i < NVM_SIZE; i++) {
11055 if (wm_nvm_read(sc, i, 1, &eeprom_data))
11056 return 1;
11057 checksum += eeprom_data;
11058 }
11059
11060 if (checksum != (uint16_t) NVM_CHECKSUM) {
11061 #ifdef WM_DEBUG
11062 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
11063 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
11064 #endif
11065 }
11066
11067 return 0;
11068 }
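
/*
 * A minimal sketch of the checksum rule above, applied to an in-memory
 * image (not compiled; the helper name is hypothetical and 0xBABA is
 * the NVM_CHECKSUM magic on these devices): the unsigned 16-bit sum of
 * the first 64 words must equal the magic value.
 */
#if 0
#include <stdint.h>

static int
nvm_image_checksum_ok(const uint16_t *image)
{
	uint16_t checksum = 0;
	int i;

	for (i = 0; i < 64; i++)
		checksum += image[i];	/* wraps modulo 2^16 */

	return checksum == 0xBABA;
}
#endif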
11069
11070 static void
11071 wm_nvm_version_invm(struct wm_softc *sc)
11072 {
11073 uint32_t dword;
11074
11075 /*
11076	 * Linux's code to decode the version is very strange, so we don't
11077	 * follow that algorithm and just use word 61 as documented.
11078 * Perhaps it's not perfect though...
11079 *
11080 * Example:
11081 *
11082 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
11083 */
11084 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
11085 dword = __SHIFTOUT(dword, INVM_VER_1);
11086 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
11087 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
11088 }
11089
11090 static void
11091 wm_nvm_version(struct wm_softc *sc)
11092 {
11093 uint16_t major, minor, build, patch;
11094 uint16_t uid0, uid1;
11095 uint16_t nvm_data;
11096 uint16_t off;
11097 bool check_version = false;
11098 bool check_optionrom = false;
11099 bool have_build = false;
11100
11101 /*
11102 * Version format:
11103 *
11104 * XYYZ
11105 * X0YZ
11106 * X0YY
11107 *
11108 * Example:
11109 *
11110 * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10)
11111 * 82571 0x50a6 5.10.6?
11112 * 82572 0x506a 5.6.10?
11113 * 82572EI 0x5069 5.6.9?
11114 * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4)
11115 * 0x2013 2.1.3?
11116	 *	82583	0x10a0	1.10.0? (document says it's the default value)
11117 */
11118 wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
11119 switch (sc->sc_type) {
11120 case WM_T_82571:
11121 case WM_T_82572:
11122 case WM_T_82574:
11123 case WM_T_82583:
11124 check_version = true;
11125 check_optionrom = true;
11126 have_build = true;
11127 break;
11128 case WM_T_82575:
11129 case WM_T_82576:
11130 case WM_T_82580:
11131 if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
11132 check_version = true;
11133 break;
11134 case WM_T_I211:
11135 wm_nvm_version_invm(sc);
11136 goto printver;
11137 case WM_T_I210:
11138 if (!wm_nvm_get_flash_presence_i210(sc)) {
11139 wm_nvm_version_invm(sc);
11140 goto printver;
11141 }
11142 /* FALLTHROUGH */
11143 case WM_T_I350:
11144 case WM_T_I354:
11145 check_version = true;
11146 check_optionrom = true;
11147 break;
11148 default:
11149 return;
11150 }
11151 if (check_version) {
11152 wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
11153 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
11154 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
11155 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
11156 build = nvm_data & NVM_BUILD_MASK;
11157 have_build = true;
11158 } else
11159 minor = nvm_data & 0x00ff;
11160
11161 /* Decimal */
11162 minor = (minor / 16) * 10 + (minor % 16);
11163 sc->sc_nvm_ver_major = major;
11164 sc->sc_nvm_ver_minor = minor;
11165
11166 printver:
11167 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
11168 sc->sc_nvm_ver_minor);
11169 if (have_build) {
11170 sc->sc_nvm_ver_build = build;
11171 aprint_verbose(".%d", build);
11172 }
11173 }
11174 if (check_optionrom) {
11175 wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
11176 /* Option ROM Version */
11177 if ((off != 0x0000) && (off != 0xffff)) {
11178 off += NVM_COMBO_VER_OFF;
11179 wm_nvm_read(sc, off + 1, 1, &uid1);
11180 wm_nvm_read(sc, off, 1, &uid0);
11181 if ((uid0 != 0) && (uid0 != 0xffff)
11182 && (uid1 != 0) && (uid1 != 0xffff)) {
11183 /* 16bits */
11184 major = uid0 >> 8;
11185 build = (uid0 << 8) | (uid1 >> 8);
11186 patch = uid1 & 0x00ff;
11187 aprint_verbose(", option ROM Version %d.%d.%d",
11188 major, build, patch);
11189 }
11190 }
11191 }
11192
11193 wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
11194 aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
11195 }
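
/*
 * A worked example (hypothetical helper, not compiled) of the XYYZ
 * decoding above, using the nibble layout the code implies: 0x50a2
 * gives major 5, minor 0x0a, build 2, and the hex-to-decimal reading
 * of the minor then yields "5.10.2".
 */
#if 0
#include <stdint.h>

static void
decode_xyyz(uint16_t nvm_data, uint16_t *major, uint16_t *minor,
    uint16_t *build)
{

	*major = (nvm_data & 0xf000) >> 12;
	*minor = (nvm_data & 0x0ff0) >> 4;
	*build = nvm_data & 0x000f;

	/* Read the hex minor as decimal, as done above. */
	*minor = (*minor / 16) * 10 + (*minor % 16);
}
#endif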
11196
11197 /*
11198 * wm_nvm_read:
11199 *
11200 * Read data from the serial EEPROM.
11201 */
11202 static int
11203 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
11204 {
11205 int rv;
11206
11207 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11208 device_xname(sc->sc_dev), __func__));
11209
11210 if (sc->sc_flags & WM_F_EEPROM_INVALID)
11211 return 1;
11212
11213 if (wm_nvm_acquire(sc))
11214 return 1;
11215
11216 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11217 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11218 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
11219 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
11220 else if (sc->sc_type == WM_T_PCH_SPT)
11221 rv = wm_nvm_read_spt(sc, word, wordcnt, data);
11222 else if (sc->sc_flags & WM_F_EEPROM_INVM)
11223 rv = wm_nvm_read_invm(sc, word, wordcnt, data);
11224 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
11225 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
11226 else if (sc->sc_flags & WM_F_EEPROM_SPI)
11227 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
11228 else
11229 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
11230
11231 wm_nvm_release(sc);
11232 return rv;
11233 }
11234
11235 /*
11236 * Hardware semaphores.
11237	 * Very complex...
11238 */
11239
11240 static int
11241 wm_get_null(struct wm_softc *sc)
11242 {
11243
11244 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11245 device_xname(sc->sc_dev), __func__));
11246 return 0;
11247 }
11248
11249 static void
11250 wm_put_null(struct wm_softc *sc)
11251 {
11252
11253 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11254 device_xname(sc->sc_dev), __func__));
11255 return;
11256 }
11257
11258 /*
11259 * Get hardware semaphore.
11260 * Same as e1000_get_hw_semaphore_generic()
11261 */
11262 static int
11263 wm_get_swsm_semaphore(struct wm_softc *sc)
11264 {
11265 int32_t timeout;
11266 uint32_t swsm;
11267
11268 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11269 device_xname(sc->sc_dev), __func__));
11270 KASSERT(sc->sc_nvm_wordsize > 0);
11271
11272 /* Get the SW semaphore. */
11273 timeout = sc->sc_nvm_wordsize + 1;
11274 while (timeout) {
11275 swsm = CSR_READ(sc, WMREG_SWSM);
11276
11277 if ((swsm & SWSM_SMBI) == 0)
11278 break;
11279
11280 delay(50);
11281 timeout--;
11282 }
11283
11284 if (timeout == 0) {
11285 aprint_error_dev(sc->sc_dev,
11286 "could not acquire SWSM SMBI\n");
11287 return 1;
11288 }
11289
11290 /* Get the FW semaphore. */
11291 timeout = sc->sc_nvm_wordsize + 1;
11292 while (timeout) {
11293 swsm = CSR_READ(sc, WMREG_SWSM);
11294 swsm |= SWSM_SWESMBI;
11295 CSR_WRITE(sc, WMREG_SWSM, swsm);
11296 /* If we managed to set the bit we got the semaphore. */
11297 swsm = CSR_READ(sc, WMREG_SWSM);
11298 if (swsm & SWSM_SWESMBI)
11299 break;
11300
11301 delay(50);
11302 timeout--;
11303 }
11304
11305 if (timeout == 0) {
11306 aprint_error_dev(sc->sc_dev,
11307 "could not acquire SWSM SWESMBI\n");
11308 /* Release semaphores */
11309 wm_put_swsm_semaphore(sc);
11310 return 1;
11311 }
11312 return 0;
11313 }
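
/*
 * The handshake above reduces to a test-and-set pattern; this sketch
 * (not compiled, register accessors hypothetical, both waits collapsed
 * into one loop for brevity) shows the essential steps: wait for the
 * hardware-owned SMBI bit to clear, then claim SWESMBI and confirm
 * ownership by reading it back.
 */
#if 0
static int
swsm_handshake(uint32_t (*rd)(void), void (*wr)(uint32_t), int tries)
{

	while (tries-- > 0) {
		if ((rd() & SWSM_SMBI) != 0) {
			delay(50);
			continue;		/* step 1: SMBI still held */
		}
		wr(rd() | SWSM_SWESMBI);	/* step 2: try to claim */
		if ((rd() & SWSM_SWESMBI) != 0)
			return 0;		/* the write stuck: we own it */
	}
	return 1;				/* timed out */
}
#endif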
11314
11315 /*
11316 * Put hardware semaphore.
11317 * Same as e1000_put_hw_semaphore_generic()
11318 */
11319 static void
11320 wm_put_swsm_semaphore(struct wm_softc *sc)
11321 {
11322 uint32_t swsm;
11323
11324 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11325 device_xname(sc->sc_dev), __func__));
11326
11327 swsm = CSR_READ(sc, WMREG_SWSM);
11328 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
11329 CSR_WRITE(sc, WMREG_SWSM, swsm);
11330 }
11331
11332 /*
11333 * Get SW/FW semaphore.
11334 * Same as e1000_acquire_swfw_sync_82575().
11335 */
11336 static int
11337 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
11338 {
11339 uint32_t swfw_sync;
11340 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
11341 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
11342 int timeout = 200;
11343
11344 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11345 device_xname(sc->sc_dev), __func__));
11346 KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
11347
11348 for (timeout = 0; timeout < 200; timeout++) {
11349 if (sc->sc_flags & WM_F_LOCK_SWSM) {
11350 if (wm_get_swsm_semaphore(sc)) {
11351 aprint_error_dev(sc->sc_dev,
11352 "%s: failed to get semaphore\n",
11353 __func__);
11354 return 1;
11355 }
11356 }
11357 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
11358 if ((swfw_sync & (swmask | fwmask)) == 0) {
11359 swfw_sync |= swmask;
11360 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
11361 if (sc->sc_flags & WM_F_LOCK_SWSM)
11362 wm_put_swsm_semaphore(sc);
11363 return 0;
11364 }
11365 if (sc->sc_flags & WM_F_LOCK_SWSM)
11366 wm_put_swsm_semaphore(sc);
11367 delay(5000);
11368 }
11369 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
11370 device_xname(sc->sc_dev), mask, swfw_sync);
11371 return 1;
11372 }
11373
11374 static void
11375 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
11376 {
11377 uint32_t swfw_sync;
11378
11379 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11380 device_xname(sc->sc_dev), __func__));
11381 KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
11382
11383 if (sc->sc_flags & WM_F_LOCK_SWSM) {
11384 while (wm_get_swsm_semaphore(sc) != 0)
11385 continue;
11386 }
11387 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
11388 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
11389 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
11390 if (sc->sc_flags & WM_F_LOCK_SWSM)
11391 wm_put_swsm_semaphore(sc);
11392 }
11393
11394 static int
11395 wm_get_phy_82575(struct wm_softc *sc)
11396 {
11397
11398 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11399 device_xname(sc->sc_dev), __func__));
11400 return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
11401 }
11402
11403 static void
11404 wm_put_phy_82575(struct wm_softc *sc)
11405 {
11406
11407 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11408 device_xname(sc->sc_dev), __func__));
11409		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
11410 }
11411
static int
wm_get_swfwhw_semaphore(struct wm_softc *sc)
{
	uint32_t ext_ctrl;
	int timeout;

	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
	for (timeout = 0; timeout < 200; timeout++) {
		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);

		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
			return 0;
		delay(5000);
	}
	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
	    device_xname(sc->sc_dev), ext_ctrl);
	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
	return 1;
}

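/*
 * Put SW/FW/HW semaphore: drop MDIO ownership and release the PHY
 * mutex taken in wm_get_swfwhw_semaphore().
 */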
static void
wm_put_swfwhw_semaphore(struct wm_softc *sc)
{
	uint32_t ext_ctrl;

	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);

	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
}

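/*
 * Get the SW flag (PHY semaphore).
 * Same as e1000_acquire_swflag_ich8lan().
 */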
static int
wm_get_swflag_ich8lan(struct wm_softc *sc)
{
	uint32_t ext_ctrl;
	int timeout;

	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));
	mutex_enter(sc->sc_ich_phymtx);
	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
			break;
		delay(1000);
	}
	if (timeout >= WM_PHY_CFG_TIMEOUT) {
		printf("%s: SW has already locked the resource\n",
		    device_xname(sc->sc_dev));
		goto out;
	}

	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
	for (timeout = 0; timeout < 1000; timeout++) {
		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
			break;
		delay(1000);
	}
	if (timeout >= 1000) {
		printf("%s: failed to acquire semaphore\n",
		    device_xname(sc->sc_dev));
		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
		goto out;
	}
	return 0;

out:
	mutex_exit(sc->sc_ich_phymtx);
	return 1;
}

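/*
 * Put the SW flag (PHY semaphore).
 * Same as e1000_release_swflag_ich8lan().
 */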
static void
wm_put_swflag_ich8lan(struct wm_softc *sc)
{
	uint32_t ext_ctrl;

	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));
	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
	} else {
		printf("%s: Semaphore unexpectedly released\n",
		    device_xname(sc->sc_dev));
	}

	mutex_exit(sc->sc_ich_phymtx);
}

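/*
 * Get the NVM lock (ICH8 and newer); just take the NVM mutex.
 */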
static int
wm_get_nvm_ich8lan(struct wm_softc *sc)
{

	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));
	mutex_enter(sc->sc_ich_nvmmtx);

	return 0;
}

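/*
 * Put the NVM lock (ICH8 and newer).
 */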
static void
wm_put_nvm_ich8lan(struct wm_softc *sc)
{

	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));
	mutex_exit(sc->sc_ich_nvmmtx);
}

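/*
 * Get the hardware semaphore for 82573 family devices, using
 * EXTCNFCTR.MDIO_SW_OWNERSHIP as the lock bit.
 */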
static int
wm_get_hw_semaphore_82573(struct wm_softc *sc)
{
	int i = 0;
	uint32_t reg;

	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
	do {
		CSR_WRITE(sc, WMREG_EXTCNFCTR,
		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
			break;
		delay(2*1000);
		i++;
	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);

	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
		wm_put_hw_semaphore_82573(sc);
		log(LOG_ERR, "%s: Driver can't access the PHY\n",
		    device_xname(sc->sc_dev));
		return -1;
	}

	return 0;
}

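/*
 * Put the hardware semaphore for 82573 family devices.
 */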
static void
wm_put_hw_semaphore_82573(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
}

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */

#ifdef WM_WOL
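/*
 * Check whether the management firmware is running in a mode that
 * keeps the interface under its control; dispatch to the variant for
 * the chip type.
 */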
static int
wm_check_mng_mode(struct wm_softc *sc)
{
	int rv;

	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
		rv = wm_check_mng_mode_ich8lan(sc);
		break;
	case WM_T_82574:
	case WM_T_82583:
		rv = wm_check_mng_mode_82574(sc);
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_80003:
		rv = wm_check_mng_mode_generic(sc);
		break;
	default:
		/* nothing to do */
		rv = 0;
		break;
	}

	return rv;
}

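/*
 * Return 1 if valid firmware is running in iAMT mode (ICH/PCH).
 */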
static int
wm_check_mng_mode_ich8lan(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);

	if (((fwsm & FWSM_FW_VALID) != 0)
	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
		return 1;

	return 0;
}

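/*
 * Return 1 if the manageability mode bits are set in the NVM CFG2
 * word (82574/82583).
 */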
static int
wm_check_mng_mode_82574(struct wm_softc *sc)
{
	uint16_t data;

	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);

	if ((data & NVM_CFG2_MNGM_MASK) != 0)
		return 1;

	return 0;
}

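/*
 * Return 1 if the firmware is in iAMT mode (other chips with FWSM).
 */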
static int
wm_check_mng_mode_generic(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);

	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
		return 1;

	return 0;
}
#endif /* WM_WOL */

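/*
 * Return 1 if management packets should be passed through to the
 * host, i.e. firmware is present and TCO reception is enabled, but
 * the firmware does not consume the packets itself.
 * Same as e1000_enable_mng_pass_thru().
 */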
static int
wm_enable_mng_pass_thru(struct wm_softc *sc)
{
	uint32_t manc, fwsm, factps;

	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
		return 0;

	manc = CSR_READ(sc, WMREG_MANC);

	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
	    device_xname(sc->sc_dev), manc));
	if ((manc & MANC_RECV_TCO_EN) == 0)
		return 0;

	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
		fwsm = CSR_READ(sc, WMREG_FWSM);
		factps = CSR_READ(sc, WMREG_FACTPS);
		if (((factps & FACTPS_MNGCG) == 0)
		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
			return 1;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
		uint16_t data;

		factps = CSR_READ(sc, WMREG_FACTPS);
		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
		    device_xname(sc->sc_dev), factps, data));
		if (((factps & FACTPS_MNGCG) == 0)
		    && ((data & NVM_CFG2_MNGM_MASK)
			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
			return 1;
	} else if (((manc & MANC_SMBUS_EN) != 0)
	    && ((manc & MANC_ASF_EN) == 0))
		return 1;

	return 0;
}

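/*
 * Return true if the firmware currently blocks a PHY reset.
 */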
static bool
wm_phy_resetisblocked(struct wm_softc *sc)
{
	bool blocked = false;
	uint32_t reg;
	int i = 0;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
		do {
			reg = CSR_READ(sc, WMREG_FWSM);
			if ((reg & FWSM_RSPCIPHY) == 0) {
				blocked = true;
				delay(10*1000);
				continue;
			}
			blocked = false;
		} while (blocked && (i++ < 30));
		return blocked;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
		reg = CSR_READ(sc, WMREG_MANC);
		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
			return true;
		else
			return false;
	default:
		/* no problem */
		break;
	}

	return false;
}

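/*
 * Tell the firmware that the driver has loaded by setting the
 * DRV_LOAD bit, so the hardware is handed over to the driver.
 */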
static void
wm_get_hw_control(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	switch (sc->sc_type) {
	case WM_T_82573:
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
		break;
	default:
		break;
	}
}

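/*
 * Tell the firmware that the driver is unloading by clearing the
 * DRV_LOAD bit, handing control back to the firmware.
 */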
static void
wm_release_hw_control(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
		return;

	if (sc->sc_type == WM_T_82573) {
		reg = CSR_READ(sc, WMREG_SWSM);
		reg &= ~SWSM_DRV_LOAD;
		CSR_WRITE(sc, WMREG_SWSM, reg);
	} else {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
	}
}

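/*
 * Gate or ungate automatic PHY configuration by hardware (PCH2 and
 * newer only).
 */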
static void
wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	if (sc->sc_type < WM_T_PCH2)
		return;

	reg = CSR_READ(sc, WMREG_EXTCNFCTR);

	if (gate)
		reg |= EXTCNFCTR_GATE_PHY_CFG;
	else
		reg &= ~EXTCNFCTR_GATE_PHY_CFG;

	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
}

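/*
 * Switch the PHY's interface from SMBus back to PCIe by toggling the
 * LANPHYPC value pin, so that the MAC can talk to the PHY again.
 */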
static void
wm_smbustopci(struct wm_softc *sc)
{
	uint32_t fwsm, reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
	wm_gate_hw_phy_config_ich8lan(sc, true);

	/* Acquire PHY semaphore */
	sc->phy.acquire(sc);

	fwsm = CSR_READ(sc, WMREG_FWSM);
	if (((fwsm & FWSM_FW_VALID) == 0)
	    && (wm_phy_resetisblocked(sc) == false)) {
		if (sc->sc_type >= WM_T_PCH_LPT) {
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg |= CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
			CSR_WRITE_FLUSH(sc);
			delay(50*1000);
		}

		/* Toggle LANPHYPC */
		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		CSR_WRITE_FLUSH(sc);
		delay(1000);
		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		CSR_WRITE_FLUSH(sc);
		delay(50*1000);

		if (sc->sc_type >= WM_T_PCH_LPT) {
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
	}

	/* Release semaphore */
	sc->phy.release(sc);

	/*
	 * Ungate automatic PHY configuration by hardware on non-managed 82579
	 */
	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0))
		wm_gate_hw_phy_config_ich8lan(sc, false);
}

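/*
 * Enable manageability: let management packets through to the host,
 * but keep ARP handled by the host rather than by the firmware.
 */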
static void
wm_init_manageability(struct wm_softc *sc)
{

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));
	if (sc->sc_flags & WM_F_HAS_MANAGE) {
		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
		uint32_t manc = CSR_READ(sc, WMREG_MANC);

		/* Disable hardware interception of ARP */
		manc &= ~MANC_ARP_EN;

		/* Enable receiving management packets to the host */
		if (sc->sc_type >= WM_T_82571) {
			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
		}

		CSR_WRITE(sc, WMREG_MANC, manc);
	}
}

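/*
 * Undo wm_init_manageability(): hand ARP interception back to the
 * firmware when the driver goes away.
 */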
static void
wm_release_manageability(struct wm_softc *sc)
{

	if (sc->sc_flags & WM_F_HAS_MANAGE) {
		uint32_t manc = CSR_READ(sc, WMREG_MANC);

		manc |= MANC_ARP_EN;
		if (sc->sc_type >= WM_T_82571)
			manc &= ~MANC_EN_MNG2HOST;

		CSR_WRITE(sc, WMREG_MANC, manc);
	}
}

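/*
 * Set the management and wakeup related flags (HAS_AMT,
 * ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES and HAS_MANAGE) for this chip.
 */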
static void
wm_get_wakeup(struct wm_softc *sc)
{

	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82583:
		sc->sc_flags |= WM_F_HAS_AMT;
		/* FALLTHROUGH */
	case WM_T_80003:
	case WM_T_82541:
	case WM_T_82547:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_I350:
	case WM_T_I354:
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
		sc->sc_flags |= WM_F_HAS_AMT;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	default:
		break;
	}

	/* 1: HAS_MANAGE */
	if (wm_enable_mng_pass_thru(sc) != 0)
		sc->sc_flags |= WM_F_HAS_MANAGE;

#ifdef WM_DEBUG
	printf("\n");
	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
		printf("HAS_AMT,");
	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
		printf("ARC_SUBSYS_VALID,");
	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
		printf("ASF_FIRMWARE_PRES,");
	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
		printf("HAS_MANAGE,");
	printf("\n");
#endif
	/*
	 * Note that the WOL flag is set after the resetting of the EEPROM
	 * stuff.
	 */
}

#ifdef WM_WOL
/* WOL in the newer chipset interfaces (pchlan) */
static void
wm_enable_phy_wakeup(struct wm_softc *sc)
{
#if 0
	uint16_t preg;

	/* Copy MAC RARs to PHY RARs */

	/* Copy MAC MTA to PHY MTA */

	/* Configure PHY Rx Control register */

	/* Enable PHY wakeup in MAC register */

	/* Configure and enable PHY wakeup in PHY registers */

	/* Activate PHY wakeup */

	/* XXX */
#endif
}

/* Power down workaround on D3 */
static void
wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	for (i = 0; i < 2; i++) {
		/* Disable link */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (sc->sc_type == WM_T_ICH8)
			wm_gig_downshift_workaround_ich8lan(sc);

		/* Write VR power-down enable */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);

		/* Read it back and test */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
			break;

		/* Issue PHY reset and repeat at most one more time */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	}
}

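/*
 * Enable the wakeup (WOL) capabilities: advertise wakeup, set up the
 * wakeup filters and request PME through the PCI power management
 * registers.
 */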
static void
wm_enable_wakeup(struct wm_softc *sc)
{
	uint32_t reg, pmreg;
	pcireg_t pmode;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return;

	/* Advertise the wakeup capability */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
	    | CTRL_SWDPIN(3));
	CSR_WRITE(sc, WMREG_WUC, WUC_APME);

	/* ICH workaround */
	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
		/* Disable gig during WOL */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
		if (sc->sc_type == WM_T_PCH)
			wm_gmii_reset(sc);

		/* Power down workaround */
		if (sc->sc_phytype == WMPHY_82577) {
			struct mii_softc *child;

			/* Assume that the PHY is copper */
			child = LIST_FIRST(&sc->sc_mii.mii_phys);
			if ((child != NULL) && (child->mii_mpd_rev <= 2))
				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
				    (768 << 5) | 25, 0x0444); /* magic num */
		}
		break;
	default:
		break;
	}

	/* Keep the laser running on fiber adapters */
	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_SWDPIN(3);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
	}

	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
#if 0 /* for the multicast packet */
	reg |= WUFC_MC;
	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
#endif

	if (sc->sc_type == WM_T_PCH) {
		wm_enable_phy_wakeup(sc);
	} else {
		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
		CSR_WRITE(sc, WMREG_WUFC, reg);
	}

	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2))
	    && (sc->sc_phytype == WMPHY_IGP_3))
		wm_igp3_phy_powerdown_workaround_ich8lan(sc);

	/* Request PME */
	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
#if 0
	/* Disable WOL */
	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
#else
	/* For WOL */
	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
#endif
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
}
#endif /* WM_WOL */

/* LPLU */

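/*
 * Disable D0 Low Power Link Up (LPLU) via the PHY_CTRL register.
 */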
static void
wm_lplu_d0_disable(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	reg = CSR_READ(sc, WMREG_PHY_CTRL);
	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
}

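/*
 * Disable D0 LPLU on PCH variants, using the PHY's OEM bits register.
 */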
static void
wm_lplu_d0_disable_pch(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
	reg |= HV_OEM_BITS_ANEGNOW;
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
}

/* EEE */

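/*
 * Enable or disable Energy Efficient Ethernet (I350 and newer),
 * depending on the WM_F_EEE flag.
 */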
static void
wm_set_eee_i350(struct wm_softc *sc)
{
	uint32_t ipcnfg, eeer;

	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
	eeer = CSR_READ(sc, WMREG_EEER);

	if ((sc->sc_flags & WM_F_EEE) != 0) {
		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
		    | EEER_LPI_FC);
	} else {
		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		ipcnfg &= ~IPCNFG_10BASE_TE;
		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
		    | EEER_LPI_FC);
	}

	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
	CSR_WRITE(sc, WMREG_EEER, eeer);
	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
}

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */

/* Work-around for 82566 Kumeran PCS lock loss */
static void
wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
{
#if 0
	int miistatus, active, i;
	int reg;

	miistatus = sc->sc_mii.mii_media_status;

	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
		return;

	active = sc->sc_mii.mii_media_active;

	/* Nothing to do if the link is other than 1Gbps */
	if (IFM_SUBTYPE(active) != IFM_1000_T)
		return;

	for (i = 0; i < 10; i++) {
		/* read twice */
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
			goto out; /* GOOD! */

		/* Reset the PHY */
		wm_gmii_reset(sc);
		delay(5*1000);
	}

	/* Disable GigE link negotiation */
	reg = CSR_READ(sc, WMREG_PHY_CTRL);
	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

	/*
	 * Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers.
	 */
	wm_gig_downshift_workaround_ich8lan(sc);

out:
	return;
#endif
}

/* WOL from S5 stops working */
static void
wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
{
	uint16_t kmrn_reg;

	/* Only for igp3 */
	if (sc->sc_phytype == WMPHY_IGP_3) {
		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
	}
}

/*
 * Workaround for pch's PHYs
 * XXX should be moved to new PHY driver?
 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));
	KASSERT(sc->sc_type == WM_T_PCH);

	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */

	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		struct mii_softc *child;

		/*
		 * Return registers to default by doing a soft reset then
		 * writing 0x3140 to the control register.
		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
		 */
		child = LIST_FIRST(&sc->sc_mii.mii_phys);
		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
			printf("XXX 82578 rev < 2\n");
			PHY_RESET(child);
			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
			    0x3140);
		}
	}

	/* Select page 0 */
	sc->phy.acquire(sc);
	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	sc->phy.release(sc);

	/*
	 * Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}

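/*
 * Workaround for pch2's PHY (82579).
 * XXX should be moved to new PHY driver?
 */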
static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));
	KASSERT(sc->sc_type == WM_T_PCH2);

	wm_set_mdio_slow_mode_hv(sc);
}

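/*
 * K1 workaround: disable the K1 power saving state while the link is
 * up at 1Gbps, and apply the link stall fix.
 */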
static int
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	if (sc->phy.acquire(sc) != 0)
		return -1;

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);
	sc->phy.release(sc);

	return 0;
}

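/*
 * Put the PHY's MDIO interface into slow mode.
 */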
static void
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	uint32_t reg;

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

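/*
 * Enable or disable the K1 power state in the KMRN interface, then
 * briefly force the MAC speed/bypass settings so the change takes
 * effect.
 */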
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);
}

/* special case - for 82575 - need to do manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * remark: this is untested code - we have no board without EEPROM
	 * same setup as mentioned in the FreeBSD driver for the i82575
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

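/*
 * Restore the MDICNFG external MDIO and common MDIO settings from the
 * NVM after a reset (82580, SGMII only).
 */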
static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
static void
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	int phyval;
	bool wa_done = false;
	int i;

	/*
	 * Get the Power Management cap offset first; bail out before
	 * modifying MDICNFG if it's missing.
	 */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			break; /* OK */
		}

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
}
