/*	$NetBSD: if_wm.c,v 1.461 2017/01/04 04:43:08 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX multiqueue improvement (refine queue selection logic)
 *	- Advanced Receive Descriptor
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.461 2017/01/04 04:43:08 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>
#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

/* Wrapped in do/while so the macro expands safely inside if/else bodies. */
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
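
/*
 * Illustrative usage (not from the original source): DPRINTF relies on the
 * double-parenthesis idiom so that a whole printf-style argument list can be
 * passed through the single macro parameter 'y', e.g.:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n",
 *	    device_xname(sc->sc_dev)));
 *
 * With WM_DEBUG undefined the call compiles away entirely.
 */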

#ifdef NET_MPSAFE
#define	WM_MPSAFE	1
#endif

/*
 * Maximum number of interrupts this device driver can use.
 */
#define	WM_MAX_NQUEUEINTR	16
#define	WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
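
/*
 * Illustrative note: txq_ndesc and txq_num are required to be powers of
 * two precisely so that ring-index wrap-around reduces to a mask.  For
 * example, with WM_NTXDESC(txq) == 4096 (mask 4095):
 *
 *	WM_NEXTTX(txq, 4095) == (4096 & 4095) == 0
 *
 * i.e. the index wraps back to the start of the ring with no division
 * and no conditional.
 */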

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(x)		(sizeof(wiseman_rxdesc_t) * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};
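
/*
 * Note (assumption, inferred from wm_rxpbs_adjust_82580() declared below):
 * the table appears to be indexed by the RXPBS size field read from the
 * 82580 and to yield the usable packet buffer size in KB.
 */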

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define	WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define	WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define	WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
#endif /* WM_EVENT_COUNTERS */
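
/*
 * Illustrative expansion (not in the original source):
 *
 *	WM_Q_EVCNT_DEFINE(txq, txdw)
 *
 * declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * No token pasting happens inside a string literal, so the sizeof()
 * argument is used verbatim and every name buffer is 18 bytes -- just
 * enough for the longest generated name, "txq00txfifo_stall" (17 chars
 * plus NUL).  WM_Q_EVCNT_ATTACH(txq, txdw, q, 0, xname, ...) then
 * registers the counter under the name "txq00txdw".
 */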

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)  /* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)  /* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall) /* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	  /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	  /* Tx queue empty interrupts */
					  /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)	  /* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)	  /* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)  /* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)	  /* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)	  /* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain) /* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)	  /* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)	  /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	wiseman_rxdesc_t *rxq_descs;

	/* RX control data structures. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
	size_t rxq_desc_size;		/* control data size */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr)	/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum)	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum)	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};
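
/*
 * The acquire/release pair above is an ops table: presumably the attach
 * code fills it in with the chip-appropriate semaphore routines (for
 * example wm_get_null/wm_put_null when no locking is needed, or
 * wm_get_phy_82575/wm_put_phy_82575 -- all declared later in this file),
 * so callers can take the PHY semaphore without a chip-type switch at
 * every call site.
 */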

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */
	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define	WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define	WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define	WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
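
/*
 * Note: when the driver is not running MPSAFE, sc_core_lock may be NULL;
 * the macros above then degrade to no-ops and WM_CORE_LOCKED() is always
 * true, so the same call sites work in both configurations.
 */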

#ifdef WM_MPSAFE
#define	CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define	CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
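
/*
 * Illustrative trace of the tail-pointer idiom above (not in the original
 * source).  rxq_tailp always points at the location that should receive
 * the next mbuf, which makes appending O(1):
 *
 *	WM_RXCHAIN_RESET(rxq);	    rxq_tailp == &rxq_head, rxq_head == NULL
 *	WM_RXCHAIN_LINK(rxq, m1);   rxq_head == m1, rxq_tailp == &m1->m_next
 *	WM_RXCHAIN_LINK(rxq, m2);   m1->m_next == m2, rxq_tailp == &m2->m_next
 */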

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define	WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define	WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define	WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define	WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
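
/*
 * CSR_WRITE_FLUSH() reads the STATUS register purely for its side effect:
 * PCI writes are posted, and performing a read forces any buffered writes
 * out to the device before the driver proceeds (e.g. before a delay()).
 */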

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + (sc)->sc_flashreg_offset)
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + (sc)->sc_flashreg_offset, (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + (sc)->sc_flashreg_offset)
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + (sc)->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
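
/*
 * Worked example (illustrative): for a descriptor at bus address
 * 0x123456789 on a platform with a 64-bit bus_addr_t,
 *
 *	WM_CDTXADDR_LO() == 0x23456789	(low 32 bits)
 *	WM_CDTXADDR_HI() == 0x1		(high 32 bits)
 *
 * On 32-bit platforms the sizeof() test is false at compile time and the
 * high half is simply 0.
 */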

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start(struct ifnet *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * The PHYs' own workarounds live in the PHY drivers; only MAC-side ones
 * are here.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define	WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
#endif
	{ 0,			0,
	  NULL,
	  0,			0 },
};

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

1450 static inline void
1451 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1452 {
1453
1454 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1455 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1456 }
1457
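	/*
	 * wm_82575_write_8bit_ctlr_reg:
	 *
	 *	Write an 8-bit value to an indirect 82575 controller register
	 *	and poll (up to SCTL_CTL_POLL_TIMEOUT iterations, 5us apart)
	 *	for the ready bit, warning if it never comes up.
	 */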
1458 static inline void
1459 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1460 uint32_t data)
1461 {
1462 uint32_t regval;
1463 int i;
1464
1465 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1466
1467 CSR_WRITE(sc, reg, regval);
1468
1469 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1470 delay(5);
1471 if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1472 break;
1473 }
1474 if (i == SCTL_CTL_POLL_TIMEOUT) {
1475 aprint_error("%s: WARNING:"
1476 " i82575 reg 0x%08x setup did not indicate ready\n",
1477 device_xname(sc->sc_dev), reg);
1478 }
1479 }
1480
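	/*
	 * wm_set_dma_addr:
	 *
	 *	Store a DMA address in a descriptor's little-endian address
	 *	field, splitting it into 32-bit low/high halves. On platforms
	 *	with 32-bit bus addresses the high half is always zero.
	 */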
1481 static inline void
1482 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1483 {
1484 wa->wa_low = htole32(v & 0xffffffffU);
1485 if (sizeof(bus_addr_t) == 8)
1486 wa->wa_high = htole32((uint64_t) v >> 32);
1487 else
1488 wa->wa_high = 0;
1489 }
1490
1491 /*
1492 * Descriptor sync/init functions.
1493 */
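
	/*
	 * wm_cdtxsync:
	 *
	 *	Sync the control data for the given range of transmit
	 *	descriptors, handling wrap-around of the ring. For example,
	 *	on a 256-descriptor ring, syncing 8 descriptors starting at
	 *	index 252 issues two bus_dmamap_sync() calls: one covering
	 *	252..255 and one covering 0..3.
	 */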
1494 static inline void
1495 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1496 {
1497 struct wm_softc *sc = txq->txq_sc;
1498
1499 /* If it will wrap around, sync to the end of the ring. */
1500 if ((start + num) > WM_NTXDESC(txq)) {
1501 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1502 WM_CDTXOFF(txq, start), txq->txq_descsize *
1503 (WM_NTXDESC(txq) - start), ops);
1504 num -= (WM_NTXDESC(txq) - start);
1505 start = 0;
1506 }
1507
1508 /* Now sync whatever is left. */
1509 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1510 WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1511 }
1512
1513 static inline void
1514 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1515 {
1516 struct wm_softc *sc = rxq->rxq_sc;
1517
1518 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1519 WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
1520 }
1521
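	/*
	 * wm_init_rxdesc:
	 *
	 *	(Re)initialize the receive descriptor at the given index:
	 *	point it at the mbuf's buffer (with the alignment tweak
	 *	applied), clear the status fields, sync the descriptor, and
	 *	write the index to the RDT register to hand it to the chip.
	 */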
1522 static inline void
1523 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1524 {
1525 struct wm_softc *sc = rxq->rxq_sc;
1526 struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1527 wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1528 struct mbuf *m = rxs->rxs_mbuf;
1529
1530	/*
1531	 * Note: We scoot the packet forward 2 bytes in the buffer
1532	 * so that the payload after the Ethernet header is aligned
1533	 * to a 4-byte boundary.
1534	 *
1535	 * XXX BRAINDAMAGE ALERT!
1536	 * The stupid chip uses the same size for every buffer, which
1537	 * is set in the Receive Control register. We are using the 2K
1538	 * size option, but what we REALLY want is (2K - 2)! For this
1539	 * reason, we can't "scoot" packets longer than the standard
1540	 * Ethernet MTU. On strict-alignment platforms, if the total
1541	 * size exceeds (2K - 2) we set align_tweak to 0 and let
1542	 * the upper layer copy the headers.
1543	 */
1544 m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1545
1546 wm_set_dma_addr(&rxd->wrx_addr,
1547 rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1548 rxd->wrx_len = 0;
1549 rxd->wrx_cksum = 0;
1550 rxd->wrx_status = 0;
1551 rxd->wrx_errors = 0;
1552 rxd->wrx_special = 0;
1553 wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1554
1555 CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1556 }
1557
1558	/*
1559	 * Device driver interface functions and commonly used functions:
1560	 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1561	 */
1562
1563 /* Lookup supported device table */
1564 static const struct wm_product *
1565 wm_lookup(const struct pci_attach_args *pa)
1566 {
1567 const struct wm_product *wmp;
1568
1569 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1570 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1571 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1572 return wmp;
1573 }
1574 return NULL;
1575 }
1576
1577 /* The match function (ca_match) */
1578 static int
1579 wm_match(device_t parent, cfdata_t cf, void *aux)
1580 {
1581 struct pci_attach_args *pa = aux;
1582
1583 if (wm_lookup(pa) != NULL)
1584 return 1;
1585
1586 return 0;
1587 }
1588
1589 /* The attach function (ca_attach) */
1590 static void
1591 wm_attach(device_t parent, device_t self, void *aux)
1592 {
1593 struct wm_softc *sc = device_private(self);
1594 struct pci_attach_args *pa = aux;
1595 prop_dictionary_t dict;
1596 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1597 pci_chipset_tag_t pc = pa->pa_pc;
1598 int counts[PCI_INTR_TYPE_SIZE];
1599 pci_intr_type_t max_type;
1600 const char *eetype, *xname;
1601 bus_space_tag_t memt;
1602 bus_space_handle_t memh;
1603 bus_size_t memsize;
1604 int memh_valid;
1605 int i, error;
1606 const struct wm_product *wmp;
1607 prop_data_t ea;
1608 prop_number_t pn;
1609 uint8_t enaddr[ETHER_ADDR_LEN];
1610 uint16_t cfg1, cfg2, swdpin, nvmword;
1611 pcireg_t preg, memtype;
1612 uint16_t eeprom_data, apme_mask;
1613 bool force_clear_smbi;
1614 uint32_t link_mode;
1615 uint32_t reg;
1616 void (*deferred_start_func)(struct ifnet *) = NULL;
1617
1618 sc->sc_dev = self;
1619 callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1620 sc->sc_core_stopping = false;
1621
1622 wmp = wm_lookup(pa);
1623 #ifdef DIAGNOSTIC
1624 if (wmp == NULL) {
1625 printf("\n");
1626 panic("wm_attach: impossible");
1627 }
1628 #endif
1629 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1630
1631 sc->sc_pc = pa->pa_pc;
1632 sc->sc_pcitag = pa->pa_tag;
1633
1634 if (pci_dma64_available(pa))
1635 sc->sc_dmat = pa->pa_dmat64;
1636 else
1637 sc->sc_dmat = pa->pa_dmat;
1638
1639 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1640 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1641 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1642
1643 sc->sc_type = wmp->wmp_type;
1644
1645 /* Set default function pointers */
1646 sc->phy.acquire = wm_get_null;
1647 sc->phy.release = wm_put_null;
1648 sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
1649
1650 if (sc->sc_type < WM_T_82543) {
1651 if (sc->sc_rev < 2) {
1652 aprint_error_dev(sc->sc_dev,
1653 "i82542 must be at least rev. 2\n");
1654 return;
1655 }
1656 if (sc->sc_rev < 3)
1657 sc->sc_type = WM_T_82542_2_0;
1658 }
1659
1660 /*
1661 * Disable MSI for Errata:
1662 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1663 *
1664 * 82544: Errata 25
1665 * 82540: Errata 6 (easy to reproduce device timeout)
1666 * 82545: Errata 4 (easy to reproduce device timeout)
1667 * 82546: Errata 26 (easy to reproduce device timeout)
1668 * 82541: Errata 7 (easy to reproduce device timeout)
1669 *
1670 * "Byte Enables 2 and 3 are not set on MSI writes"
1671 *
1672 * 82571 & 82572: Errata 63
1673 */
1674 if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1675 || (sc->sc_type == WM_T_82572))
1676 pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1677
1678 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1679 || (sc->sc_type == WM_T_82580)
1680 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1681 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1682 sc->sc_flags |= WM_F_NEWQUEUE;
1683
1684 /* Set device properties (mactype) */
1685 dict = device_properties(sc->sc_dev);
1686 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1687
1688	/*
1689	 * Map the device. All devices support memory-mapped access,
1690	 * and it is really required for normal operation.
1691	 */
1692 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1693 switch (memtype) {
1694 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1695 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1696 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1697 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1698 break;
1699 default:
1700 memh_valid = 0;
1701 break;
1702 }
1703
1704 if (memh_valid) {
1705 sc->sc_st = memt;
1706 sc->sc_sh = memh;
1707 sc->sc_ss = memsize;
1708 } else {
1709 aprint_error_dev(sc->sc_dev,
1710 "unable to map device registers\n");
1711 return;
1712 }
1713
1714 /*
1715 * In addition, i82544 and later support I/O mapped indirect
1716 * register access. It is not desirable (nor supported in
1717 * this driver) to use it for normal operation, though it is
1718 * required to work around bugs in some chip versions.
1719 */
1720 if (sc->sc_type >= WM_T_82544) {
1721 /* First we have to find the I/O BAR. */
1722 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1723 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1724 if (memtype == PCI_MAPREG_TYPE_IO)
1725 break;
1726 if (PCI_MAPREG_MEM_TYPE(memtype) ==
1727 PCI_MAPREG_MEM_TYPE_64BIT)
1728 i += 4; /* skip high bits, too */
1729 }
1730 if (i < PCI_MAPREG_END) {
1731			/*
1732			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1733			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1734			 * That's no problem, because newer chips don't have
1735			 * this bug.
1736			 *
1737			 * The i8254x apparently doesn't respond when the
1738			 * I/O BAR is 0, which looks somewhat like it hasn't
1739			 * been configured.
1740			 */
1741 preg = pci_conf_read(pc, pa->pa_tag, i);
1742 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1743 aprint_error_dev(sc->sc_dev,
1744 "WARNING: I/O BAR at zero.\n");
1745 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1746 0, &sc->sc_iot, &sc->sc_ioh,
1747 NULL, &sc->sc_ios) == 0) {
1748 sc->sc_flags |= WM_F_IOH_VALID;
1749 } else {
1750 aprint_error_dev(sc->sc_dev,
1751 "WARNING: unable to map I/O space\n");
1752 }
1753 }
1754
1755 }
1756
1757 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1758 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1759 preg |= PCI_COMMAND_MASTER_ENABLE;
1760 if (sc->sc_type < WM_T_82542_2_1)
1761 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1762 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1763
1764 /* power up chip */
1765 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1766 NULL)) && error != EOPNOTSUPP) {
1767 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1768 return;
1769 }
1770
1771 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1772
1773 /* Allocation settings */
1774 max_type = PCI_INTR_TYPE_MSIX;
1775 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
1776 counts[PCI_INTR_TYPE_MSI] = 1;
1777 counts[PCI_INTR_TYPE_INTX] = 1;
1778
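	/*
	 * Try MSI-X first. If the setup fails, release the allocated
	 * vectors and retry with MSI; if that also fails, fall back to
	 * INTx.
	 */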
1779 alloc_retry:
1780 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1781 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1782 return;
1783 }
1784
1785 if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1786 error = wm_setup_msix(sc);
1787 if (error) {
1788 pci_intr_release(pc, sc->sc_intrs,
1789 counts[PCI_INTR_TYPE_MSIX]);
1790
1791 /* Setup for MSI: Disable MSI-X */
1792 max_type = PCI_INTR_TYPE_MSI;
1793 counts[PCI_INTR_TYPE_MSI] = 1;
1794 counts[PCI_INTR_TYPE_INTX] = 1;
1795 goto alloc_retry;
1796 }
1797 } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1798 wm_adjust_qnum(sc, 0); /* must not use multiqueue */
1799 error = wm_setup_legacy(sc);
1800 if (error) {
1801 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1802 counts[PCI_INTR_TYPE_MSI]);
1803
1804 /* The next try is for INTx: Disable MSI */
1805 max_type = PCI_INTR_TYPE_INTX;
1806 counts[PCI_INTR_TYPE_INTX] = 1;
1807 goto alloc_retry;
1808 }
1809 } else {
1810 wm_adjust_qnum(sc, 0); /* must not use multiqueue */
1811 error = wm_setup_legacy(sc);
1812 if (error) {
1813 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1814 counts[PCI_INTR_TYPE_INTX]);
1815 return;
1816 }
1817 }
1818
1819 /*
1820 * Check the function ID (unit number of the chip).
1821 */
1822 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1823 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1824 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1825 || (sc->sc_type == WM_T_82580)
1826 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1827 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1828 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1829 else
1830 sc->sc_funcid = 0;
1831
1832 /*
1833 * Determine a few things about the bus we're connected to.
1834 */
1835 if (sc->sc_type < WM_T_82543) {
1836 /* We don't really know the bus characteristics here. */
1837 sc->sc_bus_speed = 33;
1838 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1839		/*
1840		 * CSA (Communication Streaming Architecture) is about as fast
1841		 * as a 32-bit 66MHz PCI bus.
1842		 */
1843 sc->sc_flags |= WM_F_CSA;
1844 sc->sc_bus_speed = 66;
1845 aprint_verbose_dev(sc->sc_dev,
1846 "Communication Streaming Architecture\n");
1847 if (sc->sc_type == WM_T_82547) {
1848 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1849 callout_setfunc(&sc->sc_txfifo_ch,
1850 wm_82547_txfifo_stall, sc);
1851 aprint_verbose_dev(sc->sc_dev,
1852 "using 82547 Tx FIFO stall work-around\n");
1853 }
1854 } else if (sc->sc_type >= WM_T_82571) {
1855 sc->sc_flags |= WM_F_PCIE;
1856 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1857 && (sc->sc_type != WM_T_ICH10)
1858 && (sc->sc_type != WM_T_PCH)
1859 && (sc->sc_type != WM_T_PCH2)
1860 && (sc->sc_type != WM_T_PCH_LPT)
1861 && (sc->sc_type != WM_T_PCH_SPT)) {
1862 /* ICH* and PCH* have no PCIe capability registers */
1863 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1864 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1865 NULL) == 0)
1866 aprint_error_dev(sc->sc_dev,
1867 "unable to find PCIe capability\n");
1868 }
1869 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1870 } else {
1871 reg = CSR_READ(sc, WMREG_STATUS);
1872 if (reg & STATUS_BUS64)
1873 sc->sc_flags |= WM_F_BUS64;
1874 if ((reg & STATUS_PCIX_MODE) != 0) {
1875 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1876
1877 sc->sc_flags |= WM_F_PCIX;
1878 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1879 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1880 aprint_error_dev(sc->sc_dev,
1881 "unable to find PCIX capability\n");
1882 else if (sc->sc_type != WM_T_82545_3 &&
1883 sc->sc_type != WM_T_82546_3) {
1884 /*
1885 * Work around a problem caused by the BIOS
1886 * setting the max memory read byte count
1887 * incorrectly.
1888 */
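				/*
				 * MMRBC is encoded as (512 << n), so
				 * field values 0..3 correspond to 512,
				 * 1024, 2048 and 4096 bytes.
				 */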
1889 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1890 sc->sc_pcixe_capoff + PCIX_CMD);
1891 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1892 sc->sc_pcixe_capoff + PCIX_STATUS);
1893
1894 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1895 PCIX_CMD_BYTECNT_SHIFT;
1896 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1897 PCIX_STATUS_MAXB_SHIFT;
1898 if (bytecnt > maxb) {
1899 aprint_verbose_dev(sc->sc_dev,
1900 "resetting PCI-X MMRBC: %d -> %d\n",
1901 512 << bytecnt, 512 << maxb);
1902 pcix_cmd = (pcix_cmd &
1903 ~PCIX_CMD_BYTECNT_MASK) |
1904 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1905 pci_conf_write(pa->pa_pc, pa->pa_tag,
1906 sc->sc_pcixe_capoff + PCIX_CMD,
1907 pcix_cmd);
1908 }
1909 }
1910 }
1911 /*
1912 * The quad port adapter is special; it has a PCIX-PCIX
1913 * bridge on the board, and can run the secondary bus at
1914 * a higher speed.
1915 */
1916 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1917 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1918 : 66;
1919 } else if (sc->sc_flags & WM_F_PCIX) {
1920 switch (reg & STATUS_PCIXSPD_MASK) {
1921 case STATUS_PCIXSPD_50_66:
1922 sc->sc_bus_speed = 66;
1923 break;
1924 case STATUS_PCIXSPD_66_100:
1925 sc->sc_bus_speed = 100;
1926 break;
1927 case STATUS_PCIXSPD_100_133:
1928 sc->sc_bus_speed = 133;
1929 break;
1930 default:
1931 aprint_error_dev(sc->sc_dev,
1932 "unknown PCIXSPD %d; assuming 66MHz\n",
1933 reg & STATUS_PCIXSPD_MASK);
1934 sc->sc_bus_speed = 66;
1935 break;
1936 }
1937 } else
1938 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1939 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1940 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1941 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1942 }
1943
1944 /* clear interesting stat counters */
1945 CSR_READ(sc, WMREG_COLC);
1946 CSR_READ(sc, WMREG_RXERRC);
1947
1948 if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
1949 || (sc->sc_type >= WM_T_ICH8))
1950 sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
1951 if (sc->sc_type >= WM_T_ICH8)
1952 sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
1953
1954 /* Set PHY, NVM mutex related stuff */
1955 switch (sc->sc_type) {
1956 case WM_T_82542_2_0:
1957 case WM_T_82542_2_1:
1958 case WM_T_82543:
1959 case WM_T_82544:
1960 /* Microwire */
1961 sc->sc_nvm_wordsize = 64;
1962 sc->sc_nvm_addrbits = 6;
1963 break;
1964 case WM_T_82540:
1965 case WM_T_82545:
1966 case WM_T_82545_3:
1967 case WM_T_82546:
1968 case WM_T_82546_3:
1969 /* Microwire */
1970 reg = CSR_READ(sc, WMREG_EECD);
1971 if (reg & EECD_EE_SIZE) {
1972 sc->sc_nvm_wordsize = 256;
1973 sc->sc_nvm_addrbits = 8;
1974 } else {
1975 sc->sc_nvm_wordsize = 64;
1976 sc->sc_nvm_addrbits = 6;
1977 }
1978 sc->sc_flags |= WM_F_LOCK_EECD;
1979 break;
1980 case WM_T_82541:
1981 case WM_T_82541_2:
1982 case WM_T_82547:
1983 case WM_T_82547_2:
1984 sc->sc_flags |= WM_F_LOCK_EECD;
1985 reg = CSR_READ(sc, WMREG_EECD);
1986 if (reg & EECD_EE_TYPE) {
1987 /* SPI */
1988 sc->sc_flags |= WM_F_EEPROM_SPI;
1989 wm_nvm_set_addrbits_size_eecd(sc);
1990 } else {
1991 /* Microwire */
1992 if ((reg & EECD_EE_ABITS) != 0) {
1993 sc->sc_nvm_wordsize = 256;
1994 sc->sc_nvm_addrbits = 8;
1995 } else {
1996 sc->sc_nvm_wordsize = 64;
1997 sc->sc_nvm_addrbits = 6;
1998 }
1999 }
2000 break;
2001 case WM_T_82571:
2002 case WM_T_82572:
2003 /* SPI */
2004 sc->sc_flags |= WM_F_EEPROM_SPI;
2005 wm_nvm_set_addrbits_size_eecd(sc);
2006 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
2007 sc->phy.acquire = wm_get_swsm_semaphore;
2008 sc->phy.release = wm_put_swsm_semaphore;
2009 break;
2010 case WM_T_82573:
2011 case WM_T_82574:
2012 case WM_T_82583:
2013 if (sc->sc_type == WM_T_82573) {
2014 sc->sc_flags |= WM_F_LOCK_SWSM;
2015 sc->phy.acquire = wm_get_swsm_semaphore;
2016 sc->phy.release = wm_put_swsm_semaphore;
2017 } else {
2018 sc->sc_flags |= WM_F_LOCK_EXTCNF;
2019 /* Both PHY and NVM use the same semaphore. */
2020 sc->phy.acquire
2021 = wm_get_swfwhw_semaphore;
2022 sc->phy.release
2023 = wm_put_swfwhw_semaphore;
2024 }
2025 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2026 sc->sc_flags |= WM_F_EEPROM_FLASH;
2027 sc->sc_nvm_wordsize = 2048;
2028 } else {
2029 /* SPI */
2030 sc->sc_flags |= WM_F_EEPROM_SPI;
2031 wm_nvm_set_addrbits_size_eecd(sc);
2032 }
2033 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
2034 break;
2035 case WM_T_82575:
2036 case WM_T_82576:
2037 case WM_T_82580:
2038 case WM_T_I350:
2039 case WM_T_I354:
2040 case WM_T_80003:
2041 /* SPI */
2042 sc->sc_flags |= WM_F_EEPROM_SPI;
2043 wm_nvm_set_addrbits_size_eecd(sc);
2044 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
2045 | WM_F_LOCK_SWSM;
2046 sc->phy.acquire = wm_get_phy_82575;
2047 sc->phy.release = wm_put_phy_82575;
2048 break;
2049 case WM_T_ICH8:
2050 case WM_T_ICH9:
2051 case WM_T_ICH10:
2052 case WM_T_PCH:
2053 case WM_T_PCH2:
2054 case WM_T_PCH_LPT:
2055 /* FLASH */
2056 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
2057 sc->sc_nvm_wordsize = 2048;
2058 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2059 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2060 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2061 aprint_error_dev(sc->sc_dev,
2062 "can't map FLASH registers\n");
2063 goto out;
2064 }
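		/*
		 * GFPREG holds the first sector of the gigabit flash
		 * region in its low half and the last sector in its high
		 * half, in units of ICH_FLASH_SECTOR_SIZE. The sector
		 * count times the sector size gives the region size in
		 * bytes, which is then divided by the two banks and by
		 * the two bytes per NVM word.
		 */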
2065 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2066 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2067 ICH_FLASH_SECTOR_SIZE;
2068 sc->sc_ich8_flash_bank_size =
2069 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2070 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2071 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2072 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2073 sc->sc_flashreg_offset = 0;
2074 sc->phy.acquire = wm_get_swflag_ich8lan;
2075 sc->phy.release = wm_put_swflag_ich8lan;
2076 break;
2077 case WM_T_PCH_SPT:
2078 /* SPT has no GFPREG; flash registers mapped through BAR0 */
2079 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
2080 sc->sc_flasht = sc->sc_st;
2081 sc->sc_flashh = sc->sc_sh;
2082 sc->sc_ich8_flash_base = 0;
2083 sc->sc_nvm_wordsize =
2084 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2085 * NVM_SIZE_MULTIPLIER;
2086		/* That is the size in bytes; we want words */
2087 sc->sc_nvm_wordsize /= 2;
2088 /* assume 2 banks */
2089 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2090 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2091 sc->phy.acquire = wm_get_swflag_ich8lan;
2092 sc->phy.release = wm_put_swflag_ich8lan;
2093 break;
2094 case WM_T_I210:
2095 case WM_T_I211:
2096 if (wm_nvm_get_flash_presence_i210(sc)) {
2097 wm_nvm_set_addrbits_size_eecd(sc);
2098 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2099 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
2100 } else {
2101 sc->sc_nvm_wordsize = INVM_SIZE;
2102 sc->sc_flags |= WM_F_EEPROM_INVM;
2103 }
2104 sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
2105 sc->phy.acquire = wm_get_phy_82575;
2106 sc->phy.release = wm_put_phy_82575;
2107 break;
2108 default:
2109 break;
2110 }
2111
2112 /* Reset the chip to a known state. */
2113 wm_reset(sc);
2114
2115 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2116 switch (sc->sc_type) {
2117 case WM_T_82571:
2118 case WM_T_82572:
2119 reg = CSR_READ(sc, WMREG_SWSM2);
2120 if ((reg & SWSM2_LOCK) == 0) {
2121 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2122 force_clear_smbi = true;
2123 } else
2124 force_clear_smbi = false;
2125 break;
2126 case WM_T_82573:
2127 case WM_T_82574:
2128 case WM_T_82583:
2129 force_clear_smbi = true;
2130 break;
2131 default:
2132 force_clear_smbi = false;
2133 break;
2134 }
2135 if (force_clear_smbi) {
2136 reg = CSR_READ(sc, WMREG_SWSM);
2137 if ((reg & SWSM_SMBI) != 0)
2138 aprint_error_dev(sc->sc_dev,
2139 "Please update the Bootagent\n");
2140 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2141 }
2142
2143	/*
2144	 * Defer printing the EEPROM type until after verifying the checksum.
2145	 * This allows the EEPROM type to be printed correctly in the case
2146	 * that no EEPROM is attached.
2147	 */
2148 /*
2149 * Validate the EEPROM checksum. If the checksum fails, flag
2150 * this for later, so we can fail future reads from the EEPROM.
2151 */
2152 if (wm_nvm_validate_checksum(sc)) {
2153		/*
2154		 * Read it again, because some PCI-e parts fail the
2155		 * first check due to the link being in a sleep state.
2156		 */
2157 if (wm_nvm_validate_checksum(sc))
2158 sc->sc_flags |= WM_F_EEPROM_INVALID;
2159 }
2160
2161 /* Set device properties (macflags) */
2162 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2163
2164 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2165 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2166 else {
2167 aprint_verbose_dev(sc->sc_dev, "%u words ",
2168 sc->sc_nvm_wordsize);
2169 if (sc->sc_flags & WM_F_EEPROM_INVM)
2170 aprint_verbose("iNVM");
2171 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2172 aprint_verbose("FLASH(HW)");
2173 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2174 aprint_verbose("FLASH");
2175 else {
2176 if (sc->sc_flags & WM_F_EEPROM_SPI)
2177 eetype = "SPI";
2178 else
2179 eetype = "MicroWire";
2180 aprint_verbose("(%d address bits) %s EEPROM",
2181 sc->sc_nvm_addrbits, eetype);
2182 }
2183 }
2184 wm_nvm_version(sc);
2185 aprint_verbose("\n");
2186
2187 /* Check for I21[01] PLL workaround */
2188 if (sc->sc_type == WM_T_I210)
2189 sc->sc_flags |= WM_F_PLL_WA_I210;
2190 if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2191 /* NVM image release 3.25 has a workaround */
2192 if ((sc->sc_nvm_ver_major < 3)
2193 || ((sc->sc_nvm_ver_major == 3)
2194 && (sc->sc_nvm_ver_minor < 25))) {
2195 aprint_verbose_dev(sc->sc_dev,
2196 "ROM image version %d.%d is older than 3.25\n",
2197 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2198 sc->sc_flags |= WM_F_PLL_WA_I210;
2199 }
2200 }
2201 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2202 wm_pll_workaround_i210(sc);
2203
2204 wm_get_wakeup(sc);
2205
2206 /* Non-AMT based hardware can now take control from firmware */
2207 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2208 wm_get_hw_control(sc);
2209
2210	/*
2211	 * Read the Ethernet address from the EEPROM, unless it was
2212	 * already found in the device properties.
2213	 */
2214 ea = prop_dictionary_get(dict, "mac-address");
2215 if (ea != NULL) {
2216 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2217 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2218 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2219 } else {
2220 if (wm_read_mac_addr(sc, enaddr) != 0) {
2221 aprint_error_dev(sc->sc_dev,
2222 "unable to read Ethernet address\n");
2223 goto out;
2224 }
2225 }
2226
2227 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2228 ether_sprintf(enaddr));
2229
2230 /*
2231 * Read the config info from the EEPROM, and set up various
2232 * bits in the control registers based on their contents.
2233 */
2234 pn = prop_dictionary_get(dict, "i82543-cfg1");
2235 if (pn != NULL) {
2236 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2237 cfg1 = (uint16_t) prop_number_integer_value(pn);
2238 } else {
2239 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2240 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2241 goto out;
2242 }
2243 }
2244
2245 pn = prop_dictionary_get(dict, "i82543-cfg2");
2246 if (pn != NULL) {
2247 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2248 cfg2 = (uint16_t) prop_number_integer_value(pn);
2249 } else {
2250 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2251 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2252 goto out;
2253 }
2254 }
2255
2256 /* check for WM_F_WOL */
2257 switch (sc->sc_type) {
2258 case WM_T_82542_2_0:
2259 case WM_T_82542_2_1:
2260 case WM_T_82543:
2261 /* dummy? */
2262 eeprom_data = 0;
2263 apme_mask = NVM_CFG3_APME;
2264 break;
2265 case WM_T_82544:
2266 apme_mask = NVM_CFG2_82544_APM_EN;
2267 eeprom_data = cfg2;
2268 break;
2269 case WM_T_82546:
2270 case WM_T_82546_3:
2271 case WM_T_82571:
2272 case WM_T_82572:
2273 case WM_T_82573:
2274 case WM_T_82574:
2275 case WM_T_82583:
2276 case WM_T_80003:
2277 default:
2278 apme_mask = NVM_CFG3_APME;
2279 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2280 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2281 break;
2282 case WM_T_82575:
2283 case WM_T_82576:
2284 case WM_T_82580:
2285 case WM_T_I350:
2286 case WM_T_I354: /* XXX ok? */
2287 case WM_T_ICH8:
2288 case WM_T_ICH9:
2289 case WM_T_ICH10:
2290 case WM_T_PCH:
2291 case WM_T_PCH2:
2292 case WM_T_PCH_LPT:
2293 case WM_T_PCH_SPT:
2294 /* XXX The funcid should be checked on some devices */
2295 apme_mask = WUC_APME;
2296 eeprom_data = CSR_READ(sc, WMREG_WUC);
2297 break;
2298 }
2299
2300 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2301 if ((eeprom_data & apme_mask) != 0)
2302 sc->sc_flags |= WM_F_WOL;
2303 #ifdef WM_DEBUG
2304 if ((sc->sc_flags & WM_F_WOL) != 0)
2305 printf("WOL\n");
2306 #endif
2307
2308 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2309 /* Check NVM for autonegotiation */
2310 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2311 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2312 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2313 }
2314 }
2315
2316	/*
2317	 * XXX need special handling for some multiple port cards
2318	 * to disable a particular port.
2319	 */
2320
2321 if (sc->sc_type >= WM_T_82544) {
2322 pn = prop_dictionary_get(dict, "i82543-swdpin");
2323 if (pn != NULL) {
2324 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2325 swdpin = (uint16_t) prop_number_integer_value(pn);
2326 } else {
2327 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2328 aprint_error_dev(sc->sc_dev,
2329 "unable to read SWDPIN\n");
2330 goto out;
2331 }
2332 }
2333 }
2334
2335 if (cfg1 & NVM_CFG1_ILOS)
2336 sc->sc_ctrl |= CTRL_ILOS;
2337
2338	/*
2339	 * XXX
2340	 * This code isn't correct because pins 2 and 3 are located
2341	 * in different positions on newer chips. Check all datasheets.
2342	 *
2343	 * Until this is resolved, only do it for chips <= 82580.
2344	 */
2345 if (sc->sc_type <= WM_T_82580) {
2346 if (sc->sc_type >= WM_T_82544) {
2347 sc->sc_ctrl |=
2348 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2349 CTRL_SWDPIO_SHIFT;
2350 sc->sc_ctrl |=
2351 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2352 CTRL_SWDPINS_SHIFT;
2353 } else {
2354 sc->sc_ctrl |=
2355 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2356 CTRL_SWDPIO_SHIFT;
2357 }
2358 }
2359
2360 /* XXX For other than 82580? */
2361 if (sc->sc_type == WM_T_82580) {
2362 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2363 if (nvmword & __BIT(13))
2364 sc->sc_ctrl |= CTRL_ILOS;
2365 }
2366
2367 #if 0
2368 if (sc->sc_type >= WM_T_82544) {
2369 if (cfg1 & NVM_CFG1_IPS0)
2370 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2371 if (cfg1 & NVM_CFG1_IPS1)
2372 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2373 sc->sc_ctrl_ext |=
2374 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2375 CTRL_EXT_SWDPIO_SHIFT;
2376 sc->sc_ctrl_ext |=
2377 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2378 CTRL_EXT_SWDPINS_SHIFT;
2379 } else {
2380 sc->sc_ctrl_ext |=
2381 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2382 CTRL_EXT_SWDPIO_SHIFT;
2383 }
2384 #endif
2385
2386 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2387 #if 0
2388 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2389 #endif
2390
2391 if (sc->sc_type == WM_T_PCH) {
2392 uint16_t val;
2393
2394 /* Save the NVM K1 bit setting */
2395 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2396
2397 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2398 sc->sc_nvm_k1_enabled = 1;
2399 else
2400 sc->sc_nvm_k1_enabled = 0;
2401 }
2402
2403	/*
2404	 * Determine whether we're in TBI, GMII or SGMII mode, and
2405	 * initialize the media structures accordingly.
2406	 */
2407 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2408 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2409 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2410 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
2411 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2412 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2413 wm_gmii_mediainit(sc, wmp->wmp_product);
2414 } else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2415 || (sc->sc_type ==WM_T_82580) || (sc->sc_type ==WM_T_I350)
2416 || (sc->sc_type ==WM_T_I354) || (sc->sc_type ==WM_T_I210)
2417 || (sc->sc_type ==WM_T_I211)) {
2418 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2419 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2420 switch (link_mode) {
2421 case CTRL_EXT_LINK_MODE_1000KX:
2422 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2423 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2424 break;
2425 case CTRL_EXT_LINK_MODE_SGMII:
2426 if (wm_sgmii_uses_mdio(sc)) {
2427 aprint_verbose_dev(sc->sc_dev,
2428 "SGMII(MDIO)\n");
2429 sc->sc_flags |= WM_F_SGMII;
2430 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2431 break;
2432 }
2433 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2434 /*FALLTHROUGH*/
2435 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2436 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2437 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2438 if (link_mode
2439 == CTRL_EXT_LINK_MODE_SGMII) {
2440 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2441 sc->sc_flags |= WM_F_SGMII;
2442 } else {
2443 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2444 aprint_verbose_dev(sc->sc_dev,
2445 "SERDES\n");
2446 }
2447 break;
2448 }
2449 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2450 aprint_verbose_dev(sc->sc_dev, "SERDES\n");
2451
2452 /* Change current link mode setting */
2453 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2454 switch (sc->sc_mediatype) {
2455 case WM_MEDIATYPE_COPPER:
2456 reg |= CTRL_EXT_LINK_MODE_SGMII;
2457 break;
2458 case WM_MEDIATYPE_SERDES:
2459 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2460 break;
2461 default:
2462 break;
2463 }
2464 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2465 break;
2466 case CTRL_EXT_LINK_MODE_GMII:
2467 default:
2468 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2469 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2470 break;
2471 }
2472
2474		if ((sc->sc_flags & WM_F_SGMII) != 0)
2475			reg |= CTRL_EXT_I2C_ENA;
2476		else
2477			reg &= ~CTRL_EXT_I2C_ENA;
2478 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2479
2480 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2481 wm_gmii_mediainit(sc, wmp->wmp_product);
2482 else
2483 wm_tbi_mediainit(sc);
2484 } else if (sc->sc_type < WM_T_82543 ||
2485 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2486 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2487 aprint_error_dev(sc->sc_dev,
2488 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2489 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2490 }
2491 wm_tbi_mediainit(sc);
2492 } else {
2493 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
2494 aprint_error_dev(sc->sc_dev,
2495 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2496 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2497 }
2498 wm_gmii_mediainit(sc, wmp->wmp_product);
2499 }
2500
2501 ifp = &sc->sc_ethercom.ec_if;
2502 xname = device_xname(sc->sc_dev);
2503 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2504 ifp->if_softc = sc;
2505 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2506 ifp->if_extflags = IFEF_START_MPSAFE;
2507 ifp->if_ioctl = wm_ioctl;
2508 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2509 ifp->if_start = wm_nq_start;
2510 if (sc->sc_nqueues > 1) {
2511 ifp->if_transmit = wm_nq_transmit;
2512 deferred_start_func = wm_deferred_start;
2513 }
2514 } else {
2515 ifp->if_start = wm_start;
2516 if (sc->sc_nqueues > 1) {
2517 ifp->if_transmit = wm_transmit;
2518 deferred_start_func = wm_deferred_start;
2519 }
2520 }
2521 ifp->if_watchdog = wm_watchdog;
2522 ifp->if_init = wm_init;
2523 ifp->if_stop = wm_stop;
2524 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2525 IFQ_SET_READY(&ifp->if_snd);
2526
2527 /* Check for jumbo frame */
2528 switch (sc->sc_type) {
2529 case WM_T_82573:
2530 /* XXX limited to 9234 if ASPM is disabled */
2531 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2532 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2533 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2534 break;
2535 case WM_T_82571:
2536 case WM_T_82572:
2537 case WM_T_82574:
2538 case WM_T_82575:
2539 case WM_T_82576:
2540 case WM_T_82580:
2541 case WM_T_I350:
2542	case WM_T_I354:		/* XXX ok? */
2543 case WM_T_I210:
2544 case WM_T_I211:
2545 case WM_T_80003:
2546 case WM_T_ICH9:
2547 case WM_T_ICH10:
2548 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2549 case WM_T_PCH_LPT:
2550 case WM_T_PCH_SPT:
2551 /* XXX limited to 9234 */
2552 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2553 break;
2554 case WM_T_PCH:
2555 /* XXX limited to 4096 */
2556 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2557 break;
2558 case WM_T_82542_2_0:
2559 case WM_T_82542_2_1:
2560 case WM_T_82583:
2561 case WM_T_ICH8:
2562 /* No support for jumbo frame */
2563 break;
2564 default:
2565 /* ETHER_MAX_LEN_JUMBO */
2566 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2567 break;
2568 }
2569
2570	/* If we're an i82543 or greater, we can support VLANs. */
2571 if (sc->sc_type >= WM_T_82543)
2572 sc->sc_ethercom.ec_capabilities |=
2573 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2574
2575	/*
2576	 * We can perform TCPv4 and UDPv4 checksum offload, but only
2577	 * on the i82543 and later.
2578	 */
2579 if (sc->sc_type >= WM_T_82543) {
2580 ifp->if_capabilities |=
2581 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2582 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2583 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2584 IFCAP_CSUM_TCPv6_Tx |
2585 IFCAP_CSUM_UDPv6_Tx;
2586 }
2587
2588	/*
2589	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
2590	 *
2591	 * 82541GI (8086:1076) ... no
2592	 * 82572EI (8086:10b9) ... yes
2593	 */
2594 if (sc->sc_type >= WM_T_82571) {
2595 ifp->if_capabilities |=
2596 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2597 }
2598
2599	/*
2600	 * If we're an i82544 or greater (except i82547), we can do
2601	 * TCP segmentation offload.
2602	 */
2603 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2604 ifp->if_capabilities |= IFCAP_TSOv4;
2605 }
2606
2607 if (sc->sc_type >= WM_T_82571) {
2608 ifp->if_capabilities |= IFCAP_TSOv6;
2609 }
2610
2611 #ifdef WM_MPSAFE
2612 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2613 #else
2614 sc->sc_core_lock = NULL;
2615 #endif
2616
2617 /* Attach the interface. */
2618 if_initialize(ifp);
2619 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
2620 if_deferred_start_init(ifp, deferred_start_func);
2621 ether_ifattach(ifp, enaddr);
2622 if_register(ifp);
2623 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2624 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2625 RND_FLAG_DEFAULT);
2626
2627 #ifdef WM_EVENT_COUNTERS
2628 /* Attach event counters. */
2629 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2630 NULL, xname, "linkintr");
2631
2632 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2633 NULL, xname, "tx_xoff");
2634 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2635 NULL, xname, "tx_xon");
2636 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2637 NULL, xname, "rx_xoff");
2638 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2639 NULL, xname, "rx_xon");
2640 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2641 NULL, xname, "rx_macctl");
2642 #endif /* WM_EVENT_COUNTERS */
2643
2644 if (pmf_device_register(self, wm_suspend, wm_resume))
2645 pmf_class_network_register(self, ifp);
2646 else
2647 aprint_error_dev(self, "couldn't establish power handler\n");
2648
2649 sc->sc_flags |= WM_F_ATTACHED;
2650 out:
2651 return;
2652 }
2653
2654 /* The detach function (ca_detach) */
2655 static int
2656 wm_detach(device_t self, int flags __unused)
2657 {
2658 struct wm_softc *sc = device_private(self);
2659 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2660 int i;
2661
2662 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2663 return 0;
2664
2665	/* Stop the interface. Callouts are stopped in wm_stop(). */
2666 wm_stop(ifp, 1);
2667
2668 pmf_device_deregister(self);
2669
2670 /* Tell the firmware about the release */
2671 WM_CORE_LOCK(sc);
2672 wm_release_manageability(sc);
2673 wm_release_hw_control(sc);
2674 wm_enable_wakeup(sc);
2675 WM_CORE_UNLOCK(sc);
2676
2677 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2678
2679 /* Delete all remaining media. */
2680 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2681
2682 ether_ifdetach(ifp);
2683 if_detach(ifp);
2684 if_percpuq_destroy(sc->sc_ipq);
2685
2686 /* Unload RX dmamaps and free mbufs */
2687 for (i = 0; i < sc->sc_nqueues; i++) {
2688 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
2689 mutex_enter(rxq->rxq_lock);
2690 wm_rxdrain(rxq);
2691 mutex_exit(rxq->rxq_lock);
2692 }
2693 /* Must unlock here */
2694
2695 /* Disestablish the interrupt handler */
2696 for (i = 0; i < sc->sc_nintrs; i++) {
2697 if (sc->sc_ihs[i] != NULL) {
2698 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2699 sc->sc_ihs[i] = NULL;
2700 }
2701 }
2702 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2703
2704 wm_free_txrx_queues(sc);
2705
2706 /* Unmap the registers */
2707 if (sc->sc_ss) {
2708 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2709 sc->sc_ss = 0;
2710 }
2711 if (sc->sc_ios) {
2712 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2713 sc->sc_ios = 0;
2714 }
2715 if (sc->sc_flashs) {
2716 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2717 sc->sc_flashs = 0;
2718 }
2719
2720 if (sc->sc_core_lock)
2721 mutex_obj_free(sc->sc_core_lock);
2722 if (sc->sc_ich_phymtx)
2723 mutex_obj_free(sc->sc_ich_phymtx);
2724 if (sc->sc_ich_nvmmtx)
2725 mutex_obj_free(sc->sc_ich_nvmmtx);
2726
2727 return 0;
2728 }
2729
2730 static bool
2731 wm_suspend(device_t self, const pmf_qual_t *qual)
2732 {
2733 struct wm_softc *sc = device_private(self);
2734
2735 wm_release_manageability(sc);
2736 wm_release_hw_control(sc);
2737 wm_enable_wakeup(sc);
2738
2739 return true;
2740 }
2741
2742 static bool
2743 wm_resume(device_t self, const pmf_qual_t *qual)
2744 {
2745 struct wm_softc *sc = device_private(self);
2746
2747 wm_init_manageability(sc);
2748
2749 return true;
2750 }
2751
2752 /*
2753 * wm_watchdog: [ifnet interface function]
2754 *
2755 * Watchdog timer handler.
2756 */
2757 static void
2758 wm_watchdog(struct ifnet *ifp)
2759 {
2760 int qid;
2761 struct wm_softc *sc = ifp->if_softc;
2762
2763 for (qid = 0; qid < sc->sc_nqueues; qid++) {
2764 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
2765
2766 wm_watchdog_txq(ifp, txq);
2767 }
2768
2769 /* Reset the interface. */
2770 (void) wm_init(ifp);
2771
2772	/*
2773	 * There is still some upper-layer processing that calls
2774	 * ifp->if_start() directly, e.g. ALTQ.
2775	 */
2776 /* Try to get more packets going. */
2777 ifp->if_start(ifp);
2778 }
2779
2780 static void
2781 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
2782 {
2783 struct wm_softc *sc = ifp->if_softc;
2784
2785 /*
2786 * Since we're using delayed interrupts, sweep up
2787 * before we report an error.
2788 */
2789 mutex_enter(txq->txq_lock);
2790 wm_txeof(sc, txq);
2791 mutex_exit(txq->txq_lock);
2792
2793 if (txq->txq_free != WM_NTXDESC(txq)) {
2794 #ifdef WM_DEBUG
2795 int i, j;
2796 struct wm_txsoft *txs;
2797 #endif
2798 log(LOG_ERR,
2799 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2800 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
2801 txq->txq_next);
2802 ifp->if_oerrors++;
2803 #ifdef WM_DEBUG
2804 for (i = txq->txq_sdirty; i != txq->txq_snext ;
2805 i = WM_NEXTTXS(txq, i)) {
2806 txs = &txq->txq_soft[i];
2807 printf("txs %d tx %d -> %d\n",
2808 i, txs->txs_firstdesc, txs->txs_lastdesc);
2809 for (j = txs->txs_firstdesc; ;
2810 j = WM_NEXTTX(txq, j)) {
2811 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2812 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
2813 printf("\t %#08x%08x\n",
2814 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
2815 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
2816 if (j == txs->txs_lastdesc)
2817 break;
2818 }
2819 }
2820 #endif
2821 }
2822 }
2823
2824 /*
2825 * wm_tick:
2826 *
2827 * One second timer, used to check link status, sweep up
2828 * completed transmit jobs, etc.
2829 */
2830 static void
2831 wm_tick(void *arg)
2832 {
2833 struct wm_softc *sc = arg;
2834 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2835 #ifndef WM_MPSAFE
2836 int s = splnet();
2837 #endif
2838
2839 WM_CORE_LOCK(sc);
2840
2841 if (sc->sc_core_stopping)
2842 goto out;
2843
2844 if (sc->sc_type >= WM_T_82542_2_1) {
2845 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2846 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2847 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2848 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2849 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2850 }
2851
2852 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2853 ifp->if_ierrors += 0ULL + /* ensure quad_t */
2854 + CSR_READ(sc, WMREG_CRCERRS)
2855 + CSR_READ(sc, WMREG_ALGNERRC)
2856 + CSR_READ(sc, WMREG_SYMERRC)
2857 + CSR_READ(sc, WMREG_RXERRC)
2858 + CSR_READ(sc, WMREG_SEC)
2859 + CSR_READ(sc, WMREG_CEXTERR)
2860 + CSR_READ(sc, WMREG_RLEC);
2861	/*
2862	 * WMREG_RNBC is incremented when there are no available buffers
2863	 * in host memory. It does not count dropped packets, because the
2864	 * Ethernet controller can still receive packets in that case if
2865	 * there is space in the PHY's FIFO.
2866	 *
2867	 * If you want to count WMREG_RNBC events, use a dedicated EVCNT
2868	 * instead of if_iqdrops.
2869	 */
2870 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
2871
2872 if (sc->sc_flags & WM_F_HAS_MII)
2873 mii_tick(&sc->sc_mii);
2874 else if ((sc->sc_type >= WM_T_82575)
2875 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2876 wm_serdes_tick(sc);
2877 else
2878 wm_tbi_tick(sc);
2879
2880 out:
2881 WM_CORE_UNLOCK(sc);
2882 #ifndef WM_MPSAFE
2883 splx(s);
2884 #endif
2885
2886 if (!sc->sc_core_stopping)
2887 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2888 }
2889
2890 static int
2891 wm_ifflags_cb(struct ethercom *ec)
2892 {
2893 struct ifnet *ifp = &ec->ec_if;
2894 struct wm_softc *sc = ifp->if_softc;
2895 int rc = 0;
2896
2897 WM_CORE_LOCK(sc);
2898
2899 int change = ifp->if_flags ^ sc->sc_if_flags;
2900 sc->sc_if_flags = ifp->if_flags;
2901
2902 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
2903 rc = ENETRESET;
2904 goto out;
2905 }
2906
2907 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2908 wm_set_filter(sc);
2909
2910 wm_set_vlan(sc);
2911
2912 out:
2913 WM_CORE_UNLOCK(sc);
2914
2915 return rc;
2916 }
2917
2918 /*
2919 * wm_ioctl: [ifnet interface function]
2920 *
2921 * Handle control requests from the operator.
2922 */
2923 static int
2924 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2925 {
2926 struct wm_softc *sc = ifp->if_softc;
2927 struct ifreq *ifr = (struct ifreq *) data;
2928 struct ifaddr *ifa = (struct ifaddr *)data;
2929 struct sockaddr_dl *sdl;
2930 int s, error;
2931
2932 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
2933 device_xname(sc->sc_dev), __func__));
2934
2935 #ifndef WM_MPSAFE
2936 s = splnet();
2937 #endif
2938 switch (cmd) {
2939 case SIOCSIFMEDIA:
2940 case SIOCGIFMEDIA:
2941 WM_CORE_LOCK(sc);
2942 /* Flow control requires full-duplex mode. */
2943 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2944 (ifr->ifr_media & IFM_FDX) == 0)
2945 ifr->ifr_media &= ~IFM_ETH_FMASK;
2946 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2947 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2948 /* We can do both TXPAUSE and RXPAUSE. */
2949 ifr->ifr_media |=
2950 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2951 }
2952 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2953 }
2954 WM_CORE_UNLOCK(sc);
2955 #ifdef WM_MPSAFE
2956 s = splnet();
2957 #endif
2958 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2959 #ifdef WM_MPSAFE
2960 splx(s);
2961 #endif
2962 break;
2963 case SIOCINITIFADDR:
2964 WM_CORE_LOCK(sc);
2965 if (ifa->ifa_addr->sa_family == AF_LINK) {
2966 sdl = satosdl(ifp->if_dl->ifa_addr);
2967 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2968 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2969 /* unicast address is first multicast entry */
2970 wm_set_filter(sc);
2971 error = 0;
2972 WM_CORE_UNLOCK(sc);
2973 break;
2974 }
2975 WM_CORE_UNLOCK(sc);
2976 /*FALLTHROUGH*/
2977 default:
2978 #ifdef WM_MPSAFE
2979 s = splnet();
2980 #endif
2981 /* It may call wm_start, so unlock here */
2982 error = ether_ioctl(ifp, cmd, data);
2983 #ifdef WM_MPSAFE
2984 splx(s);
2985 #endif
2986 if (error != ENETRESET)
2987 break;
2988
2989 error = 0;
2990
2991 if (cmd == SIOCSIFCAP) {
2992 error = (*ifp->if_init)(ifp);
2993 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2994 ;
2995 else if (ifp->if_flags & IFF_RUNNING) {
2996 /*
2997 * Multicast list has changed; set the hardware filter
2998 * accordingly.
2999 */
3000 WM_CORE_LOCK(sc);
3001 wm_set_filter(sc);
3002 WM_CORE_UNLOCK(sc);
3003 }
3004 break;
3005 }
3006
3007 #ifndef WM_MPSAFE
3008 splx(s);
3009 #endif
3010 return error;
3011 }
3012
3013 /* MAC address related */
3014
3015	/*
3016	 * Get the offset of the MAC address and return it.
3017	 * If an error occurs, use offset 0.
3018	 */
3019 static uint16_t
3020 wm_check_alt_mac_addr(struct wm_softc *sc)
3021 {
3022 uint16_t myea[ETHER_ADDR_LEN / 2];
3023 uint16_t offset = NVM_OFF_MACADDR;
3024
3025 /* Try to read alternative MAC address pointer */
3026 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3027 return 0;
3028
3029	/* Check whether the pointer is valid. */
3030 if ((offset == 0x0000) || (offset == 0xffff))
3031 return 0;
3032
3033 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3034	/*
3035	 * Check whether the alternative MAC address is valid or not.
3036	 * Some cards have a non-0xffff pointer but don't actually use
3037	 * an alternative MAC address.
3038	 *
3039	 * Check that the multicast (group) bit of the first octet is clear.
3040	 */
3041 if (wm_nvm_read(sc, offset, 1, myea) == 0)
3042 if (((myea[0] & 0xff) & 0x01) == 0)
3043 return offset; /* Found */
3044
3045 /* Not found */
3046 return 0;
3047 }
3048
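	/*
	 * Read the MAC address from the NVM, taking the per-family offset
	 * (and, on some chips, the alternative MAC address pointer) into
	 * account, and derive the second port's address where required.
	 */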
3049 static int
3050 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3051 {
3052 uint16_t myea[ETHER_ADDR_LEN / 2];
3053 uint16_t offset = NVM_OFF_MACADDR;
3054 int do_invert = 0;
3055
3056 switch (sc->sc_type) {
3057 case WM_T_82580:
3058 case WM_T_I350:
3059 case WM_T_I354:
3060 /* EEPROM Top Level Partitioning */
3061 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3062 break;
3063 case WM_T_82571:
3064 case WM_T_82575:
3065 case WM_T_82576:
3066 case WM_T_80003:
3067 case WM_T_I210:
3068 case WM_T_I211:
3069 offset = wm_check_alt_mac_addr(sc);
3070 if (offset == 0)
3071 if ((sc->sc_funcid & 0x01) == 1)
3072 do_invert = 1;
3073 break;
3074 default:
3075 if ((sc->sc_funcid & 0x01) == 1)
3076 do_invert = 1;
3077 break;
3078 }
3079
3080 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
3081 goto bad;
3082
3083 enaddr[0] = myea[0] & 0xff;
3084 enaddr[1] = myea[0] >> 8;
3085 enaddr[2] = myea[1] & 0xff;
3086 enaddr[3] = myea[1] >> 8;
3087 enaddr[4] = myea[2] & 0xff;
3088 enaddr[5] = myea[2] >> 8;
3089
3090 /*
3091 * Toggle the LSB of the MAC address on the second port
3092 * of some dual port cards.
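	 * For example, if the first port's address ends in 0xa0, the
	 * second port uses 0xa1.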
3093 */
3094 if (do_invert != 0)
3095 enaddr[5] ^= 1;
3096
3097 return 0;
3098
3099 bad:
3100 return -1;
3101 }
3102
3103	/*
3104	 * wm_set_ral:
3105	 *
3106	 *	Set an entry in the receive address list.
3107	 */
3108 static void
3109 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3110 {
3111 uint32_t ral_lo, ral_hi;
3112
3113 if (enaddr != NULL) {
3114 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3115 (enaddr[3] << 24);
3116 ral_hi = enaddr[4] | (enaddr[5] << 8);
3117 ral_hi |= RAL_AV;
3118 } else {
3119 ral_lo = 0;
3120 ral_hi = 0;
3121 }
3122
3123 if (sc->sc_type >= WM_T_82544) {
3124 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3125 ral_lo);
3126 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3127 ral_hi);
3128 } else {
3129 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3130 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3131 }
3132 }
3133
3134 /*
3135 * wm_mchash:
3136 *
3137 * Compute the hash of the multicast address for the 4096-bit
3138 * multicast filter.
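	 *
	 * For example, with sc_mchash_type 0 (lo_shift 4, hi_shift 4) and an
	 * address ending in a4:3c, the hash is (0xa4 >> 4) | (0x3c << 4)
	 * = 0x3ca, so wm_set_filter() sets bit 0x0a of MTA word 0x1e.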
3139 */
3140 static uint32_t
3141 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3142 {
3143 static const int lo_shift[4] = { 4, 3, 2, 0 };
3144 static const int hi_shift[4] = { 4, 5, 6, 8 };
3145 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3146 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3147 uint32_t hash;
3148
3149 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3150 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3151 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3152 || (sc->sc_type == WM_T_PCH_SPT)) {
3153 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3154 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3155 return (hash & 0x3ff);
3156 }
3157 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3158 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3159
3160 return (hash & 0xfff);
3161 }
3162
3163 /*
3164 * wm_set_filter:
3165 *
3166 * Set up the receive filter.
3167 */
3168 static void
3169 wm_set_filter(struct wm_softc *sc)
3170 {
3171 struct ethercom *ec = &sc->sc_ethercom;
3172 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3173 struct ether_multi *enm;
3174 struct ether_multistep step;
3175 bus_addr_t mta_reg;
3176 uint32_t hash, reg, bit;
3177 int i, size, ralmax;
3178
3179 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3180 device_xname(sc->sc_dev), __func__));
3181
3182 if (sc->sc_type >= WM_T_82544)
3183 mta_reg = WMREG_CORDOVA_MTA;
3184 else
3185 mta_reg = WMREG_MTA;
3186
3187 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3188
3189 if (ifp->if_flags & IFF_BROADCAST)
3190 sc->sc_rctl |= RCTL_BAM;
3191 if (ifp->if_flags & IFF_PROMISC) {
3192 sc->sc_rctl |= RCTL_UPE;
3193 goto allmulti;
3194 }
3195
3196 /*
3197 * Set the station address in the first RAL slot, and
3198 * clear the remaining slots.
3199 */
3200 if (sc->sc_type == WM_T_ICH8)
3201		size = WM_RAL_TABSIZE_ICH8 - 1;
3202 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3203 || (sc->sc_type == WM_T_PCH))
3204 size = WM_RAL_TABSIZE_ICH8;
3205 else if (sc->sc_type == WM_T_PCH2)
3206 size = WM_RAL_TABSIZE_PCH2;
3207 else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
3208 size = WM_RAL_TABSIZE_PCH_LPT;
3209 else if (sc->sc_type == WM_T_82575)
3210 size = WM_RAL_TABSIZE_82575;
3211 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3212 size = WM_RAL_TABSIZE_82576;
3213 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3214 size = WM_RAL_TABSIZE_I350;
3215 else
3216 size = WM_RAL_TABSIZE;
3217 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3218
3219 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
3220 i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3221 switch (i) {
3222 case 0:
3223 /* We can use all entries */
3224 ralmax = size;
3225 break;
3226 case 1:
3227 /* Only RAR[0] */
3228 ralmax = 1;
3229 break;
3230 default:
3231 /* available SHRA + RAR[0] */
3232 ralmax = i + 1;
3233 }
3234 } else
3235 ralmax = size;
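	/*
	 * Clear the remaining slots. On PCH_LPT/PCH_SPT, slots at or
	 * above ralmax may be locked by firmware (FWSM_WLOCK_MAC), so
	 * they are left untouched.
	 */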
3236 for (i = 1; i < size; i++) {
3237 if (i < ralmax)
3238 wm_set_ral(sc, NULL, i);
3239 }
3240
3241 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3242 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3243 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3244 || (sc->sc_type == WM_T_PCH_SPT))
3245 size = WM_ICH8_MC_TABSIZE;
3246 else
3247 size = WM_MC_TABSIZE;
3248 /* Clear out the multicast table. */
3249 for (i = 0; i < size; i++)
3250 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3251
3252 ETHER_LOCK(ec);
3253 ETHER_FIRST_MULTI(step, ec, enm);
3254 while (enm != NULL) {
3255 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3256 ETHER_UNLOCK(ec);
3257 /*
3258 * We must listen to a range of multicast addresses.
3259 * For now, just accept all multicasts, rather than
3260 * trying to set only those filter bits needed to match
3261 * the range. (At this time, the only use of address
3262 * ranges is for IP multicast routing, for which the
3263 * range is big enough to require all bits set.)
3264 */
3265 goto allmulti;
3266 }
3267
3268 hash = wm_mchash(sc, enm->enm_addrlo);
3269
3270 reg = (hash >> 5);
3271 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3272 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3273 || (sc->sc_type == WM_T_PCH2)
3274 || (sc->sc_type == WM_T_PCH_LPT)
3275 || (sc->sc_type == WM_T_PCH_SPT))
3276 reg &= 0x1f;
3277 else
3278 reg &= 0x7f;
3279 bit = hash & 0x1f;
3280
3281 hash = CSR_READ(sc, mta_reg + (reg << 2));
3282 hash |= 1U << bit;
3283
3284 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3285			/*
3286			 * 82544 Errata 9: Certain registers cannot be written
3287			 * with particular alignments in PCI-X bus operation
3288			 * (FCAH, MTA and VFTA).
3289			 */
3290 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3291 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3292 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3293 } else
3294 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3295
3296 ETHER_NEXT_MULTI(step, enm);
3297 }
3298 ETHER_UNLOCK(ec);
3299
3300 ifp->if_flags &= ~IFF_ALLMULTI;
3301 goto setit;
3302
3303 allmulti:
3304 ifp->if_flags |= IFF_ALLMULTI;
3305 sc->sc_rctl |= RCTL_MPE;
3306
3307 setit:
3308 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3309 }
3310
3311 /* Reset and init related */
3312
3313 static void
3314 wm_set_vlan(struct wm_softc *sc)
3315 {
3316
3317 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3318 device_xname(sc->sc_dev), __func__));
3319
3320 /* Deal with VLAN enables. */
3321 if (VLAN_ATTACHED(&sc->sc_ethercom))
3322 sc->sc_ctrl |= CTRL_VME;
3323 else
3324 sc->sc_ctrl &= ~CTRL_VME;
3325
3326 /* Write the control registers. */
3327 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3328 }
3329
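	/*
	 * wm_set_pcie_completion_timeout:
	 *
	 *	If the PCIe completion timeout defaults to 0, set it to 10ms
	 *	via GCR on pre-version-2 devices, or to 16ms via the PCIe
	 *	device control 2 register otherwise. In all cases, disable
	 *	completion timeout resend.
	 */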
3330 static void
3331 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3332 {
3333 uint32_t gcr;
3334 pcireg_t ctrl2;
3335
3336 gcr = CSR_READ(sc, WMREG_GCR);
3337
3338 /* Only take action if timeout value is defaulted to 0 */
3339 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3340 goto out;
3341
3342 if ((gcr & GCR_CAP_VER2) == 0) {
3343 gcr |= GCR_CMPL_TMOUT_10MS;
3344 goto out;
3345 }
3346
3347 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3348 sc->sc_pcixe_capoff + PCIE_DCSR2);
3349 ctrl2 |= WM_PCIE_DCSR2_16MS;
3350 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3351 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3352
3353 out:
3354 /* Disable completion timeout resend */
3355 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3356
3357 CSR_WRITE(sc, WMREG_GCR, gcr);
3358 }
3359
3360 void
3361 wm_get_auto_rd_done(struct wm_softc *sc)
3362 {
3363 int i;
3364
3365 /* wait for eeprom to reload */
3366 switch (sc->sc_type) {
3367 case WM_T_82571:
3368 case WM_T_82572:
3369 case WM_T_82573:
3370 case WM_T_82574:
3371 case WM_T_82583:
3372 case WM_T_82575:
3373 case WM_T_82576:
3374 case WM_T_82580:
3375 case WM_T_I350:
3376 case WM_T_I354:
3377 case WM_T_I210:
3378 case WM_T_I211:
3379 case WM_T_80003:
3380 case WM_T_ICH8:
3381 case WM_T_ICH9:
3382 for (i = 0; i < 10; i++) {
3383 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3384 break;
3385 delay(1000);
3386 }
3387 if (i == 10) {
3388 log(LOG_ERR, "%s: auto read from eeprom failed to "
3389 "complete\n", device_xname(sc->sc_dev));
3390 }
3391 break;
3392 default:
3393 break;
3394 }
3395 }
3396
3397 void
3398 wm_lan_init_done(struct wm_softc *sc)
3399 {
3400 uint32_t reg = 0;
3401 int i;
3402
3403 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3404 device_xname(sc->sc_dev), __func__));
3405
3406 /* Wait for eeprom to reload */
3407 switch (sc->sc_type) {
3408 case WM_T_ICH10:
3409 case WM_T_PCH:
3410 case WM_T_PCH2:
3411 case WM_T_PCH_LPT:
3412 case WM_T_PCH_SPT:
3413 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3414 reg = CSR_READ(sc, WMREG_STATUS);
3415 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3416 break;
3417 delay(100);
3418 }
3419 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3420 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3421 "complete\n", device_xname(sc->sc_dev), __func__);
3422 }
3423 break;
3424 default:
3425 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3426 __func__);
3427 break;
3428 }
3429
3430 reg &= ~STATUS_LAN_INIT_DONE;
3431 CSR_WRITE(sc, WMREG_STATUS, reg);
3432 }
3433
3434 void
3435 wm_get_cfg_done(struct wm_softc *sc)
3436 {
3437 int mask;
3438 uint32_t reg;
3439 int i;
3440
3441 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3442 device_xname(sc->sc_dev), __func__));
3443
3444 /* Wait for eeprom to reload */
3445 switch (sc->sc_type) {
3446 case WM_T_82542_2_0:
3447 case WM_T_82542_2_1:
3448 /* null */
3449 break;
3450 case WM_T_82543:
3451 case WM_T_82544:
3452 case WM_T_82540:
3453 case WM_T_82545:
3454 case WM_T_82545_3:
3455 case WM_T_82546:
3456 case WM_T_82546_3:
3457 case WM_T_82541:
3458 case WM_T_82541_2:
3459 case WM_T_82547:
3460 case WM_T_82547_2:
3461 case WM_T_82573:
3462 case WM_T_82574:
3463 case WM_T_82583:
3464 /* generic */
3465 delay(10*1000);
3466 break;
3467 case WM_T_80003:
3468 case WM_T_82571:
3469 case WM_T_82572:
3470 case WM_T_82575:
3471 case WM_T_82576:
3472 case WM_T_82580:
3473 case WM_T_I350:
3474 case WM_T_I354:
3475 case WM_T_I210:
3476 case WM_T_I211:
3477 if (sc->sc_type == WM_T_82571) {
3478 /* Only 82571 shares port 0 */
3479 mask = EEMNGCTL_CFGDONE_0;
3480 } else
3481 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3482 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3483 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3484 break;
3485 delay(1000);
3486 }
3487 if (i >= WM_PHY_CFG_TIMEOUT) {
3488 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3489 device_xname(sc->sc_dev), __func__));
3490 }
3491 break;
3492 case WM_T_ICH8:
3493 case WM_T_ICH9:
3494 case WM_T_ICH10:
3495 case WM_T_PCH:
3496 case WM_T_PCH2:
3497 case WM_T_PCH_LPT:
3498 case WM_T_PCH_SPT:
3499 delay(10*1000);
3500 if (sc->sc_type >= WM_T_ICH10)
3501 wm_lan_init_done(sc);
3502 else
3503 wm_get_auto_rd_done(sc);
3504
3505 reg = CSR_READ(sc, WMREG_STATUS);
3506 if ((reg & STATUS_PHYRA) != 0)
3507 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3508 break;
3509 default:
3510 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3511 __func__);
3512 break;
3513 }
3514 }
3515
3516 /* Init hardware bits */
3517 void
3518 wm_initialize_hardware_bits(struct wm_softc *sc)
3519 {
3520 uint32_t tarc0, tarc1, reg;
3521
3522 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3523 device_xname(sc->sc_dev), __func__));
3524
3525 /* For 82571 variant, 80003 and ICHs */
3526 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3527 || (sc->sc_type >= WM_T_80003)) {
3528
3529 /* Transmit Descriptor Control 0 */
3530 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3531 reg |= TXDCTL_COUNT_DESC;
3532 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3533
3534 /* Transmit Descriptor Control 1 */
3535 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3536 reg |= TXDCTL_COUNT_DESC;
3537 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3538
3539 /* TARC0 */
3540 tarc0 = CSR_READ(sc, WMREG_TARC0);
3541 switch (sc->sc_type) {
3542 case WM_T_82571:
3543 case WM_T_82572:
3544 case WM_T_82573:
3545 case WM_T_82574:
3546 case WM_T_82583:
3547 case WM_T_80003:
3548 /* Clear bits 30..27 */
3549 tarc0 &= ~__BITS(30, 27);
3550 break;
3551 default:
3552 break;
3553 }
3554
3555 switch (sc->sc_type) {
3556 case WM_T_82571:
3557 case WM_T_82572:
3558 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3559
3560 tarc1 = CSR_READ(sc, WMREG_TARC1);
3561 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3562 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3563 /* 8257[12] Errata No.7 */
3564			tarc1 |= __BIT(22);	/* TARC1 bit 22 */
3565
3566 /* TARC1 bit 28 */
3567 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3568 tarc1 &= ~__BIT(28);
3569 else
3570 tarc1 |= __BIT(28);
3571 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3572
3573 /*
3574 * 8257[12] Errata No.13
3575			 * Disable Dynamic Clock Gating.
3576 */
3577 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3578 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3579 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3580 break;
3581 case WM_T_82573:
3582 case WM_T_82574:
3583 case WM_T_82583:
3584 if ((sc->sc_type == WM_T_82574)
3585 || (sc->sc_type == WM_T_82583))
3586 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3587
3588 /* Extended Device Control */
3589 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3590 reg &= ~__BIT(23); /* Clear bit 23 */
3591 reg |= __BIT(22); /* Set bit 22 */
3592 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3593
3594 /* Device Control */
3595 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3596 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3597
3598 /* PCIe Control Register */
3599 /*
3600 * 82573 Errata (unknown).
3601 *
3602 * 82574 Errata 25 and 82583 Errata 12
3603 * "Dropped Rx Packets":
3604			 * NVM image versions 2.1.4 and newer do not have this bug.
3605 */
3606 reg = CSR_READ(sc, WMREG_GCR);
3607 reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
3608 CSR_WRITE(sc, WMREG_GCR, reg);
3609
3610 if ((sc->sc_type == WM_T_82574)
3611 || (sc->sc_type == WM_T_82583)) {
3612 /*
3613 * Document says this bit must be set for
3614 * proper operation.
3615 */
3616 reg = CSR_READ(sc, WMREG_GCR);
3617 reg |= __BIT(22);
3618 CSR_WRITE(sc, WMREG_GCR, reg);
3619
3620 /*
3621				 * Apply a workaround for the hardware errata
3622				 * documented in the errata docs.  This fixes an
3623				 * issue where some error-prone or unreliable
3624				 * PCIe completions occur, particularly with
3625				 * ASPM enabled.  Without the fix, the issue can
3626				 * cause Tx timeouts.
3627 */
3628 reg = CSR_READ(sc, WMREG_GCR2);
3629 reg |= __BIT(0);
3630 CSR_WRITE(sc, WMREG_GCR2, reg);
3631 }
3632 break;
3633 case WM_T_80003:
3634 /* TARC0 */
3635 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3636 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3637				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3638
3639 /* TARC1 bit 28 */
3640 tarc1 = CSR_READ(sc, WMREG_TARC1);
3641 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3642 tarc1 &= ~__BIT(28);
3643 else
3644 tarc1 |= __BIT(28);
3645 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3646 break;
3647 case WM_T_ICH8:
3648 case WM_T_ICH9:
3649 case WM_T_ICH10:
3650 case WM_T_PCH:
3651 case WM_T_PCH2:
3652 case WM_T_PCH_LPT:
3653 case WM_T_PCH_SPT:
3654 /* TARC0 */
3655 if ((sc->sc_type == WM_T_ICH8)
3656 || (sc->sc_type == WM_T_PCH_SPT)) {
3657 /* Set TARC0 bits 29 and 28 */
3658 tarc0 |= __BITS(29, 28);
3659 }
3660 /* Set TARC0 bits 23,24,26,27 */
3661 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3662
3663 /* CTRL_EXT */
3664 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3665 reg |= __BIT(22); /* Set bit 22 */
3666 /*
3667 * Enable PHY low-power state when MAC is at D3
3668 * w/o WoL
3669 */
3670 if (sc->sc_type >= WM_T_PCH)
3671 reg |= CTRL_EXT_PHYPDEN;
3672 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3673
3674 /* TARC1 */
3675 tarc1 = CSR_READ(sc, WMREG_TARC1);
3676 /* bit 28 */
3677 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3678 tarc1 &= ~__BIT(28);
3679 else
3680 tarc1 |= __BIT(28);
3681 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3682 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3683
3684 /* Device Status */
3685 if (sc->sc_type == WM_T_ICH8) {
3686 reg = CSR_READ(sc, WMREG_STATUS);
3687 reg &= ~__BIT(31);
3688 CSR_WRITE(sc, WMREG_STATUS, reg);
3690			}
3691
3692 /* IOSFPC */
3693 if (sc->sc_type == WM_T_PCH_SPT) {
3694 reg = CSR_READ(sc, WMREG_IOSFPC);
3695				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
3696 CSR_WRITE(sc, WMREG_IOSFPC, reg);
3697 }
3698 /*
3699			 * To work around a descriptor data corruption issue
3700			 * seen with NFS v2 UDP traffic, just disable the NFS
3701			 * filtering capability.
3702 */
3703 reg = CSR_READ(sc, WMREG_RFCTL);
3704 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3705 CSR_WRITE(sc, WMREG_RFCTL, reg);
3706 break;
3707 default:
3708 break;
3709 }
3710 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3711
3712 /*
3713 * 8257[12] Errata No.52 and some others.
3714 * Avoid RSS Hash Value bug.
3715 */
3716 switch (sc->sc_type) {
3717 case WM_T_82571:
3718 case WM_T_82572:
3719 case WM_T_82573:
3720 case WM_T_80003:
3721 case WM_T_ICH8:
3722 reg = CSR_READ(sc, WMREG_RFCTL);
3723			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
3724 CSR_WRITE(sc, WMREG_RFCTL, reg);
3725 break;
3726 default:
3727 break;
3728 }
3729 }
3730 }
3731
3732 static uint32_t
3733 wm_rxpbs_adjust_82580(uint32_t val)
3734 {
3735 uint32_t rv = 0;
3736
3737 if (val < __arraycount(wm_82580_rxpbs_table))
3738 rv = wm_82580_rxpbs_table[val];
3739
3740 return rv;
3741 }
3742
3743 /*
3744 * wm_reset_phy:
3745 *
3746 * generic PHY reset function.
3747 * Same as e1000_phy_hw_reset_generic()
3748 */
3749 static void
3750 wm_reset_phy(struct wm_softc *sc)
3751 {
3752 uint32_t reg;
3753
3754 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3755 device_xname(sc->sc_dev), __func__));
3756 if (wm_phy_resetisblocked(sc))
3757 return;
3758
3759 sc->phy.acquire(sc);
3760
3761 reg = CSR_READ(sc, WMREG_CTRL);
3762 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
3763 CSR_WRITE_FLUSH(sc);
3764
3765 delay(sc->phy.reset_delay_us);
3766
3767 CSR_WRITE(sc, WMREG_CTRL, reg);
3768 CSR_WRITE_FLUSH(sc);
3769
3770 delay(150);
3771
3772 sc->phy.release(sc);
3773
3774 wm_get_cfg_done(sc);
3775 }
3776
3777 static void
3778 wm_flush_desc_rings(struct wm_softc *sc)
3779 {
3780 pcireg_t preg;
3781 uint32_t reg;
3782 int nexttx;
3783
3784 /* First, disable MULR fix in FEXTNVM11 */
3785 reg = CSR_READ(sc, WMREG_FEXTNVM11);
3786 reg |= FEXTNVM11_DIS_MULRFIX;
3787 CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
3788
3789 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
3790 reg = CSR_READ(sc, WMREG_TDLEN(0));
3791 if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
3792 struct wm_txqueue *txq;
3793 wiseman_txdesc_t *txd;
3794
3795 /* TX */
3796 printf("%s: Need TX flush (reg = %08x, len = %u)\n",
3797 device_xname(sc->sc_dev), preg, reg);
3798 reg = CSR_READ(sc, WMREG_TCTL);
3799 CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
3800
3801 txq = &sc->sc_queue[0].wmq_txq;
3802 nexttx = txq->txq_next;
3803 txd = &txq->txq_descs[nexttx];
3804 wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
3805		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
3806 txd->wtx_fields.wtxu_status = 0;
3807 txd->wtx_fields.wtxu_options = 0;
3808 txd->wtx_fields.wtxu_vlan = 0;
3809
3810 bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
3811 BUS_SPACE_BARRIER_WRITE);
3812
3813 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
3814 CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
3815 bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
3816 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
3817 delay(250);
3818 }
3819 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
3820 if (preg & DESCRING_STATUS_FLUSH_REQ) {
3821 uint32_t rctl;
3822
3823 /* RX */
3824 printf("%s: Need RX flush (reg = %08x)\n",
3825 device_xname(sc->sc_dev), preg);
3826 rctl = CSR_READ(sc, WMREG_RCTL);
3827 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
3828 CSR_WRITE_FLUSH(sc);
3829 delay(150);
3830
3831 reg = CSR_READ(sc, WMREG_RXDCTL(0));
3832 /* zero the lower 14 bits (prefetch and host thresholds) */
3833 reg &= 0xffffc000;
3834 /*
3835		 * Update the thresholds: set the prefetch threshold to 31
3836		 * and the host threshold to 1, and make sure the
3837		 * granularity is "descriptors" and not "cache lines".
3838 */
3839 reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
3840 CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
3841
3842 /*
3843 * momentarily enable the RX ring for the changes to take
3844 * effect
3845 */
3846 CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
3847 CSR_WRITE_FLUSH(sc);
3848 delay(150);
3849 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
3850 }
3851 }
3852
3853 /*
3854 * wm_reset:
3855 *
3856 * Reset the i82542 chip.
3857 */
3858 static void
3859 wm_reset(struct wm_softc *sc)
3860 {
3861 int phy_reset = 0;
3862 int i, error = 0;
3863 uint32_t reg;
3864
3865 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3866 device_xname(sc->sc_dev), __func__));
3867 KASSERT(sc->sc_type != 0);
3868
3869 /*
3870 * Allocate on-chip memory according to the MTU size.
3871 * The Packet Buffer Allocation register must be written
3872 * before the chip is reset.
3873 */
3874 switch (sc->sc_type) {
3875 case WM_T_82547:
3876 case WM_T_82547_2:
3877 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3878 PBA_22K : PBA_30K;
3879 for (i = 0; i < sc->sc_nqueues; i++) {
3880 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3881 txq->txq_fifo_head = 0;
3882 txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3883 txq->txq_fifo_size =
3884 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3885 txq->txq_fifo_stall = 0;
3886 }
3887 break;
3888 case WM_T_82571:
3889 case WM_T_82572:
3890	case WM_T_82575:	/* XXX need special handling for jumbo frames */
3891 case WM_T_80003:
3892 sc->sc_pba = PBA_32K;
3893 break;
3894 case WM_T_82573:
3895 sc->sc_pba = PBA_12K;
3896 break;
3897 case WM_T_82574:
3898 case WM_T_82583:
3899 sc->sc_pba = PBA_20K;
3900 break;
3901 case WM_T_82576:
3902 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3903 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3904 break;
3905 case WM_T_82580:
3906 case WM_T_I350:
3907 case WM_T_I354:
3908 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3909 break;
3910 case WM_T_I210:
3911 case WM_T_I211:
3912 sc->sc_pba = PBA_34K;
3913 break;
3914 case WM_T_ICH8:
3915 /* Workaround for a bit corruption issue in FIFO memory */
3916 sc->sc_pba = PBA_8K;
3917 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3918 break;
3919 case WM_T_ICH9:
3920 case WM_T_ICH10:
3921 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3922 PBA_14K : PBA_10K;
3923 break;
3924 case WM_T_PCH:
3925 case WM_T_PCH2:
3926 case WM_T_PCH_LPT:
3927 case WM_T_PCH_SPT:
3928 sc->sc_pba = PBA_26K;
3929 break;
3930 default:
3931 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3932 PBA_40K : PBA_48K;
3933 break;
3934 }
3935 /*
3936	 * Only old or non-multiqueue devices have the PBA register.
3937 * XXX Need special handling for 82575.
3938 */
3939 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3940 || (sc->sc_type == WM_T_82575))
3941 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3942
3943 /* Prevent the PCI-E bus from sticking */
3944 if (sc->sc_flags & WM_F_PCIE) {
3945 int timeout = 800;
3946
3947 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3948 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3949
3950 while (timeout--) {
3951 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3952 == 0)
3953 break;
3954 delay(100);
3955 }
3956 }
3957
3958	/* Set the completion timeout for the interface */
3959 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3960 || (sc->sc_type == WM_T_82580)
3961 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3962 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3963 wm_set_pcie_completion_timeout(sc);
3964
3965 /* Clear interrupt */
3966 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3967 if (sc->sc_nintrs > 1) {
3968 if (sc->sc_type != WM_T_82574) {
3969 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3970 CSR_WRITE(sc, WMREG_EIAC, 0);
3971 } else {
3972 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3973 }
3974 }
3975
3976 /* Stop the transmit and receive processes. */
3977 CSR_WRITE(sc, WMREG_RCTL, 0);
3978 sc->sc_rctl &= ~RCTL_EN;
3979 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3980 CSR_WRITE_FLUSH(sc);
3981
3982 /* XXX set_tbi_sbp_82543() */
3983
3984 delay(10*1000);
3985
3986 /* Must acquire the MDIO ownership before MAC reset */
3987 switch (sc->sc_type) {
3988 case WM_T_82573:
3989 case WM_T_82574:
3990 case WM_T_82583:
3991 error = wm_get_hw_semaphore_82573(sc);
3992 break;
3993 default:
3994 break;
3995 }
3996
3997 /*
3998 * 82541 Errata 29? & 82547 Errata 28?
3999	 * See also the description of the PHY_RST bit in the CTRL
4000	 * register in 8254x_GBe_SDM.pdf.
4001 */
4002 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4003 CSR_WRITE(sc, WMREG_CTRL,
4004 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4005 CSR_WRITE_FLUSH(sc);
4006 delay(5000);
4007 }
4008
4009 switch (sc->sc_type) {
4010 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4011 case WM_T_82541:
4012 case WM_T_82541_2:
4013 case WM_T_82547:
4014 case WM_T_82547_2:
4015 /*
4016 * On some chipsets, a reset through a memory-mapped write
4017 * cycle can cause the chip to reset before completing the
4018		 * write cycle. This causes a major headache that can be
4019 * avoided by issuing the reset via indirect register writes
4020 * through I/O space.
4021 *
4022 * So, if we successfully mapped the I/O BAR at attach time,
4023 * use that. Otherwise, try our luck with a memory-mapped
4024 * reset.
4025 */
4026 if (sc->sc_flags & WM_F_IOH_VALID)
4027 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4028 else
4029 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4030 break;
4031 case WM_T_82545_3:
4032 case WM_T_82546_3:
4033 /* Use the shadow control register on these chips. */
4034 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4035 break;
4036 case WM_T_80003:
4037 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4038 sc->phy.acquire(sc);
4039 CSR_WRITE(sc, WMREG_CTRL, reg);
4040 sc->phy.release(sc);
4041 break;
4042 case WM_T_ICH8:
4043 case WM_T_ICH9:
4044 case WM_T_ICH10:
4045 case WM_T_PCH:
4046 case WM_T_PCH2:
4047 case WM_T_PCH_LPT:
4048 case WM_T_PCH_SPT:
4049 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4050 if (wm_phy_resetisblocked(sc) == false) {
4051 /*
4052 * Gate automatic PHY configuration by hardware on
4053 * non-managed 82579
4054 */
4055 if ((sc->sc_type == WM_T_PCH2)
4056 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4057 == 0))
4058 wm_gate_hw_phy_config_ich8lan(sc, true);
4059
4060 reg |= CTRL_PHY_RESET;
4061 phy_reset = 1;
4062 } else
4063 printf("XXX reset is blocked!!!\n");
4064 sc->phy.acquire(sc);
4065 CSR_WRITE(sc, WMREG_CTRL, reg);
4066		/* Don't insert a completion barrier during reset */
4067 delay(20*1000);
4068 mutex_exit(sc->sc_ich_phymtx);
4069 break;
4070 case WM_T_82580:
4071 case WM_T_I350:
4072 case WM_T_I354:
4073 case WM_T_I210:
4074 case WM_T_I211:
4075 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4076 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
4077 CSR_WRITE_FLUSH(sc);
4078 delay(5000);
4079 break;
4080 case WM_T_82542_2_0:
4081 case WM_T_82542_2_1:
4082 case WM_T_82543:
4083 case WM_T_82540:
4084 case WM_T_82545:
4085 case WM_T_82546:
4086 case WM_T_82571:
4087 case WM_T_82572:
4088 case WM_T_82573:
4089 case WM_T_82574:
4090 case WM_T_82575:
4091 case WM_T_82576:
4092 case WM_T_82583:
4093 default:
4094 /* Everything else can safely use the documented method. */
4095 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4096 break;
4097 }
4098
4099 /* Must release the MDIO ownership after MAC reset */
4100 switch (sc->sc_type) {
4101 case WM_T_82573:
4102 case WM_T_82574:
4103 case WM_T_82583:
4104 if (error == 0)
4105 wm_put_hw_semaphore_82573(sc);
4106 break;
4107 default:
4108 break;
4109 }
4110
4111 if (phy_reset != 0)
4112 wm_get_cfg_done(sc);
4113
4114 /* reload EEPROM */
4115 switch (sc->sc_type) {
4116 case WM_T_82542_2_0:
4117 case WM_T_82542_2_1:
4118 case WM_T_82543:
4119 case WM_T_82544:
4120 delay(10);
4121 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4122 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4123 CSR_WRITE_FLUSH(sc);
4124 delay(2000);
4125 break;
4126 case WM_T_82540:
4127 case WM_T_82545:
4128 case WM_T_82545_3:
4129 case WM_T_82546:
4130 case WM_T_82546_3:
4131 delay(5*1000);
4132 /* XXX Disable HW ARPs on ASF enabled adapters */
4133 break;
4134 case WM_T_82541:
4135 case WM_T_82541_2:
4136 case WM_T_82547:
4137 case WM_T_82547_2:
4138 delay(20000);
4139 /* XXX Disable HW ARPs on ASF enabled adapters */
4140 break;
4141 case WM_T_82571:
4142 case WM_T_82572:
4143 case WM_T_82573:
4144 case WM_T_82574:
4145 case WM_T_82583:
4146 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4147 delay(10);
4148 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4149 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4150 CSR_WRITE_FLUSH(sc);
4151 }
4152 /* check EECD_EE_AUTORD */
4153 wm_get_auto_rd_done(sc);
4154 /*
4155		 * PHY configuration from the NVM starts just after
4156		 * EECD_AUTO_RD is set.
4157 */
4158 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4159 || (sc->sc_type == WM_T_82583))
4160 delay(25*1000);
4161 break;
4162 case WM_T_82575:
4163 case WM_T_82576:
4164 case WM_T_82580:
4165 case WM_T_I350:
4166 case WM_T_I354:
4167 case WM_T_I210:
4168 case WM_T_I211:
4169 case WM_T_80003:
4170 /* check EECD_EE_AUTORD */
4171 wm_get_auto_rd_done(sc);
4172 break;
4173 case WM_T_ICH8:
4174 case WM_T_ICH9:
4175 case WM_T_ICH10:
4176 case WM_T_PCH:
4177 case WM_T_PCH2:
4178 case WM_T_PCH_LPT:
4179 case WM_T_PCH_SPT:
4180 break;
4181 default:
4182 panic("%s: unknown type\n", __func__);
4183 }
4184
4185 /* Check whether EEPROM is present or not */
4186 switch (sc->sc_type) {
4187 case WM_T_82575:
4188 case WM_T_82576:
4189 case WM_T_82580:
4190 case WM_T_I350:
4191 case WM_T_I354:
4192 case WM_T_ICH8:
4193 case WM_T_ICH9:
4194 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4195 /* Not found */
4196 sc->sc_flags |= WM_F_EEPROM_INVALID;
4197 if (sc->sc_type == WM_T_82575)
4198 wm_reset_init_script_82575(sc);
4199 }
4200 break;
4201 default:
4202 break;
4203 }
4204
4205 if ((sc->sc_type == WM_T_82580)
4206 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4207 /* clear global device reset status bit */
4208 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4209 }
4210
4211 /* Clear any pending interrupt events. */
4212 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4213 reg = CSR_READ(sc, WMREG_ICR);
4214 if (sc->sc_nintrs > 1) {
4215 if (sc->sc_type != WM_T_82574) {
4216 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4217 CSR_WRITE(sc, WMREG_EIAC, 0);
4218 } else
4219 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4220 }
4221
4222 /* reload sc_ctrl */
4223 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4224
4225 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
4226 wm_set_eee_i350(sc);
4227
4228 /* Clear the host wakeup bit after lcd reset */
4229 if (sc->sc_type >= WM_T_PCH) {
4230 reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
4231 BM_PORT_GEN_CFG);
4232 reg &= ~BM_WUC_HOST_WU_BIT;
4233 wm_gmii_hv_writereg(sc->sc_dev, 2,
4234 BM_PORT_GEN_CFG, reg);
4235 }
4236
4237 /*
4238 * For PCH, this write will make sure that any noise will be detected
4239 * as a CRC error and be dropped rather than show up as a bad packet
4240 * to the DMA engine
4241 */
4242 if (sc->sc_type == WM_T_PCH)
4243 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4244
4245 if (sc->sc_type >= WM_T_82544)
4246 CSR_WRITE(sc, WMREG_WUC, 0);
4247
4248 wm_reset_mdicnfg_82580(sc);
4249
4250 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
4251 wm_pll_workaround_i210(sc);
4252 }
4253
4254 /*
4255 * wm_add_rxbuf:
4256 *
4257 *	Add a receive buffer to the indicated descriptor.
4258 */
4259 static int
4260 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
4261 {
4262 struct wm_softc *sc = rxq->rxq_sc;
4263 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
4264 struct mbuf *m;
4265 int error;
4266
4267 KASSERT(mutex_owned(rxq->rxq_lock));
4268
4269 MGETHDR(m, M_DONTWAIT, MT_DATA);
4270 if (m == NULL)
4271 return ENOBUFS;
4272
4273 MCLGET(m, M_DONTWAIT);
4274 if ((m->m_flags & M_EXT) == 0) {
4275 m_freem(m);
4276 return ENOBUFS;
4277 }
4278
4279 if (rxs->rxs_mbuf != NULL)
4280 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4281
4282 rxs->rxs_mbuf = m;
4283
4284 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4285 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4286 BUS_DMA_READ | BUS_DMA_NOWAIT);
4287 if (error) {
4288 /* XXX XXX XXX */
4289 aprint_error_dev(sc->sc_dev,
4290 "unable to load rx DMA map %d, error = %d\n",
4291 idx, error);
4292 panic("wm_add_rxbuf");
4293 }
4294
4295 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4296 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4297
4298 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4299 if ((sc->sc_rctl & RCTL_EN) != 0)
4300 wm_init_rxdesc(rxq, idx);
4301 } else
4302 wm_init_rxdesc(rxq, idx);
4303
4304 return 0;
4305 }
4306
4307 /*
4308 * wm_rxdrain:
4309 *
4310 * Drain the receive queue.
4311 */
4312 static void
4313 wm_rxdrain(struct wm_rxqueue *rxq)
4314 {
4315 struct wm_softc *sc = rxq->rxq_sc;
4316 struct wm_rxsoft *rxs;
4317 int i;
4318
4319 KASSERT(mutex_owned(rxq->rxq_lock));
4320
4321 for (i = 0; i < WM_NRXDESC; i++) {
4322 rxs = &rxq->rxq_soft[i];
4323 if (rxs->rxs_mbuf != NULL) {
4324 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4325 m_freem(rxs->rxs_mbuf);
4326 rxs->rxs_mbuf = NULL;
4327 }
4328 }
4329 }
4330
4331
4332 /*
4333 * XXX copy from FreeBSD's sys/net/rss_config.c
4334 */
4335 /*
4336 * RSS secret key, intended to prevent attacks on load-balancing. Its
4337 * effectiveness may be limited by algorithm choice and available entropy
4338 * during the boot.
4339 *
4340 * XXXRW: And that we don't randomize it yet!
4341 *
4342 * This is the default Microsoft RSS specification key which is also
4343 * the Chelsio T5 firmware default key.
4344 */
4345 #define RSS_KEYSIZE 40
4346 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
4347 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
4348 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
4349 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
4350 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
4351 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
4352 };
4353
4354 /*
4355 * Caller must pass an array of size sizeof(rss_key).
4356 *
4357 * XXX
4358 * Since if_ixgbe may use this function, it should not be an
4359 * if_wm-specific function.
4360 */
4361 static void
4362 wm_rss_getkey(uint8_t *key)
4363 {
4364
4365 memcpy(key, wm_rss_key, sizeof(wm_rss_key));
4366 }
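
/*
 * For illustration only: a minimal sketch of the Microsoft RSS
 * (Toeplitz) hash that consumes a 40-byte key like wm_rss_key above.
 * The MAC computes this hash in hardware from the key loaded into the
 * RSSRK registers, so the function below is not driver code; the name
 * wm_toeplitz_hash and its use are assumptions made for the example.
 * For every set bit of the input n-tuple (MSB first), the current
 * 32-bit window of the key is XORed into the result, and the window
 * then slides left by one key bit.
 */
#if 0	/* example only, not compiled */
static uint32_t
wm_toeplitz_hash(const uint8_t *key, const uint8_t *data, size_t datalen)
{
	uint32_t hash = 0, window;
	size_t i;
	int b;

	/* The initial window is the first four key bytes. */
	window = ((uint32_t)key[0] << 24) | ((uint32_t)key[1] << 16)
	    | ((uint32_t)key[2] << 8) | key[3];

	/* Requires datalen + 4 <= key length (40 bytes here). */
	for (i = 0; i < datalen; i++) {
		for (b = 7; b >= 0; b--) {
			if (data[i] & (1U << b))
				hash ^= window;
			/* Slide in the next key bit (MSB first). */
			window = (window << 1) | ((key[i + 4] >> b) & 1);
		}
	}
	return hash;
}
#endif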
4367
4368 /*
4369 * Setup registers for RSS.
4370 *
4371 * XXX VMDq is not supported yet.
4372 */
4373 static void
4374 wm_init_rss(struct wm_softc *sc)
4375 {
4376 uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4377 int i;
4378
4379 CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
4380
4381 for (i = 0; i < RETA_NUM_ENTRIES; i++) {
4382 int qid, reta_ent;
4383
4384 qid = i % sc->sc_nqueues;
4385		switch (sc->sc_type) {
4386 case WM_T_82574:
4387 reta_ent = __SHIFTIN(qid,
4388 RETA_ENT_QINDEX_MASK_82574);
4389 break;
4390 case WM_T_82575:
4391 reta_ent = __SHIFTIN(qid,
4392 RETA_ENT_QINDEX1_MASK_82575);
4393 break;
4394 default:
4395 reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
4396 break;
4397 }
4398
4399 reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
4400 reta_reg &= ~RETA_ENTRY_MASK_Q(i);
4401 reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
4402 CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
4403 }
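
	/*
	 * At this point, since qid = i % sc_nqueues above, RETA entry i
	 * points at queue i modulo the queue count (e.g. with four
	 * queues the table reads 0,1,2,3,0,1,...), spreading the hash
	 * buckets evenly across the queues.
	 */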
4404
4405 wm_rss_getkey((uint8_t *)rss_key);
4406 for (i = 0; i < RSSRK_NUM_REGS; i++)
4407 CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
4408
4409 if (sc->sc_type == WM_T_82574)
4410 mrqc = MRQC_ENABLE_RSS_MQ_82574;
4411 else
4412 mrqc = MRQC_ENABLE_RSS_MQ;
4413
4414 /* XXXX
4415 * The same as FreeBSD igb.
4416	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
4417 */
4418 mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
4419 mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
4420 mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
4421 mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
4422
4423 CSR_WRITE(sc, WMREG_MRQC, mrqc);
4424 }
4425
4426 /*
4427 * Adjust the TX and RX queue numbers which the system actually uses.
4428 *
4429 * The numbers are affected by the parameters below.
4430 * - The number of hardware queues
4431 * - The number of MSI-X vectors (= "nvectors" argument)
4432 * - ncpu
4433 */
4434 static void
4435 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
4436 {
4437 int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
4438
4439 if (nvectors < 2) {
4440 sc->sc_nqueues = 1;
4441 return;
4442 }
4443
4444	switch (sc->sc_type) {
4445 case WM_T_82572:
4446 hw_ntxqueues = 2;
4447 hw_nrxqueues = 2;
4448 break;
4449 case WM_T_82574:
4450 hw_ntxqueues = 2;
4451 hw_nrxqueues = 2;
4452 break;
4453 case WM_T_82575:
4454 hw_ntxqueues = 4;
4455 hw_nrxqueues = 4;
4456 break;
4457 case WM_T_82576:
4458 hw_ntxqueues = 16;
4459 hw_nrxqueues = 16;
4460 break;
4461 case WM_T_82580:
4462 case WM_T_I350:
4463 case WM_T_I354:
4464 hw_ntxqueues = 8;
4465 hw_nrxqueues = 8;
4466 break;
4467 case WM_T_I210:
4468 hw_ntxqueues = 4;
4469 hw_nrxqueues = 4;
4470 break;
4471 case WM_T_I211:
4472 hw_ntxqueues = 2;
4473 hw_nrxqueues = 2;
4474 break;
4475 /*
4476	 * Since the ethernet controllers below do not support MSI-X,
4477	 * this driver does not use multiple queues on them:
4478 * - WM_T_80003
4479 * - WM_T_ICH8
4480 * - WM_T_ICH9
4481 * - WM_T_ICH10
4482 * - WM_T_PCH
4483 * - WM_T_PCH2
4484 * - WM_T_PCH_LPT
4485 */
4486 default:
4487 hw_ntxqueues = 1;
4488 hw_nrxqueues = 1;
4489 break;
4490 }
4491
4492 hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
4493
4494 /*
4495	 * Since more queues than MSI-X vectors cannot improve scaling, we
4496	 * limit the number of queues actually used.
4497 */
4498 if (nvectors < hw_nqueues + 1) {
4499 sc->sc_nqueues = nvectors - 1;
4500 } else {
4501 sc->sc_nqueues = hw_nqueues;
4502 }
4503
4504 /*
4505	 * Since more queues than CPUs cannot improve scaling, we limit
4506	 * the number of queues actually used.
4507 */
4508 if (ncpu < sc->sc_nqueues)
4509 sc->sc_nqueues = ncpu;
4510 }
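
/*
 * Worked example for wm_adjust_qnum() above (hypothetical numbers):
 * on an 82576 (16 hardware TX/RX queues) with nvectors == 5 and
 * ncpu == 8, the result is min(16, 5 - 1, 8) == 4 queues, because one
 * MSI-X vector is reserved for the link interrupt and only
 * nvectors - 1 vectors remain for TX/RX queue pairs.
 */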
4511
4512 /*
4513 * Both single-interrupt MSI and INTx can use this function.
4514 */
4515 static int
4516 wm_setup_legacy(struct wm_softc *sc)
4517 {
4518 pci_chipset_tag_t pc = sc->sc_pc;
4519 const char *intrstr = NULL;
4520 char intrbuf[PCI_INTRSTR_LEN];
4521 int error;
4522
4523 error = wm_alloc_txrx_queues(sc);
4524 if (error) {
4525 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4526 error);
4527 return ENOMEM;
4528 }
4529 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
4530 sizeof(intrbuf));
4531 #ifdef WM_MPSAFE
4532 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
4533 #endif
4534 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
4535 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
4536 if (sc->sc_ihs[0] == NULL) {
4537 aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
4538 (pci_intr_type(pc, sc->sc_intrs[0])
4539 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
4540 return ENOMEM;
4541 }
4542
4543 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4544 sc->sc_nintrs = 1;
4545 return 0;
4546 }
4547
4548 static int
4549 wm_setup_msix(struct wm_softc *sc)
4550 {
4551 void *vih;
4552 kcpuset_t *affinity;
4553 int qidx, error, intr_idx, txrx_established;
4554 pci_chipset_tag_t pc = sc->sc_pc;
4555 const char *intrstr = NULL;
4556 char intrbuf[PCI_INTRSTR_LEN];
4557 char intr_xname[INTRDEVNAMEBUF];
4558
4559 if (sc->sc_nqueues < ncpu) {
4560 /*
4561		 * To avoid other devices' interrupts, the affinity of Tx/Rx
4562		 * interrupts starts from CPU#1.
4563 */
4564 sc->sc_affinity_offset = 1;
4565 } else {
4566 /*
4567		 * In this case, this device uses all CPUs, so we unify the
4568		 * affinity cpu_index with the MSI-X vector number for readability.
4569 */
4570 sc->sc_affinity_offset = 0;
4571 }
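
	/*
	 * For example (hypothetical numbers): with sc_nqueues == 4 and
	 * ncpu == 8, sc_affinity_offset is 1, so the TXRX vectors
	 * established below land on CPUs 1..4 via
	 * (sc_affinity_offset + intr_idx) % ncpu, while the LINK vector
	 * keeps its default affinity.
	 */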
4572
4573 error = wm_alloc_txrx_queues(sc);
4574 if (error) {
4575 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4576 error);
4577 return ENOMEM;
4578 }
4579
4580 kcpuset_create(&affinity, false);
4581 intr_idx = 0;
4582
4583 /*
4584 * TX and RX
4585 */
4586 txrx_established = 0;
4587 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
4588 struct wm_queue *wmq = &sc->sc_queue[qidx];
4589 int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
4590
4591 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4592 sizeof(intrbuf));
4593 #ifdef WM_MPSAFE
4594 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4595 PCI_INTR_MPSAFE, true);
4596 #endif
4597 memset(intr_xname, 0, sizeof(intr_xname));
4598 snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
4599 device_xname(sc->sc_dev), qidx);
4600 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4601 IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
4602 if (vih == NULL) {
4603 aprint_error_dev(sc->sc_dev,
4604 "unable to establish MSI-X(for TX and RX)%s%s\n",
4605 intrstr ? " at " : "",
4606 intrstr ? intrstr : "");
4607
4608 goto fail;
4609 }
4610 kcpuset_zero(affinity);
4611 /* Round-robin affinity */
4612 kcpuset_set(affinity, affinity_to);
4613 error = interrupt_distribute(vih, affinity, NULL);
4614 if (error == 0) {
4615 aprint_normal_dev(sc->sc_dev,
4616 "for TX and RX interrupting at %s affinity to %u\n",
4617 intrstr, affinity_to);
4618 } else {
4619 aprint_normal_dev(sc->sc_dev,
4620 "for TX and RX interrupting at %s\n", intrstr);
4621 }
4622 sc->sc_ihs[intr_idx] = vih;
4623		wmq->wmq_id = qidx;
4624 wmq->wmq_intr_idx = intr_idx;
4625
4626 txrx_established++;
4627 intr_idx++;
4628 }
4629
4630 /*
4631 * LINK
4632 */
4633 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4634 sizeof(intrbuf));
4635 #ifdef WM_MPSAFE
4636 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
4637 #endif
4638 memset(intr_xname, 0, sizeof(intr_xname));
4639 snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
4640 device_xname(sc->sc_dev));
4641 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4642 IPL_NET, wm_linkintr_msix, sc, intr_xname);
4643 if (vih == NULL) {
4644 aprint_error_dev(sc->sc_dev,
4645 "unable to establish MSI-X(for LINK)%s%s\n",
4646 intrstr ? " at " : "",
4647 intrstr ? intrstr : "");
4648
4649 goto fail;
4650 }
4651	/* Keep the default affinity for the LINK interrupt */
4652 aprint_normal_dev(sc->sc_dev,
4653 "for LINK interrupting at %s\n", intrstr);
4654 sc->sc_ihs[intr_idx] = vih;
4655 sc->sc_link_intr_idx = intr_idx;
4656
4657 sc->sc_nintrs = sc->sc_nqueues + 1;
4658 kcpuset_destroy(affinity);
4659 return 0;
4660
4661 fail:
4662 for (qidx = 0; qidx < txrx_established; qidx++) {
4663 struct wm_queue *wmq = &sc->sc_queue[qidx];
4664		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
4665 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
4666 }
4667
4668 kcpuset_destroy(affinity);
4669 return ENOMEM;
4670 }
4671
4672 static void
4673 wm_turnon(struct wm_softc *sc)
4674 {
4675 int i;
4676
4677 KASSERT(WM_CORE_LOCKED(sc));
4678
4679	for (i = 0; i < sc->sc_nqueues; i++) {
4680 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4681 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
4682
4683 mutex_enter(txq->txq_lock);
4684 txq->txq_stopping = false;
4685 mutex_exit(txq->txq_lock);
4686
4687 mutex_enter(rxq->rxq_lock);
4688 rxq->rxq_stopping = false;
4689 mutex_exit(rxq->rxq_lock);
4690 }
4691
4692 sc->sc_core_stopping = false;
4693 }
4694
4695 static void
4696 wm_turnoff(struct wm_softc *sc)
4697 {
4698 int i;
4699
4700 KASSERT(WM_CORE_LOCKED(sc));
4701
4702 sc->sc_core_stopping = true;
4703
4704	for (i = 0; i < sc->sc_nqueues; i++) {
4705 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
4706 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4707
4708 mutex_enter(rxq->rxq_lock);
4709 rxq->rxq_stopping = true;
4710 mutex_exit(rxq->rxq_lock);
4711
4712 mutex_enter(txq->txq_lock);
4713 txq->txq_stopping = true;
4714 mutex_exit(txq->txq_lock);
4715 }
4716 }
4717
4718 /*
4719 * wm_init: [ifnet interface function]
4720 *
4721 * Initialize the interface.
4722 */
4723 static int
4724 wm_init(struct ifnet *ifp)
4725 {
4726 struct wm_softc *sc = ifp->if_softc;
4727 int ret;
4728
4729 WM_CORE_LOCK(sc);
4730 ret = wm_init_locked(ifp);
4731 WM_CORE_UNLOCK(sc);
4732
4733 return ret;
4734 }
4735
4736 static int
4737 wm_init_locked(struct ifnet *ifp)
4738 {
4739 struct wm_softc *sc = ifp->if_softc;
4740 int i, j, trynum, error = 0;
4741 uint32_t reg;
4742
4743 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4744 device_xname(sc->sc_dev), __func__));
4745 KASSERT(WM_CORE_LOCKED(sc));
4746
4747 /*
4748	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4749	 * There is a small but measurable benefit to avoiding the adjustment
4750 * of the descriptor so that the headers are aligned, for normal mtu,
4751 * on such platforms. One possibility is that the DMA itself is
4752 * slightly more efficient if the front of the entire packet (instead
4753 * of the front of the headers) is aligned.
4754 *
4755 * Note we must always set align_tweak to 0 if we are using
4756 * jumbo frames.
4757 */
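	/*
	 * Concretely: the Ethernet header is 14 bytes, so with a 2-byte
	 * tweak the IP header that follows starts at offset 16 and is
	 * therefore 4-byte aligned, which strict-alignment platforms
	 * require.
	 */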
4758 #ifdef __NO_STRICT_ALIGNMENT
4759 sc->sc_align_tweak = 0;
4760 #else
4761 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4762 sc->sc_align_tweak = 0;
4763 else
4764 sc->sc_align_tweak = 2;
4765 #endif /* __NO_STRICT_ALIGNMENT */
4766
4767 /* Cancel any pending I/O. */
4768 wm_stop_locked(ifp, 0);
4769
4770 /* update statistics before reset */
4771 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4772 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4773
4774 /* PCH_SPT hardware workaround */
4775 if (sc->sc_type == WM_T_PCH_SPT)
4776 wm_flush_desc_rings(sc);
4777
4778 /* Reset the chip to a known state. */
4779 wm_reset(sc);
4780
4781 /* AMT based hardware can now take control from firmware */
4782 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
4783 wm_get_hw_control(sc);
4784
4785 /* Init hardware bits */
4786 wm_initialize_hardware_bits(sc);
4787
4788 /* Reset the PHY. */
4789 if (sc->sc_flags & WM_F_HAS_MII)
4790 wm_gmii_reset(sc);
4791
4792 /* Calculate (E)ITR value */
4793 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4794 sc->sc_itr = 450; /* For EITR */
4795 } else if (sc->sc_type >= WM_T_82543) {
4796 /*
4797 * Set up the interrupt throttling register (units of 256ns)
4798 * Note that a footnote in Intel's documentation says this
4799 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4800 * or 10Mbit mode. Empirically, it appears to be the case
4801 * that that is also true for the 1024ns units of the other
4802 * interrupt-related timer registers -- so, really, we ought
4803 * to divide this value by 4 when the link speed is low.
4804 *
4805 * XXX implement this division at link speed change!
4806 */
4807
4808 /*
4809 * For N interrupts/sec, set this value to:
4810 * 1000000000 / (N * 256). Note that we set the
4811 * absolute and packet timer values to this value
4812 * divided by 4 to get "simple timer" behavior.
4813 */
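
		/*
		 * For example, the value below corresponds to N = 2604:
		 * 1000000000 / (2604 * 256) ~= 1500 in these 256ns units.
		 */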
4814
4815 sc->sc_itr = 1500; /* 2604 ints/sec */
4816 }
4817
4818 error = wm_init_txrx_queues(sc);
4819 if (error)
4820 goto out;
4821
4822 /*
4823 * Clear out the VLAN table -- we don't use it (yet).
4824 */
4825 CSR_WRITE(sc, WMREG_VET, 0);
4826 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4827 trynum = 10; /* Due to hw errata */
4828 else
4829 trynum = 1;
4830 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4831 for (j = 0; j < trynum; j++)
4832 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4833
4834 /*
4835 * Set up flow-control parameters.
4836 *
4837 * XXX Values could probably stand some tuning.
4838 */
4839 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4840 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4841 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
4842 && (sc->sc_type != WM_T_PCH_SPT)) {
4843 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4844 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4845 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4846 }
4847
4848 sc->sc_fcrtl = FCRTL_DFLT;
4849 if (sc->sc_type < WM_T_82543) {
4850 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4851 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4852 } else {
4853 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4854 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4855 }
4856
4857 if (sc->sc_type == WM_T_80003)
4858 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4859 else
4860 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4861
4862	/* Write the control register. */
4863 wm_set_vlan(sc);
4864
4865 if (sc->sc_flags & WM_F_HAS_MII) {
4866 int val;
4867
4868 switch (sc->sc_type) {
4869 case WM_T_80003:
4870 case WM_T_ICH8:
4871 case WM_T_ICH9:
4872 case WM_T_ICH10:
4873 case WM_T_PCH:
4874 case WM_T_PCH2:
4875 case WM_T_PCH_LPT:
4876 case WM_T_PCH_SPT:
4877 /*
4878			 * Set the MAC to wait the maximum time between each
4879			 * iteration, and increase the max iterations when
4880			 * polling the PHY; this fixes erroneous timeouts at
4881 * 10Mbps.
4882 */
4883 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4884 0xFFFF);
4885 val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
4886 val |= 0x3F;
4887 wm_kmrn_writereg(sc,
4888 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4889 break;
4890 default:
4891 break;
4892 }
4893
4894 if (sc->sc_type == WM_T_80003) {
4895 val = CSR_READ(sc, WMREG_CTRL_EXT);
4896 val &= ~CTRL_EXT_LINK_MODE_MASK;
4897 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4898
4899			/* Bypass RX and TX FIFOs */
4900 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4901 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4902 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4903 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4904 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4905 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4906 }
4907 }
4908 #if 0
4909 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4910 #endif
4911
4912 /* Set up checksum offload parameters. */
4913 reg = CSR_READ(sc, WMREG_RXCSUM);
4914 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4915 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4916 reg |= RXCSUM_IPOFL;
4917 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4918 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4919 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4920 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4921 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4922
4923 /* Set up MSI-X */
4924 if (sc->sc_nintrs > 1) {
4925 uint32_t ivar;
4926 struct wm_queue *wmq;
4927 int qid, qintr_idx;
4928
4929 if (sc->sc_type == WM_T_82575) {
4930 /* Interrupt control */
4931 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4932 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4933 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4934
4935 /* TX and RX */
4936 for (i = 0; i < sc->sc_nqueues; i++) {
4937 wmq = &sc->sc_queue[i];
4938 CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
4939 EITR_TX_QUEUE(wmq->wmq_id)
4940 | EITR_RX_QUEUE(wmq->wmq_id));
4941 }
4942 /* Link status */
4943 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
4944 EITR_OTHER);
4945 } else if (sc->sc_type == WM_T_82574) {
4946 /* Interrupt control */
4947 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4948 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4949 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4950
4951 ivar = 0;
4952 /* TX and RX */
4953 for (i = 0; i < sc->sc_nqueues; i++) {
4954 wmq = &sc->sc_queue[i];
4955 qid = wmq->wmq_id;
4956 qintr_idx = wmq->wmq_intr_idx;
4957
4958 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
4959 IVAR_TX_MASK_Q_82574(qid));
4960 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
4961 IVAR_RX_MASK_Q_82574(qid));
4962 }
4963 /* Link status */
4964 ivar |= __SHIFTIN((IVAR_VALID_82574
4965 | sc->sc_link_intr_idx), IVAR_OTHER_MASK);
4966 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4967 } else {
4968 /* Interrupt control */
4969 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
4970 | GPIE_EIAME | GPIE_PBA);
4971
4972 switch (sc->sc_type) {
4973 case WM_T_82580:
4974 case WM_T_I350:
4975 case WM_T_I354:
4976 case WM_T_I210:
4977 case WM_T_I211:
4978 /* TX and RX */
4979 for (i = 0; i < sc->sc_nqueues; i++) {
4980 wmq = &sc->sc_queue[i];
4981 qid = wmq->wmq_id;
4982 qintr_idx = wmq->wmq_intr_idx;
4983
4984 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4985 ivar &= ~IVAR_TX_MASK_Q(qid);
4986 ivar |= __SHIFTIN((qintr_idx
4987 | IVAR_VALID),
4988 IVAR_TX_MASK_Q(qid));
4989 ivar &= ~IVAR_RX_MASK_Q(qid);
4990 ivar |= __SHIFTIN((qintr_idx
4991 | IVAR_VALID),
4992 IVAR_RX_MASK_Q(qid));
4993 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
4994 }
4995 break;
4996 case WM_T_82576:
4997 /* TX and RX */
4998 for (i = 0; i < sc->sc_nqueues; i++) {
4999 wmq = &sc->sc_queue[i];
5000 qid = wmq->wmq_id;
5001 qintr_idx = wmq->wmq_intr_idx;
5002
5003 ivar = CSR_READ(sc,
5004 WMREG_IVAR_Q_82576(qid));
5005 ivar &= ~IVAR_TX_MASK_Q_82576(qid);
5006 ivar |= __SHIFTIN((qintr_idx
5007 | IVAR_VALID),
5008 IVAR_TX_MASK_Q_82576(qid));
5009 ivar &= ~IVAR_RX_MASK_Q_82576(qid);
5010 ivar |= __SHIFTIN((qintr_idx
5011 | IVAR_VALID),
5012 IVAR_RX_MASK_Q_82576(qid));
5013 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
5014 ivar);
5015 }
5016 break;
5017 default:
5018 break;
5019 }
5020
5021 /* Link status */
5022 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
5023 IVAR_MISC_OTHER);
5024 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
5025 }
5026
5027 if (sc->sc_nqueues > 1) {
5028 wm_init_rss(sc);
5029
5030			/*
5031			 * NOTE: Receive Full-Packet Checksum Offload is
5032			 * mutually exclusive with Multiqueue.  However,
5033			 * this is not the same as TCP/IP checksums, which
5034			 * still work.
5035			 */
5036 reg = CSR_READ(sc, WMREG_RXCSUM);
5037 reg |= RXCSUM_PCSD;
5038 CSR_WRITE(sc, WMREG_RXCSUM, reg);
5039 }
5040 }
5041
5042 /* Set up the interrupt registers. */
5043 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5044 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
5045 ICR_RXO | ICR_RXT0;
5046 if (sc->sc_nintrs > 1) {
5047 uint32_t mask;
5048 struct wm_queue *wmq;
5049
5050 switch (sc->sc_type) {
5051 case WM_T_82574:
5052 CSR_WRITE(sc, WMREG_EIAC_82574,
5053 WMREG_EIAC_82574_MSIX_MASK);
5054 sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
5055 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
5056 break;
5057 default:
5058 if (sc->sc_type == WM_T_82575) {
5059 mask = 0;
5060 for (i = 0; i < sc->sc_nqueues; i++) {
5061 wmq = &sc->sc_queue[i];
5062 mask |= EITR_TX_QUEUE(wmq->wmq_id);
5063 mask |= EITR_RX_QUEUE(wmq->wmq_id);
5064 }
5065 mask |= EITR_OTHER;
5066 } else {
5067 mask = 0;
5068 for (i = 0; i < sc->sc_nqueues; i++) {
5069 wmq = &sc->sc_queue[i];
5070 mask |= 1 << wmq->wmq_intr_idx;
5071 }
5072 mask |= 1 << sc->sc_link_intr_idx;
5073 }
5074 CSR_WRITE(sc, WMREG_EIAC, mask);
5075 CSR_WRITE(sc, WMREG_EIAM, mask);
5076 CSR_WRITE(sc, WMREG_EIMS, mask);
5077 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
5078 break;
5079 }
5080 } else
5081 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
5082
5083 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5084 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5085 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5086 || (sc->sc_type == WM_T_PCH_SPT)) {
5087 reg = CSR_READ(sc, WMREG_KABGTXD);
5088 reg |= KABGTXD_BGSQLBIAS;
5089 CSR_WRITE(sc, WMREG_KABGTXD, reg);
5090 }
5091
5092 /* Set up the inter-packet gap. */
5093 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
5094
5095 if (sc->sc_type >= WM_T_82543) {
5096 /*
5097		 * XXX 82574 has both ITR and EITR. Set EITR when we use
5098		 * the multiqueue function with MSI-X.
5099 */
5100 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5101 int qidx;
5102 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5103 struct wm_queue *wmq = &sc->sc_queue[qidx];
5104 CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
5105 sc->sc_itr);
5106 }
5107 /*
5108			 * Link interrupts occur much less frequently than
5109			 * TX and RX interrupts, so we don't tune the
5110			 * EITR(WM_MSIX_LINKINTR_IDX) value like FreeBSD's
5111			 * if_igb does.
5112 */
5113 } else
5114 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
5115 }
5116
5117 /* Set the VLAN ethernetype. */
5118 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
5119
5120 /*
5121 * Set up the transmit control register; we start out with
5122 * a collision distance suitable for FDX, but update it when
5123 * we resolve the media type.
5124 */
5125 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
5126 | TCTL_CT(TX_COLLISION_THRESHOLD)
5127 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5128 if (sc->sc_type >= WM_T_82571)
5129 sc->sc_tctl |= TCTL_MULR;
5130 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5131
5132 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5133 /* Write TDT after TCTL.EN is set. See the document. */
5134 CSR_WRITE(sc, WMREG_TDT(0), 0);
5135 }
5136
5137 if (sc->sc_type == WM_T_80003) {
5138 reg = CSR_READ(sc, WMREG_TCTL_EXT);
5139 reg &= ~TCTL_EXT_GCEX_MASK;
5140 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
5141 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
5142 }
5143
5144 /* Set the media. */
5145 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
5146 goto out;
5147
5148 /* Configure for OS presence */
5149 wm_init_manageability(sc);
5150
5151 /*
5152 * Set up the receive control register; we actually program
5153 * the register when we set the receive filter. Use multicast
5154 * address offset type 0.
5155 *
5156 * Only the i82544 has the ability to strip the incoming
5157 * CRC, so we don't enable that feature.
5158 */
5159 sc->sc_mchash_type = 0;
5160 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
5161 | RCTL_MO(sc->sc_mchash_type);
5162
5163 /*
5164	 * The I350 has a bug where it always strips the CRC whether asked
5165	 * to or not, so ask for a stripped CRC here and cope with it in rxeof.
5166 */
5167 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5168 || (sc->sc_type == WM_T_I210))
5169 sc->sc_rctl |= RCTL_SECRC;
5170
5171 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
5172 && (ifp->if_mtu > ETHERMTU)) {
5173 sc->sc_rctl |= RCTL_LPE;
5174 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5175 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
5176 }
5177
5178 if (MCLBYTES == 2048) {
5179 sc->sc_rctl |= RCTL_2k;
5180 } else {
5181 if (sc->sc_type >= WM_T_82543) {
5182 switch (MCLBYTES) {
5183 case 4096:
5184 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
5185 break;
5186 case 8192:
5187 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
5188 break;
5189 case 16384:
5190 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
5191 break;
5192 default:
5193 panic("wm_init: MCLBYTES %d unsupported",
5194 MCLBYTES);
5195 break;
5196 }
5197 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
5198 }
5199
5200 /* Set the receive filter. */
5201 wm_set_filter(sc);
5202
5203 /* Enable ECC */
5204 switch (sc->sc_type) {
5205 case WM_T_82571:
5206 reg = CSR_READ(sc, WMREG_PBA_ECC);
5207 reg |= PBA_ECC_CORR_EN;
5208 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
5209 break;
5210 case WM_T_PCH_LPT:
5211 case WM_T_PCH_SPT:
5212 reg = CSR_READ(sc, WMREG_PBECCSTS);
5213 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
5214 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
5215
5216 sc->sc_ctrl |= CTRL_MEHE;
5217 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5218 break;
5219 default:
5220 break;
5221 }
5222
5223	/* On 82575 and later, set RDT only if RX is enabled */
5224 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5225 int qidx;
5226 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5227 struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
5228 for (i = 0; i < WM_NRXDESC; i++) {
5229 mutex_enter(rxq->rxq_lock);
5230 wm_init_rxdesc(rxq, i);
5231 mutex_exit(rxq->rxq_lock);
5233			}
5234 }
5235 }
5236
5237 wm_turnon(sc);
5238
5239 /* Start the one second link check clock. */
5240 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
5241
5242 /* ...all done! */
5243 ifp->if_flags |= IFF_RUNNING;
5244 ifp->if_flags &= ~IFF_OACTIVE;
5245
5246 out:
5247 sc->sc_if_flags = ifp->if_flags;
5248 if (error)
5249 log(LOG_ERR, "%s: interface not running\n",
5250 device_xname(sc->sc_dev));
5251 return error;
5252 }
5253
5254 /*
5255 * wm_stop: [ifnet interface function]
5256 *
5257 * Stop transmission on the interface.
5258 */
5259 static void
5260 wm_stop(struct ifnet *ifp, int disable)
5261 {
5262 struct wm_softc *sc = ifp->if_softc;
5263
5264 WM_CORE_LOCK(sc);
5265 wm_stop_locked(ifp, disable);
5266 WM_CORE_UNLOCK(sc);
5267 }
5268
5269 static void
5270 wm_stop_locked(struct ifnet *ifp, int disable)
5271 {
5272 struct wm_softc *sc = ifp->if_softc;
5273 struct wm_txsoft *txs;
5274 int i, qidx;
5275
5276 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5277 device_xname(sc->sc_dev), __func__));
5278 KASSERT(WM_CORE_LOCKED(sc));
5279
5280 wm_turnoff(sc);
5281
5282 /* Stop the one second clock. */
5283 callout_stop(&sc->sc_tick_ch);
5284
5285 /* Stop the 82547 Tx FIFO stall check timer. */
5286 if (sc->sc_type == WM_T_82547)
5287 callout_stop(&sc->sc_txfifo_ch);
5288
5289 if (sc->sc_flags & WM_F_HAS_MII) {
5290 /* Down the MII. */
5291 mii_down(&sc->sc_mii);
5292 } else {
5293 #if 0
5294 /* Should we clear PHY's status properly? */
5295 wm_reset(sc);
5296 #endif
5297 }
5298
5299 /* Stop the transmit and receive processes. */
5300 CSR_WRITE(sc, WMREG_TCTL, 0);
5301 CSR_WRITE(sc, WMREG_RCTL, 0);
5302 sc->sc_rctl &= ~RCTL_EN;
5303
5304 /*
5305 * Clear the interrupt mask to ensure the device cannot assert its
5306 * interrupt line.
5307 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
5308 * service any currently pending or shared interrupt.
5309 */
5310 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5311 sc->sc_icr = 0;
5312 if (sc->sc_nintrs > 1) {
5313 if (sc->sc_type != WM_T_82574) {
5314 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5315 CSR_WRITE(sc, WMREG_EIAC, 0);
5316 } else
5317 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5318 }
5319
5320 /* Release any queued transmit buffers. */
5321 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5322 struct wm_queue *wmq = &sc->sc_queue[qidx];
5323 struct wm_txqueue *txq = &wmq->wmq_txq;
5324 mutex_enter(txq->txq_lock);
5325 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5326 txs = &txq->txq_soft[i];
5327 if (txs->txs_mbuf != NULL) {
5328 bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
5329 m_freem(txs->txs_mbuf);
5330 txs->txs_mbuf = NULL;
5331 }
5332 }
5333 mutex_exit(txq->txq_lock);
5334 }
5335
5336 /* Mark the interface as down and cancel the watchdog timer. */
5337 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5338 ifp->if_timer = 0;
5339
5340 if (disable) {
5341 for (i = 0; i < sc->sc_nqueues; i++) {
5342 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5343 mutex_enter(rxq->rxq_lock);
5344 wm_rxdrain(rxq);
5345 mutex_exit(rxq->rxq_lock);
5346 }
5347 }
5348
5349 #if 0 /* notyet */
5350 if (sc->sc_type >= WM_T_82544)
5351 CSR_WRITE(sc, WMREG_WUC, 0);
5352 #endif
5353 }
5354
5355 static void
5356 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5357 {
5358 struct mbuf *m;
5359 int i;
5360
5361 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5362 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5363 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5364 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5365 m->m_data, m->m_len, m->m_flags);
5366 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5367 i, i == 1 ? "" : "s");
5368 }
5369
5370 /*
5371 * wm_82547_txfifo_stall:
5372 *
5373 * Callout used to wait for the 82547 Tx FIFO to drain,
5374 * reset the FIFO pointers, and restart packet transmission.
5375 */
5376 static void
5377 wm_82547_txfifo_stall(void *arg)
5378 {
5379 struct wm_softc *sc = arg;
5380 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5381
5382 mutex_enter(txq->txq_lock);
5383
5384 if (txq->txq_stopping)
5385 goto out;
5386
5387 if (txq->txq_fifo_stall) {
5388 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
5389 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5390 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5391 /*
5392 * Packets have drained. Stop transmitter, reset
5393 * FIFO pointers, restart transmitter, and kick
5394 * the packet queue.
5395 */
5396 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5397 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5398 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
5399 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
5400 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
5401 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
5402 CSR_WRITE(sc, WMREG_TCTL, tctl);
5403 CSR_WRITE_FLUSH(sc);
5404
5405 txq->txq_fifo_head = 0;
5406 txq->txq_fifo_stall = 0;
5407 wm_start_locked(&sc->sc_ethercom.ec_if);
5408 } else {
5409 /*
5410 * Still waiting for packets to drain; try again in
5411 * another tick.
5412 */
5413 callout_schedule(&sc->sc_txfifo_ch, 1);
5414 }
5415 }
5416
5417 out:
5418 mutex_exit(txq->txq_lock);
5419 }
5420
5421 /*
5422 * wm_82547_txfifo_bugchk:
5423 *
5424 * Check for bug condition in the 82547 Tx FIFO. We need to
5425 * prevent enqueueing a packet that would wrap around the end
5426 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
5427 *
5428 * We do this by checking the amount of space before the end
5429 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
5430 * the Tx FIFO, wait for all remaining packets to drain, reset
5431 * the internal FIFO pointers to the beginning, and restart
5432 * transmission on the interface.
5433 */
5434 #define WM_FIFO_HDR 0x10
5435 #define WM_82547_PAD_LEN 0x3e0
5436 static int
5437 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5438 {
5439 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5440 int space = txq->txq_fifo_size - txq->txq_fifo_head;
5441 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
5442
5443 /* Just return if already stalled. */
5444 if (txq->txq_fifo_stall)
5445 return 1;
5446
5447 if (sc->sc_mii.mii_media_active & IFM_FDX) {
5448 		/* The stall bug only occurs in half-duplex mode. */
5449 goto send_packet;
5450 }
5451
5452 if (len >= WM_82547_PAD_LEN + space) {
5453 txq->txq_fifo_stall = 1;
5454 callout_schedule(&sc->sc_txfifo_ch, 1);
5455 return 1;
5456 }
5457
5458 send_packet:
5459 txq->txq_fifo_head += len;
5460 if (txq->txq_fifo_head >= txq->txq_fifo_size)
5461 txq->txq_fifo_head -= txq->txq_fifo_size;
5462
5463 return 0;
5464 }
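/*
 * Illustrative example of the check above (hypothetical values): with an
 * 8 KB Tx FIFO (txq_fifo_size = 0x2000) and txq_fifo_head = 0x1f00,
 * space = 0x100.  A 1514-byte frame gives
 * len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 0x600, and since
 * 0x600 >= WM_82547_PAD_LEN (0x3e0) + 0x100, transmission is stalled
 * until the FIFO drains instead of letting the frame wrap.
 */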
5465
5466 static int
5467 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5468 {
5469 int error;
5470
5471 /*
5472 * Allocate the control data structures, and create and load the
5473 * DMA map for it.
5474 *
5475 * NOTE: All Tx descriptors must be in the same 4G segment of
5476 * memory. So must Rx descriptors. We simplify by allocating
5477 * both sets within the same 4G segment.
5478 */
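	/*
	 * The 4G constraint is enforced below by passing 0x100000000ULL
	 * as the boundary argument to bus_dmamem_alloc(), which
	 * guarantees the allocation never crosses a 4 GB boundary.
	 */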
5479 if (sc->sc_type < WM_T_82544)
5480 WM_NTXDESC(txq) = WM_NTXDESC_82542;
5481 else
5482 WM_NTXDESC(txq) = WM_NTXDESC_82544;
5483 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5484 txq->txq_descsize = sizeof(nq_txdesc_t);
5485 else
5486 txq->txq_descsize = sizeof(wiseman_txdesc_t);
5487
5488 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
5489 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
5490 1, &txq->txq_desc_rseg, 0)) != 0) {
5491 aprint_error_dev(sc->sc_dev,
5492 "unable to allocate TX control data, error = %d\n",
5493 error);
5494 goto fail_0;
5495 }
5496
5497 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
5498 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
5499 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
5500 aprint_error_dev(sc->sc_dev,
5501 "unable to map TX control data, error = %d\n", error);
5502 goto fail_1;
5503 }
5504
5505 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
5506 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
5507 aprint_error_dev(sc->sc_dev,
5508 "unable to create TX control data DMA map, error = %d\n",
5509 error);
5510 goto fail_2;
5511 }
5512
5513 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
5514 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
5515 aprint_error_dev(sc->sc_dev,
5516 "unable to load TX control data DMA map, error = %d\n",
5517 error);
5518 goto fail_3;
5519 }
5520
5521 return 0;
5522
5523 fail_3:
5524 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5525 fail_2:
5526 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5527 WM_TXDESCS_SIZE(txq));
5528 fail_1:
5529 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5530 fail_0:
5531 return error;
5532 }
5533
5534 static void
5535 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5536 {
5537
5538 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
5539 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5540 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5541 WM_TXDESCS_SIZE(txq));
5542 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5543 }
5544
5545 static int
5546 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5547 {
5548 int error;
5549
5550 /*
5551 * Allocate the control data structures, and create and load the
5552 * DMA map for it.
5553 *
5554 * NOTE: All Tx descriptors must be in the same 4G segment of
5555 * memory. So must Rx descriptors. We simplify by allocating
5556 * both sets within the same 4G segment.
5557 */
5558 rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
5559 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
5560 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
5561 1, &rxq->rxq_desc_rseg, 0)) != 0) {
5562 aprint_error_dev(sc->sc_dev,
5563 "unable to allocate RX control data, error = %d\n",
5564 error);
5565 goto fail_0;
5566 }
5567
5568 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
5569 rxq->rxq_desc_rseg, rxq->rxq_desc_size,
5570 (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
5571 aprint_error_dev(sc->sc_dev,
5572 "unable to map RX control data, error = %d\n", error);
5573 goto fail_1;
5574 }
5575
5576 if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
5577 rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
5578 aprint_error_dev(sc->sc_dev,
5579 "unable to create RX control data DMA map, error = %d\n",
5580 error);
5581 goto fail_2;
5582 }
5583
5584 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
5585 rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
5586 aprint_error_dev(sc->sc_dev,
5587 "unable to load RX control data DMA map, error = %d\n",
5588 error);
5589 goto fail_3;
5590 }
5591
5592 return 0;
5593
5594 fail_3:
5595 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5596 fail_2:
5597 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5598 rxq->rxq_desc_size);
5599 fail_1:
5600 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5601 fail_0:
5602 return error;
5603 }
5604
5605 static void
5606 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5607 {
5608
5609 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
5610 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5611 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5612 rxq->rxq_desc_size);
5613 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5614 }
5615
5616
5617 static int
5618 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5619 {
5620 int i, error;
5621
5622 /* Create the transmit buffer DMA maps. */
5623 WM_TXQUEUELEN(txq) =
5624 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
5625 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
5626 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5627 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
5628 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
5629 &txq->txq_soft[i].txs_dmamap)) != 0) {
5630 aprint_error_dev(sc->sc_dev,
5631 "unable to create Tx DMA map %d, error = %d\n",
5632 i, error);
5633 goto fail;
5634 }
5635 }
5636
5637 return 0;
5638
5639 fail:
5640 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5641 if (txq->txq_soft[i].txs_dmamap != NULL)
5642 bus_dmamap_destroy(sc->sc_dmat,
5643 txq->txq_soft[i].txs_dmamap);
5644 }
5645 return error;
5646 }
5647
5648 static void
5649 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5650 {
5651 int i;
5652
5653 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5654 if (txq->txq_soft[i].txs_dmamap != NULL)
5655 bus_dmamap_destroy(sc->sc_dmat,
5656 txq->txq_soft[i].txs_dmamap);
5657 }
5658 }
5659
5660 static int
5661 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5662 {
5663 int i, error;
5664
5665 /* Create the receive buffer DMA maps. */
5666 for (i = 0; i < WM_NRXDESC; i++) {
5667 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
5668 MCLBYTES, 0, 0,
5669 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
5670 aprint_error_dev(sc->sc_dev,
5671 "unable to create Rx DMA map %d error = %d\n",
5672 i, error);
5673 goto fail;
5674 }
5675 rxq->rxq_soft[i].rxs_mbuf = NULL;
5676 }
5677
5678 return 0;
5679
5680 fail:
5681 for (i = 0; i < WM_NRXDESC; i++) {
5682 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5683 bus_dmamap_destroy(sc->sc_dmat,
5684 rxq->rxq_soft[i].rxs_dmamap);
5685 }
5686 return error;
5687 }
5688
5689 static void
5690 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5691 {
5692 int i;
5693
5694 for (i = 0; i < WM_NRXDESC; i++) {
5695 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5696 bus_dmamap_destroy(sc->sc_dmat,
5697 rxq->rxq_soft[i].rxs_dmamap);
5698 }
5699 }
5700
5701 /*
5702 * wm_alloc_txrx_queues:
5703 *	Allocate {tx,rx} descriptors and {tx,rx} buffers
5704 */
5705 static int
5706 wm_alloc_txrx_queues(struct wm_softc *sc)
5707 {
5708 int i, error, tx_done, rx_done;
5709
5710 sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
5711 KM_SLEEP);
5712 if (sc->sc_queue == NULL) {
5713 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
5714 error = ENOMEM;
5715 goto fail_0;
5716 }
5717
5718 /*
5719 * For transmission
5720 */
5721 error = 0;
5722 tx_done = 0;
5723 for (i = 0; i < sc->sc_nqueues; i++) {
5724 #ifdef WM_EVENT_COUNTERS
5725 int j;
5726 const char *xname;
5727 #endif
5728 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5729 txq->txq_sc = sc;
5730 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5731
5732 error = wm_alloc_tx_descs(sc, txq);
5733 if (error)
5734 break;
5735 error = wm_alloc_tx_buffer(sc, txq);
5736 if (error) {
5737 wm_free_tx_descs(sc, txq);
5738 break;
5739 }
5740 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
5741 if (txq->txq_interq == NULL) {
5742 wm_free_tx_descs(sc, txq);
5743 wm_free_tx_buffer(sc, txq);
5744 error = ENOMEM;
5745 break;
5746 }
5747
5748 #ifdef WM_EVENT_COUNTERS
5749 xname = device_xname(sc->sc_dev);
5750
5751 WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
5752 WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
5753 WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
5754 WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
5755 WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
5756
5757 WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
5758 WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
5759 WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
5760 WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
5761 WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
5762 WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
5763
5764 for (j = 0; j < WM_NTXSEGS; j++) {
5765 snprintf(txq->txq_txseg_evcnt_names[j],
5766 sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
5767 evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
5768 NULL, xname, txq->txq_txseg_evcnt_names[j]);
5769 }
5770
5771 WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
5772
5773 WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
5774 #endif /* WM_EVENT_COUNTERS */
5775
5776 tx_done++;
5777 }
5778 if (error)
5779 goto fail_1;
5780
5781 /*
5782 	 * For receive
5783 */
5784 error = 0;
5785 rx_done = 0;
5786 for (i = 0; i < sc->sc_nqueues; i++) {
5787 #ifdef WM_EVENT_COUNTERS
5788 const char *xname;
5789 #endif
5790 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5791 rxq->rxq_sc = sc;
5792 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5793
5794 error = wm_alloc_rx_descs(sc, rxq);
5795 if (error)
5796 break;
5797
5798 error = wm_alloc_rx_buffer(sc, rxq);
5799 if (error) {
5800 wm_free_rx_descs(sc, rxq);
5801 break;
5802 }
5803
5804 #ifdef WM_EVENT_COUNTERS
5805 xname = device_xname(sc->sc_dev);
5806
5807 WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
5808
5809 WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
5810 WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
5811 #endif /* WM_EVENT_COUNTERS */
5812
5813 rx_done++;
5814 }
5815 if (error)
5816 goto fail_2;
5817
5818 return 0;
5819
5820 fail_2:
5821 for (i = 0; i < rx_done; i++) {
5822 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5823 wm_free_rx_buffer(sc, rxq);
5824 wm_free_rx_descs(sc, rxq);
5825 if (rxq->rxq_lock)
5826 mutex_obj_free(rxq->rxq_lock);
5827 }
5828 fail_1:
5829 for (i = 0; i < tx_done; i++) {
5830 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5831 pcq_destroy(txq->txq_interq);
5832 wm_free_tx_buffer(sc, txq);
5833 wm_free_tx_descs(sc, txq);
5834 if (txq->txq_lock)
5835 mutex_obj_free(txq->txq_lock);
5836 }
5837
5838 kmem_free(sc->sc_queue,
5839 sizeof(struct wm_queue) * sc->sc_nqueues);
5840 fail_0:
5841 return error;
5842 }
5843
5844 /*
5845 * wm_free_txrx_queues:
5846 *	Free {tx,rx} descriptors and {tx,rx} buffers
5847 */
5848 static void
5849 wm_free_txrx_queues(struct wm_softc *sc)
5850 {
5851 int i;
5852
5853 for (i = 0; i < sc->sc_nqueues; i++) {
5854 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5855 wm_free_rx_buffer(sc, rxq);
5856 wm_free_rx_descs(sc, rxq);
5857 if (rxq->rxq_lock)
5858 mutex_obj_free(rxq->rxq_lock);
5859 }
5860
5861 for (i = 0; i < sc->sc_nqueues; i++) {
5862 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5863 wm_free_tx_buffer(sc, txq);
5864 wm_free_tx_descs(sc, txq);
5865 if (txq->txq_lock)
5866 mutex_obj_free(txq->txq_lock);
5867 }
5868
5869 kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
5870 }
5871
5872 static void
5873 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5874 {
5875
5876 KASSERT(mutex_owned(txq->txq_lock));
5877
5878 /* Initialize the transmit descriptor ring. */
5879 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
5880 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5881 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5882 txq->txq_free = WM_NTXDESC(txq);
5883 txq->txq_next = 0;
5884 }
5885
5886 static void
5887 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
5888 struct wm_txqueue *txq)
5889 {
5890
5891 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5892 device_xname(sc->sc_dev), __func__));
5893 KASSERT(mutex_owned(txq->txq_lock));
5894
5895 if (sc->sc_type < WM_T_82543) {
5896 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
5897 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
5898 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
5899 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
5900 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
5901 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
5902 } else {
5903 int qid = wmq->wmq_id;
5904
5905 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
5906 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
5907 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
5908 CSR_WRITE(sc, WMREG_TDH(qid), 0);
5909
5910 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5911 /*
5912 * Don't write TDT before TCTL.EN is set.
5913 			 * See the documentation.
5914 */
5915 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
5916 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
5917 | TXDCTL_WTHRESH(0));
5918 else {
5919 /* ITR / 4 */
5920 CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
5921 if (sc->sc_type >= WM_T_82540) {
5922 				/* Should be the same as TIDV. */
5923 CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
5924 }
5925
5926 CSR_WRITE(sc, WMREG_TDT(qid), 0);
5927 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
5928 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
5929 }
5930 }
5931 }
5932
5933 static void
5934 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5935 {
5936 int i;
5937
5938 KASSERT(mutex_owned(txq->txq_lock));
5939
5940 /* Initialize the transmit job descriptors. */
5941 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
5942 txq->txq_soft[i].txs_mbuf = NULL;
5943 txq->txq_sfree = WM_TXQUEUELEN(txq);
5944 txq->txq_snext = 0;
5945 txq->txq_sdirty = 0;
5946 }
5947
5948 static void
5949 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
5950 struct wm_txqueue *txq)
5951 {
5952
5953 KASSERT(mutex_owned(txq->txq_lock));
5954
5955 /*
5956 * Set up some register offsets that are different between
5957 * the i82542 and the i82543 and later chips.
5958 */
5959 if (sc->sc_type < WM_T_82543)
5960 txq->txq_tdt_reg = WMREG_OLD_TDT;
5961 else
5962 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
5963
5964 wm_init_tx_descs(sc, txq);
5965 wm_init_tx_regs(sc, wmq, txq);
5966 wm_init_tx_buffer(sc, txq);
5967 }
5968
5969 static void
5970 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
5971 struct wm_rxqueue *rxq)
5972 {
5973
5974 KASSERT(mutex_owned(rxq->rxq_lock));
5975
5976 /*
5977 * Initialize the receive descriptor and receive job
5978 * descriptor rings.
5979 */
5980 if (sc->sc_type < WM_T_82543) {
5981 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
5982 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
5983 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
5984 sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
5985 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
5986 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
5987 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
5988
5989 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
5990 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
5991 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
5992 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
5993 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
5994 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
5995 } else {
5996 int qid = wmq->wmq_id;
5997
5998 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
5999 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
6000 CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
6001
6002 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6003 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
6004 				panic("%s: MCLBYTES %d unsupported for 82575 or newer\n", __func__, MCLBYTES);
6005 CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
6006 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
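			/*
			 * The BSIZEPKT field of SRRCTL is expressed in
			 * units of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes,
			 * hence the panic above if MCLBYTES is not a
			 * multiple of that unit; e.g. with the usual
			 * 2048-byte MCLBYTES and an assumed shift of 10,
			 * the value written is 2 (2 KB buffers).
			 */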
6007 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
6008 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
6009 | RXDCTL_WTHRESH(1));
6010 CSR_WRITE(sc, WMREG_RDH(qid), 0);
6011 CSR_WRITE(sc, WMREG_RDT(qid), 0);
6012 } else {
6013 CSR_WRITE(sc, WMREG_RDH(qid), 0);
6014 CSR_WRITE(sc, WMREG_RDT(qid), 0);
6015 /* ITR / 4 */
6016 CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
6017 			/* MUST be the same as RDTR. */
6018 CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
6019 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
6020 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
6021 }
6022 }
6023 }
6024
6025 static int
6026 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6027 {
6028 struct wm_rxsoft *rxs;
6029 int error, i;
6030
6031 KASSERT(mutex_owned(rxq->rxq_lock));
6032
6033 for (i = 0; i < WM_NRXDESC; i++) {
6034 rxs = &rxq->rxq_soft[i];
6035 if (rxs->rxs_mbuf == NULL) {
6036 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
6037 log(LOG_ERR, "%s: unable to allocate or map "
6038 "rx buffer %d, error = %d\n",
6039 device_xname(sc->sc_dev), i, error);
6040 /*
6041 * XXX Should attempt to run with fewer receive
6042 * XXX buffers instead of just failing.
6043 */
6044 wm_rxdrain(rxq);
6045 return ENOMEM;
6046 }
6047 } else {
6048 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
6049 wm_init_rxdesc(rxq, i);
6050 /*
6051 			 * For 82575 and newer devices, the Rx descriptors
6052 			 * must be initialized after RCTL.EN is set in
6053 			 * wm_set_filter().
6054 */
6055 }
6056 }
6057 rxq->rxq_ptr = 0;
6058 rxq->rxq_discard = 0;
6059 WM_RXCHAIN_RESET(rxq);
6060
6061 return 0;
6062 }
6063
6064 static int
6065 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
6066 struct wm_rxqueue *rxq)
6067 {
6068
6069 KASSERT(mutex_owned(rxq->rxq_lock));
6070
6071 /*
6072 * Set up some register offsets that are different between
6073 * the i82542 and the i82543 and later chips.
6074 */
6075 if (sc->sc_type < WM_T_82543)
6076 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
6077 else
6078 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
6079
6080 wm_init_rx_regs(sc, wmq, rxq);
6081 return wm_init_rx_buffer(sc, rxq);
6082 }
6083
6084 /*
6085 * wm_init_txrx_queues:
6086 *	Initialize {tx,rx} descriptors and {tx,rx} buffers
6087 */
6088 static int
6089 wm_init_txrx_queues(struct wm_softc *sc)
6090 {
6091 int i, error = 0;
6092
6093 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6094 device_xname(sc->sc_dev), __func__));
6095
6096 for (i = 0; i < sc->sc_nqueues; i++) {
6097 struct wm_queue *wmq = &sc->sc_queue[i];
6098 struct wm_txqueue *txq = &wmq->wmq_txq;
6099 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6100
6101 mutex_enter(txq->txq_lock);
6102 wm_init_tx_queue(sc, wmq, txq);
6103 mutex_exit(txq->txq_lock);
6104
6105 mutex_enter(rxq->rxq_lock);
6106 error = wm_init_rx_queue(sc, wmq, rxq);
6107 mutex_exit(rxq->rxq_lock);
6108 if (error)
6109 break;
6110 }
6111
6112 return error;
6113 }
6114
6115 /*
6116 * wm_tx_offload:
6117 *
6118 * Set up TCP/IP checksumming parameters for the
6119 * specified packet.
6120 */
6121 static int
6122 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
6123 uint8_t *fieldsp)
6124 {
6125 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6126 struct mbuf *m0 = txs->txs_mbuf;
6127 struct livengood_tcpip_ctxdesc *t;
6128 uint32_t ipcs, tucs, cmd, cmdlen, seg;
6129 uint32_t ipcse;
6130 struct ether_header *eh;
6131 int offset, iphl;
6132 uint8_t fields;
6133
6134 /*
6135 * XXX It would be nice if the mbuf pkthdr had offset
6136 * fields for the protocol headers.
6137 */
6138
6139 eh = mtod(m0, struct ether_header *);
6140 switch (htons(eh->ether_type)) {
6141 case ETHERTYPE_IP:
6142 case ETHERTYPE_IPV6:
6143 offset = ETHER_HDR_LEN;
6144 break;
6145
6146 case ETHERTYPE_VLAN:
6147 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6148 break;
6149
6150 default:
6151 /*
6152 * Don't support this protocol or encapsulation.
6153 */
6154 *fieldsp = 0;
6155 *cmdp = 0;
6156 return 0;
6157 }
6158
6159 if ((m0->m_pkthdr.csum_flags &
6160 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
6161 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6162 } else {
6163 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6164 }
6165 ipcse = offset + iphl - 1;
6166
6167 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
6168 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
6169 seg = 0;
6170 fields = 0;
6171
6172 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6173 int hlen = offset + iphl;
6174 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6175
6176 if (__predict_false(m0->m_len <
6177 (hlen + sizeof(struct tcphdr)))) {
6178 /*
6179 * TCP/IP headers are not in the first mbuf; we need
6180 * to do this the slow and painful way. Let's just
6181 * hope this doesn't happen very often.
6182 */
6183 struct tcphdr th;
6184
6185 WM_Q_EVCNT_INCR(txq, txtsopain);
6186
6187 m_copydata(m0, hlen, sizeof(th), &th);
6188 if (v4) {
6189 struct ip ip;
6190
6191 m_copydata(m0, offset, sizeof(ip), &ip);
6192 ip.ip_len = 0;
6193 m_copyback(m0,
6194 offset + offsetof(struct ip, ip_len),
6195 sizeof(ip.ip_len), &ip.ip_len);
6196 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6197 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6198 } else {
6199 struct ip6_hdr ip6;
6200
6201 m_copydata(m0, offset, sizeof(ip6), &ip6);
6202 ip6.ip6_plen = 0;
6203 m_copyback(m0,
6204 offset + offsetof(struct ip6_hdr, ip6_plen),
6205 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6206 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6207 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6208 }
6209 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6210 sizeof(th.th_sum), &th.th_sum);
6211
6212 hlen += th.th_off << 2;
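			/*
			 * E.g. a TCP header with th_off == 5 adds
			 * 5 << 2 == 20 bytes, so for IPv4 without options
			 * hlen becomes 14 + 20 + 20 = 54.
			 */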
6213 } else {
6214 /*
6215 * TCP/IP headers are in the first mbuf; we can do
6216 * this the easy way.
6217 */
6218 struct tcphdr *th;
6219
6220 if (v4) {
6221 struct ip *ip =
6222 (void *)(mtod(m0, char *) + offset);
6223 th = (void *)(mtod(m0, char *) + hlen);
6224
6225 ip->ip_len = 0;
6226 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6227 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6228 } else {
6229 struct ip6_hdr *ip6 =
6230 (void *)(mtod(m0, char *) + offset);
6231 th = (void *)(mtod(m0, char *) + hlen);
6232
6233 ip6->ip6_plen = 0;
6234 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6235 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6236 }
6237 hlen += th->th_off << 2;
6238 }
6239
6240 if (v4) {
6241 WM_Q_EVCNT_INCR(txq, txtso);
6242 cmdlen |= WTX_TCPIP_CMD_IP;
6243 } else {
6244 WM_Q_EVCNT_INCR(txq, txtso6);
6245 ipcse = 0;
6246 }
6247 cmd |= WTX_TCPIP_CMD_TSE;
6248 cmdlen |= WTX_TCPIP_CMD_TSE |
6249 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
6250 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
6251 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
6252 }
6253
6254 /*
6255 * NOTE: Even if we're not using the IP or TCP/UDP checksum
6256 * offload feature, if we load the context descriptor, we
6257 * MUST provide valid values for IPCSS and TUCSS fields.
6258 */
6259
6260 ipcs = WTX_TCPIP_IPCSS(offset) |
6261 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
6262 WTX_TCPIP_IPCSE(ipcse);
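	/*
	 * Example, assuming a plain IPv4/TCP frame with a 20-byte IP
	 * header: offset = ETHER_HDR_LEN = 14, so IPCSS = 14,
	 * IPCSO = 14 + offsetof(struct ip, ip_sum) = 24, and
	 * IPCSE = 14 + 20 - 1 = 33.
	 */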
6263 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
6264 WM_Q_EVCNT_INCR(txq, txipsum);
6265 fields |= WTX_IXSM;
6266 }
6267
6268 offset += iphl;
6269
6270 if (m0->m_pkthdr.csum_flags &
6271 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
6272 WM_Q_EVCNT_INCR(txq, txtusum);
6273 fields |= WTX_TXSM;
6274 tucs = WTX_TCPIP_TUCSS(offset) |
6275 WTX_TCPIP_TUCSO(offset +
6276 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
6277 WTX_TCPIP_TUCSE(0) /* rest of packet */;
6278 } else if ((m0->m_pkthdr.csum_flags &
6279 (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
6280 WM_Q_EVCNT_INCR(txq, txtusum6);
6281 fields |= WTX_TXSM;
6282 tucs = WTX_TCPIP_TUCSS(offset) |
6283 WTX_TCPIP_TUCSO(offset +
6284 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
6285 WTX_TCPIP_TUCSE(0) /* rest of packet */;
6286 } else {
6287 /* Just initialize it to a valid TCP context. */
6288 tucs = WTX_TCPIP_TUCSS(offset) |
6289 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
6290 WTX_TCPIP_TUCSE(0) /* rest of packet */;
6291 }
6292
6293 /* Fill in the context descriptor. */
6294 t = (struct livengood_tcpip_ctxdesc *)
6295 &txq->txq_descs[txq->txq_next];
6296 t->tcpip_ipcs = htole32(ipcs);
6297 t->tcpip_tucs = htole32(tucs);
6298 t->tcpip_cmdlen = htole32(cmdlen);
6299 t->tcpip_seg = htole32(seg);
6300 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6301
6302 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6303 txs->txs_ndesc++;
6304
6305 *cmdp = cmd;
6306 *fieldsp = fields;
6307
6308 return 0;
6309 }
6310
6311 static inline int
6312 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
6313 {
6314 struct wm_softc *sc = ifp->if_softc;
6315 u_int cpuid = cpu_index(curcpu());
6316
6317 /*
6318 	 * Currently, a simple distribution strategy.
6319 	 * TODO:
6320 	 *	Distribute by flowid (RSS hash value).
6321 */
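	/*
	 * E.g. with sc_nqueues == 4 and sc_affinity_offset == 0,
	 * CPU index 5 selects Tx queue (5 + 0) % 4 == 1.
	 */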
6322 return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
6323 }
6324
6325 /*
6326 * wm_start: [ifnet interface function]
6327 *
6328 * Start packet transmission on the interface.
6329 */
6330 static void
6331 wm_start(struct ifnet *ifp)
6332 {
6333 struct wm_softc *sc = ifp->if_softc;
6334 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6335
6336 KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
6337
6338 /*
6339 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
6340 */
6341
6342 mutex_enter(txq->txq_lock);
6343 if (!txq->txq_stopping)
6344 wm_start_locked(ifp);
6345 mutex_exit(txq->txq_lock);
6346 }
6347
6348 static void
6349 wm_start_locked(struct ifnet *ifp)
6350 {
6351 struct wm_softc *sc = ifp->if_softc;
6352 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6353
6354 wm_send_common_locked(ifp, txq, false);
6355 }
6356
6357 static int
6358 wm_transmit(struct ifnet *ifp, struct mbuf *m)
6359 {
6360 int qid;
6361 struct wm_softc *sc = ifp->if_softc;
6362 struct wm_txqueue *txq;
6363
6364 qid = wm_select_txqueue(ifp, m);
6365 txq = &sc->sc_queue[qid].wmq_txq;
6366
6367 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
6368 m_freem(m);
6369 WM_Q_EVCNT_INCR(txq, txdrop);
6370 return ENOBUFS;
6371 }
6372
6373 /*
6374 * XXXX NOMPSAFE: ifp->if_data should be percpu.
6375 */
6376 ifp->if_obytes += m->m_pkthdr.len;
6377 if (m->m_flags & M_MCAST)
6378 ifp->if_omcasts++;
6379
6380 if (mutex_tryenter(txq->txq_lock)) {
6381 if (!txq->txq_stopping)
6382 wm_transmit_locked(ifp, txq);
6383 mutex_exit(txq->txq_lock);
6384 }
6385
6386 return 0;
6387 }
6388
6389 static void
6390 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
6391 {
6392
6393 wm_send_common_locked(ifp, txq, true);
6394 }
6395
6396 static void
6397 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
6398 bool is_transmit)
6399 {
6400 struct wm_softc *sc = ifp->if_softc;
6401 struct mbuf *m0;
6402 struct m_tag *mtag;
6403 struct wm_txsoft *txs;
6404 bus_dmamap_t dmamap;
6405 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
6406 bus_addr_t curaddr;
6407 bus_size_t seglen, curlen;
6408 uint32_t cksumcmd;
6409 uint8_t cksumfields;
6410
6411 KASSERT(mutex_owned(txq->txq_lock));
6412
6413 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6414 return;
6415
6416 /* Remember the previous number of free descriptors. */
6417 ofree = txq->txq_free;
6418
6419 /*
6420 * Loop through the send queue, setting up transmit descriptors
6421 * until we drain the queue, or use up all available transmit
6422 * descriptors.
6423 */
6424 for (;;) {
6425 m0 = NULL;
6426
6427 /* Get a work queue entry. */
6428 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6429 wm_txeof(sc, txq);
6430 if (txq->txq_sfree == 0) {
6431 DPRINTF(WM_DEBUG_TX,
6432 ("%s: TX: no free job descriptors\n",
6433 device_xname(sc->sc_dev)));
6434 WM_Q_EVCNT_INCR(txq, txsstall);
6435 break;
6436 }
6437 }
6438
6439 /* Grab a packet off the queue. */
6440 if (is_transmit)
6441 m0 = pcq_get(txq->txq_interq);
6442 else
6443 IFQ_DEQUEUE(&ifp->if_snd, m0);
6444 if (m0 == NULL)
6445 break;
6446
6447 DPRINTF(WM_DEBUG_TX,
6448 ("%s: TX: have packet to transmit: %p\n",
6449 device_xname(sc->sc_dev), m0));
6450
6451 txs = &txq->txq_soft[txq->txq_snext];
6452 dmamap = txs->txs_dmamap;
6453
6454 use_tso = (m0->m_pkthdr.csum_flags &
6455 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
6456
6457 /*
6458 * So says the Linux driver:
6459 * The controller does a simple calculation to make sure
6460 * there is enough room in the FIFO before initiating the
6461 * DMA for each buffer. The calc is:
6462 * 4 = ceil(buffer len / MSS)
6463 * To make sure we don't overrun the FIFO, adjust the max
6464 * buffer len if the MSS drops.
6465 */
6466 dmamap->dm_maxsegsz =
6467 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
6468 ? m0->m_pkthdr.segsz << 2
6469 : WTX_MAX_LEN;
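		/*
		 * E.g. with an MSS of 1460 this clamps segments to
		 * 1460 << 2 == 5840 bytes (assuming that is below
		 * WTX_MAX_LEN), so bus_dmamap_load_mbuf() below never
		 * produces a segment longer than four times the MSS,
		 * matching the controller's FIFO headroom calculation
		 * described above.
		 */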
6470
6471 /*
6472 * Load the DMA map. If this fails, the packet either
6473 * didn't fit in the allotted number of segments, or we
6474 * were short on resources. For the too-many-segments
6475 * case, we simply report an error and drop the packet,
6476 * since we can't sanely copy a jumbo packet to a single
6477 * buffer.
6478 */
6479 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6480 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6481 if (error) {
6482 if (error == EFBIG) {
6483 WM_Q_EVCNT_INCR(txq, txdrop);
6484 log(LOG_ERR, "%s: Tx packet consumes too many "
6485 "DMA segments, dropping...\n",
6486 device_xname(sc->sc_dev));
6487 wm_dump_mbuf_chain(sc, m0);
6488 m_freem(m0);
6489 continue;
6490 }
6491 /* Short on resources, just stop for now. */
6492 DPRINTF(WM_DEBUG_TX,
6493 ("%s: TX: dmamap load failed: %d\n",
6494 device_xname(sc->sc_dev), error));
6495 break;
6496 }
6497
6498 segs_needed = dmamap->dm_nsegs;
6499 if (use_tso) {
6500 /* For sentinel descriptor; see below. */
6501 segs_needed++;
6502 }
6503
6504 /*
6505 * Ensure we have enough descriptors free to describe
6506 * the packet. Note, we always reserve one descriptor
6507 * at the end of the ring due to the semantics of the
6508 * TDT register, plus one more in the event we need
6509 * to load offload context.
6510 */
6511 if (segs_needed > txq->txq_free - 2) {
6512 /*
6513 * Not enough free descriptors to transmit this
6514 * packet. We haven't committed anything yet,
6515 * so just unload the DMA map, put the packet
6516 			 * back on the queue, and punt. Notify the upper
6517 * layer that there are no more slots left.
6518 */
6519 DPRINTF(WM_DEBUG_TX,
6520 ("%s: TX: need %d (%d) descriptors, have %d\n",
6521 device_xname(sc->sc_dev), dmamap->dm_nsegs,
6522 segs_needed, txq->txq_free - 1));
6523 ifp->if_flags |= IFF_OACTIVE;
6524 bus_dmamap_unload(sc->sc_dmat, dmamap);
6525 WM_Q_EVCNT_INCR(txq, txdstall);
6526 break;
6527 }
6528
6529 /*
6530 * Check for 82547 Tx FIFO bug. We need to do this
6531 * once we know we can transmit the packet, since we
6532 * do some internal FIFO space accounting here.
6533 */
6534 if (sc->sc_type == WM_T_82547 &&
6535 wm_82547_txfifo_bugchk(sc, m0)) {
6536 DPRINTF(WM_DEBUG_TX,
6537 ("%s: TX: 82547 Tx FIFO bug detected\n",
6538 device_xname(sc->sc_dev)));
6539 ifp->if_flags |= IFF_OACTIVE;
6540 bus_dmamap_unload(sc->sc_dmat, dmamap);
6541 WM_Q_EVCNT_INCR(txq, txfifo_stall);
6542 break;
6543 }
6544
6545 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6546
6547 DPRINTF(WM_DEBUG_TX,
6548 ("%s: TX: packet has %d (%d) DMA segments\n",
6549 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6550
6551 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
6552
6553 /*
6554 * Store a pointer to the packet so that we can free it
6555 * later.
6556 *
6557 * Initially, we consider the number of descriptors the
6558 * packet uses the number of DMA segments. This may be
6559 * incremented by 1 if we do checksum offload (a descriptor
6560 * is used to set the checksum context).
6561 */
6562 txs->txs_mbuf = m0;
6563 txs->txs_firstdesc = txq->txq_next;
6564 txs->txs_ndesc = segs_needed;
6565
6566 /* Set up offload parameters for this packet. */
6567 if (m0->m_pkthdr.csum_flags &
6568 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6569 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6570 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6571 if (wm_tx_offload(sc, txs, &cksumcmd,
6572 &cksumfields) != 0) {
6573 /* Error message already displayed. */
6574 bus_dmamap_unload(sc->sc_dmat, dmamap);
6575 continue;
6576 }
6577 } else {
6578 cksumcmd = 0;
6579 cksumfields = 0;
6580 }
6581
6582 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
6583
6584 /* Sync the DMA map. */
6585 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6586 BUS_DMASYNC_PREWRITE);
6587
6588 /* Initialize the transmit descriptor. */
6589 for (nexttx = txq->txq_next, seg = 0;
6590 seg < dmamap->dm_nsegs; seg++) {
6591 for (seglen = dmamap->dm_segs[seg].ds_len,
6592 curaddr = dmamap->dm_segs[seg].ds_addr;
6593 seglen != 0;
6594 curaddr += curlen, seglen -= curlen,
6595 nexttx = WM_NEXTTX(txq, nexttx)) {
6596 curlen = seglen;
6597
6598 /*
6599 * So says the Linux driver:
6600 * Work around for premature descriptor
6601 * write-backs in TSO mode. Append a
6602 * 4-byte sentinel descriptor.
6603 */
6604 if (use_tso && seg == dmamap->dm_nsegs - 1 &&
6605 curlen > 8)
6606 curlen -= 4;
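				/*
				 * Shortening the last chunk by 4 bytes
				 * leaves a 4-byte remainder, so the loop
				 * emits one extra descriptor: the sentinel
				 * accounted for in segs_needed above.
				 */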
6607
6608 wm_set_dma_addr(
6609 &txq->txq_descs[nexttx].wtx_addr, curaddr);
6610 txq->txq_descs[nexttx].wtx_cmdlen
6611 = htole32(cksumcmd | curlen);
6612 txq->txq_descs[nexttx].wtx_fields.wtxu_status
6613 = 0;
6614 txq->txq_descs[nexttx].wtx_fields.wtxu_options
6615 = cksumfields;
6616 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6617 lasttx = nexttx;
6618
6619 DPRINTF(WM_DEBUG_TX,
6620 ("%s: TX: desc %d: low %#" PRIx64 ", "
6621 "len %#04zx\n",
6622 device_xname(sc->sc_dev), nexttx,
6623 (uint64_t)curaddr, curlen));
6624 }
6625 }
6626
6627 KASSERT(lasttx != -1);
6628
6629 /*
6630 * Set up the command byte on the last descriptor of
6631 * the packet. If we're in the interrupt delay window,
6632 * delay the interrupt.
6633 */
6634 txq->txq_descs[lasttx].wtx_cmdlen |=
6635 htole32(WTX_CMD_EOP | WTX_CMD_RS);
6636
6637 /*
6638 * If VLANs are enabled and the packet has a VLAN tag, set
6639 * up the descriptor to encapsulate the packet for us.
6640 *
6641 * This is only valid on the last descriptor of the packet.
6642 */
6643 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6644 txq->txq_descs[lasttx].wtx_cmdlen |=
6645 htole32(WTX_CMD_VLE);
6646 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
6647 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6648 }
6649
6650 txs->txs_lastdesc = lasttx;
6651
6652 DPRINTF(WM_DEBUG_TX,
6653 ("%s: TX: desc %d: cmdlen 0x%08x\n",
6654 device_xname(sc->sc_dev),
6655 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6656
6657 /* Sync the descriptors we're using. */
6658 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6659 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6660
6661 /* Give the packet to the chip. */
6662 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6663
6664 DPRINTF(WM_DEBUG_TX,
6665 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6666
6667 DPRINTF(WM_DEBUG_TX,
6668 ("%s: TX: finished transmitting packet, job %d\n",
6669 device_xname(sc->sc_dev), txq->txq_snext));
6670
6671 /* Advance the tx pointer. */
6672 txq->txq_free -= txs->txs_ndesc;
6673 txq->txq_next = nexttx;
6674
6675 txq->txq_sfree--;
6676 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6677
6678 /* Pass the packet to any BPF listeners. */
6679 bpf_mtap(ifp, m0);
6680 }
6681
6682 if (m0 != NULL) {
6683 ifp->if_flags |= IFF_OACTIVE;
6684 WM_Q_EVCNT_INCR(txq, txdrop);
6685 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
6686 __func__));
6687 m_freem(m0);
6688 }
6689
6690 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6691 /* No more slots; notify upper layer. */
6692 ifp->if_flags |= IFF_OACTIVE;
6693 }
6694
6695 if (txq->txq_free != ofree) {
6696 /* Set a watchdog timer in case the chip flakes out. */
6697 ifp->if_timer = 5;
6698 }
6699 }
6700
6701 /*
6702 * wm_nq_tx_offload:
6703 *
6704 * Set up TCP/IP checksumming parameters for the
6705 * specified packet, for NEWQUEUE devices
6706 */
6707 static int
6708 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
6709 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
6710 {
6711 struct mbuf *m0 = txs->txs_mbuf;
6712 struct m_tag *mtag;
6713 uint32_t vl_len, mssidx, cmdc;
6714 struct ether_header *eh;
6715 int offset, iphl;
6716
6717 /*
6718 * XXX It would be nice if the mbuf pkthdr had offset
6719 * fields for the protocol headers.
6720 */
6721 *cmdlenp = 0;
6722 *fieldsp = 0;
6723
6724 eh = mtod(m0, struct ether_header *);
6725 switch (htons(eh->ether_type)) {
6726 case ETHERTYPE_IP:
6727 case ETHERTYPE_IPV6:
6728 offset = ETHER_HDR_LEN;
6729 break;
6730
6731 case ETHERTYPE_VLAN:
6732 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6733 break;
6734
6735 default:
6736 /* Don't support this protocol or encapsulation. */
6737 *do_csum = false;
6738 return 0;
6739 }
6740 *do_csum = true;
6741 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
6742 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
6743
6744 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
6745 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
6746
6747 if ((m0->m_pkthdr.csum_flags &
6748 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
6749 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6750 } else {
6751 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6752 }
6753 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
6754 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
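	/*
	 * For example, a plain IPv4/TCP frame at this point has
	 * MACLEN = 14 (ETHER_HDR_LEN) and IPLEN = 20 packed into
	 * vl_len; the VLAN field may still be added just below.
	 */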
6755
6756 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6757 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
6758 << NQTXC_VLLEN_VLAN_SHIFT);
6759 *cmdlenp |= NQTX_CMD_VLE;
6760 }
6761
6762 mssidx = 0;
6763
6764 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6765 int hlen = offset + iphl;
6766 int tcp_hlen;
6767 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6768
6769 if (__predict_false(m0->m_len <
6770 (hlen + sizeof(struct tcphdr)))) {
6771 /*
6772 * TCP/IP headers are not in the first mbuf; we need
6773 * to do this the slow and painful way. Let's just
6774 * hope this doesn't happen very often.
6775 */
6776 struct tcphdr th;
6777
6778 WM_Q_EVCNT_INCR(txq, txtsopain);
6779
6780 m_copydata(m0, hlen, sizeof(th), &th);
6781 if (v4) {
6782 struct ip ip;
6783
6784 m_copydata(m0, offset, sizeof(ip), &ip);
6785 ip.ip_len = 0;
6786 m_copyback(m0,
6787 offset + offsetof(struct ip, ip_len),
6788 sizeof(ip.ip_len), &ip.ip_len);
6789 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6790 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6791 } else {
6792 struct ip6_hdr ip6;
6793
6794 m_copydata(m0, offset, sizeof(ip6), &ip6);
6795 ip6.ip6_plen = 0;
6796 m_copyback(m0,
6797 offset + offsetof(struct ip6_hdr, ip6_plen),
6798 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6799 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6800 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6801 }
6802 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6803 sizeof(th.th_sum), &th.th_sum);
6804
6805 tcp_hlen = th.th_off << 2;
6806 } else {
6807 /*
6808 * TCP/IP headers are in the first mbuf; we can do
6809 * this the easy way.
6810 */
6811 struct tcphdr *th;
6812
6813 if (v4) {
6814 struct ip *ip =
6815 (void *)(mtod(m0, char *) + offset);
6816 th = (void *)(mtod(m0, char *) + hlen);
6817
6818 ip->ip_len = 0;
6819 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6820 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6821 } else {
6822 struct ip6_hdr *ip6 =
6823 (void *)(mtod(m0, char *) + offset);
6824 th = (void *)(mtod(m0, char *) + hlen);
6825
6826 ip6->ip6_plen = 0;
6827 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6828 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6829 }
6830 tcp_hlen = th->th_off << 2;
6831 }
6832 hlen += tcp_hlen;
6833 *cmdlenp |= NQTX_CMD_TSE;
6834
6835 if (v4) {
6836 WM_Q_EVCNT_INCR(txq, txtso);
6837 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
6838 } else {
6839 WM_Q_EVCNT_INCR(txq, txtso6);
6840 *fieldsp |= NQTXD_FIELDS_TUXSM;
6841 }
6842 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
6843 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6844 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
6845 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
6846 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
6847 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
6848 } else {
6849 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
6850 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6851 }
6852
6853 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
6854 *fieldsp |= NQTXD_FIELDS_IXSM;
6855 cmdc |= NQTXC_CMD_IP4;
6856 }
6857
6858 if (m0->m_pkthdr.csum_flags &
6859 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6860 WM_Q_EVCNT_INCR(txq, txtusum);
6861 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6862 cmdc |= NQTXC_CMD_TCP;
6863 } else {
6864 cmdc |= NQTXC_CMD_UDP;
6865 }
6866 cmdc |= NQTXC_CMD_IP4;
6867 *fieldsp |= NQTXD_FIELDS_TUXSM;
6868 }
6869 if (m0->m_pkthdr.csum_flags &
6870 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6871 WM_Q_EVCNT_INCR(txq, txtusum6);
6872 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6873 cmdc |= NQTXC_CMD_TCP;
6874 } else {
6875 cmdc |= NQTXC_CMD_UDP;
6876 }
6877 cmdc |= NQTXC_CMD_IP6;
6878 *fieldsp |= NQTXD_FIELDS_TUXSM;
6879 }
6880
6881 /* Fill in the context descriptor. */
6882 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
6883 htole32(vl_len);
6884 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
6885 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
6886 htole32(cmdc);
6887 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
6888 htole32(mssidx);
6889 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6890 DPRINTF(WM_DEBUG_TX,
6891 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
6892 txq->txq_next, 0, vl_len));
6893 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
6894 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6895 txs->txs_ndesc++;
6896 return 0;
6897 }
6898
6899 /*
6900 * wm_nq_start: [ifnet interface function]
6901 *
6902 * Start packet transmission on the interface for NEWQUEUE devices
6903 */
6904 static void
6905 wm_nq_start(struct ifnet *ifp)
6906 {
6907 struct wm_softc *sc = ifp->if_softc;
6908 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6909
6910 KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
6911
6912 /*
6913 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
6914 */
6915
6916 mutex_enter(txq->txq_lock);
6917 if (!txq->txq_stopping)
6918 wm_nq_start_locked(ifp);
6919 mutex_exit(txq->txq_lock);
6920 }
6921
6922 static void
6923 wm_nq_start_locked(struct ifnet *ifp)
6924 {
6925 struct wm_softc *sc = ifp->if_softc;
6926 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6927
6928 wm_nq_send_common_locked(ifp, txq, false);
6929 }
6930
6931 static int
6932 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
6933 {
6934 int qid;
6935 struct wm_softc *sc = ifp->if_softc;
6936 struct wm_txqueue *txq;
6937
6938 qid = wm_select_txqueue(ifp, m);
6939 txq = &sc->sc_queue[qid].wmq_txq;
6940
6941 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
6942 m_freem(m);
6943 WM_Q_EVCNT_INCR(txq, txdrop);
6944 return ENOBUFS;
6945 }
6946
6947 /*
6948 * XXXX NOMPSAFE: ifp->if_data should be percpu.
6949 */
6950 ifp->if_obytes += m->m_pkthdr.len;
6951 if (m->m_flags & M_MCAST)
6952 ifp->if_omcasts++;
6953
6954 if (mutex_tryenter(txq->txq_lock)) {
6955 if (!txq->txq_stopping)
6956 wm_nq_transmit_locked(ifp, txq);
6957 mutex_exit(txq->txq_lock);
6958 }
6959
6960 return 0;
6961 }
6962
6963 static void
6964 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
6965 {
6966
6967 wm_nq_send_common_locked(ifp, txq, true);
6968 }
6969
6970 static void
6971 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
6972 bool is_transmit)
6973 {
6974 struct wm_softc *sc = ifp->if_softc;
6975 struct mbuf *m0;
6976 struct m_tag *mtag;
6977 struct wm_txsoft *txs;
6978 bus_dmamap_t dmamap;
6979 int error, nexttx, lasttx = -1, seg, segs_needed;
6980 bool do_csum, sent;
6981
6982 KASSERT(mutex_owned(txq->txq_lock));
6983
6984 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6985 return;
6986 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
6987 return;
6988
6989 sent = false;
6990
6991 /*
6992 * Loop through the send queue, setting up transmit descriptors
6993 * until we drain the queue, or use up all available transmit
6994 * descriptors.
6995 */
6996 for (;;) {
6997 m0 = NULL;
6998
6999 /* Get a work queue entry. */
7000 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
7001 wm_txeof(sc, txq);
7002 if (txq->txq_sfree == 0) {
7003 DPRINTF(WM_DEBUG_TX,
7004 ("%s: TX: no free job descriptors\n",
7005 device_xname(sc->sc_dev)));
7006 WM_Q_EVCNT_INCR(txq, txsstall);
7007 break;
7008 }
7009 }
7010
7011 /* Grab a packet off the queue. */
7012 if (is_transmit)
7013 m0 = pcq_get(txq->txq_interq);
7014 else
7015 IFQ_DEQUEUE(&ifp->if_snd, m0);
7016 if (m0 == NULL)
7017 break;
7018
7019 DPRINTF(WM_DEBUG_TX,
7020 ("%s: TX: have packet to transmit: %p\n",
7021 device_xname(sc->sc_dev), m0));
7022
7023 txs = &txq->txq_soft[txq->txq_snext];
7024 dmamap = txs->txs_dmamap;
7025
7026 /*
7027 * Load the DMA map. If this fails, the packet either
7028 * didn't fit in the allotted number of segments, or we
7029 * were short on resources. For the too-many-segments
7030 * case, we simply report an error and drop the packet,
7031 * since we can't sanely copy a jumbo packet to a single
7032 * buffer.
7033 */
7034 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
7035 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
7036 if (error) {
7037 if (error == EFBIG) {
7038 WM_Q_EVCNT_INCR(txq, txdrop);
7039 log(LOG_ERR, "%s: Tx packet consumes too many "
7040 "DMA segments, dropping...\n",
7041 device_xname(sc->sc_dev));
7042 wm_dump_mbuf_chain(sc, m0);
7043 m_freem(m0);
7044 continue;
7045 }
7046 /* Short on resources, just stop for now. */
7047 DPRINTF(WM_DEBUG_TX,
7048 ("%s: TX: dmamap load failed: %d\n",
7049 device_xname(sc->sc_dev), error));
7050 break;
7051 }
7052
7053 segs_needed = dmamap->dm_nsegs;
7054
7055 /*
7056 * Ensure we have enough descriptors free to describe
7057 * the packet. Note, we always reserve one descriptor
7058 * at the end of the ring due to the semantics of the
7059 * TDT register, plus one more in the event we need
7060 * to load offload context.
7061 */
7062 if (segs_needed > txq->txq_free - 2) {
7063 /*
7064 * Not enough free descriptors to transmit this
7065 * packet. We haven't committed anything yet,
7066 * so just unload the DMA map, put the packet
7067 			 * back on the queue, and punt. Notify the upper
7068 * layer that there are no more slots left.
7069 */
7070 DPRINTF(WM_DEBUG_TX,
7071 ("%s: TX: need %d (%d) descriptors, have %d\n",
7072 device_xname(sc->sc_dev), dmamap->dm_nsegs,
7073 segs_needed, txq->txq_free - 1));
7074 txq->txq_flags |= WM_TXQ_NO_SPACE;
7075 bus_dmamap_unload(sc->sc_dmat, dmamap);
7076 WM_Q_EVCNT_INCR(txq, txdstall);
7077 break;
7078 }
7079
7080 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
7081
7082 DPRINTF(WM_DEBUG_TX,
7083 ("%s: TX: packet has %d (%d) DMA segments\n",
7084 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
7085
7086 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
7087
7088 /*
7089 * Store a pointer to the packet so that we can free it
7090 * later.
7091 *
7092 * Initially, we consider the number of descriptors the
7093 * packet uses the number of DMA segments. This may be
7094 * incremented by 1 if we do checksum offload (a descriptor
7095 * is used to set the checksum context).
7096 */
7097 txs->txs_mbuf = m0;
7098 txs->txs_firstdesc = txq->txq_next;
7099 txs->txs_ndesc = segs_needed;
7100
7101 /* Set up offload parameters for this packet. */
7102 uint32_t cmdlen, fields, dcmdlen;
7103 if (m0->m_pkthdr.csum_flags &
7104 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
7105 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7106 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
7107 if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
7108 &do_csum) != 0) {
7109 /* Error message already displayed. */
7110 bus_dmamap_unload(sc->sc_dmat, dmamap);
7111 continue;
7112 }
7113 } else {
7114 do_csum = false;
7115 cmdlen = 0;
7116 fields = 0;
7117 }
7118
7119 /* Sync the DMA map. */
7120 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
7121 BUS_DMASYNC_PREWRITE);
7122
7123 /* Initialize the first transmit descriptor. */
7124 nexttx = txq->txq_next;
7125 if (!do_csum) {
7126 /* setup a legacy descriptor */
7127 wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
7128 dmamap->dm_segs[0].ds_addr);
7129 txq->txq_descs[nexttx].wtx_cmdlen =
7130 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
7131 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
7132 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
7133 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
7134 NULL) {
7135 txq->txq_descs[nexttx].wtx_cmdlen |=
7136 htole32(WTX_CMD_VLE);
7137 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
7138 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
7139 } else {
7140 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
7141 }
7142 dcmdlen = 0;
7143 } else {
7144 /* setup an advanced data descriptor */
7145 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7146 htole64(dmamap->dm_segs[0].ds_addr);
7147 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
7148 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7149 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
7150 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
7151 htole32(fields);
7152 DPRINTF(WM_DEBUG_TX,
7153 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
7154 device_xname(sc->sc_dev), nexttx,
7155 (uint64_t)dmamap->dm_segs[0].ds_addr));
7156 DPRINTF(WM_DEBUG_TX,
7157 ("\t 0x%08x%08x\n", fields,
7158 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
7159 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
7160 }
7161
7162 lasttx = nexttx;
7163 nexttx = WM_NEXTTX(txq, nexttx);
7164 /*
7165 		 * Fill in the next descriptors. The legacy and advanced
7166 		 * formats are the same here.
7167 */
7168 for (seg = 1; seg < dmamap->dm_nsegs;
7169 seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
7170 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7171 htole64(dmamap->dm_segs[seg].ds_addr);
7172 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7173 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
7174 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
7175 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
7176 lasttx = nexttx;
7177
7178 DPRINTF(WM_DEBUG_TX,
7179 ("%s: TX: desc %d: %#" PRIx64 ", "
7180 "len %#04zx\n",
7181 device_xname(sc->sc_dev), nexttx,
7182 (uint64_t)dmamap->dm_segs[seg].ds_addr,
7183 dmamap->dm_segs[seg].ds_len));
7184 }
7185
7186 KASSERT(lasttx != -1);
7187
7188 /*
7189 * Set up the command byte on the last descriptor of
7190 * the packet. If we're in the interrupt delay window,
7191 * delay the interrupt.
7192 */
7193 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
7194 (NQTX_CMD_EOP | NQTX_CMD_RS));
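		/*
		 * The KASSERT above documents that EOP and RS occupy the
		 * same bit positions in the legacy and advanced descriptor
		 * layouts, so the legacy view of the ring can set them for
		 * either format.
		 */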
7195 txq->txq_descs[lasttx].wtx_cmdlen |=
7196 htole32(WTX_CMD_EOP | WTX_CMD_RS);
7197
7198 txs->txs_lastdesc = lasttx;
7199
7200 DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
7201 device_xname(sc->sc_dev),
7202 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
7203
7204 /* Sync the descriptors we're using. */
7205 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
7206 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7207
7208 /* Give the packet to the chip. */
7209 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
7210 sent = true;
7211
7212 DPRINTF(WM_DEBUG_TX,
7213 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
7214
7215 DPRINTF(WM_DEBUG_TX,
7216 ("%s: TX: finished transmitting packet, job %d\n",
7217 device_xname(sc->sc_dev), txq->txq_snext));
7218
7219 /* Advance the tx pointer. */
7220 txq->txq_free -= txs->txs_ndesc;
7221 txq->txq_next = nexttx;
7222
7223 txq->txq_sfree--;
7224 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
7225
7226 /* Pass the packet to any BPF listeners. */
7227 bpf_mtap(ifp, m0);
7228 }
7229
7230 if (m0 != NULL) {
7231 txq->txq_flags |= WM_TXQ_NO_SPACE;
7232 WM_Q_EVCNT_INCR(txq, txdrop);
7233 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
7234 __func__));
7235 m_freem(m0);
7236 }
7237
7238 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
7239 /* No more slots; notify upper layer. */
7240 txq->txq_flags |= WM_TXQ_NO_SPACE;
7241 }
7242
7243 if (sent) {
7244 /* Set a watchdog timer in case the chip flakes out. */
7245 ifp->if_timer = 5;
7246 }
7247 }
7248
7249 static void
7250 wm_deferred_start(struct ifnet *ifp)
7251 {
7252 struct wm_softc *sc = ifp->if_softc;
7253 int qid = 0;
7254
7255 /*
7256 	 * Try to transmit on all Tx queues. It might be better to pass
7257 	 * a specific txq and transmit only on that queue.
7258 */
7259 restart:
7260 WM_CORE_LOCK(sc);
7261 if (sc->sc_core_stopping)
7262 goto out;
7263
7264 for (; qid < sc->sc_nqueues; qid++) {
7265 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
7266
7267 if (!mutex_tryenter(txq->txq_lock))
7268 continue;
7269
7270 if (txq->txq_stopping) {
7271 mutex_exit(txq->txq_lock);
7272 continue;
7273 }
7274 WM_CORE_UNLOCK(sc);
7275
7276 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7277 			/* XXX needed for ALTQ */
7278 if (qid == 0)
7279 wm_nq_start_locked(ifp);
7280 wm_nq_transmit_locked(ifp, txq);
7281 } else {
7282 			/* XXX needed for ALTQ */
7283 if (qid == 0)
7284 wm_start_locked(ifp);
7285 wm_transmit_locked(ifp, txq);
7286 }
7287 mutex_exit(txq->txq_lock);
7288
7289 qid++;
7290 goto restart;
7291 }
7292 out:
7293 WM_CORE_UNLOCK(sc);
7294 }
7295
7296 /* Interrupt */
7297
7298 /*
7299 * wm_txeof:
7300 *
7301 * Helper; handle transmit interrupts.
7302 */
7303 static int
7304 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
7305 {
7306 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7307 struct wm_txsoft *txs;
7308 bool processed = false;
7309 int count = 0;
7310 int i;
7311 uint8_t status;
7312
7313 KASSERT(mutex_owned(txq->txq_lock));
7314
7315 if (txq->txq_stopping)
7316 return 0;
7317
7318 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7319 txq->txq_flags &= ~WM_TXQ_NO_SPACE;
7320 else
7321 ifp->if_flags &= ~IFF_OACTIVE;
7322
7323 /*
7324 * Go through the Tx list and free mbufs for those
7325 * frames which have been transmitted.
7326 */
7327 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
7328 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
7329 txs = &txq->txq_soft[i];
7330
7331 DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
7332 device_xname(sc->sc_dev), i));
7333
7334 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
7335 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
7336
7337 status =
7338 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
7339 if ((status & WTX_ST_DD) == 0) {
7340 wm_cdtxsync(txq, txs->txs_lastdesc, 1,
7341 BUS_DMASYNC_PREREAD);
7342 break;
7343 }
7344
7345 processed = true;
7346 count++;
7347 DPRINTF(WM_DEBUG_TX,
7348 ("%s: TX: job %d done: descs %d..%d\n",
7349 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
7350 txs->txs_lastdesc));
7351
7352 /*
7353 * XXX We should probably be using the statistics
7354 * XXX registers, but I don't know if they exist
7355 * XXX on chips before the i82544.
7356 */
7357
7358 #ifdef WM_EVENT_COUNTERS
7359 if (status & WTX_ST_TU)
7360 WM_Q_EVCNT_INCR(txq, tu);
7361 #endif /* WM_EVENT_COUNTERS */
7362
7363 if (status & (WTX_ST_EC | WTX_ST_LC)) {
7364 ifp->if_oerrors++;
7365 if (status & WTX_ST_LC)
7366 log(LOG_WARNING, "%s: late collision\n",
7367 device_xname(sc->sc_dev));
7368 else if (status & WTX_ST_EC) {
7369 ifp->if_collisions += 16;
7370 log(LOG_WARNING, "%s: excessive collisions\n",
7371 device_xname(sc->sc_dev));
7372 }
7373 } else
7374 ifp->if_opackets++;
7375
7376 txq->txq_free += txs->txs_ndesc;
7377 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
7378 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
7379 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
7380 m_freem(txs->txs_mbuf);
7381 txs->txs_mbuf = NULL;
7382 }
7383
7384 /* Update the dirty transmit buffer pointer. */
7385 txq->txq_sdirty = i;
7386 DPRINTF(WM_DEBUG_TX,
7387 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
7388
7389 if (count != 0)
7390 rnd_add_uint32(&sc->rnd_source, count);
7391
7392 /*
7393 * If there are no more pending transmissions, cancel the watchdog
7394 * timer.
7395 */
7396 if (txq->txq_sfree == WM_TXQUEUELEN(txq))
7397 ifp->if_timer = 0;
7398
7399 return processed;
7400 }
7401
7402 /*
7403 * wm_rxeof:
7404 *
7405 * Helper; handle receive interrupts.
7406 */
7407 static void
7408 wm_rxeof(struct wm_rxqueue *rxq)
7409 {
7410 struct wm_softc *sc = rxq->rxq_sc;
7411 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7412 struct wm_rxsoft *rxs;
7413 struct mbuf *m;
7414 int i, len;
7415 int count = 0;
7416 uint8_t status, errors;
7417 uint16_t vlantag;
7418
7419 KASSERT(mutex_owned(rxq->rxq_lock));
7420
7421 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
7422 rxs = &rxq->rxq_soft[i];
7423
7424 DPRINTF(WM_DEBUG_RX,
7425 ("%s: RX: checking descriptor %d\n",
7426 device_xname(sc->sc_dev), i));
7427
		wm_cdrxsync(rxq, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
7429
7430 status = rxq->rxq_descs[i].wrx_status;
7431 errors = rxq->rxq_descs[i].wrx_errors;
7432 len = le16toh(rxq->rxq_descs[i].wrx_len);
7433 vlantag = rxq->rxq_descs[i].wrx_special;
7434
7435 if ((status & WRX_ST_DD) == 0) {
7436 /* We have processed all of the receive descriptors. */
7437 wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
7438 break;
7439 }
7440
7441 count++;
7442 if (__predict_false(rxq->rxq_discard)) {
7443 DPRINTF(WM_DEBUG_RX,
7444 ("%s: RX: discarding contents of descriptor %d\n",
7445 device_xname(sc->sc_dev), i));
7446 wm_init_rxdesc(rxq, i);
7447 if (status & WRX_ST_EOP) {
7448 /* Reset our state. */
7449 DPRINTF(WM_DEBUG_RX,
7450 ("%s: RX: resetting rxdiscard -> 0\n",
7451 device_xname(sc->sc_dev)));
7452 rxq->rxq_discard = 0;
7453 }
7454 continue;
7455 }
7456
7457 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7458 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
7459
7460 m = rxs->rxs_mbuf;
7461
7462 /*
7463 * Add a new receive buffer to the ring, unless of
7464 * course the length is zero. Treat the latter as a
7465 * failed mapping.
7466 */
7467 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
7468 /*
7469 * Failed, throw away what we've done so
7470 * far, and discard the rest of the packet.
7471 */
7472 ifp->if_ierrors++;
7473 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7474 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
7475 wm_init_rxdesc(rxq, i);
7476 if ((status & WRX_ST_EOP) == 0)
7477 rxq->rxq_discard = 1;
7478 if (rxq->rxq_head != NULL)
7479 m_freem(rxq->rxq_head);
7480 WM_RXCHAIN_RESET(rxq);
7481 DPRINTF(WM_DEBUG_RX,
7482 ("%s: RX: Rx buffer allocation failed, "
7483 "dropping packet%s\n", device_xname(sc->sc_dev),
7484 rxq->rxq_discard ? " (discard)" : ""));
7485 continue;
7486 }
7487
7488 m->m_len = len;
7489 rxq->rxq_len += len;
7490 DPRINTF(WM_DEBUG_RX,
7491 ("%s: RX: buffer at %p len %d\n",
7492 device_xname(sc->sc_dev), m->m_data, len));
7493
7494 /* If this is not the end of the packet, keep looking. */
7495 if ((status & WRX_ST_EOP) == 0) {
7496 WM_RXCHAIN_LINK(rxq, m);
7497 DPRINTF(WM_DEBUG_RX,
7498 ("%s: RX: not yet EOP, rxlen -> %d\n",
7499 device_xname(sc->sc_dev), rxq->rxq_len));
7500 continue;
7501 }
7502
7503 /*
		 * Okay, we have the entire packet now. The chip is
		 * configured to include the FCS except on the I350, I354
		 * and I21[01] (not all chips can be configured to strip
		 * it), so we need to trim it. We may also need to adjust
		 * the length of the previous mbuf in the chain if the
		 * current mbuf is too short. Due to an erratum, the
		 * RCTL_SECRC bit in the RCTL register is always set on
		 * the I350, so the hardware strips the FCS and we must
		 * not trim it again.
7512 */
7513 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
7514 && (sc->sc_type != WM_T_I210)
7515 && (sc->sc_type != WM_T_I211)) {
7516 if (m->m_len < ETHER_CRC_LEN) {
7517 rxq->rxq_tail->m_len
7518 -= (ETHER_CRC_LEN - m->m_len);
7519 m->m_len = 0;
7520 } else
7521 m->m_len -= ETHER_CRC_LEN;
7522 len = rxq->rxq_len - ETHER_CRC_LEN;
7523 } else
7524 len = rxq->rxq_len;
7525
7526 WM_RXCHAIN_LINK(rxq, m);
7527
7528 *rxq->rxq_tailp = NULL;
7529 m = rxq->rxq_head;
7530
7531 WM_RXCHAIN_RESET(rxq);
7532
7533 DPRINTF(WM_DEBUG_RX,
7534 ("%s: RX: have entire packet, len -> %d\n",
7535 device_xname(sc->sc_dev), len));
7536
7537 /* If an error occurred, update stats and drop the packet. */
7538 if (errors &
7539 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
7540 if (errors & WRX_ER_SE)
7541 log(LOG_WARNING, "%s: symbol error\n",
7542 device_xname(sc->sc_dev));
7543 else if (errors & WRX_ER_SEQ)
7544 log(LOG_WARNING, "%s: receive sequence error\n",
7545 device_xname(sc->sc_dev));
7546 else if (errors & WRX_ER_CE)
7547 log(LOG_WARNING, "%s: CRC error\n",
7548 device_xname(sc->sc_dev));
7549 m_freem(m);
7550 continue;
7551 }
7552
7553 /* No errors. Receive the packet. */
7554 m_set_rcvif(m, ifp);
7555 m->m_pkthdr.len = len;
7556
7557 /*
7558 * If VLANs are enabled, VLAN packets have been unwrapped
7559 * for us. Associate the tag with the packet.
7560 */
		/* XXX should check for i350 and i354 */
7562 if ((status & WRX_ST_VP) != 0) {
7563 VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
7564 }
7565
7566 /* Set up checksum info for this packet. */
7567 if ((status & WRX_ST_IXSM) == 0) {
7568 if (status & WRX_ST_IPCS) {
7569 WM_Q_EVCNT_INCR(rxq, rxipsum);
7570 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
7571 if (errors & WRX_ER_IPE)
7572 m->m_pkthdr.csum_flags |=
7573 M_CSUM_IPv4_BAD;
7574 }
7575 if (status & WRX_ST_TCPCS) {
7576 /*
7577 * Note: we don't know if this was TCP or UDP,
7578 * so we just set both bits, and expect the
7579 * upper layers to deal.
7580 */
7581 WM_Q_EVCNT_INCR(rxq, rxtusum);
7582 m->m_pkthdr.csum_flags |=
7583 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7584 M_CSUM_TCPv6 | M_CSUM_UDPv6;
7585 if (errors & WRX_ER_TCPE)
7586 m->m_pkthdr.csum_flags |=
7587 M_CSUM_TCP_UDP_BAD;
7588 }
7589 }
7590
7591 mutex_exit(rxq->rxq_lock);
7592
7593 /* Pass it on. */
7594 if_percpuq_enqueue(sc->sc_ipq, m);
7595
7596 mutex_enter(rxq->rxq_lock);
7597
7598 if (rxq->rxq_stopping)
7599 break;
7600 }
7601
7602 /* Update the receive pointer. */
7603 rxq->rxq_ptr = i;
7604 if (count != 0)
7605 rnd_add_uint32(&sc->rnd_source, count);
7606
7607 DPRINTF(WM_DEBUG_RX,
7608 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
7609 }
7610
7611 /*
7612 * wm_linkintr_gmii:
7613 *
7614 * Helper; handle link interrupts for GMII.
7615 */
7616 static void
7617 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
7618 {
7619
7620 KASSERT(WM_CORE_LOCKED(sc));
7621
7622 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7623 __func__));
7624
7625 if (icr & ICR_LSC) {
7626 uint32_t reg;
7627 uint32_t status = CSR_READ(sc, WMREG_STATUS);
7628
7629 if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
7630 wm_gig_downshift_workaround_ich8lan(sc);
7631
7632 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
7633 device_xname(sc->sc_dev)));
7634 mii_pollstat(&sc->sc_mii);
7635 if (sc->sc_type == WM_T_82543) {
7636 int miistatus, active;
7637
7638 /*
7639 * With 82543, we need to force speed and
7640 * duplex on the MAC equal to what the PHY
7641 * speed and duplex configuration is.
7642 */
7643 miistatus = sc->sc_mii.mii_media_status;
7644
7645 if (miistatus & IFM_ACTIVE) {
7646 active = sc->sc_mii.mii_media_active;
7647 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7648 switch (IFM_SUBTYPE(active)) {
7649 case IFM_10_T:
7650 sc->sc_ctrl |= CTRL_SPEED_10;
7651 break;
7652 case IFM_100_TX:
7653 sc->sc_ctrl |= CTRL_SPEED_100;
7654 break;
7655 case IFM_1000_T:
7656 sc->sc_ctrl |= CTRL_SPEED_1000;
7657 break;
7658 default:
7659 /*
				 * Fiber?
				 * Should never get here.
7662 */
7663 printf("unknown media (%x)\n", active);
7664 break;
7665 }
7666 if (active & IFM_FDX)
7667 sc->sc_ctrl |= CTRL_FD;
7668 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7669 }
7670 } else if ((sc->sc_type == WM_T_ICH8)
7671 && (sc->sc_phytype == WMPHY_IGP_3)) {
7672 wm_kmrn_lock_loss_workaround_ich8lan(sc);
7673 } else if (sc->sc_type == WM_T_PCH) {
7674 wm_k1_gig_workaround_hv(sc,
7675 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
7676 }
7677
7678 if ((sc->sc_phytype == WMPHY_82578)
7679 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
7680 == IFM_1000_T)) {
7681
7682 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
7683 delay(200*1000); /* XXX too big */
7684
7685 /* Link stall fix for link up */
7686 wm_gmii_hv_writereg(sc->sc_dev, 1,
7687 HV_MUX_DATA_CTRL,
7688 HV_MUX_DATA_CTRL_GEN_TO_MAC
7689 | HV_MUX_DATA_CTRL_FORCE_SPEED);
7690 wm_gmii_hv_writereg(sc->sc_dev, 1,
7691 HV_MUX_DATA_CTRL,
7692 HV_MUX_DATA_CTRL_GEN_TO_MAC);
7693 }
7694 }
7695 /*
7696 * I217 Packet Loss issue:
7697 * ensure that FEXTNVM4 Beacon Duration is set correctly
7698 * on power up.
7699 * Set the Beacon Duration for I217 to 8 usec
7700 */
7701 if ((sc->sc_type == WM_T_PCH_LPT)
7702 || (sc->sc_type == WM_T_PCH_SPT)) {
7703 reg = CSR_READ(sc, WMREG_FEXTNVM4);
7704 reg &= ~FEXTNVM4_BEACON_DURATION;
7705 reg |= FEXTNVM4_BEACON_DURATION_8US;
7706 CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
7707 }
7708
7709 /* XXX Work-around I218 hang issue */
7710 /* e1000_k1_workaround_lpt_lp() */
7711
7712 if ((sc->sc_type == WM_T_PCH_LPT)
7713 || (sc->sc_type == WM_T_PCH_SPT)) {
7714 /*
7715 * Set platform power management values for Latency
7716 * Tolerance Reporting (LTR)
7717 */
7718 wm_platform_pm_pch_lpt(sc,
7719 ((sc->sc_mii.mii_media_status & IFM_ACTIVE)
7720 != 0));
7721 }
7722
7723 /* FEXTNVM6 K1-off workaround */
7724 if (sc->sc_type == WM_T_PCH_SPT) {
7725 reg = CSR_READ(sc, WMREG_FEXTNVM6);
7726 if (CSR_READ(sc, WMREG_PCIEANACFG)
7727 & FEXTNVM6_K1_OFF_ENABLE)
7728 reg |= FEXTNVM6_K1_OFF_ENABLE;
7729 else
7730 reg &= ~FEXTNVM6_K1_OFF_ENABLE;
7731 CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
7732 }
7733 } else if (icr & ICR_RXSEQ) {
7734 DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
7735 device_xname(sc->sc_dev)));
7736 }
7737 }
7738
7739 /*
7740 * wm_linkintr_tbi:
7741 *
7742 * Helper; handle link interrupts for TBI mode.
7743 */
7744 static void
7745 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
7746 {
7747 uint32_t status;
7748
7749 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7750 __func__));
7751
7752 status = CSR_READ(sc, WMREG_STATUS);
7753 if (icr & ICR_LSC) {
7754 if (status & STATUS_LU) {
7755 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
7756 device_xname(sc->sc_dev),
7757 (status & STATUS_FD) ? "FDX" : "HDX"));
7758 /*
7759 * NOTE: CTRL will update TFCE and RFCE automatically,
7760 * so we should update sc->sc_ctrl
7761 */
7762
7763 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7764 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7765 sc->sc_fcrtl &= ~FCRTL_XONE;
7766 if (status & STATUS_FD)
7767 sc->sc_tctl |=
7768 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7769 else
7770 sc->sc_tctl |=
7771 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7772 if (sc->sc_ctrl & CTRL_TFCE)
7773 sc->sc_fcrtl |= FCRTL_XONE;
7774 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7775 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7776 WMREG_OLD_FCRTL : WMREG_FCRTL,
7777 sc->sc_fcrtl);
7778 sc->sc_tbi_linkup = 1;
7779 } else {
7780 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
7781 device_xname(sc->sc_dev)));
7782 sc->sc_tbi_linkup = 0;
7783 }
7784 /* Update LED */
7785 wm_tbi_serdes_set_linkled(sc);
7786 } else if (icr & ICR_RXSEQ) {
7787 DPRINTF(WM_DEBUG_LINK,
7788 ("%s: LINK: Receive sequence error\n",
7789 device_xname(sc->sc_dev)));
7790 }
7791 }
7792
7793 /*
7794 * wm_linkintr_serdes:
7795 *
 * Helper; handle link interrupts for SERDES mode.
7797 */
7798 static void
7799 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
7800 {
7801 struct mii_data *mii = &sc->sc_mii;
7802 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7803 uint32_t pcs_adv, pcs_lpab, reg;
7804
7805 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7806 __func__));
7807
7808 if (icr & ICR_LSC) {
7809 /* Check PCS */
7810 reg = CSR_READ(sc, WMREG_PCS_LSTS);
7811 if ((reg & PCS_LSTS_LINKOK) != 0) {
7812 mii->mii_media_status |= IFM_ACTIVE;
7813 sc->sc_tbi_linkup = 1;
7814 } else {
			mii->mii_media_active |= IFM_NONE;
7816 sc->sc_tbi_linkup = 0;
7817 wm_tbi_serdes_set_linkled(sc);
7818 return;
7819 }
7820 mii->mii_media_active |= IFM_1000_SX;
7821 if ((reg & PCS_LSTS_FDX) != 0)
7822 mii->mii_media_active |= IFM_FDX;
7823 else
7824 mii->mii_media_active |= IFM_HDX;
7825 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7826 /* Check flow */
7827 reg = CSR_READ(sc, WMREG_PCS_LSTS);
7828 if ((reg & PCS_LSTS_AN_COMP) == 0) {
7829 DPRINTF(WM_DEBUG_LINK,
7830 ("XXX LINKOK but not ACOMP\n"));
7831 return;
7832 }
7833 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
7834 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
7835 DPRINTF(WM_DEBUG_LINK,
7836 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
7837 if ((pcs_adv & TXCW_SYM_PAUSE)
7838 && (pcs_lpab & TXCW_SYM_PAUSE)) {
7839 mii->mii_media_active |= IFM_FLOW
7840 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
7841 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
7842 && (pcs_adv & TXCW_ASYM_PAUSE)
7843 && (pcs_lpab & TXCW_SYM_PAUSE)
7844 && (pcs_lpab & TXCW_ASYM_PAUSE))
7845 mii->mii_media_active |= IFM_FLOW
7846 | IFM_ETH_TXPAUSE;
7847 else if ((pcs_adv & TXCW_SYM_PAUSE)
7848 && (pcs_adv & TXCW_ASYM_PAUSE)
7849 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
7850 && (pcs_lpab & TXCW_ASYM_PAUSE))
7851 mii->mii_media_active |= IFM_FLOW
7852 | IFM_ETH_RXPAUSE;
7853 }
7854 /* Update LED */
7855 wm_tbi_serdes_set_linkled(sc);
7856 } else {
7857 DPRINTF(WM_DEBUG_LINK,
7858 ("%s: LINK: Receive sequence error\n",
7859 device_xname(sc->sc_dev)));
7860 }
7861 }
7862
7863 /*
7864 * wm_linkintr:
7865 *
7866 * Helper; handle link interrupts.
7867 */
7868 static void
7869 wm_linkintr(struct wm_softc *sc, uint32_t icr)
7870 {
7871
7872 KASSERT(WM_CORE_LOCKED(sc));
7873
7874 if (sc->sc_flags & WM_F_HAS_MII)
7875 wm_linkintr_gmii(sc, icr);
7876 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7877 && (sc->sc_type >= WM_T_82575))
7878 wm_linkintr_serdes(sc, icr);
7879 else
7880 wm_linkintr_tbi(sc, icr);
7881 }
7882
7883 /*
7884 * wm_intr_legacy:
7885 *
7886 * Interrupt service routine for INTx and MSI.
7887 */
7888 static int
7889 wm_intr_legacy(void *arg)
7890 {
7891 struct wm_softc *sc = arg;
7892 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7893 struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
7894 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7895 uint32_t icr, rndval = 0;
7896 int handled = 0;
7897
7898 DPRINTF(WM_DEBUG_TX,
7899 ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
7900 while (1 /* CONSTCOND */) {
7901 icr = CSR_READ(sc, WMREG_ICR);
7902 if ((icr & sc->sc_icr) == 0)
7903 break;
7904 if (rndval == 0)
7905 rndval = icr;
7906
7907 mutex_enter(rxq->rxq_lock);
7908
7909 if (rxq->rxq_stopping) {
7910 mutex_exit(rxq->rxq_lock);
7911 break;
7912 }
7913
7914 handled = 1;
7915
7916 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7917 if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
7918 DPRINTF(WM_DEBUG_RX,
7919 ("%s: RX: got Rx intr 0x%08x\n",
7920 device_xname(sc->sc_dev),
7921 icr & (ICR_RXDMT0 | ICR_RXT0)));
7922 WM_Q_EVCNT_INCR(rxq, rxintr);
7923 }
7924 #endif
7925 wm_rxeof(rxq);
7926
7927 mutex_exit(rxq->rxq_lock);
7928 mutex_enter(txq->txq_lock);
7929
7930 if (txq->txq_stopping) {
7931 mutex_exit(txq->txq_lock);
7932 break;
7933 }
7934
7935 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7936 if (icr & ICR_TXDW) {
7937 DPRINTF(WM_DEBUG_TX,
7938 ("%s: TX: got TXDW interrupt\n",
7939 device_xname(sc->sc_dev)));
7940 WM_Q_EVCNT_INCR(txq, txdw);
7941 }
7942 #endif
7943 wm_txeof(sc, txq);
7944
7945 mutex_exit(txq->txq_lock);
7946 WM_CORE_LOCK(sc);
7947
7948 if (sc->sc_core_stopping) {
7949 WM_CORE_UNLOCK(sc);
7950 break;
7951 }
7952
7953 if (icr & (ICR_LSC | ICR_RXSEQ)) {
7954 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7955 wm_linkintr(sc, icr);
7956 }
7957
7958 WM_CORE_UNLOCK(sc);
7959
7960 if (icr & ICR_RXO) {
7961 #if defined(WM_DEBUG)
7962 log(LOG_WARNING, "%s: Receive overrun\n",
7963 device_xname(sc->sc_dev));
7964 #endif /* defined(WM_DEBUG) */
7965 }
7966 }
7967
7968 rnd_add_uint32(&sc->rnd_source, rndval);
7969
7970 if (handled) {
7971 /* Try to get more packets going. */
7972 if_schedule_deferred_start(ifp);
7973 }
7974
7975 return handled;
7976 }
7977
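/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for the Tx/Rx queue pair of an MSI-X
 *	vector: mask the queue's interrupt, service Tx then Rx, and
 *	unmask it again on the way out.
 */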
7978 static int
7979 wm_txrxintr_msix(void *arg)
7980 {
7981 struct wm_queue *wmq = arg;
7982 struct wm_txqueue *txq = &wmq->wmq_txq;
7983 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
7984 struct wm_softc *sc = txq->txq_sc;
7985 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7986
7987 KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
7988
7989 DPRINTF(WM_DEBUG_TX,
7990 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
7991
	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMC,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMC,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
	else
		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
7998
7999 mutex_enter(txq->txq_lock);
8000
8001 if (txq->txq_stopping) {
8002 mutex_exit(txq->txq_lock);
8003 return 0;
8004 }
8005
8006 WM_Q_EVCNT_INCR(txq, txdw);
8007 wm_txeof(sc, txq);
8008
8009 /* Try to get more packets going. */
8010 if (pcq_peek(txq->txq_interq) != NULL)
8011 if_schedule_deferred_start(ifp);
8012 /*
	 * Some upper-layer processing, e.g. ALTQ, still calls
	 * ifp->if_start() directly.
8015 */
8016 if (wmq->wmq_id == 0)
8017 if_schedule_deferred_start(ifp);
8018
8019 mutex_exit(txq->txq_lock);
8020
8021 DPRINTF(WM_DEBUG_RX,
8022 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
8023 mutex_enter(rxq->rxq_lock);
8024
8025 if (rxq->rxq_stopping) {
8026 mutex_exit(rxq->rxq_lock);
8027 return 0;
8028 }
8029
8030 WM_Q_EVCNT_INCR(rxq, rxintr);
8031 wm_rxeof(rxq);
8032 mutex_exit(rxq->rxq_lock);
8033
	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMS,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMS,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
	else
		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
8040
8041 return 1;
8042 }
8043
8044 /*
8045 * wm_linkintr_msix:
8046 *
8047 * Interrupt service routine for link status change for MSI-X.
8048 */
8049 static int
8050 wm_linkintr_msix(void *arg)
8051 {
8052 struct wm_softc *sc = arg;
8053 uint32_t reg;
8054
8055 DPRINTF(WM_DEBUG_LINK,
8056 ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
8057
8058 reg = CSR_READ(sc, WMREG_ICR);
8059 WM_CORE_LOCK(sc);
8060 if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
8061 goto out;
8062
8063 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
8064 wm_linkintr(sc, ICR_LSC);
8065
8066 out:
8067 WM_CORE_UNLOCK(sc);
8068
8069 if (sc->sc_type == WM_T_82574)
8070 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
8071 else if (sc->sc_type == WM_T_82575)
8072 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
8073 else
8074 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
8075
8076 return 1;
8077 }
8078
8079 /*
8080 * Media related.
8081 * GMII, SGMII, TBI (and SERDES)
8082 */
8083
8084 /* Common */
8085
8086 /*
8087 * wm_tbi_serdes_set_linkled:
8088 *
8089 * Update the link LED on TBI and SERDES devices.
8090 */
8091 static void
8092 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
8093 {
8094
8095 if (sc->sc_tbi_linkup)
8096 sc->sc_ctrl |= CTRL_SWDPIN(0);
8097 else
8098 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
8099
8100 /* 82540 or newer devices are active low */
8101 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
8102
8103 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8104 }
8105
8106 /* GMII related */
8107
8108 /*
8109 * wm_gmii_reset:
8110 *
8111 * Reset the PHY.
8112 */
8113 static void
8114 wm_gmii_reset(struct wm_softc *sc)
8115 {
8116 uint32_t reg;
8117 int rv;
8118
8119 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
8120 device_xname(sc->sc_dev), __func__));
8121
8122 rv = sc->phy.acquire(sc);
8123 if (rv != 0) {
8124 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8125 __func__);
8126 return;
8127 }
8128
8129 switch (sc->sc_type) {
8130 case WM_T_82542_2_0:
8131 case WM_T_82542_2_1:
8132 /* null */
8133 break;
8134 case WM_T_82543:
8135 /*
8136 * With 82543, we need to force speed and duplex on the MAC
8137 * equal to what the PHY speed and duplex configuration is.
8138 * In addition, we need to perform a hardware reset on the PHY
8139 * to take it out of reset.
8140 */
8141 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
8142 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8143
8144 /* The PHY reset pin is active-low. */
8145 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8146 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
8147 CTRL_EXT_SWDPIN(4));
8148 reg |= CTRL_EXT_SWDPIO(4);
8149
8150 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8151 CSR_WRITE_FLUSH(sc);
8152 delay(10*1000);
8153
8154 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
8155 CSR_WRITE_FLUSH(sc);
8156 delay(150);
8157 #if 0
8158 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
8159 #endif
8160 delay(20*1000); /* XXX extra delay to get PHY ID? */
8161 break;
8162 case WM_T_82544: /* reset 10000us */
8163 case WM_T_82540:
8164 case WM_T_82545:
8165 case WM_T_82545_3:
8166 case WM_T_82546:
8167 case WM_T_82546_3:
8168 case WM_T_82541:
8169 case WM_T_82541_2:
8170 case WM_T_82547:
8171 case WM_T_82547_2:
8172 case WM_T_82571: /* reset 100us */
8173 case WM_T_82572:
8174 case WM_T_82573:
8175 case WM_T_82574:
8176 case WM_T_82575:
8177 case WM_T_82576:
8178 case WM_T_82580:
8179 case WM_T_I350:
8180 case WM_T_I354:
8181 case WM_T_I210:
8182 case WM_T_I211:
8183 case WM_T_82583:
8184 case WM_T_80003:
8185 /* generic reset */
8186 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8187 CSR_WRITE_FLUSH(sc);
8188 delay(20000);
8189 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8190 CSR_WRITE_FLUSH(sc);
8191 delay(20000);
8192
8193 if ((sc->sc_type == WM_T_82541)
8194 || (sc->sc_type == WM_T_82541_2)
8195 || (sc->sc_type == WM_T_82547)
8196 || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for the IGP PHY are done in igp_reset() */
8198 /* XXX add code to set LED after phy reset */
8199 }
8200 break;
8201 case WM_T_ICH8:
8202 case WM_T_ICH9:
8203 case WM_T_ICH10:
8204 case WM_T_PCH:
8205 case WM_T_PCH2:
8206 case WM_T_PCH_LPT:
8207 case WM_T_PCH_SPT:
8208 /* generic reset */
8209 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8210 CSR_WRITE_FLUSH(sc);
8211 delay(100);
8212 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8213 CSR_WRITE_FLUSH(sc);
8214 delay(150);
8215 break;
8216 default:
8217 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
8218 __func__);
8219 break;
8220 }
8221
8222 sc->phy.release(sc);
8223
8224 /* get_cfg_done */
8225 wm_get_cfg_done(sc);
8226
8227 /* extra setup */
8228 switch (sc->sc_type) {
8229 case WM_T_82542_2_0:
8230 case WM_T_82542_2_1:
8231 case WM_T_82543:
8232 case WM_T_82544:
8233 case WM_T_82540:
8234 case WM_T_82545:
8235 case WM_T_82545_3:
8236 case WM_T_82546:
8237 case WM_T_82546_3:
8238 case WM_T_82541_2:
8239 case WM_T_82547_2:
8240 case WM_T_82571:
8241 case WM_T_82572:
8242 case WM_T_82573:
8243 case WM_T_82575:
8244 case WM_T_82576:
8245 case WM_T_82580:
8246 case WM_T_I350:
8247 case WM_T_I354:
8248 case WM_T_I210:
8249 case WM_T_I211:
8250 case WM_T_80003:
8251 /* null */
8252 break;
8253 case WM_T_82574:
8254 case WM_T_82583:
8255 wm_lplu_d0_disable(sc);
8256 break;
8257 case WM_T_82541:
8258 case WM_T_82547:
8259 /* XXX Configure actively LED after PHY reset */
8260 break;
8261 case WM_T_ICH8:
8262 case WM_T_ICH9:
8263 case WM_T_ICH10:
8264 case WM_T_PCH:
8265 case WM_T_PCH2:
8266 case WM_T_PCH_LPT:
8267 case WM_T_PCH_SPT:
		/* Allow time for h/w to get to a quiescent state after reset */
8269 delay(10*1000);
8270
8271 if (sc->sc_type == WM_T_PCH)
8272 wm_hv_phy_workaround_ich8lan(sc);
8273
8274 if (sc->sc_type == WM_T_PCH2)
8275 wm_lv_phy_workaround_ich8lan(sc);
8276
8277 /* Clear the host wakeup bit after lcd reset */
8278 if (sc->sc_type >= WM_T_PCH) {
8279 reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
8280 BM_PORT_GEN_CFG);
8281 reg &= ~BM_WUC_HOST_WU_BIT;
8282 wm_gmii_hv_writereg(sc->sc_dev, 2,
8283 BM_PORT_GEN_CFG, reg);
8284 }
8285
8286 /*
		 * XXX Configure the LCD with the extended configuration region
8288 * in NVM
8289 */
8290
8291 /* Disable D0 LPLU. */
8292 if (sc->sc_type >= WM_T_PCH) /* PCH* */
8293 wm_lplu_d0_disable_pch(sc);
8294 else
8295 wm_lplu_d0_disable(sc); /* ICH* */
8296 break;
8297 default:
8298 panic("%s: unknown type\n", __func__);
8299 break;
8300 }
8301 }
8302
8303 /*
8304 * wm_get_phy_id_82575:
8305 *
8306 * Return PHY ID. Return -1 if it failed.
8307 */
8308 static int
8309 wm_get_phy_id_82575(struct wm_softc *sc)
8310 {
8311 uint32_t reg;
8312 int phyid = -1;
8313
8314 /* XXX */
8315 if ((sc->sc_flags & WM_F_SGMII) == 0)
8316 return -1;
8317
8318 if (wm_sgmii_uses_mdio(sc)) {
8319 switch (sc->sc_type) {
8320 case WM_T_82575:
8321 case WM_T_82576:
8322 reg = CSR_READ(sc, WMREG_MDIC);
8323 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
8324 break;
8325 case WM_T_82580:
8326 case WM_T_I350:
8327 case WM_T_I354:
8328 case WM_T_I210:
8329 case WM_T_I211:
8330 reg = CSR_READ(sc, WMREG_MDICNFG);
8331 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
8332 break;
8333 default:
8334 return -1;
8335 }
8336 }
8337
8338 return phyid;
8339 }
8340
8341
8342 /*
8343 * wm_gmii_mediainit:
8344 *
8345 * Initialize media for use on 1000BASE-T devices.
8346 */
8347 static void
8348 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
8349 {
8350 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8351 struct mii_data *mii = &sc->sc_mii;
8352 uint32_t reg;
8353
8354 DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
8355 device_xname(sc->sc_dev), __func__));
8356
8357 /* We have GMII. */
8358 sc->sc_flags |= WM_F_HAS_MII;
8359
8360 if (sc->sc_type == WM_T_80003)
8361 sc->sc_tipg = TIPG_1000T_80003_DFLT;
8362 else
8363 sc->sc_tipg = TIPG_1000T_DFLT;
8364
8365 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
8366 if ((sc->sc_type == WM_T_82580)
8367 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
8368 || (sc->sc_type == WM_T_I211)) {
8369 reg = CSR_READ(sc, WMREG_PHPM);
8370 reg &= ~PHPM_GO_LINK_D;
8371 CSR_WRITE(sc, WMREG_PHPM, reg);
8372 }
8373
8374 /*
8375 * Let the chip set speed/duplex on its own based on
8376 * signals from the PHY.
8377 * XXXbouyer - I'm not sure this is right for the 80003,
8378 * the em driver only sets CTRL_SLU here - but it seems to work.
8379 */
8380 sc->sc_ctrl |= CTRL_SLU;
8381 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8382
8383 /* Initialize our media structures and probe the GMII. */
8384 mii->mii_ifp = ifp;
8385
8386 /*
8387 * Determine the PHY access method.
8388 *
8389 * For SGMII, use SGMII specific method.
8390 *
8391 * For some devices, we can determine the PHY access method
8392 * from sc_type.
8393 *
8394 * For ICH and PCH variants, it's difficult to determine the PHY
8395 * access method by sc_type, so use the PCI product ID for some
8396 * devices.
	 * For other ICH8 variants, try the IGP method first. If the PHY
	 * can't be detected that way, fall back to the BM method.
8399 */
8400 switch (prodid) {
8401 case PCI_PRODUCT_INTEL_PCH_M_LM:
8402 case PCI_PRODUCT_INTEL_PCH_M_LC:
8403 /* 82577 */
8404 sc->sc_phytype = WMPHY_82577;
8405 break;
8406 case PCI_PRODUCT_INTEL_PCH_D_DM:
8407 case PCI_PRODUCT_INTEL_PCH_D_DC:
8408 /* 82578 */
8409 sc->sc_phytype = WMPHY_82578;
8410 break;
8411 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
8412 case PCI_PRODUCT_INTEL_PCH2_LV_V:
8413 /* 82579 */
8414 sc->sc_phytype = WMPHY_82579;
8415 break;
8416 case PCI_PRODUCT_INTEL_82801H_82567V_3:
8417 case PCI_PRODUCT_INTEL_82801I_BM:
8418 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
8419 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
8420 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
8421 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
8422 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
8423 /* ICH8, 9, 10 with 82567 */
8424 sc->sc_phytype = WMPHY_BM;
8425 mii->mii_readreg = wm_gmii_bm_readreg;
8426 mii->mii_writereg = wm_gmii_bm_writereg;
8427 break;
8428 default:
8429 if (((sc->sc_flags & WM_F_SGMII) != 0)
		    && !wm_sgmii_uses_mdio(sc)) {
8431 /* SGMII */
8432 mii->mii_readreg = wm_sgmii_readreg;
8433 mii->mii_writereg = wm_sgmii_writereg;
8434 } else if ((sc->sc_type == WM_T_82574)
8435 || (sc->sc_type == WM_T_82583)) {
8436 /* BM2 (phyaddr == 1) */
8437 sc->sc_phytype = WMPHY_BM;
8438 mii->mii_readreg = wm_gmii_bm_readreg;
8439 mii->mii_writereg = wm_gmii_bm_writereg;
8440 } else if (sc->sc_type >= WM_T_ICH8) {
8441 /* non-82567 ICH8, 9 and 10 */
8442 mii->mii_readreg = wm_gmii_i82544_readreg;
8443 mii->mii_writereg = wm_gmii_i82544_writereg;
8444 } else if (sc->sc_type >= WM_T_80003) {
8445 /* 80003 */
8446 sc->sc_phytype = WMPHY_GG82563;
8447 mii->mii_readreg = wm_gmii_i80003_readreg;
8448 mii->mii_writereg = wm_gmii_i80003_writereg;
8449 } else if (sc->sc_type >= WM_T_I210) {
8450 /* I210 and I211 */
8451 sc->sc_phytype = WMPHY_210;
8452 mii->mii_readreg = wm_gmii_gs40g_readreg;
8453 mii->mii_writereg = wm_gmii_gs40g_writereg;
8454 } else if (sc->sc_type >= WM_T_82580) {
8455 /* 82580, I350 and I354 */
8456 sc->sc_phytype = WMPHY_82580;
8457 mii->mii_readreg = wm_gmii_82580_readreg;
8458 mii->mii_writereg = wm_gmii_82580_writereg;
8459 } else if (sc->sc_type >= WM_T_82544) {
			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
8461 mii->mii_readreg = wm_gmii_i82544_readreg;
8462 mii->mii_writereg = wm_gmii_i82544_writereg;
8463 } else {
8464 mii->mii_readreg = wm_gmii_i82543_readreg;
8465 mii->mii_writereg = wm_gmii_i82543_writereg;
8466 }
8467 break;
8468 }
8469 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
8470 /* All PCH* use _hv_ */
8471 mii->mii_readreg = wm_gmii_hv_readreg;
8472 mii->mii_writereg = wm_gmii_hv_writereg;
8473 }
8474 mii->mii_statchg = wm_gmii_statchg;
8475
8476 /* get PHY control from SMBus to PCIe */
8477 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
8478 || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
8479 wm_smbustopci(sc);
8480
8481 wm_gmii_reset(sc);
8482
8483 sc->sc_ethercom.ec_mii = &sc->sc_mii;
8484 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
8485 wm_gmii_mediastatus);
8486
8487 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
8488 || (sc->sc_type == WM_T_82580)
8489 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
8490 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
8491 if ((sc->sc_flags & WM_F_SGMII) == 0) {
8492 /* Attach only one port */
8493 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
8494 MII_OFFSET_ANY, MIIF_DOPAUSE);
8495 } else {
8496 int i, id;
8497 uint32_t ctrl_ext;
8498
8499 id = wm_get_phy_id_82575(sc);
8500 if (id != -1) {
8501 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
8502 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
8503 }
8504 if ((id == -1)
8505 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
8506 /* Power on sgmii phy if it is disabled */
8507 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8508 CSR_WRITE(sc, WMREG_CTRL_EXT,
			    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
8510 CSR_WRITE_FLUSH(sc);
8511 delay(300*1000); /* XXX too long */
8512
8513 /* from 1 to 8 */
8514 for (i = 1; i < 8; i++)
8515 mii_attach(sc->sc_dev, &sc->sc_mii,
8516 0xffffffff, i, MII_OFFSET_ANY,
8517 MIIF_DOPAUSE);
8518
8519 /* restore previous sfp cage power state */
8520 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8521 }
8522 }
8523 } else {
8524 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8525 MII_OFFSET_ANY, MIIF_DOPAUSE);
8526 }
8527
8528 /*
	 * If the MAC is PCH2 or PCH_LPT and failed to detect the MII PHY,
	 * call wm_set_mdio_slow_mode_hv() as a workaround and retry.
8531 */
8532 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
8533 (LIST_FIRST(&mii->mii_phys) == NULL)) {
8534 wm_set_mdio_slow_mode_hv(sc);
8535 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8536 MII_OFFSET_ANY, MIIF_DOPAUSE);
8537 }
8538
8539 /*
8540 * (For ICH8 variants)
8541 * If PHY detection failed, use BM's r/w function and retry.
8542 */
8543 if (LIST_FIRST(&mii->mii_phys) == NULL) {
8544 /* if failed, retry with *_bm_* */
8545 mii->mii_readreg = wm_gmii_bm_readreg;
8546 mii->mii_writereg = wm_gmii_bm_writereg;
8547
8548 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8549 MII_OFFSET_ANY, MIIF_DOPAUSE);
8550 }
8551
8552 if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
8554 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
8555 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
8556 sc->sc_phytype = WMPHY_NONE;
8557 } else {
8558 /*
8559 * PHY Found!
8560 * Check PHY type.
8561 */
8562 uint32_t model;
8563 struct mii_softc *child;
8564
8565 child = LIST_FIRST(&mii->mii_phys);
8566 model = child->mii_mpd_model;
8567 if (model == MII_MODEL_yyINTEL_I82566)
8568 sc->sc_phytype = WMPHY_IGP_3;
8569
8570 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
8571 }
8572 }
8573
8574 /*
8575 * wm_gmii_mediachange: [ifmedia interface function]
8576 *
8577 * Set hardware to newly-selected media on a 1000BASE-T device.
8578 */
8579 static int
8580 wm_gmii_mediachange(struct ifnet *ifp)
8581 {
8582 struct wm_softc *sc = ifp->if_softc;
8583 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8584 int rc;
8585
8586 DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
8587 device_xname(sc->sc_dev), __func__));
8588 if ((ifp->if_flags & IFF_UP) == 0)
8589 return 0;
8590
8591 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
8592 sc->sc_ctrl |= CTRL_SLU;
8593 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8594 || (sc->sc_type > WM_T_82543)) {
8595 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
8596 } else {
8597 sc->sc_ctrl &= ~CTRL_ASDE;
8598 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
8599 if (ife->ifm_media & IFM_FDX)
8600 sc->sc_ctrl |= CTRL_FD;
8601 switch (IFM_SUBTYPE(ife->ifm_media)) {
8602 case IFM_10_T:
8603 sc->sc_ctrl |= CTRL_SPEED_10;
8604 break;
8605 case IFM_100_TX:
8606 sc->sc_ctrl |= CTRL_SPEED_100;
8607 break;
8608 case IFM_1000_T:
8609 sc->sc_ctrl |= CTRL_SPEED_1000;
8610 break;
8611 default:
8612 panic("wm_gmii_mediachange: bad media 0x%x",
8613 ife->ifm_media);
8614 }
8615 }
8616 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8617 if (sc->sc_type <= WM_T_82543)
8618 wm_gmii_reset(sc);
8619
8620 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
8621 return 0;
8622 return rc;
8623 }
8624
8625 /*
8626 * wm_gmii_mediastatus: [ifmedia interface function]
8627 *
8628 * Get the current interface media status on a 1000BASE-T device.
8629 */
8630 static void
8631 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8632 {
8633 struct wm_softc *sc = ifp->if_softc;
8634
8635 ether_mediastatus(ifp, ifmr);
8636 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8637 | sc->sc_flowflags;
8638 }
8639
8640 #define MDI_IO CTRL_SWDPIN(2)
8641 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
8642 #define MDI_CLK CTRL_SWDPIN(3)
8643
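/*
 * wm_i82543_mii_sendbits:
 *
 *	Bit-bang a value out on the MDIO management interface, MSB first,
 *	using the software-controllable SWDP pins in the CTRL register
 *	(i82543 only).
 */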
8644 static void
8645 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
8646 {
8647 uint32_t i, v;
8648
8649 v = CSR_READ(sc, WMREG_CTRL);
8650 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8651 v |= MDI_DIR | CTRL_SWDPIO(3);
8652
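	/* Shift each bit out MSB first: set MDI_IO, then pulse MDI_CLK. */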
8653 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
8654 if (data & i)
8655 v |= MDI_IO;
8656 else
8657 v &= ~MDI_IO;
8658 CSR_WRITE(sc, WMREG_CTRL, v);
8659 CSR_WRITE_FLUSH(sc);
8660 delay(10);
8661 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8662 CSR_WRITE_FLUSH(sc);
8663 delay(10);
8664 CSR_WRITE(sc, WMREG_CTRL, v);
8665 CSR_WRITE_FLUSH(sc);
8666 delay(10);
8667 }
8668 }
8669
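/*
 * wm_i82543_mii_recvbits:
 *
 *	Bit-bang a 16-bit value in from the MDIO management interface
 *	via the SWDP pins in the CTRL register (i82543 only).
 */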
8670 static uint32_t
8671 wm_i82543_mii_recvbits(struct wm_softc *sc)
8672 {
8673 uint32_t v, i, data = 0;
8674
8675 v = CSR_READ(sc, WMREG_CTRL);
8676 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8677 v |= CTRL_SWDPIO(3);
8678
8679 CSR_WRITE(sc, WMREG_CTRL, v);
8680 CSR_WRITE_FLUSH(sc);
8681 delay(10);
8682 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8683 CSR_WRITE_FLUSH(sc);
8684 delay(10);
8685 CSR_WRITE(sc, WMREG_CTRL, v);
8686 CSR_WRITE_FLUSH(sc);
8687 delay(10);
8688
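	/* Clock in the 16 data bits, MSB first. */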
8689 for (i = 0; i < 16; i++) {
8690 data <<= 1;
8691 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8692 CSR_WRITE_FLUSH(sc);
8693 delay(10);
8694 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
8695 data |= 1;
8696 CSR_WRITE(sc, WMREG_CTRL, v);
8697 CSR_WRITE_FLUSH(sc);
8698 delay(10);
8699 }
8700
8701 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8702 CSR_WRITE_FLUSH(sc);
8703 delay(10);
8704 CSR_WRITE(sc, WMREG_CTRL, v);
8705 CSR_WRITE_FLUSH(sc);
8706 delay(10);
8707
8708 return data;
8709 }
8710
8711 #undef MDI_IO
8712 #undef MDI_DIR
8713 #undef MDI_CLK
8714
8715 /*
8716 * wm_gmii_i82543_readreg: [mii interface function]
8717 *
8718 * Read a PHY register on the GMII (i82543 version).
8719 */
8720 static int
8721 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
8722 {
8723 struct wm_softc *sc = device_private(self);
8724 int rv;
8725
8726 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8727 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
8728 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
8729 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
8730
8731 DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
8732 device_xname(sc->sc_dev), phy, reg, rv));
8733
8734 return rv;
8735 }
8736
8737 /*
8738 * wm_gmii_i82543_writereg: [mii interface function]
8739 *
8740 * Write a PHY register on the GMII (i82543 version).
8741 */
8742 static void
8743 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
8744 {
8745 struct wm_softc *sc = device_private(self);
8746
8747 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8748 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
8749 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
8750 (MII_COMMAND_START << 30), 32);
8751 }
8752
8753 /*
8754 * wm_gmii_mdic_readreg: [mii interface function]
8755 *
8756 * Read a PHY register on the GMII.
8757 */
8758 static int
8759 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
8760 {
8761 struct wm_softc *sc = device_private(self);
8762 uint32_t mdic = 0;
8763 int i, rv;
8764
8765 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
8766 MDIC_REGADD(reg));
8767
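	/* Poll the ready bit */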
8768 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8769 mdic = CSR_READ(sc, WMREG_MDIC);
8770 if (mdic & MDIC_READY)
8771 break;
8772 delay(50);
8773 }
8774
8775 if ((mdic & MDIC_READY) == 0) {
8776 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
8777 device_xname(sc->sc_dev), phy, reg);
8778 rv = 0;
8779 } else if (mdic & MDIC_E) {
8780 #if 0 /* This is normal if no PHY is present. */
8781 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
8782 device_xname(sc->sc_dev), phy, reg);
8783 #endif
8784 rv = 0;
8785 } else {
8786 rv = MDIC_DATA(mdic);
8787 if (rv == 0xffff)
8788 rv = 0;
8789 }
8790
8791 return rv;
8792 }
8793
8794 /*
8795 * wm_gmii_mdic_writereg: [mii interface function]
8796 *
8797 * Write a PHY register on the GMII.
8798 */
8799 static void
8800 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
8801 {
8802 struct wm_softc *sc = device_private(self);
8803 uint32_t mdic = 0;
8804 int i;
8805
8806 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
8807 MDIC_REGADD(reg) | MDIC_DATA(val));
8808
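	/* Poll the ready bit */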
8809 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8810 mdic = CSR_READ(sc, WMREG_MDIC);
8811 if (mdic & MDIC_READY)
8812 break;
8813 delay(50);
8814 }
8815
8816 if ((mdic & MDIC_READY) == 0)
8817 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
8818 device_xname(sc->sc_dev), phy, reg);
8819 else if (mdic & MDIC_E)
8820 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
8821 device_xname(sc->sc_dev), phy, reg);
8822 }
8823
8824 /*
8825 * wm_gmii_i82544_readreg: [mii interface function]
8826 *
8827 * Read a PHY register on the GMII.
8828 */
8829 static int
8830 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
8831 {
8832 struct wm_softc *sc = device_private(self);
8833 int rv;
8834
8835 if (sc->phy.acquire(sc)) {
8836 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8837 __func__);
8838 return 0;
8839 }
8840 rv = wm_gmii_mdic_readreg(self, phy, reg);
8841 sc->phy.release(sc);
8842
8843 return rv;
8844 }
8845
8846 /*
8847 * wm_gmii_i82544_writereg: [mii interface function]
8848 *
8849 * Write a PHY register on the GMII.
8850 */
8851 static void
8852 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
8853 {
8854 struct wm_softc *sc = device_private(self);
8855
	if (sc->phy.acquire(sc)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}
8860 wm_gmii_mdic_writereg(self, phy, reg, val);
8861 sc->phy.release(sc);
8862 }
8863
8864 /*
8865 * wm_gmii_i80003_readreg: [mii interface function]
8866 *
 *	Read a PHY register on the Kumeran bus (80003).
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
8870 */
8871 static int
8872 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
8873 {
8874 struct wm_softc *sc = device_private(self);
8875 int rv;
8876
8877 if (phy != 1) /* only one PHY on kumeran bus */
8878 return 0;
8879
8880 if (sc->phy.acquire(sc)) {
8881 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8882 __func__);
8883 return 0;
8884 }
8885
8886 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
8887 wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8888 reg >> GG82563_PAGE_SHIFT);
8889 } else {
8890 wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8891 reg >> GG82563_PAGE_SHIFT);
8892 }
	/* Wait another 200us to work around a bug in the MDIC ready bit */
8894 delay(200);
8895 rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
8896 delay(200);
8897 sc->phy.release(sc);
8898
8899 return rv;
8900 }
8901
8902 /*
8903 * wm_gmii_i80003_writereg: [mii interface function]
8904 *
 *	Write a PHY register on the Kumeran bus (80003).
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
8908 */
8909 static void
8910 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
8911 {
8912 struct wm_softc *sc = device_private(self);
8913
8914 if (phy != 1) /* only one PHY on kumeran bus */
8915 return;
8916
8917 if (sc->phy.acquire(sc)) {
8918 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8919 __func__);
8920 return;
8921 }
8922
8923 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
8924 wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8925 reg >> GG82563_PAGE_SHIFT);
8926 } else {
8927 wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8928 reg >> GG82563_PAGE_SHIFT);
8929 }
	/* Wait another 200us to work around a bug in the MDIC ready bit */
8931 delay(200);
8932 wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
8933 delay(200);
8934
8935 sc->phy.release(sc);
8936 }
8937
8938 /*
8939 * wm_gmii_bm_readreg: [mii interface function]
8940 *
 *	Read a PHY register on the BM PHY (82567 variants).
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
8944 */
8945 static int
8946 wm_gmii_bm_readreg(device_t self, int phy, int reg)
8947 {
8948 struct wm_softc *sc = device_private(self);
8949 uint16_t page = reg >> BME1000_PAGE_SHIFT;
8950 uint16_t val;
8951 int rv;
8952
8953 if (sc->phy.acquire(sc)) {
8954 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8955 __func__);
8956 return 0;
8957 }
8958
8959 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
8960 phy = ((page >= 768) || ((page == 0) && (reg == 25))
8961 || (reg == 31)) ? 1 : phy;
8962 /* Page 800 works differently than the rest so it has its own func */
8963 if (page == BM_WUC_PAGE) {
8964 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
8965 rv = val;
8966 goto release;
8967 }
8968
8969 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8970 if ((phy == 1) && (sc->sc_type != WM_T_82574)
8971 && (sc->sc_type != WM_T_82583))
8972 wm_gmii_mdic_writereg(self, phy,
8973 MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
8974 else
8975 wm_gmii_mdic_writereg(self, phy,
8976 BME1000_PHY_PAGE_SELECT, page);
8977 }
8978
8979 rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
8980
8981 release:
8982 sc->phy.release(sc);
8983 return rv;
8984 }
8985
8986 /*
8987 * wm_gmii_bm_writereg: [mii interface function]
8988 *
 *	Write a PHY register on the BM PHY (82567 variants).
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
8992 */
8993 static void
8994 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
8995 {
8996 struct wm_softc *sc = device_private(self);
8997 uint16_t page = reg >> BME1000_PAGE_SHIFT;
8998
8999 if (sc->phy.acquire(sc)) {
9000 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9001 __func__);
9002 return;
9003 }
9004
9005 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
9006 phy = ((page >= 768) || ((page == 0) && (reg == 25))
9007 || (reg == 31)) ? 1 : phy;
9008 /* Page 800 works differently than the rest so it has its own func */
9009 if (page == BM_WUC_PAGE) {
9010 uint16_t tmp;
9011
9012 tmp = val;
9013 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
9014 goto release;
9015 }
9016
9017 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
9018 if ((phy == 1) && (sc->sc_type != WM_T_82574)
9019 && (sc->sc_type != WM_T_82583))
9020 wm_gmii_mdic_writereg(self, phy,
9021 MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
9022 else
9023 wm_gmii_mdic_writereg(self, phy,
9024 BME1000_PHY_PAGE_SELECT, page);
9025 }
9026
9027 wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
9028
9029 release:
9030 sc->phy.release(sc);
9031 }
9032
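/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write a BM PHY wakeup register on page 800
 *	(BM_WUC_PAGE). The caller must already hold the PHY semaphore.
 */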
9033 static void
9034 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
9035 {
9036 struct wm_softc *sc = device_private(self);
9037 uint16_t regnum = BM_PHY_REG_NUM(offset);
9038 uint16_t wuce, reg;
9039
9040 DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9041 device_xname(sc->sc_dev), __func__));
9042 /* XXX Gig must be disabled for MDIO accesses to page 800 */
9043 if (sc->sc_type == WM_T_PCH) {
		/* XXX the e1000 driver does nothing here... why? */
9045 }
9046
9047 /*
9048 * 1) Enable PHY wakeup register first.
9049 * See e1000_enable_phy_wakeup_reg_access_bm().
9050 */
9051
9052 /* Set page 769 */
9053 wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9054 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
9055
9056 /* Read WUCE and save it */
9057 wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
9058
9059 reg = wuce | BM_WUC_ENABLE_BIT;
9060 reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
9061 wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
9062
9063 /* Select page 800 */
9064 wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9065 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
9066
9067 /*
9068 * 2) Access PHY wakeup register.
9069 * See e1000_access_phy_wakeup_reg_bm.
9070 */
9071
9072 /* Write page 800 */
9073 wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
9074
9075 if (rd)
9076 *val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
9077 else
9078 wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
9079
9080 /*
9081 * 3) Disable PHY wakeup register.
9082 * See e1000_disable_phy_wakeup_reg_access_bm().
9083 */
9084 /* Set page 769 */
9085 wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9086 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
9087
9088 wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
9089 }
9090
9091 /*
9092 * wm_gmii_hv_readreg: [mii interface function]
9093 *
 *	Read a PHY register on the HV (PCH and newer) PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
9097 */
9098 static int
9099 wm_gmii_hv_readreg(device_t self, int phy, int reg)
9100 {
9101 struct wm_softc *sc = device_private(self);
9102 int rv;
9103
9104 DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9105 device_xname(sc->sc_dev), __func__));
9106 if (sc->phy.acquire(sc)) {
9107 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9108 __func__);
9109 return 0;
9110 }
9111
9112 rv = wm_gmii_hv_readreg_locked(self, phy, reg);
9113 sc->phy.release(sc);
9114 return rv;
9115 }
9116
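/*
 * wm_gmii_hv_readreg_locked:
 *
 *	Like wm_gmii_hv_readreg(), but the caller must already hold the
 *	PHY semaphore.
 */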
9117 static int
9118 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
9119 {
9120 uint16_t page = BM_PHY_REG_PAGE(reg);
9121 uint16_t regnum = BM_PHY_REG_NUM(reg);
9122 uint16_t val;
9123 int rv;
9124
9125 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
9126
9127 /* Page 800 works differently than the rest so it has its own func */
9128 if (page == BM_WUC_PAGE) {
9129 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
9130 return val;
9131 }
9132
9133 /*
	 * Pages lower than 768 work differently than the rest and would
	 * need their own access routine, which is not implemented yet.
9136 */
9137 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
9138 printf("gmii_hv_readreg!!!\n");
9139 return 0;
9140 }
9141
9142 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
9143 wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9144 page << BME1000_PAGE_SHIFT);
9145 }
9146
9147 rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
9148 return rv;
9149 }
9150
9151 /*
9152 * wm_gmii_hv_writereg: [mii interface function]
9153 *
 *	Write a PHY register on the HV (PCH and newer) PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
9157 */
9158 static void
9159 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
9160 {
9161 struct wm_softc *sc = device_private(self);
9162
9163 DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9164 device_xname(sc->sc_dev), __func__));
9165
9166 if (sc->phy.acquire(sc)) {
9167 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9168 __func__);
9169 return;
9170 }
9171
9172 wm_gmii_hv_writereg_locked(self, phy, reg, val);
9173 sc->phy.release(sc);
9174 }
9175
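/*
 * wm_gmii_hv_writereg_locked:
 *
 *	Like wm_gmii_hv_writereg(), but the caller must already hold the
 *	PHY semaphore.
 */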
9176 static void
9177 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
9178 {
9179 struct wm_softc *sc = device_private(self);
9180 uint16_t page = BM_PHY_REG_PAGE(reg);
9181 uint16_t regnum = BM_PHY_REG_NUM(reg);
9182
9183 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
9184
9185 /* Page 800 works differently than the rest so it has its own func */
9186 if (page == BM_WUC_PAGE) {
9187 uint16_t tmp;
9188
9189 tmp = val;
9190 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
9191 return;
9192 }
9193
9194 /*
	 * Pages lower than 768 work differently than the rest and would
	 * need their own access routine, which is not implemented yet.
9197 */
9198 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
9199 printf("gmii_hv_writereg!!!\n");
9200 return;
9201 }
9202
9203 {
9204 /*
9205 * XXX Workaround MDIO accesses being disabled after entering
9206 * IEEE Power Down (whenever bit 11 of the PHY control
9207 * register is set)
9208 */
9209 if (sc->sc_phytype == WMPHY_82578) {
9210 struct mii_softc *child;
9211
9212 child = LIST_FIRST(&sc->sc_mii.mii_phys);
9213 if ((child != NULL) && (child->mii_mpd_rev >= 1)
9214 && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
9215 && ((val & (1 << 11)) != 0)) {
9216 printf("XXX need workaround\n");
9217 }
9218 }
9219
9220 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
9221 wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9222 page << BME1000_PAGE_SHIFT);
9223 }
9224 }
9225
9226 wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
9227 }
9228
9229 /*
9230 * wm_gmii_82580_readreg: [mii interface function]
9231 *
 *	Read a PHY register on the 82580, I350 and I354.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
9235 */
9236 static int
9237 wm_gmii_82580_readreg(device_t self, int phy, int reg)
9238 {
9239 struct wm_softc *sc = device_private(self);
9240 int rv;
9241
9242 if (sc->phy.acquire(sc) != 0) {
9243 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9244 __func__);
9245 return 0;
9246 }
9247
9248 rv = wm_gmii_mdic_readreg(self, phy, reg);
9249
9250 sc->phy.release(sc);
9251 return rv;
9252 }
9253
9254 /*
9255 * wm_gmii_82580_writereg: [mii interface function]
9256 *
 *	Write a PHY register on the 82580, I350 and I354.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
9260 */
9261 static void
9262 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
9263 {
9264 struct wm_softc *sc = device_private(self);
9265
9266 if (sc->phy.acquire(sc) != 0) {
9267 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9268 __func__);
9269 return;
9270 }
9271
9272 wm_gmii_mdic_writereg(self, phy, reg, val);
9273
9274 sc->phy.release(sc);
9275 }
9276
9277 /*
9278 * wm_gmii_gs40g_readreg: [mii interface function]
9279 *
 *	Read a PHY register on the I210 and I211.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
9283 */
9284 static int
9285 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
9286 {
9287 struct wm_softc *sc = device_private(self);
9288 int page, offset;
9289 int rv;
9290
9291 /* Acquire semaphore */
9292 if (sc->phy.acquire(sc)) {
9293 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9294 __func__);
9295 return 0;
9296 }
9297
9298 /* Page select */
9299 page = reg >> GS40G_PAGE_SHIFT;
9300 wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
9301
9302 /* Read reg */
9303 offset = reg & GS40G_OFFSET_MASK;
9304 rv = wm_gmii_mdic_readreg(self, phy, offset);
9305
9306 sc->phy.release(sc);
9307 return rv;
9308 }
9309
9310 /*
9311 * wm_gmii_gs40g_writereg: [mii interface function]
9312 *
9313 * Write a PHY register on the I210 and I211.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
9316 */
9317 static void
9318 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
9319 {
9320 struct wm_softc *sc = device_private(self);
9321 int page, offset;
9322
9323 /* Acquire semaphore */
9324 if (sc->phy.acquire(sc)) {
9325 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9326 __func__);
9327 return;
9328 }
9329
9330 /* Page select */
9331 page = reg >> GS40G_PAGE_SHIFT;
9332 wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
9333
9334 /* Write reg */
9335 offset = reg & GS40G_OFFSET_MASK;
9336 wm_gmii_mdic_writereg(self, phy, offset, val);
9337
9338 /* Release semaphore */
9339 sc->phy.release(sc);
9340 }
9341
9342 /*
9343 * wm_gmii_statchg: [mii interface function]
9344 *
9345 * Callback from MII layer when media changes.
9346 */
9347 static void
9348 wm_gmii_statchg(struct ifnet *ifp)
9349 {
9350 struct wm_softc *sc = ifp->if_softc;
9351 struct mii_data *mii = &sc->sc_mii;
9352
9353 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
9354 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9355 sc->sc_fcrtl &= ~FCRTL_XONE;
9356
9357 /*
9358 * Get flow control negotiation result.
9359 */
9360 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
9361 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
9362 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
9363 mii->mii_media_active &= ~IFM_ETH_FMASK;
9364 }
9365
9366 if (sc->sc_flowflags & IFM_FLOW) {
9367 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
9368 sc->sc_ctrl |= CTRL_TFCE;
9369 sc->sc_fcrtl |= FCRTL_XONE;
9370 }
9371 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
9372 sc->sc_ctrl |= CTRL_RFCE;
9373 }
9374
9375 if (sc->sc_mii.mii_media_active & IFM_FDX) {
9376 DPRINTF(WM_DEBUG_LINK,
9377 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
9378 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9379 } else {
9380 DPRINTF(WM_DEBUG_LINK,
9381 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
9382 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9383 }
9384
9385 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9386 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9387 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
9388 : WMREG_FCRTL, sc->sc_fcrtl);
9389 if (sc->sc_type == WM_T_80003) {
9390 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
9391 case IFM_1000_T:
9392 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
9393 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
9394 sc->sc_tipg = TIPG_1000T_80003_DFLT;
9395 break;
9396 default:
9397 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
9398 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
9399 sc->sc_tipg = TIPG_10_100_80003_DFLT;
9400 break;
9401 }
9402 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
9403 }
9404 }
9405
9406 /* kumeran related (80003, ICH* and PCH*) */
9407
9408 /*
9409 * wm_kmrn_readreg:
9410 *
9411 * Read a kumeran register
9412 */
9413 static int
9414 wm_kmrn_readreg(struct wm_softc *sc, int reg)
9415 {
9416 int rv;
9417
9418 if (sc->sc_type == WM_T_80003)
9419 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9420 else
9421 rv = sc->phy.acquire(sc);
9422 if (rv != 0) {
9423 aprint_error_dev(sc->sc_dev,
9424 "%s: failed to get semaphore\n", __func__);
9425 return 0;
9426 }
9427
9428 rv = wm_kmrn_readreg_locked(sc, reg);
9429
9430 if (sc->sc_type == WM_T_80003)
9431 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9432 else
9433 sc->phy.release(sc);
9434
9435 return rv;
9436 }
9437
9438 static int
9439 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
9440 {
9441 int rv;
9442
9443 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
9444 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
9445 KUMCTRLSTA_REN);
9446 CSR_WRITE_FLUSH(sc);
9447 delay(2);
9448
9449 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
9450
9451 return rv;
9452 }
9453
9454 /*
9455 * wm_kmrn_writereg:
9456 *
9457 * Write a kumeran register
9458 */
9459 static void
9460 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
9461 {
9462 int rv;
9463
9464 if (sc->sc_type == WM_T_80003)
9465 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9466 else
9467 rv = sc->phy.acquire(sc);
9468 if (rv != 0) {
9469 aprint_error_dev(sc->sc_dev,
9470 "%s: failed to get semaphore\n", __func__);
9471 return;
9472 }
9473
9474 wm_kmrn_writereg_locked(sc, reg, val);
9475
9476 if (sc->sc_type == WM_T_80003)
9477 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9478 else
9479 sc->phy.release(sc);
9480 }
9481
9482 static void
9483 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
9484 {
9485
9486 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
9487 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
9488 (val & KUMCTRLSTA_MASK));
9489 }
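
/*
 * Usage sketch (illustration only; the bit value is hypothetical):
 * a read-modify-write of a Kumeran register built from the two
 * helpers above:
 *
 *	int val;
 *
 *	val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL);
 *	val |= 0x0010;		(some hypothetical control bit)
 *	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, val);
 *
 * Both helpers acquire and release the appropriate semaphore
 * themselves (SWFW_MAC_CSR_SM on the 80003, sc->phy.acquire()
 * elsewhere), so the caller must not already hold it.
 */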
9490
9491 /* SGMII related */
9492
9493 /*
9494 * wm_sgmii_uses_mdio
9495 *
9496 * Check whether the transaction is to the internal PHY or the external
9497 * MDIO interface. Return true if it's MDIO.
9498 */
9499 static bool
9500 wm_sgmii_uses_mdio(struct wm_softc *sc)
9501 {
9502 uint32_t reg;
9503 bool ismdio = false;
9504
9505 switch (sc->sc_type) {
9506 case WM_T_82575:
9507 case WM_T_82576:
9508 reg = CSR_READ(sc, WMREG_MDIC);
9509 ismdio = ((reg & MDIC_DEST) != 0);
9510 break;
9511 case WM_T_82580:
9512 case WM_T_I350:
9513 case WM_T_I354:
9514 case WM_T_I210:
9515 case WM_T_I211:
9516 reg = CSR_READ(sc, WMREG_MDICNFG);
9517 ismdio = ((reg & MDICNFG_DEST) != 0);
9518 break;
9519 default:
9520 break;
9521 }
9522
9523 return ismdio;
9524 }
9525
9526 /*
9527 * wm_sgmii_readreg: [mii interface function]
9528 *
9529 * Read a PHY register on the SGMII
9530 * This could be handled by the PHY layer if we didn't have to lock the
9531  * resource ...
9532 */
9533 static int
9534 wm_sgmii_readreg(device_t self, int phy, int reg)
9535 {
9536 struct wm_softc *sc = device_private(self);
9537 uint32_t i2ccmd;
9538 int i, rv;
9539
9540 if (sc->phy.acquire(sc)) {
9541 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9542 __func__);
9543 return 0;
9544 }
9545
9546 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9547 | (phy << I2CCMD_PHY_ADDR_SHIFT)
9548 | I2CCMD_OPCODE_READ;
9549 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9550
9551 /* Poll the ready bit */
9552 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9553 delay(50);
9554 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9555 if (i2ccmd & I2CCMD_READY)
9556 break;
9557 }
9558 if ((i2ccmd & I2CCMD_READY) == 0)
9559 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
9560 if ((i2ccmd & I2CCMD_ERROR) != 0)
9561 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9562
9563 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
9564
9565 sc->phy.release(sc);
9566 return rv;
9567 }
9568
9569 /*
9570 * wm_sgmii_writereg: [mii interface function]
9571 *
9572 * Write a PHY register on the SGMII.
9573 * This could be handled by the PHY layer if we didn't have to lock the
9574  * resource ...
9575 */
9576 static void
9577 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
9578 {
9579 struct wm_softc *sc = device_private(self);
9580 uint32_t i2ccmd;
9581 int i;
9582 int val_swapped;
9583
9584 if (sc->phy.acquire(sc) != 0) {
9585 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9586 __func__);
9587 return;
9588 }
9589 /* Swap the data bytes for the I2C interface */
9590 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
9591 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9592 | (phy << I2CCMD_PHY_ADDR_SHIFT)
9593 | I2CCMD_OPCODE_WRITE | val_swapped;
9594 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9595
9596 /* Poll the ready bit */
9597 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9598 delay(50);
9599 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9600 if (i2ccmd & I2CCMD_READY)
9601 break;
9602 }
9603 if ((i2ccmd & I2CCMD_READY) == 0)
9604 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
9605 if ((i2ccmd & I2CCMD_ERROR) != 0)
9606 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9607
9608 sc->phy.release(sc);
9609 }
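
/*
 * Worked example of the byte swap used by the two functions above
 * (illustration only): the 16 bit PHY register value sits in the
 * I2CCMD data field with its bytes swapped, so a register value of
 * 0x1234 goes onto the wire as 0x3412:
 *
 *	((0x1234 >> 8) & 0x00ff) | ((0x1234 << 8) & 0xff00) == 0x3412
 *
 * Applying the swap twice is the identity, which is why the same
 * expression appears in both the read and the write path.
 */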
9610
9611 /* TBI related */
9612
9613 /*
9614 * wm_tbi_mediainit:
9615 *
9616 * Initialize media for use on 1000BASE-X devices.
9617 */
9618 static void
9619 wm_tbi_mediainit(struct wm_softc *sc)
9620 {
9621 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9622 const char *sep = "";
9623
9624 if (sc->sc_type < WM_T_82543)
9625 sc->sc_tipg = TIPG_WM_DFLT;
9626 else
9627 sc->sc_tipg = TIPG_LG_DFLT;
9628
9629 sc->sc_tbi_serdes_anegticks = 5;
9630
9631 /* Initialize our media structures */
9632 sc->sc_mii.mii_ifp = ifp;
9633 sc->sc_ethercom.ec_mii = &sc->sc_mii;
9634
9635 if ((sc->sc_type >= WM_T_82575)
9636 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
9637 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9638 wm_serdes_mediachange, wm_serdes_mediastatus);
9639 else
9640 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9641 wm_tbi_mediachange, wm_tbi_mediastatus);
9642
9643 /*
9644 * SWD Pins:
9645 *
9646 * 0 = Link LED (output)
9647 * 1 = Loss Of Signal (input)
9648 */
9649 sc->sc_ctrl |= CTRL_SWDPIO(0);
9650
9651 /* XXX Perhaps this is only for TBI */
9652 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9653 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
9654
9655 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9656 sc->sc_ctrl &= ~CTRL_LRST;
9657
9658 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9659
9660 #define ADD(ss, mm, dd) \
9661 do { \
9662 aprint_normal("%s%s", sep, ss); \
9663 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
9664 sep = ", "; \
9665 } while (/*CONSTCOND*/0)
9666
9667 aprint_normal_dev(sc->sc_dev, "");
9668
9669 if (sc->sc_type == WM_T_I354) {
9670 uint32_t status;
9671
9672 status = CSR_READ(sc, WMREG_STATUS);
9673 if (((status & STATUS_2P5_SKU) != 0)
9674 && ((status & STATUS_2P5_SKU_OVER) == 0)) {
9675 ADD("2500baseKX-FDX", IFM_2500_SX | IFM_FDX,ANAR_X_FD);
9676 } else
9677 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX,ANAR_X_FD);
9678 } else if (sc->sc_type == WM_T_82545) {
9679 /* Only 82545 is LX (XXX except SFP) */
9680 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
9681 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
9682 } else {
9683 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
9684 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
9685 }
9686 ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
9687 aprint_normal("\n");
9688
9689 #undef ADD
9690
9691 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
9692 }
9693
9694 /*
9695 * wm_tbi_mediachange: [ifmedia interface function]
9696 *
9697 * Set hardware to newly-selected media on a 1000BASE-X device.
9698 */
9699 static int
9700 wm_tbi_mediachange(struct ifnet *ifp)
9701 {
9702 struct wm_softc *sc = ifp->if_softc;
9703 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9704 uint32_t status;
9705 int i;
9706
9707 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9708 /* XXX need some work for >= 82571 and < 82575 */
9709 if (sc->sc_type < WM_T_82575)
9710 return 0;
9711 }
9712
9713 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9714 || (sc->sc_type >= WM_T_82575))
9715 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9716
9717 sc->sc_ctrl &= ~CTRL_LRST;
9718 sc->sc_txcw = TXCW_ANE;
9719 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9720 sc->sc_txcw |= TXCW_FD | TXCW_HD;
9721 else if (ife->ifm_media & IFM_FDX)
9722 sc->sc_txcw |= TXCW_FD;
9723 else
9724 sc->sc_txcw |= TXCW_HD;
9725
9726 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
9727 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
9728
9729 	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
9730 device_xname(sc->sc_dev), sc->sc_txcw));
9731 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9732 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9733 CSR_WRITE_FLUSH(sc);
9734 delay(1000);
9735
9736 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
9737 	DPRINTF(WM_DEBUG_LINK, ("%s: i = 0x%x\n", device_xname(sc->sc_dev), i));
9738
9739 /*
9740 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
9741 	 * if the optics detect a signal, and clear if they don't.
9742 */
9743 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
9744 /* Have signal; wait for the link to come up. */
9745 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
9746 delay(10000);
9747 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
9748 break;
9749 }
9750
9751 		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
9752 			device_xname(sc->sc_dev), i));
9753
9754 status = CSR_READ(sc, WMREG_STATUS);
9755 DPRINTF(WM_DEBUG_LINK,
9756 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
9757 			device_xname(sc->sc_dev), status, STATUS_LU));
9758 if (status & STATUS_LU) {
9759 /* Link is up. */
9760 DPRINTF(WM_DEBUG_LINK,
9761 ("%s: LINK: set media -> link up %s\n",
9762 device_xname(sc->sc_dev),
9763 (status & STATUS_FD) ? "FDX" : "HDX"));
9764
9765 /*
9766 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
9767 			 * automatically, so we should re-read it into sc->sc_ctrl.
9768 */
9769 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9770 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9771 sc->sc_fcrtl &= ~FCRTL_XONE;
9772 if (status & STATUS_FD)
9773 sc->sc_tctl |=
9774 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9775 else
9776 sc->sc_tctl |=
9777 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9778 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
9779 sc->sc_fcrtl |= FCRTL_XONE;
9780 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9781 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9782 WMREG_OLD_FCRTL : WMREG_FCRTL,
9783 sc->sc_fcrtl);
9784 sc->sc_tbi_linkup = 1;
9785 } else {
9786 if (i == WM_LINKUP_TIMEOUT)
9787 wm_check_for_link(sc);
9788 /* Link is down. */
9789 DPRINTF(WM_DEBUG_LINK,
9790 ("%s: LINK: set media -> link down\n",
9791 device_xname(sc->sc_dev)));
9792 sc->sc_tbi_linkup = 0;
9793 }
9794 } else {
9795 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
9796 device_xname(sc->sc_dev)));
9797 sc->sc_tbi_linkup = 0;
9798 }
9799
9800 wm_tbi_serdes_set_linkled(sc);
9801
9802 return 0;
9803 }
9804
9805 /*
9806 * wm_tbi_mediastatus: [ifmedia interface function]
9807 *
9808 * Get the current interface media status on a 1000BASE-X device.
9809 */
9810 static void
9811 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9812 {
9813 struct wm_softc *sc = ifp->if_softc;
9814 uint32_t ctrl, status;
9815
9816 ifmr->ifm_status = IFM_AVALID;
9817 ifmr->ifm_active = IFM_ETHER;
9818
9819 status = CSR_READ(sc, WMREG_STATUS);
9820 if ((status & STATUS_LU) == 0) {
9821 ifmr->ifm_active |= IFM_NONE;
9822 return;
9823 }
9824
9825 ifmr->ifm_status |= IFM_ACTIVE;
9826 /* Only 82545 is LX */
9827 if (sc->sc_type == WM_T_82545)
9828 ifmr->ifm_active |= IFM_1000_LX;
9829 else
9830 ifmr->ifm_active |= IFM_1000_SX;
9831 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
9832 ifmr->ifm_active |= IFM_FDX;
9833 else
9834 ifmr->ifm_active |= IFM_HDX;
9835 ctrl = CSR_READ(sc, WMREG_CTRL);
9836 if (ctrl & CTRL_RFCE)
9837 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
9838 if (ctrl & CTRL_TFCE)
9839 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
9840 }
9841
9842 /* XXX TBI only */
9843 static int
9844 wm_check_for_link(struct wm_softc *sc)
9845 {
9846 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9847 uint32_t rxcw;
9848 uint32_t ctrl;
9849 uint32_t status;
9850 uint32_t sig;
9851
9852 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9853 /* XXX need some work for >= 82571 */
9854 if (sc->sc_type >= WM_T_82571) {
9855 sc->sc_tbi_linkup = 1;
9856 return 0;
9857 }
9858 }
9859
9860 rxcw = CSR_READ(sc, WMREG_RXCW);
9861 ctrl = CSR_READ(sc, WMREG_CTRL);
9862 status = CSR_READ(sc, WMREG_STATUS);
9863
9864 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
9865
9866 DPRINTF(WM_DEBUG_LINK,
9867 ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
9868 device_xname(sc->sc_dev), __func__,
9869 ((ctrl & CTRL_SWDPIN(1)) == sig),
9870 ((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
9871
9872 /*
9873 * SWDPIN LU RXCW
9874 * 0 0 0
9875 * 0 0 1 (should not happen)
9876 * 0 1 0 (should not happen)
9877 * 0 1 1 (should not happen)
9878 * 1 0 0 Disable autonego and force linkup
9879 * 1 0 1 got /C/ but not linkup yet
9880 * 1 1 0 (linkup)
9881 * 1 1 1 If IFM_AUTO, back to autonego
9882 *
9883 */
9884 if (((ctrl & CTRL_SWDPIN(1)) == sig)
9885 && ((status & STATUS_LU) == 0)
9886 && ((rxcw & RXCW_C) == 0)) {
9887 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
9888 __func__));
9889 sc->sc_tbi_linkup = 0;
9890 /* Disable auto-negotiation in the TXCW register */
9891 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
9892
9893 /*
9894 * Force link-up and also force full-duplex.
9895 *
9896 		 * NOTE: the hardware may have updated TFCE and RFCE in CTRL
9897 		 * automatically, so we should update sc->sc_ctrl.
9898 */
9899 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
9900 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9901 } else if (((status & STATUS_LU) != 0)
9902 && ((rxcw & RXCW_C) != 0)
9903 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
9904 sc->sc_tbi_linkup = 1;
9905 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
9906 __func__));
9907 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9908 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
9909 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
9910 && ((rxcw & RXCW_C) != 0)) {
9911 DPRINTF(WM_DEBUG_LINK, ("/C/"));
9912 } else {
9913 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
9914 status));
9915 }
9916
9917 return 0;
9918 }
9919
9920 /*
9921 * wm_tbi_tick:
9922 *
9923 * Check the link on TBI devices.
9924 * This function acts as mii_tick().
9925 */
9926 static void
9927 wm_tbi_tick(struct wm_softc *sc)
9928 {
9929 struct mii_data *mii = &sc->sc_mii;
9930 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9931 uint32_t status;
9932
9933 KASSERT(WM_CORE_LOCKED(sc));
9934
9935 status = CSR_READ(sc, WMREG_STATUS);
9936
9937 /* XXX is this needed? */
9938 (void)CSR_READ(sc, WMREG_RXCW);
9939 (void)CSR_READ(sc, WMREG_CTRL);
9940
9941 /* set link status */
9942 if ((status & STATUS_LU) == 0) {
9943 DPRINTF(WM_DEBUG_LINK,
9944 ("%s: LINK: checklink -> down\n",
9945 device_xname(sc->sc_dev)));
9946 sc->sc_tbi_linkup = 0;
9947 } else if (sc->sc_tbi_linkup == 0) {
9948 DPRINTF(WM_DEBUG_LINK,
9949 ("%s: LINK: checklink -> up %s\n",
9950 device_xname(sc->sc_dev),
9951 (status & STATUS_FD) ? "FDX" : "HDX"));
9952 sc->sc_tbi_linkup = 1;
9953 sc->sc_tbi_serdes_ticks = 0;
9954 }
9955
9956 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
9957 goto setled;
9958
9959 if ((status & STATUS_LU) == 0) {
9960 sc->sc_tbi_linkup = 0;
9961 /* If the timer expired, retry autonegotiation */
9962 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9963 && (++sc->sc_tbi_serdes_ticks
9964 >= sc->sc_tbi_serdes_anegticks)) {
9965 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9966 sc->sc_tbi_serdes_ticks = 0;
9967 /*
9968 * Reset the link, and let autonegotiation do
9969 * its thing
9970 */
9971 sc->sc_ctrl |= CTRL_LRST;
9972 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9973 CSR_WRITE_FLUSH(sc);
9974 delay(1000);
9975 sc->sc_ctrl &= ~CTRL_LRST;
9976 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9977 CSR_WRITE_FLUSH(sc);
9978 delay(1000);
9979 CSR_WRITE(sc, WMREG_TXCW,
9980 sc->sc_txcw & ~TXCW_ANE);
9981 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9982 }
9983 }
9984
9985 setled:
9986 wm_tbi_serdes_set_linkled(sc);
9987 }
9988
9989 /* SERDES related */
9990 static void
9991 wm_serdes_power_up_link_82575(struct wm_softc *sc)
9992 {
9993 uint32_t reg;
9994
9995 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9996 && ((sc->sc_flags & WM_F_SGMII) == 0))
9997 return;
9998
9999 reg = CSR_READ(sc, WMREG_PCS_CFG);
10000 reg |= PCS_CFG_PCS_EN;
10001 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
10002
10003 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10004 reg &= ~CTRL_EXT_SWDPIN(3);
10005 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10006 CSR_WRITE_FLUSH(sc);
10007 }
10008
10009 static int
10010 wm_serdes_mediachange(struct ifnet *ifp)
10011 {
10012 struct wm_softc *sc = ifp->if_softc;
10013 bool pcs_autoneg = true; /* XXX */
10014 uint32_t ctrl_ext, pcs_lctl, reg;
10015
10016 /* XXX Currently, this function is not called on 8257[12] */
10017 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
10018 || (sc->sc_type >= WM_T_82575))
10019 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
10020
10021 wm_serdes_power_up_link_82575(sc);
10022
10023 sc->sc_ctrl |= CTRL_SLU;
10024
10025 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
10026 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
10027
10028 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10029 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
10030 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
10031 case CTRL_EXT_LINK_MODE_SGMII:
10032 pcs_autoneg = true;
10033 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
10034 break;
10035 case CTRL_EXT_LINK_MODE_1000KX:
10036 pcs_autoneg = false;
10037 /* FALLTHROUGH */
10038 default:
10039 if ((sc->sc_type == WM_T_82575)
10040 || (sc->sc_type == WM_T_82576)) {
10041 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
10042 pcs_autoneg = false;
10043 }
10044 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
10045 | CTRL_FRCFDX;
10046 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
10047 }
10048 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10049
10050 if (pcs_autoneg) {
10051 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
10052 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
10053
10054 reg = CSR_READ(sc, WMREG_PCS_ANADV);
10055 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
10056 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
10057 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
10058 } else
10059 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
10060
10061 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
10062
10063
10064 return 0;
10065 }
10066
10067 static void
10068 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
10069 {
10070 struct wm_softc *sc = ifp->if_softc;
10071 struct mii_data *mii = &sc->sc_mii;
10072 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
10073 uint32_t pcs_adv, pcs_lpab, reg;
10074
10075 ifmr->ifm_status = IFM_AVALID;
10076 ifmr->ifm_active = IFM_ETHER;
10077
10078 /* Check PCS */
10079 reg = CSR_READ(sc, WMREG_PCS_LSTS);
10080 if ((reg & PCS_LSTS_LINKOK) == 0) {
10081 ifmr->ifm_active |= IFM_NONE;
10082 sc->sc_tbi_linkup = 0;
10083 goto setled;
10084 }
10085
10086 sc->sc_tbi_linkup = 1;
10087 ifmr->ifm_status |= IFM_ACTIVE;
10088 if (sc->sc_type == WM_T_I354) {
10089 uint32_t status;
10090
10091 status = CSR_READ(sc, WMREG_STATUS);
10092 if (((status & STATUS_2P5_SKU) != 0)
10093 && ((status & STATUS_2P5_SKU_OVER) == 0)) {
10094 ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
10095 } else
10096 ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
10097 } else {
10098 switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
10099 case PCS_LSTS_SPEED_10:
10100 ifmr->ifm_active |= IFM_10_T; /* XXX */
10101 break;
10102 case PCS_LSTS_SPEED_100:
10103 ifmr->ifm_active |= IFM_100_FX; /* XXX */
10104 break;
10105 case PCS_LSTS_SPEED_1000:
10106 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
10107 break;
10108 default:
10109 device_printf(sc->sc_dev, "Unknown speed\n");
10110 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
10111 break;
10112 }
10113 }
10114 if ((reg & PCS_LSTS_FDX) != 0)
10115 ifmr->ifm_active |= IFM_FDX;
10116 else
10117 ifmr->ifm_active |= IFM_HDX;
10118 mii->mii_media_active &= ~IFM_ETH_FMASK;
10119 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
10120 /* Check flow */
10121 reg = CSR_READ(sc, WMREG_PCS_LSTS);
10122 if ((reg & PCS_LSTS_AN_COMP) == 0) {
10123 DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
10124 goto setled;
10125 }
10126 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
10127 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
10128 DPRINTF(WM_DEBUG_LINK,
10129 ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
10130 if ((pcs_adv & TXCW_SYM_PAUSE)
10131 && (pcs_lpab & TXCW_SYM_PAUSE)) {
10132 mii->mii_media_active |= IFM_FLOW
10133 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
10134 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
10135 && (pcs_adv & TXCW_ASYM_PAUSE)
10136 && (pcs_lpab & TXCW_SYM_PAUSE)
10137 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
10138 mii->mii_media_active |= IFM_FLOW
10139 | IFM_ETH_TXPAUSE;
10140 } else if ((pcs_adv & TXCW_SYM_PAUSE)
10141 && (pcs_adv & TXCW_ASYM_PAUSE)
10142 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
10143 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
10144 mii->mii_media_active |= IFM_FLOW
10145 | IFM_ETH_RXPAUSE;
10146 }
10147 }
10148 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
10149 | (mii->mii_media_active & IFM_ETH_FMASK);
10150 setled:
10151 wm_tbi_serdes_set_linkled(sc);
10152 }
10153
10154 /*
10155 * wm_serdes_tick:
10156 *
10157 * Check the link on serdes devices.
10158 */
10159 static void
10160 wm_serdes_tick(struct wm_softc *sc)
10161 {
10162 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10163 struct mii_data *mii = &sc->sc_mii;
10164 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
10165 uint32_t reg;
10166
10167 KASSERT(WM_CORE_LOCKED(sc));
10168
10169 mii->mii_media_status = IFM_AVALID;
10170 mii->mii_media_active = IFM_ETHER;
10171
10172 /* Check PCS */
10173 reg = CSR_READ(sc, WMREG_PCS_LSTS);
10174 if ((reg & PCS_LSTS_LINKOK) != 0) {
10175 mii->mii_media_status |= IFM_ACTIVE;
10176 sc->sc_tbi_linkup = 1;
10177 sc->sc_tbi_serdes_ticks = 0;
10178 mii->mii_media_active |= IFM_1000_SX; /* XXX */
10179 if ((reg & PCS_LSTS_FDX) != 0)
10180 mii->mii_media_active |= IFM_FDX;
10181 else
10182 mii->mii_media_active |= IFM_HDX;
10183 } else {
10184 mii->mii_media_status |= IFM_NONE;
10185 sc->sc_tbi_linkup = 0;
10186 /* If the timer expired, retry autonegotiation */
10187 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
10188 && (++sc->sc_tbi_serdes_ticks
10189 >= sc->sc_tbi_serdes_anegticks)) {
10190 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
10191 sc->sc_tbi_serdes_ticks = 0;
10192 /* XXX */
10193 wm_serdes_mediachange(ifp);
10194 }
10195 }
10196
10197 wm_tbi_serdes_set_linkled(sc);
10198 }
10199
10200 /* SFP related */
10201
10202 static int
10203 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
10204 {
10205 uint32_t i2ccmd;
10206 int i;
10207
10208 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
10209 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
10210
10211 /* Poll the ready bit */
10212 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
10213 delay(50);
10214 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
10215 if (i2ccmd & I2CCMD_READY)
10216 break;
10217 }
10218 if ((i2ccmd & I2CCMD_READY) == 0)
10219 return -1;
10220 if ((i2ccmd & I2CCMD_ERROR) != 0)
10221 return -1;
10222
10223 *data = i2ccmd & 0x00ff;
10224
10225 return 0;
10226 }
10227
10228 static uint32_t
10229 wm_sfp_get_media_type(struct wm_softc *sc)
10230 {
10231 uint32_t ctrl_ext;
10232 uint8_t val = 0;
10233 int timeout = 3;
10234 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
10235 int rv = -1;
10236
10237 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10238 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
10239 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
10240 CSR_WRITE_FLUSH(sc);
10241
10242 /* Read SFP module data */
10243 while (timeout) {
10244 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
10245 if (rv == 0)
10246 break;
10247 delay(100*1000); /* XXX too big */
10248 timeout--;
10249 }
10250 if (rv != 0)
10251 goto out;
10252 switch (val) {
10253 case SFF_SFP_ID_SFF:
10254 aprint_normal_dev(sc->sc_dev,
10255 "Module/Connector soldered to board\n");
10256 break;
10257 case SFF_SFP_ID_SFP:
10258 aprint_normal_dev(sc->sc_dev, "SFP\n");
10259 break;
10260 case SFF_SFP_ID_UNKNOWN:
10261 goto out;
10262 default:
10263 break;
10264 }
10265
10266 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
10267 if (rv != 0) {
10268 goto out;
10269 }
10270
10271 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
10272 mediatype = WM_MEDIATYPE_SERDES;
10273 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
10274 		sc->sc_flags |= WM_F_SGMII;
10275 		mediatype = WM_MEDIATYPE_COPPER;
10276 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
10277 sc->sc_flags |= WM_F_SGMII;
10278 mediatype = WM_MEDIATYPE_SERDES;
10279 }
10280
10281 out:
10282 /* Restore I2C interface setting */
10283 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
10284
10285 return mediatype;
10286 }
10287
10288 /*
10289 * NVM related.
10290 * Microwire, SPI (w/wo EERD) and Flash.
10291 */
10292
10293 /* Both spi and uwire */
10294
10295 /*
10296 * wm_eeprom_sendbits:
10297 *
10298 * Send a series of bits to the EEPROM.
10299 */
10300 static void
10301 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
10302 {
10303 uint32_t reg;
10304 int x;
10305
10306 reg = CSR_READ(sc, WMREG_EECD);
10307
10308 for (x = nbits; x > 0; x--) {
10309 if (bits & (1U << (x - 1)))
10310 reg |= EECD_DI;
10311 else
10312 reg &= ~EECD_DI;
10313 CSR_WRITE(sc, WMREG_EECD, reg);
10314 CSR_WRITE_FLUSH(sc);
10315 delay(2);
10316 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
10317 CSR_WRITE_FLUSH(sc);
10318 delay(2);
10319 CSR_WRITE(sc, WMREG_EECD, reg);
10320 CSR_WRITE_FLUSH(sc);
10321 delay(2);
10322 }
10323 }
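
/*
 * Worked example (illustration only): wm_eeprom_sendbits() clocks the
 * value out most-significant-bit first, one SK pulse per bit. Sending
 * the Microwire READ opcode (0b110, assuming the usual encoding) with
 * nbits = 3 proceeds as:
 *
 *	x = 3: bit 2 is 1 -> drive DI high, pulse SK
 *	x = 2: bit 1 is 1 -> drive DI high, pulse SK
 *	x = 1: bit 0 is 0 -> drive DI low, pulse SK
 *
 * wm_eeprom_recvbits() below mirrors this, sampling DO after each SK
 * rising edge and assembling the result MSB first.
 */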
10324
10325 /*
10326 * wm_eeprom_recvbits:
10327 *
10328 * Receive a series of bits from the EEPROM.
10329 */
10330 static void
10331 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
10332 {
10333 uint32_t reg, val;
10334 int x;
10335
10336 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
10337
10338 val = 0;
10339 for (x = nbits; x > 0; x--) {
10340 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
10341 CSR_WRITE_FLUSH(sc);
10342 delay(2);
10343 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
10344 val |= (1U << (x - 1));
10345 CSR_WRITE(sc, WMREG_EECD, reg);
10346 CSR_WRITE_FLUSH(sc);
10347 delay(2);
10348 }
10349 *valp = val;
10350 }
10351
10352 /* Microwire */
10353
10354 /*
10355 * wm_nvm_read_uwire:
10356 *
10357 * Read a word from the EEPROM using the MicroWire protocol.
10358 */
10359 static int
10360 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10361 {
10362 uint32_t reg, val;
10363 int i;
10364
10365 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10366 device_xname(sc->sc_dev), __func__));
10367
10368 for (i = 0; i < wordcnt; i++) {
10369 /* Clear SK and DI. */
10370 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
10371 CSR_WRITE(sc, WMREG_EECD, reg);
10372
10373 /*
10374 * XXX: workaround for a bug in qemu-0.12.x and prior
10375 * and Xen.
10376 *
10377 		 * We use this workaround only for the 82540 because qemu's
10378 		 * e1000 acts as an 82540.
10379 */
10380 if (sc->sc_type == WM_T_82540) {
10381 reg |= EECD_SK;
10382 CSR_WRITE(sc, WMREG_EECD, reg);
10383 reg &= ~EECD_SK;
10384 CSR_WRITE(sc, WMREG_EECD, reg);
10385 CSR_WRITE_FLUSH(sc);
10386 delay(2);
10387 }
10388 /* XXX: end of workaround */
10389
10390 /* Set CHIP SELECT. */
10391 reg |= EECD_CS;
10392 CSR_WRITE(sc, WMREG_EECD, reg);
10393 CSR_WRITE_FLUSH(sc);
10394 delay(2);
10395
10396 /* Shift in the READ command. */
10397 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
10398
10399 /* Shift in address. */
10400 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
10401
10402 /* Shift out the data. */
10403 wm_eeprom_recvbits(sc, &val, 16);
10404 data[i] = val & 0xffff;
10405
10406 /* Clear CHIP SELECT. */
10407 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
10408 CSR_WRITE(sc, WMREG_EECD, reg);
10409 CSR_WRITE_FLUSH(sc);
10410 delay(2);
10411 }
10412
10413 return 0;
10414 }
10415
10416 /* SPI */
10417
10418 /*
10419 * Set SPI and FLASH related information from the EECD register.
10420 * For 82541 and 82547, the word size is taken from EEPROM.
10421 */
10422 static int
10423 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
10424 {
10425 int size;
10426 uint32_t reg;
10427 uint16_t data;
10428
10429 reg = CSR_READ(sc, WMREG_EECD);
10430 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
10431
10432 /* Read the size of NVM from EECD by default */
10433 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
10434 switch (sc->sc_type) {
10435 case WM_T_82541:
10436 case WM_T_82541_2:
10437 case WM_T_82547:
10438 case WM_T_82547_2:
10439 /* Set dummy value to access EEPROM */
10440 sc->sc_nvm_wordsize = 64;
10441 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
10442 reg = data;
10443 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
10444 if (size == 0)
10445 size = 6; /* 64 word size */
10446 else
10447 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
10448 break;
10449 case WM_T_80003:
10450 case WM_T_82571:
10451 case WM_T_82572:
10452 case WM_T_82573: /* SPI case */
10453 case WM_T_82574: /* SPI case */
10454 case WM_T_82583: /* SPI case */
10455 size += NVM_WORD_SIZE_BASE_SHIFT;
10456 if (size > 14)
10457 size = 14;
10458 break;
10459 case WM_T_82575:
10460 case WM_T_82576:
10461 case WM_T_82580:
10462 case WM_T_I350:
10463 case WM_T_I354:
10464 case WM_T_I210:
10465 case WM_T_I211:
10466 size += NVM_WORD_SIZE_BASE_SHIFT;
10467 if (size > 15)
10468 size = 15;
10469 break;
10470 default:
10471 aprint_error_dev(sc->sc_dev,
10472 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
10473 		return -1;
10475 }
10476
10477 sc->sc_nvm_wordsize = 1 << size;
10478
10479 return 0;
10480 }
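
/*
 * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, as in other
 * e1000-derived drivers): on an 82571 whose EECD size field reads 4,
 * size becomes 4 + 6 = 10 and sc_nvm_wordsize = 1 << 10 = 1024 words,
 * i.e. a 2 KB EEPROM. The clamps above (14 and 15) cap the result at
 * 16K and 32K words respectively.
 */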
10481
10482 /*
10483 * wm_nvm_ready_spi:
10484 *
10485 * Wait for a SPI EEPROM to be ready for commands.
10486 */
10487 static int
10488 wm_nvm_ready_spi(struct wm_softc *sc)
10489 {
10490 uint32_t val;
10491 int usec;
10492
10493 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10494 device_xname(sc->sc_dev), __func__));
10495
10496 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
10497 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
10498 wm_eeprom_recvbits(sc, &val, 8);
10499 if ((val & SPI_SR_RDY) == 0)
10500 break;
10501 }
10502 if (usec >= SPI_MAX_RETRIES) {
10503 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
10504 return 1;
10505 }
10506 return 0;
10507 }
10508
10509 /*
10510 * wm_nvm_read_spi:
10511 *
10512  *	Read a word from the EEPROM using the SPI protocol.
10513 */
10514 static int
10515 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10516 {
10517 uint32_t reg, val;
10518 int i;
10519 uint8_t opc;
10520
10521 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10522 device_xname(sc->sc_dev), __func__));
10523
10524 /* Clear SK and CS. */
10525 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
10526 CSR_WRITE(sc, WMREG_EECD, reg);
10527 CSR_WRITE_FLUSH(sc);
10528 delay(2);
10529
10530 if (wm_nvm_ready_spi(sc))
10531 return 1;
10532
10533 /* Toggle CS to flush commands. */
10534 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
10535 CSR_WRITE_FLUSH(sc);
10536 delay(2);
10537 CSR_WRITE(sc, WMREG_EECD, reg);
10538 CSR_WRITE_FLUSH(sc);
10539 delay(2);
10540
10541 opc = SPI_OPC_READ;
10542 if (sc->sc_nvm_addrbits == 8 && word >= 128)
10543 opc |= SPI_OPC_A8;
10544
10545 wm_eeprom_sendbits(sc, opc, 8);
10546 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
10547
10548 for (i = 0; i < wordcnt; i++) {
10549 wm_eeprom_recvbits(sc, &val, 16);
10550 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
10551 }
10552
10553 /* Raise CS and clear SK. */
10554 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
10555 CSR_WRITE(sc, WMREG_EECD, reg);
10556 CSR_WRITE_FLUSH(sc);
10557 delay(2);
10558
10559 return 0;
10560 }
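
/*
 * Addressing example (illustration only): a part with 8 address bits
 * still holds 256 bytes = 128 words, so the byte address of word 130
 * (130 << 1 == 260 == 0x104) does not fit in 8 bits. The SPI_OPC_A8
 * opcode bit above supplies that ninth address bit, and the low 8
 * bits of (word << 1), here 0x04, carry the rest.
 */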
10561
10562 /* Reading with EERD */
10563
10564 static int
10565 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
10566 {
10567 uint32_t attempts = 100000;
10568 uint32_t i, reg = 0;
10569 int32_t done = -1;
10570
10571 for (i = 0; i < attempts; i++) {
10572 reg = CSR_READ(sc, rw);
10573
10574 if (reg & EERD_DONE) {
10575 done = 0;
10576 break;
10577 }
10578 delay(5);
10579 }
10580
10581 return done;
10582 }
10583
10584 static int
10585 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
10586 uint16_t *data)
10587 {
10588 int i, eerd = 0;
10589 int error = 0;
10590
10591 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10592 device_xname(sc->sc_dev), __func__));
10593
10594 for (i = 0; i < wordcnt; i++) {
10595 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
10596
10597 CSR_WRITE(sc, WMREG_EERD, eerd);
10598 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
10599 if (error != 0)
10600 break;
10601
10602 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
10603 }
10604
10605 return error;
10606 }
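
/*
 * Usage sketch (illustrative offsets): reading two NVM words starting
 * at word offset 0x10 through the EERD register interface:
 *
 *	uint16_t buf[2];
 *
 *	if (wm_nvm_read_eerd(sc, 0x10, 2, buf) == 0) {
 *		(buf[0] and buf[1] now hold NVM words 0x10 and 0x11)
 *	}
 *
 * Each word is a separate EERD_START transaction polled for EERD_DONE;
 * there is no burst mode, wordcnt just loops.
 */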
10607
10608 /* Flash */
10609
10610 static int
10611 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
10612 {
10613 uint32_t eecd;
10614 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
10615 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
10616 uint8_t sig_byte = 0;
10617
10618 switch (sc->sc_type) {
10619 case WM_T_PCH_SPT:
10620 /*
10621 * In SPT, read from the CTRL_EXT reg instead of accessing the
10622 * sector valid bits from the NVM.
10623 */
10624 *bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
10625 if ((*bank == 0) || (*bank == 1)) {
10626 aprint_error_dev(sc->sc_dev,
10627 "%s: no valid NVM bank present (%u)\n", __func__,
10628 *bank);
10629 return -1;
10630 } else {
10631 *bank = *bank - 2;
10632 return 0;
10633 }
10634 case WM_T_ICH8:
10635 case WM_T_ICH9:
10636 eecd = CSR_READ(sc, WMREG_EECD);
10637 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
10638 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
10639 return 0;
10640 }
10641 /* FALLTHROUGH */
10642 default:
10643 /* Default to 0 */
10644 *bank = 0;
10645
10646 /* Check bank 0 */
10647 wm_read_ich8_byte(sc, act_offset, &sig_byte);
10648 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10649 *bank = 0;
10650 return 0;
10651 }
10652
10653 /* Check bank 1 */
10654 wm_read_ich8_byte(sc, act_offset + bank1_offset,
10655 &sig_byte);
10656 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10657 *bank = 1;
10658 return 0;
10659 }
10660 }
10661
10662 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
10663 device_xname(sc->sc_dev)));
10664 return -1;
10665 }
10666
10667 /******************************************************************************
10668 * This function does initial flash setup so that a new read/write/erase cycle
10669 * can be started.
10670 *
10671 * sc - The pointer to the hw structure
10672 ****************************************************************************/
10673 static int32_t
10674 wm_ich8_cycle_init(struct wm_softc *sc)
10675 {
10676 uint16_t hsfsts;
10677 int32_t error = 1;
10678 int32_t i = 0;
10679
10680 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10681
10682 	/* Check that the Flash Descriptor Valid bit is set in HW status */
10683 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
10684 return error;
10685 }
10686
10687 	/* Clear FCERR and DAEL in HW status by writing 1s */
10689 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
10690
10691 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10692
10693 /*
10694 	 * Either we should have a hardware SPI cycle-in-progress bit to
10695 	 * check against in order to start a new cycle, or the FDONE bit
10696 	 * should read as 1 after a hardware reset, which could then be used
10697 	 * to tell whether a cycle is in progress or has completed. We should
10698 	 * also have some software semaphore mechanism to guard FDONE or the
10699 	 * cycle-in-progress bit so that accesses to those bits by two
10700 	 * threads are serialized, or some way to keep two threads from
10701 	 * starting a cycle at the same time.
10702 */
10703
10704 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10705 /*
10706 * There is no cycle running at present, so we can start a
10707 * cycle
10708 */
10709
10710 /* Begin by setting Flash Cycle Done. */
10711 hsfsts |= HSFSTS_DONE;
10712 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10713 error = 0;
10714 } else {
10715 /*
10716 		 * Otherwise poll for some time so the current cycle has a
10717 * chance to end before giving up.
10718 */
10719 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
10720 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10721 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10722 error = 0;
10723 break;
10724 }
10725 delay(1);
10726 }
10727 if (error == 0) {
10728 /*
10729 			 * The previous cycle ended within the timeout;
10730 			 * now set the Flash Cycle Done bit.
10731 */
10732 hsfsts |= HSFSTS_DONE;
10733 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10734 }
10735 }
10736 return error;
10737 }
10738
10739 /******************************************************************************
10740 * This function starts a flash cycle and waits for its completion
10741 *
10742 * sc - The pointer to the hw structure
10743 ****************************************************************************/
10744 static int32_t
10745 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
10746 {
10747 uint16_t hsflctl;
10748 uint16_t hsfsts;
10749 int32_t error = 1;
10750 uint32_t i = 0;
10751
10752 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
10753 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10754 hsflctl |= HSFCTL_GO;
10755 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10756
10757 /* Wait till FDONE bit is set to 1 */
10758 do {
10759 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10760 if (hsfsts & HSFSTS_DONE)
10761 break;
10762 delay(1);
10763 i++;
10764 } while (i < timeout);
10765 if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
10766 error = 0;
10767
10768 return error;
10769 }
10770
10771 /******************************************************************************
10772 * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
10773 *
10774 * sc - The pointer to the hw structure
10775 * index - The index of the byte or word to read.
10776 * size - Size of data to read, 1=byte 2=word, 4=dword
10777 * data - Pointer to the word to store the value read.
10778 *****************************************************************************/
10779 static int32_t
10780 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
10781 uint32_t size, uint32_t *data)
10782 {
10783 uint16_t hsfsts;
10784 uint16_t hsflctl;
10785 uint32_t flash_linear_address;
10786 uint32_t flash_data = 0;
10787 int32_t error = 1;
10788 int32_t count = 0;
10789
10790 if (size < 1 || size > 4 || data == 0x0 ||
10791 index > ICH_FLASH_LINEAR_ADDR_MASK)
10792 return error;
10793
10794 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
10795 sc->sc_ich8_flash_base;
10796
10797 do {
10798 delay(1);
10799 /* Steps */
10800 error = wm_ich8_cycle_init(sc);
10801 if (error)
10802 break;
10803
10804 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10805 	/* The byte count field holds size - 1 (0 means 1 byte). */
10806 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
10807 & HSFCTL_BCOUNT_MASK;
10808 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
10809 if (sc->sc_type == WM_T_PCH_SPT) {
10810 /*
10811 			 * In SPT, this register is in LAN memory space, not
10812 * flash. Therefore, only 32 bit access is supported.
10813 */
10814 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
10815 (uint32_t)hsflctl);
10816 } else
10817 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10818
10819 /*
10820 * Write the last 24 bits of index into Flash Linear address
10821 * field in Flash Address
10822 */
10823 /* TODO: TBD maybe check the index against the size of flash */
10824
10825 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
10826
10827 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
10828
10829 /*
10830 		 * Check whether FCERR is set; if it is, clear it and try the
10831 		 * whole sequence a few more times. Otherwise read the result
10832 		 * out of the Flash Data0 register, least significant byte
10833 		 * first.
10834 */
10835 if (error == 0) {
10836 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
10837 if (size == 1)
10838 *data = (uint8_t)(flash_data & 0x000000FF);
10839 else if (size == 2)
10840 *data = (uint16_t)(flash_data & 0x0000FFFF);
10841 else if (size == 4)
10842 *data = (uint32_t)flash_data;
10843 break;
10844 } else {
10845 /*
10846 * If we've gotten here, then things are probably
10847 * completely hosed, but if the error condition is
10848 * detected, it won't hurt to give it another try...
10849 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
10850 */
10851 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10852 if (hsfsts & HSFSTS_ERR) {
10853 /* Repeat for some time before giving up. */
10854 continue;
10855 } else if ((hsfsts & HSFSTS_DONE) == 0)
10856 break;
10857 }
10858 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
10859
10860 return error;
10861 }
10862
10863 /******************************************************************************
10864 * Reads a single byte from the NVM using the ICH8 flash access registers.
10865 *
10866 * sc - pointer to wm_hw structure
10867 * index - The index of the byte to read.
10868 * data - Pointer to a byte to store the value read.
10869 *****************************************************************************/
10870 static int32_t
10871 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
10872 {
10873 int32_t status;
10874 uint32_t word = 0;
10875
10876 status = wm_read_ich8_data(sc, index, 1, &word);
10877 if (status == 0)
10878 *data = (uint8_t)word;
10879 else
10880 *data = 0;
10881
10882 return status;
10883 }
10884
10885 /******************************************************************************
10886 * Reads a word from the NVM using the ICH8 flash access registers.
10887 *
10888 * sc - pointer to wm_hw structure
10889 * index - The starting byte index of the word to read.
10890 * data - Pointer to a word to store the value read.
10891 *****************************************************************************/
10892 static int32_t
10893 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
10894 {
10895 int32_t status;
10896 uint32_t word = 0;
10897
10898 status = wm_read_ich8_data(sc, index, 2, &word);
10899 if (status == 0)
10900 *data = (uint16_t)word;
10901 else
10902 *data = 0;
10903
10904 return status;
10905 }
10906
10907 /******************************************************************************
10908 * Reads a dword from the NVM using the ICH8 flash access registers.
10909 *
10910 * sc - pointer to wm_hw structure
10911  * index - The starting byte index of the dword to read.
10912  * data - Pointer to a dword to store the value read.
10913 *****************************************************************************/
10914 static int32_t
10915 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
10916 {
10917 int32_t status;
10918
10919 status = wm_read_ich8_data(sc, index, 4, data);
10920 return status;
10921 }
10922
10923 /******************************************************************************
10924 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
10925 * register.
10926 *
10927 * sc - Struct containing variables accessed by shared code
10928 * offset - offset of word in the EEPROM to read
10929 * data - word read from the EEPROM
10930 * words - number of words to read
10931 *****************************************************************************/
10932 static int
10933 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
10934 {
10935 int32_t error = 0;
10936 uint32_t flash_bank = 0;
10937 uint32_t act_offset = 0;
10938 uint32_t bank_offset = 0;
10939 uint16_t word = 0;
10940 uint16_t i = 0;
10941
10942 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10943 device_xname(sc->sc_dev), __func__));
10944
10945 /*
10946 * We need to know which is the valid flash bank. In the event
10947 * that we didn't allocate eeprom_shadow_ram, we may not be
10948 * managing flash_bank. So it cannot be trusted and needs
10949 * to be updated with each read.
10950 */
10951 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10952 if (error) {
10953 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10954 device_xname(sc->sc_dev)));
10955 flash_bank = 0;
10956 }
10957
10958 /*
10959 * Adjust offset appropriately if we're on bank 1 - adjust for word
10960 * size
10961 */
10962 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10963
10964 error = wm_get_swfwhw_semaphore(sc);
10965 if (error) {
10966 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10967 __func__);
10968 return error;
10969 }
10970
10971 for (i = 0; i < words; i++) {
10972 /* The NVM part needs a byte offset, hence * 2 */
10973 act_offset = bank_offset + ((offset + i) * 2);
10974 error = wm_read_ich8_word(sc, act_offset, &word);
10975 if (error) {
10976 aprint_error_dev(sc->sc_dev,
10977 "%s: failed to read NVM\n", __func__);
10978 break;
10979 }
10980 data[i] = word;
10981 }
10982
10983 wm_put_swfwhw_semaphore(sc);
10984 return error;
10985 }
10986
10987 /******************************************************************************
10988 * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
10989 * register.
10990 *
10991 * sc - Struct containing variables accessed by shared code
10992 * offset - offset of word in the EEPROM to read
10993 * data - word read from the EEPROM
10994 * words - number of words to read
10995 *****************************************************************************/
10996 static int
10997 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
10998 {
10999 int32_t error = 0;
11000 uint32_t flash_bank = 0;
11001 uint32_t act_offset = 0;
11002 uint32_t bank_offset = 0;
11003 uint32_t dword = 0;
11004 uint16_t i = 0;
11005
11006 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11007 device_xname(sc->sc_dev), __func__));
11008
11009 /*
11010 * We need to know which is the valid flash bank. In the event
11011 * that we didn't allocate eeprom_shadow_ram, we may not be
11012 * managing flash_bank. So it cannot be trusted and needs
11013 * to be updated with each read.
11014 */
11015 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
11016 if (error) {
11017 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
11018 device_xname(sc->sc_dev)));
11019 flash_bank = 0;
11020 }
11021
11022 /*
11023 * Adjust offset appropriately if we're on bank 1 - adjust for word
11024 * size
11025 */
11026 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
11027
11028 error = wm_get_swfwhw_semaphore(sc);
11029 if (error) {
11030 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
11031 __func__);
11032 return error;
11033 }
11034
11035 for (i = 0; i < words; i++) {
11036 /* The NVM part needs a byte offset, hence * 2 */
11037 act_offset = bank_offset + ((offset + i) * 2);
11038 /* but we must read dword aligned, so mask ... */
11039 error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
11040 if (error) {
11041 aprint_error_dev(sc->sc_dev,
11042 "%s: failed to read NVM\n", __func__);
11043 break;
11044 }
11045 /* ... and pick out low or high word */
11046 if ((act_offset & 0x2) == 0)
11047 data[i] = (uint16_t)(dword & 0xFFFF);
11048 else
11049 data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
11050 }
11051
11052 wm_put_swfwhw_semaphore(sc);
11053 return error;
11054 }
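
/*
 * Alignment example (illustration only): the SPT flash interface is
 * read 32 bits at a time, so for a word at byte offset 0x36 the loop
 * above reads the dword at 0x34 (0x36 & ~0x3) and, because bit 1 of
 * the offset is set, returns the high 16 bits of that dword; a word
 * at byte offset 0x34 would come from the low 16 bits instead.
 */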
11055
11056 /* iNVM */
11057
11058 static int
11059 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
11060 {
11061 int32_t rv = 0;
11062 uint32_t invm_dword;
11063 uint16_t i;
11064 uint8_t record_type, word_address;
11065
11066 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11067 device_xname(sc->sc_dev), __func__));
11068
11069 for (i = 0; i < INVM_SIZE; i++) {
11070 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
11071 /* Get record type */
11072 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
11073 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
11074 break;
11075 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
11076 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
11077 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
11078 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
11079 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
11080 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
11081 if (word_address == address) {
11082 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
11083 rv = 0;
11084 break;
11085 }
11086 }
11087 }
11088
11089 return rv;
11090 }
11091
11092 static int
11093 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
11094 {
11095 int rv = 0;
11096 int i;
11097
11098 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11099 device_xname(sc->sc_dev), __func__));
11100
11101 for (i = 0; i < words; i++) {
11102 switch (offset + i) {
11103 case NVM_OFF_MACADDR:
11104 case NVM_OFF_MACADDR1:
11105 case NVM_OFF_MACADDR2:
11106 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
11107 if (rv != 0) {
11108 data[i] = 0xffff;
11109 rv = -1;
11110 }
11111 break;
11112 case NVM_OFF_CFG2:
11113 rv = wm_nvm_read_word_invm(sc, offset, data);
11114 if (rv != 0) {
11115 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
11116 rv = 0;
11117 }
11118 break;
11119 case NVM_OFF_CFG4:
11120 rv = wm_nvm_read_word_invm(sc, offset, data);
11121 if (rv != 0) {
11122 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
11123 rv = 0;
11124 }
11125 break;
11126 case NVM_OFF_LED_1_CFG:
11127 rv = wm_nvm_read_word_invm(sc, offset, data);
11128 if (rv != 0) {
11129 *data = NVM_LED_1_CFG_DEFAULT_I211;
11130 rv = 0;
11131 }
11132 break;
11133 case NVM_OFF_LED_0_2_CFG:
11134 rv = wm_nvm_read_word_invm(sc, offset, data);
11135 if (rv != 0) {
11136 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
11137 rv = 0;
11138 }
11139 break;
11140 case NVM_OFF_ID_LED_SETTINGS:
11141 rv = wm_nvm_read_word_invm(sc, offset, data);
11142 if (rv != 0) {
11143 *data = ID_LED_RESERVED_FFFF;
11144 rv = 0;
11145 }
11146 break;
11147 default:
11148 DPRINTF(WM_DEBUG_NVM,
11149 ("NVM word 0x%02x is not mapped.\n", offset));
11150 *data = NVM_RESERVED_WORD;
11151 break;
11152 }
11153 }
11154
11155 return rv;
11156 }
11157
11158 /* Locking, NVM type detection, checksum validation, version check and read */
11159
11160 /*
11161 * wm_nvm_acquire:
11162 *
11163 * Perform the EEPROM handshake required on some chips.
11164 */
11165 static int
11166 wm_nvm_acquire(struct wm_softc *sc)
11167 {
11168 uint32_t reg;
11169 int x;
11170 int ret = 0;
11171
11172 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11173 device_xname(sc->sc_dev), __func__));
11174
11175 if (sc->sc_type >= WM_T_ICH8) {
11176 ret = wm_get_nvm_ich8lan(sc);
11177 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
11178 ret = wm_get_swfwhw_semaphore(sc);
11179 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
11180 /* This will also do wm_get_swsm_semaphore() if needed */
11181 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
11182 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
11183 ret = wm_get_swsm_semaphore(sc);
11184 }
11185
11186 if (ret) {
11187 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
11188 __func__);
11189 return 1;
11190 }
11191
11192 if (sc->sc_flags & WM_F_LOCK_EECD) {
11193 reg = CSR_READ(sc, WMREG_EECD);
11194
11195 /* Request EEPROM access. */
11196 reg |= EECD_EE_REQ;
11197 CSR_WRITE(sc, WMREG_EECD, reg);
11198
11199 /* ..and wait for it to be granted. */
11200 for (x = 0; x < 1000; x++) {
11201 reg = CSR_READ(sc, WMREG_EECD);
11202 if (reg & EECD_EE_GNT)
11203 break;
11204 delay(5);
11205 }
11206 if ((reg & EECD_EE_GNT) == 0) {
11207 aprint_error_dev(sc->sc_dev,
11208 "could not acquire EEPROM GNT\n");
11209 reg &= ~EECD_EE_REQ;
11210 CSR_WRITE(sc, WMREG_EECD, reg);
11211 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
11212 wm_put_swfwhw_semaphore(sc);
11213 if (sc->sc_flags & WM_F_LOCK_SWFW)
11214 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
11215 else if (sc->sc_flags & WM_F_LOCK_SWSM)
11216 wm_put_swsm_semaphore(sc);
11217 return 1;
11218 }
11219 }
11220
11221 return 0;
11222 }
11223
11224 /*
11225 * wm_nvm_release:
11226 *
11227 * Release the EEPROM mutex.
11228 */
11229 static void
11230 wm_nvm_release(struct wm_softc *sc)
11231 {
11232 uint32_t reg;
11233
11234 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11235 device_xname(sc->sc_dev), __func__));
11236
11237 if (sc->sc_flags & WM_F_LOCK_EECD) {
11238 reg = CSR_READ(sc, WMREG_EECD);
11239 reg &= ~EECD_EE_REQ;
11240 CSR_WRITE(sc, WMREG_EECD, reg);
11241 }
11242
11243 if (sc->sc_type >= WM_T_ICH8) {
11244 wm_put_nvm_ich8lan(sc);
11245 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
11246 wm_put_swfwhw_semaphore(sc);
11247 if (sc->sc_flags & WM_F_LOCK_SWFW)
11248 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
11249 else if (sc->sc_flags & WM_F_LOCK_SWSM)
11250 wm_put_swsm_semaphore(sc);
11251 }
11252
11253 static int
11254 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
11255 {
11256 uint32_t eecd = 0;
11257
11258 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
11259 || sc->sc_type == WM_T_82583) {
11260 eecd = CSR_READ(sc, WMREG_EECD);
11261
11262 /* Isolate bits 15 & 16 */
11263 eecd = ((eecd >> 15) & 0x03);
11264
11265 /* If both bits are set, device is Flash type */
11266 if (eecd == 0x03)
11267 return 0;
11268 }
11269 return 1;
11270 }
11271
11272 static int
11273 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
11274 {
11275 uint32_t eec;
11276
11277 eec = CSR_READ(sc, WMREG_EEC);
11278 if ((eec & EEC_FLASH_DETECTED) != 0)
11279 return 1;
11280
11281 return 0;
11282 }
11283
11284 /*
11285 * wm_nvm_validate_checksum
11286 *
11287 * The checksum is defined as the sum of the first 64 (16 bit) words.
11288 */
11289 static int
11290 wm_nvm_validate_checksum(struct wm_softc *sc)
11291 {
11292 uint16_t checksum;
11293 uint16_t eeprom_data;
11294 #ifdef WM_DEBUG
11295 uint16_t csum_wordaddr, valid_checksum;
11296 #endif
11297 int i;
11298
11299 checksum = 0;
11300
11301 /* Don't check for I211 */
11302 if (sc->sc_type == WM_T_I211)
11303 return 0;
11304
11305 #ifdef WM_DEBUG
11306 if (sc->sc_type == WM_T_PCH_LPT) {
11307 csum_wordaddr = NVM_OFF_COMPAT;
11308 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
11309 } else {
11310 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
11311 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
11312 }
11313
11314 /* Dump EEPROM image for debug */
11315 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11316 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11317 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
11318 /* XXX PCH_SPT? */
11319 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
11320 if ((eeprom_data & valid_checksum) == 0) {
11321 DPRINTF(WM_DEBUG_NVM,
11322 ("%s: NVM need to be updated (%04x != %04x)\n",
11323 device_xname(sc->sc_dev), eeprom_data,
11324 valid_checksum));
11325 }
11326 }
11327
11328 if ((wm_debug & WM_DEBUG_NVM) != 0) {
11329 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
11330 for (i = 0; i < NVM_SIZE; i++) {
11331 if (wm_nvm_read(sc, i, 1, &eeprom_data))
11332 printf("XXXX ");
11333 else
11334 printf("%04hx ", eeprom_data);
11335 if (i % 8 == 7)
11336 printf("\n");
11337 }
11338 }
11339
11340 #endif /* WM_DEBUG */
11341
11342 for (i = 0; i < NVM_SIZE; i++) {
11343 if (wm_nvm_read(sc, i, 1, &eeprom_data))
11344 return 1;
11345 checksum += eeprom_data;
11346 }
11347
11348 if (checksum != (uint16_t) NVM_CHECKSUM) {
11349 #ifdef WM_DEBUG
11350 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
11351 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
11352 #endif
11353 }
11354
11355 return 0;
11356 }
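
/*
 * Checksum example (a sketch; NVM_CHECKSUM is 0xBABA on e1000-family
 * parts): the sum of NVM words 0x00 through 0x3F, truncated to 16
 * bits, must equal NVM_CHECKSUM. The last word is chosen at
 * manufacturing time to make the sum come out right, e.g.:
 *
 *	sum(words 0x00..0x3E) == 0xA000  ->  word 0x3F == 0x1ABA
 *
 * since (0xA000 + 0x1ABA) & 0xffff == 0xBABA. Note that this driver
 * only reports a mismatch (under WM_DEBUG) instead of failing.
 */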
11357
11358 static void
11359 wm_nvm_version_invm(struct wm_softc *sc)
11360 {
11361 uint32_t dword;
11362
11363 /*
11364 	 * Linux's code to decode the version is very strange, so we don't
11365 	 * follow that algorithm and just decode word 61 as the documentation
11366 	 * describes. Perhaps it's not perfect, though...
11367 *
11368 * Example:
11369 *
11370 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
11371 */
11372 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
11373 dword = __SHIFTOUT(dword, INVM_VER_1);
11374 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
11375 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
11376 }
11377
11378 static void
11379 wm_nvm_version(struct wm_softc *sc)
11380 {
11381 uint16_t major, minor, build, patch;
11382 uint16_t uid0, uid1;
11383 uint16_t nvm_data;
11384 uint16_t off;
11385 bool check_version = false;
11386 bool check_optionrom = false;
11387 bool have_build = false;
11388
11389 /*
11390 * Version format:
11391 *
11392 * XYYZ
11393 * X0YZ
11394 * X0YY
11395 *
	 * Example:
	 *
	 *	82571	0x50a2	5.10.2?	(the spec update mentions 5.6-5.10)
	 *	82571	0x50a6	5.10.6?
	 *	82572	0x506a	5.6.10?
	 *	82572EI	0x5069	5.6.9?
	 *	82574L	0x1080	1.8.0?	(the spec update mentions 2.1.4)
	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's the default value)
	 */
11406 wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
11407 switch (sc->sc_type) {
11408 case WM_T_82571:
11409 case WM_T_82572:
11410 case WM_T_82574:
11411 case WM_T_82583:
11412 check_version = true;
11413 check_optionrom = true;
11414 have_build = true;
11415 break;
11416 case WM_T_82575:
11417 case WM_T_82576:
11418 case WM_T_82580:
11419 if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
11420 check_version = true;
11421 break;
11422 case WM_T_I211:
11423 wm_nvm_version_invm(sc);
11424 goto printver;
11425 case WM_T_I210:
11426 if (!wm_nvm_get_flash_presence_i210(sc)) {
11427 wm_nvm_version_invm(sc);
11428 goto printver;
11429 }
11430 /* FALLTHROUGH */
11431 case WM_T_I350:
11432 case WM_T_I354:
11433 check_version = true;
11434 check_optionrom = true;
11435 break;
11436 default:
11437 return;
11438 }
11439 if (check_version) {
11440 wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
11441 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
11442 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
11443 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
11444 build = nvm_data & NVM_BUILD_MASK;
11445 have_build = true;
11446 } else
11447 minor = nvm_data & 0x00ff;
11448
		/* Convert BCD-encoded minor to decimal (e.g. 0x10 -> 10) */
11450 minor = (minor / 16) * 10 + (minor % 16);
11451 sc->sc_nvm_ver_major = major;
11452 sc->sc_nvm_ver_minor = minor;
11453
11454 printver:
11455 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
11456 sc->sc_nvm_ver_minor);
11457 if (have_build) {
11458 sc->sc_nvm_ver_build = build;
11459 aprint_verbose(".%d", build);
11460 }
11461 }
11462 if (check_optionrom) {
11463 wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
11464 /* Option ROM Version */
11465 if ((off != 0x0000) && (off != 0xffff)) {
11466 off += NVM_COMBO_VER_OFF;
11467 wm_nvm_read(sc, off + 1, 1, &uid1);
11468 wm_nvm_read(sc, off, 1, &uid0);
11469 if ((uid0 != 0) && (uid0 != 0xffff)
11470 && (uid1 != 0) && (uid1 != 0xffff)) {
11471 /* 16bits */
11472 major = uid0 >> 8;
11473 build = (uid0 << 8) | (uid1 >> 8);
11474 patch = uid1 & 0x00ff;
11475 aprint_verbose(", option ROM Version %d.%d.%d",
11476 major, build, patch);
11477 }
11478 }
11479 }
11480
11481 wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
11482 aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
11483 }
11484
11485 /*
11486 * wm_nvm_read:
11487 *
11488 * Read data from the serial EEPROM.
11489 */
11490 static int
11491 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
11492 {
11493 int rv;
11494
11495 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11496 device_xname(sc->sc_dev), __func__));
11497
11498 if (sc->sc_flags & WM_F_EEPROM_INVALID)
11499 return 1;
11500
11501 if (wm_nvm_acquire(sc))
11502 return 1;
11503
11504 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11505 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11506 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
11507 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
11508 else if (sc->sc_type == WM_T_PCH_SPT)
11509 rv = wm_nvm_read_spt(sc, word, wordcnt, data);
11510 else if (sc->sc_flags & WM_F_EEPROM_INVM)
11511 rv = wm_nvm_read_invm(sc, word, wordcnt, data);
11512 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
11513 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
11514 else if (sc->sc_flags & WM_F_EEPROM_SPI)
11515 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
11516 else
11517 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
11518
11519 wm_nvm_release(sc);
11520 return rv;
11521 }
11522
/*
 * Hardware semaphores.
 * Very complex: depending on the MAC generation, access is arbitrated
 * through the SWSM SMBI/SWESMBI bits, the SW_FW_SYNC register, MDIO
 * ownership in EXTCNFCTR, or (on ICH/PCH) plain software mutexes.
 */
11527
11528 static int
11529 wm_get_null(struct wm_softc *sc)
11530 {
11531
11532 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11533 device_xname(sc->sc_dev), __func__));
11534 return 0;
11535 }
11536
11537 static void
11538 wm_put_null(struct wm_softc *sc)
11539 {
11540
11541 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11542 device_xname(sc->sc_dev), __func__));
11543 return;
11544 }
11545
11546 /*
11547 * Get hardware semaphore.
11548 * Same as e1000_get_hw_semaphore_generic()
11549 */
11550 static int
11551 wm_get_swsm_semaphore(struct wm_softc *sc)
11552 {
11553 int32_t timeout;
11554 uint32_t swsm;
11555
11556 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11557 device_xname(sc->sc_dev), __func__));
11558 KASSERT(sc->sc_nvm_wordsize > 0);
11559
11560 /* Get the SW semaphore. */
11561 timeout = sc->sc_nvm_wordsize + 1;
11562 while (timeout) {
11563 swsm = CSR_READ(sc, WMREG_SWSM);
11564
11565 if ((swsm & SWSM_SMBI) == 0)
11566 break;
11567
11568 delay(50);
11569 timeout--;
11570 }
11571
11572 if (timeout == 0) {
11573 aprint_error_dev(sc->sc_dev,
11574 "could not acquire SWSM SMBI\n");
11575 return 1;
11576 }
11577
11578 /* Get the FW semaphore. */
11579 timeout = sc->sc_nvm_wordsize + 1;
11580 while (timeout) {
11581 swsm = CSR_READ(sc, WMREG_SWSM);
11582 swsm |= SWSM_SWESMBI;
11583 CSR_WRITE(sc, WMREG_SWSM, swsm);
11584 /* If we managed to set the bit we got the semaphore. */
11585 swsm = CSR_READ(sc, WMREG_SWSM);
11586 if (swsm & SWSM_SWESMBI)
11587 break;
11588
11589 delay(50);
11590 timeout--;
11591 }
11592
11593 if (timeout == 0) {
11594 aprint_error_dev(sc->sc_dev,
11595 "could not acquire SWSM SWESMBI\n");
11596 /* Release semaphores */
11597 wm_put_swsm_semaphore(sc);
11598 return 1;
11599 }
11600 return 0;
11601 }
11602
11603 /*
11604 * Put hardware semaphore.
11605 * Same as e1000_put_hw_semaphore_generic()
11606 */
11607 static void
11608 wm_put_swsm_semaphore(struct wm_softc *sc)
11609 {
11610 uint32_t swsm;
11611
11612 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11613 device_xname(sc->sc_dev), __func__));
11614
11615 swsm = CSR_READ(sc, WMREG_SWSM);
11616 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
11617 CSR_WRITE(sc, WMREG_SWSM, swsm);
11618 }
11619
11620 /*
11621 * Get SW/FW semaphore.
11622 * Same as e1000_acquire_swfw_sync_82575().
11623 */
11624 static int
11625 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
11626 {
11627 uint32_t swfw_sync;
11628 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
11629 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
11631
11632 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11633 device_xname(sc->sc_dev), __func__));
11634 KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
11635
11636 for (timeout = 0; timeout < 200; timeout++) {
11637 if (sc->sc_flags & WM_F_LOCK_SWSM) {
11638 if (wm_get_swsm_semaphore(sc)) {
11639 aprint_error_dev(sc->sc_dev,
11640 "%s: failed to get semaphore\n",
11641 __func__);
11642 return 1;
11643 }
11644 }
11645 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
11646 if ((swfw_sync & (swmask | fwmask)) == 0) {
11647 swfw_sync |= swmask;
11648 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
11649 if (sc->sc_flags & WM_F_LOCK_SWSM)
11650 wm_put_swsm_semaphore(sc);
11651 return 0;
11652 }
11653 if (sc->sc_flags & WM_F_LOCK_SWSM)
11654 wm_put_swsm_semaphore(sc);
11655 delay(5000);
11656 }
11657 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
11658 device_xname(sc->sc_dev), mask, swfw_sync);
11659 return 1;
11660 }
11661
11662 static void
11663 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
11664 {
11665 uint32_t swfw_sync;
11666
11667 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11668 device_xname(sc->sc_dev), __func__));
11669 KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
11670
11671 if (sc->sc_flags & WM_F_LOCK_SWSM) {
11672 while (wm_get_swsm_semaphore(sc) != 0)
11673 continue;
11674 }
11675 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
11676 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
11677 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
11678 if (sc->sc_flags & WM_F_LOCK_SWSM)
11679 wm_put_swsm_semaphore(sc);
11680 }
11681
11682 static int
11683 wm_get_phy_82575(struct wm_softc *sc)
11684 {
11685
11686 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11687 device_xname(sc->sc_dev), __func__));
11688 return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
11689 }
11690
11691 static void
11692 wm_put_phy_82575(struct wm_softc *sc)
11693 {
11694
11695 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11696 device_xname(sc->sc_dev), __func__));
11697 return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
11698 }
11699
11700 static int
11701 wm_get_swfwhw_semaphore(struct wm_softc *sc)
11702 {
11703 uint32_t ext_ctrl;
	int timeout;
11705
11706 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11707 device_xname(sc->sc_dev), __func__));
11708
11709 mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
11710 for (timeout = 0; timeout < 200; timeout++) {
11711 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11712 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
11713 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11714
11715 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11716 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
11717 return 0;
11718 delay(5000);
11719 }
11720 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
11721 device_xname(sc->sc_dev), ext_ctrl);
11722 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
11723 return 1;
11724 }
11725
11726 static void
11727 wm_put_swfwhw_semaphore(struct wm_softc *sc)
11728 {
11729 uint32_t ext_ctrl;
11730
11731 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11732 device_xname(sc->sc_dev), __func__));
11733
11734 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11735 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11736 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11737
11738 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
11739 }
11740
11741 static int
11742 wm_get_swflag_ich8lan(struct wm_softc *sc)
11743 {
11744 uint32_t ext_ctrl;
11745 int timeout;
11746
11747 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11748 device_xname(sc->sc_dev), __func__));
11749 mutex_enter(sc->sc_ich_phymtx);
11750 for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
11751 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11752 if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
11753 break;
11754 delay(1000);
11755 }
11756 if (timeout >= WM_PHY_CFG_TIMEOUT) {
11757 printf("%s: SW has already locked the resource\n",
11758 device_xname(sc->sc_dev));
11759 goto out;
11760 }
11761
11762 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
11763 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11764 for (timeout = 0; timeout < 1000; timeout++) {
11765 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11766 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
11767 break;
11768 delay(1000);
11769 }
11770 if (timeout >= 1000) {
11771 printf("%s: failed to acquire semaphore\n",
11772 device_xname(sc->sc_dev));
11773 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11774 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11775 goto out;
11776 }
11777 return 0;
11778
11779 out:
11780 mutex_exit(sc->sc_ich_phymtx);
11781 return 1;
11782 }
11783
11784 static void
11785 wm_put_swflag_ich8lan(struct wm_softc *sc)
11786 {
11787 uint32_t ext_ctrl;
11788
11789 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11790 device_xname(sc->sc_dev), __func__));
11791 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11792 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
11793 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11794 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11795 } else {
11796 printf("%s: Semaphore unexpectedly released\n",
11797 device_xname(sc->sc_dev));
11798 }
11799
11800 mutex_exit(sc->sc_ich_phymtx);
11801 }
11802
11803 static int
11804 wm_get_nvm_ich8lan(struct wm_softc *sc)
11805 {
11806
11807 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11808 device_xname(sc->sc_dev), __func__));
11809 mutex_enter(sc->sc_ich_nvmmtx);
11810
11811 return 0;
11812 }
11813
11814 static void
11815 wm_put_nvm_ich8lan(struct wm_softc *sc)
11816 {
11817
11818 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11819 device_xname(sc->sc_dev), __func__));
11820 mutex_exit(sc->sc_ich_nvmmtx);
11821 }
11822
11823 static int
11824 wm_get_hw_semaphore_82573(struct wm_softc *sc)
11825 {
11826 int i = 0;
11827 uint32_t reg;
11828
11829 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11830 device_xname(sc->sc_dev), __func__));
11831
11832 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11833 do {
11834 CSR_WRITE(sc, WMREG_EXTCNFCTR,
11835 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
11836 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11837 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
11838 break;
11839 delay(2*1000);
11840 i++;
11841 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
11842
11843 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
11844 wm_put_hw_semaphore_82573(sc);
11845 log(LOG_ERR, "%s: Driver can't access the PHY\n",
11846 device_xname(sc->sc_dev));
11847 return -1;
11848 }
11849
11850 return 0;
11851 }
11852
11853 static void
11854 wm_put_hw_semaphore_82573(struct wm_softc *sc)
11855 {
11856 uint32_t reg;
11857
11858 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11859 device_xname(sc->sc_dev), __func__));
11860
11861 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11862 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11863 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11864 }
11865
11866 /*
11867 * Management mode and power management related subroutines.
11868 * BMC, AMT, suspend/resume and EEE.
11869 */
11870
11871 #ifdef WM_WOL
11872 static int
11873 wm_check_mng_mode(struct wm_softc *sc)
11874 {
11875 int rv;
11876
11877 switch (sc->sc_type) {
11878 case WM_T_ICH8:
11879 case WM_T_ICH9:
11880 case WM_T_ICH10:
11881 case WM_T_PCH:
11882 case WM_T_PCH2:
11883 case WM_T_PCH_LPT:
11884 case WM_T_PCH_SPT:
11885 rv = wm_check_mng_mode_ich8lan(sc);
11886 break;
11887 case WM_T_82574:
11888 case WM_T_82583:
11889 rv = wm_check_mng_mode_82574(sc);
11890 break;
11891 case WM_T_82571:
11892 case WM_T_82572:
11893 case WM_T_82573:
11894 case WM_T_80003:
11895 rv = wm_check_mng_mode_generic(sc);
11896 break;
11897 default:
		/* Nothing to do */
11899 rv = 0;
11900 break;
11901 }
11902
11903 return rv;
11904 }
11905
11906 static int
11907 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
11908 {
11909 uint32_t fwsm;
11910
11911 fwsm = CSR_READ(sc, WMREG_FWSM);
11912
11913 if (((fwsm & FWSM_FW_VALID) != 0)
11914 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11915 return 1;
11916
11917 return 0;
11918 }
11919
11920 static int
11921 wm_check_mng_mode_82574(struct wm_softc *sc)
11922 {
11923 uint16_t data;
11924
11925 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11926
11927 if ((data & NVM_CFG2_MNGM_MASK) != 0)
11928 return 1;
11929
11930 return 0;
11931 }
11932
11933 static int
11934 wm_check_mng_mode_generic(struct wm_softc *sc)
11935 {
11936 uint32_t fwsm;
11937
11938 fwsm = CSR_READ(sc, WMREG_FWSM);
11939
11940 if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
11941 return 1;
11942
11943 return 0;
11944 }
11945 #endif /* WM_WOL */
11946
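/*
 * Return 1 if management pass-through should be enabled.  This requires
 * ASF firmware to be present, TCO packet reception to be enabled in
 * MANC, and the firmware to be in a pass-through mode: IAMT mode, the
 * pass-through setting in the NVM CFG2 word, or SMBus enabled without
 * ASF.
 */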
11947 static int
11948 wm_enable_mng_pass_thru(struct wm_softc *sc)
11949 {
11950 uint32_t manc, fwsm, factps;
11951
11952 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
11953 return 0;
11954
11955 manc = CSR_READ(sc, WMREG_MANC);
11956
11957 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
11958 device_xname(sc->sc_dev), manc));
11959 if ((manc & MANC_RECV_TCO_EN) == 0)
11960 return 0;
11961
11962 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
11963 fwsm = CSR_READ(sc, WMREG_FWSM);
11964 factps = CSR_READ(sc, WMREG_FACTPS);
11965 if (((factps & FACTPS_MNGCG) == 0)
11966 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11967 return 1;
11968 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
11969 uint16_t data;
11970
11971 factps = CSR_READ(sc, WMREG_FACTPS);
11972 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11973 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
11974 device_xname(sc->sc_dev), factps, data));
11975 if (((factps & FACTPS_MNGCG) == 0)
11976 && ((data & NVM_CFG2_MNGM_MASK)
11977 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
11978 return 1;
11979 } else if (((manc & MANC_SMBUS_EN) != 0)
11980 && ((manc & MANC_ASF_EN) == 0))
11981 return 1;
11982
11983 return 0;
11984 }
11985
11986 static bool
11987 wm_phy_resetisblocked(struct wm_softc *sc)
11988 {
11989 bool blocked = false;
11990 uint32_t reg;
11991 int i = 0;
11992
11993 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
11994 device_xname(sc->sc_dev), __func__));
11995
11996 switch (sc->sc_type) {
11997 case WM_T_ICH8:
11998 case WM_T_ICH9:
11999 case WM_T_ICH10:
12000 case WM_T_PCH:
12001 case WM_T_PCH2:
12002 case WM_T_PCH_LPT:
12003 case WM_T_PCH_SPT:
12004 do {
12005 reg = CSR_READ(sc, WMREG_FWSM);
12006 if ((reg & FWSM_RSPCIPHY) == 0) {
12007 blocked = true;
12008 delay(10*1000);
12009 continue;
12010 }
12011 blocked = false;
12012 } while (blocked && (i++ < 30));
		return blocked;
12015 case WM_T_82571:
12016 case WM_T_82572:
12017 case WM_T_82573:
12018 case WM_T_82574:
12019 case WM_T_82583:
12020 case WM_T_80003:
12021 reg = CSR_READ(sc, WMREG_MANC);
		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
			return true;
		else
			return false;
12027 default:
		/* PHY reset is never blocked on other chip types */
12029 break;
12030 }
12031
12032 return false;
12033 }
12034
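/*
 * wm_get_hw_control:
 *
 *	Set the DRV_LOAD bit to tell the firmware that the driver is
 *	loaded and owns the hardware.
 */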
12035 static void
12036 wm_get_hw_control(struct wm_softc *sc)
12037 {
12038 uint32_t reg;
12039
12040 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12041 device_xname(sc->sc_dev), __func__));
12042
12043 if (sc->sc_type == WM_T_82573) {
12044 reg = CSR_READ(sc, WMREG_SWSM);
12045 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
12046 } else if (sc->sc_type >= WM_T_82571) {
12047 reg = CSR_READ(sc, WMREG_CTRL_EXT);
12048 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
12049 }
12050 }
12051
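/*
 * wm_release_hw_control:
 *
 *	Clear the DRV_LOAD bit so the firmware can take over the
 *	hardware again.
 */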
12052 static void
12053 wm_release_hw_control(struct wm_softc *sc)
12054 {
12055 uint32_t reg;
12056
12057 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12058 device_xname(sc->sc_dev), __func__));
12059
12060 if (sc->sc_type == WM_T_82573) {
12061 reg = CSR_READ(sc, WMREG_SWSM);
12062 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
12063 } else if (sc->sc_type >= WM_T_82571) {
12064 reg = CSR_READ(sc, WMREG_CTRL_EXT);
12065 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
12066 }
12067 }
12068
12069 static void
12070 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
12071 {
12072 uint32_t reg;
12073
12074 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12075 device_xname(sc->sc_dev), __func__));
12076
12077 if (sc->sc_type < WM_T_PCH2)
12078 return;
12079
12080 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
12081
12082 if (gate)
12083 reg |= EXTCNFCTR_GATE_PHY_CFG;
12084 else
12085 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
12086
12087 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
12088 }
12089
12090 static void
12091 wm_smbustopci(struct wm_softc *sc)
12092 {
12093 uint32_t fwsm, reg;
12094 int rv = 0;
12095
12096 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12097 device_xname(sc->sc_dev), __func__));
12098
12099 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
12100 wm_gate_hw_phy_config_ich8lan(sc, true);
12101
12102 /* Disable ULP */
12103 wm_ulp_disable(sc);
12104
12105 /* Acquire PHY semaphore */
12106 sc->phy.acquire(sc);
12107
12108 fwsm = CSR_READ(sc, WMREG_FWSM);
12109 switch (sc->sc_type) {
12110 case WM_T_PCH_LPT:
12111 case WM_T_PCH_SPT:
12112 if (wm_phy_is_accessible_pchlan(sc))
12113 break;
12114
12115 reg = CSR_READ(sc, WMREG_CTRL_EXT);
12116 reg |= CTRL_EXT_FORCE_SMBUS;
12117 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12118 #if 0
12119 /* XXX Isn't this required??? */
12120 CSR_WRITE_FLUSH(sc);
12121 #endif
12122 delay(50 * 1000);
12123 /* FALLTHROUGH */
12124 case WM_T_PCH2:
12125 if (wm_phy_is_accessible_pchlan(sc) == true)
12126 break;
12127 /* FALLTHROUGH */
12128 case WM_T_PCH:
12129 if (sc->sc_type == WM_T_PCH)
12130 if ((fwsm & FWSM_FW_VALID) != 0)
12131 break;
12132
12133 if (wm_phy_resetisblocked(sc) == true) {
12134 printf("XXX reset is blocked(3)\n");
12135 break;
12136 }
12137
12138 wm_toggle_lanphypc_pch_lpt(sc);
12139
12140 if (sc->sc_type >= WM_T_PCH_LPT) {
12141 if (wm_phy_is_accessible_pchlan(sc) == true)
12142 break;
12143
12144 reg = CSR_READ(sc, WMREG_CTRL_EXT);
12145 reg &= ~CTRL_EXT_FORCE_SMBUS;
12146 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12147
12148 if (wm_phy_is_accessible_pchlan(sc) == true)
12149 break;
12150 rv = -1;
12151 }
12152 break;
12153 default:
12154 break;
12155 }
12156
12157 /* Release semaphore */
12158 sc->phy.release(sc);
12159
12160 if (rv == 0) {
12161 if (wm_phy_resetisblocked(sc)) {
12162 printf("XXX reset is blocked(4)\n");
12163 goto out;
12164 }
12165 wm_reset_phy(sc);
12166 if (wm_phy_resetisblocked(sc))
12167 printf("XXX reset is blocked(4)\n");
12168 }
12169
12170 out:
12171 /*
12172 * Ungate automatic PHY configuration by hardware on non-managed 82579
12173 */
12174 if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
12175 delay(10*1000);
12176 wm_gate_hw_phy_config_ich8lan(sc, false);
12177 }
12178 }
12179
12180 static void
12181 wm_init_manageability(struct wm_softc *sc)
12182 {
12183
12184 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12185 device_xname(sc->sc_dev), __func__));
12186 if (sc->sc_flags & WM_F_HAS_MANAGE) {
12187 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
12188 uint32_t manc = CSR_READ(sc, WMREG_MANC);
12189
12190 /* Disable hardware interception of ARP */
12191 manc &= ~MANC_ARP_EN;
12192
12193 /* Enable receiving management packets to the host */
12194 if (sc->sc_type >= WM_T_82571) {
12195 manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
12197 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
12198 }
12199
12200 CSR_WRITE(sc, WMREG_MANC, manc);
12201 }
12202 }
12203
12204 static void
12205 wm_release_manageability(struct wm_softc *sc)
12206 {
12207
12208 if (sc->sc_flags & WM_F_HAS_MANAGE) {
12209 uint32_t manc = CSR_READ(sc, WMREG_MANC);
12210
12211 manc |= MANC_ARP_EN;
12212 if (sc->sc_type >= WM_T_82571)
12213 manc &= ~MANC_EN_MNG2HOST;
12214
12215 CSR_WRITE(sc, WMREG_MANC, manc);
12216 }
12217 }
12218
12219 static void
12220 wm_get_wakeup(struct wm_softc *sc)
12221 {
12222
12223 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
12224 switch (sc->sc_type) {
12225 case WM_T_82573:
12226 case WM_T_82583:
12227 sc->sc_flags |= WM_F_HAS_AMT;
12228 /* FALLTHROUGH */
12229 case WM_T_80003:
12230 case WM_T_82575:
12231 case WM_T_82576:
12232 case WM_T_82580:
12233 case WM_T_I350:
12234 case WM_T_I354:
12235 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
12236 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
12237 /* FALLTHROUGH */
12238 case WM_T_82541:
12239 case WM_T_82541_2:
12240 case WM_T_82547:
12241 case WM_T_82547_2:
12242 case WM_T_82571:
12243 case WM_T_82572:
12244 case WM_T_82574:
12245 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
12246 break;
12247 case WM_T_ICH8:
12248 case WM_T_ICH9:
12249 case WM_T_ICH10:
12250 case WM_T_PCH:
12251 case WM_T_PCH2:
12252 case WM_T_PCH_LPT:
12253 case WM_T_PCH_SPT:
12254 sc->sc_flags |= WM_F_HAS_AMT;
12255 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
12256 break;
12257 default:
12258 break;
12259 }
12260
12261 /* 1: HAS_MANAGE */
12262 if (wm_enable_mng_pass_thru(sc) != 0)
12263 sc->sc_flags |= WM_F_HAS_MANAGE;
12264
12265 #ifdef WM_DEBUG
12266 printf("\n");
12267 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
12268 printf("HAS_AMT,");
12269 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
12270 printf("ARC_SUBSYS_VALID,");
12271 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
12272 printf("ASF_FIRMWARE_PRES,");
12273 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
12274 printf("HAS_MANAGE,");
12275 printf("\n");
12276 #endif
	/*
	 * Note that the WOL flag is set after the EEPROM has been
	 * reset.
	 */
12281 }
12282
12283 /*
12284 * Unconfigure Ultra Low Power mode.
12285 * Only for I217 and newer (see below).
12286 */
12287 static void
12288 wm_ulp_disable(struct wm_softc *sc)
12289 {
12290 uint32_t reg;
12291 int i = 0;
12292
12293 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12294 device_xname(sc->sc_dev), __func__));
	/* Exclude devices that do not support ULP */
12296 if ((sc->sc_type < WM_T_PCH_LPT)
12297 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
12298 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
12299 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
12300 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
12301 return;
12302
12303 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
12304 /* Request ME un-configure ULP mode in the PHY */
12305 reg = CSR_READ(sc, WMREG_H2ME);
12306 reg &= ~H2ME_ULP;
12307 reg |= H2ME_ENFORCE_SETTINGS;
12308 CSR_WRITE(sc, WMREG_H2ME, reg);
12309
12310 /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
12311 while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
12312 if (i++ == 30) {
12313 printf("%s timed out\n", __func__);
12314 return;
12315 }
12316 delay(10 * 1000);
12317 }
12318 reg = CSR_READ(sc, WMREG_H2ME);
12319 reg &= ~H2ME_ENFORCE_SETTINGS;
12320 CSR_WRITE(sc, WMREG_H2ME, reg);
12321
12322 return;
12323 }
12324
12325 /* Acquire semaphore */
12326 sc->phy.acquire(sc);
12327
12328 /* Toggle LANPHYPC */
12329 wm_toggle_lanphypc_pch_lpt(sc);
12330
12331 /* Unforce SMBus mode in PHY */
12332 reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
12333 if (reg == 0x0000 || reg == 0xffff) {
12334 uint32_t reg2;
12335
12336 printf("%s: Force SMBus first.\n", __func__);
12337 reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
12338 reg2 |= CTRL_EXT_FORCE_SMBUS;
12339 CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
12340 delay(50 * 1000);
12341
12342 reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
12343 }
12344 reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
12345 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
12346
12347 /* Unforce SMBus mode in MAC */
12348 reg = CSR_READ(sc, WMREG_CTRL_EXT);
12349 reg &= ~CTRL_EXT_FORCE_SMBUS;
12350 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12351
12352 reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
12353 reg |= HV_PM_CTRL_K1_ENA;
12354 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
12355
12356 reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
12357 reg &= ~(I218_ULP_CONFIG1_IND
12358 | I218_ULP_CONFIG1_STICKY_ULP
12359 | I218_ULP_CONFIG1_RESET_TO_SMBUS
12360 | I218_ULP_CONFIG1_WOL_HOST
12361 | I218_ULP_CONFIG1_INBAND_EXIT
12362 | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
12363 | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
12364 | I218_ULP_CONFIG1_DIS_SMB_PERST);
12365 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
12366 reg |= I218_ULP_CONFIG1_START;
12367 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
12368
12369 reg = CSR_READ(sc, WMREG_FEXTNVM7);
12370 reg &= ~FEXTNVM7_DIS_SMB_PERST;
12371 CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
12372
12373 /* Release semaphore */
12374 sc->phy.release(sc);
12375 wm_gmii_reset(sc);
12376 delay(50 * 1000);
12377 }
12378
12379 /* WOL in the newer chipset interfaces (pchlan) */
12380 static void
12381 wm_enable_phy_wakeup(struct wm_softc *sc)
12382 {
12383 #if 0
12384 uint16_t preg;
12385
12386 /* Copy MAC RARs to PHY RARs */
12387
12388 /* Copy MAC MTA to PHY MTA */
12389
12390 /* Configure PHY Rx Control register */
12391
12392 /* Enable PHY wakeup in MAC register */
12393
12394 /* Configure and enable PHY wakeup in PHY registers */
12395
12396 /* Activate PHY wakeup */
12397
12398 /* XXX */
12399 #endif
12400 }
12401
12402 /* Power down workaround on D3 */
12403 static void
12404 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
12405 {
12406 uint32_t reg;
12407 int i;
12408
12409 for (i = 0; i < 2; i++) {
12410 /* Disable link */
12411 reg = CSR_READ(sc, WMREG_PHY_CTRL);
12412 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
12413 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
12414
12415 /*
12416 * Call gig speed drop workaround on Gig disable before
12417 * accessing any PHY registers
12418 */
12419 if (sc->sc_type == WM_T_ICH8)
12420 wm_gig_downshift_workaround_ich8lan(sc);
12421
12422 /* Write VR power-down enable */
12423 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
12424 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
12425 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
12426 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
12427
12428 /* Read it back and test */
12429 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
12430 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
12431 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
12432 break;
12433
12434 /* Issue PHY reset and repeat at most one more time */
12435 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
12436 }
12437 }
12438
12439 static void
12440 wm_enable_wakeup(struct wm_softc *sc)
12441 {
12442 uint32_t reg, pmreg;
12443 pcireg_t pmode;
12444
12445 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12446 device_xname(sc->sc_dev), __func__));
12447
12448 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
12449 &pmreg, NULL) == 0)
12450 return;
12451
12452 /* Advertise the wakeup capability */
12453 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
12454 | CTRL_SWDPIN(3));
12455 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
12456
12457 /* ICH workaround */
12458 switch (sc->sc_type) {
12459 case WM_T_ICH8:
12460 case WM_T_ICH9:
12461 case WM_T_ICH10:
12462 case WM_T_PCH:
12463 case WM_T_PCH2:
12464 case WM_T_PCH_LPT:
12465 case WM_T_PCH_SPT:
12466 /* Disable gig during WOL */
12467 reg = CSR_READ(sc, WMREG_PHY_CTRL);
12468 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
12469 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
12470 if (sc->sc_type == WM_T_PCH)
12471 wm_gmii_reset(sc);
12472
12473 /* Power down workaround */
12474 if (sc->sc_phytype == WMPHY_82577) {
12475 struct mii_softc *child;
12476
12477 /* Assume that the PHY is copper */
12478 child = LIST_FIRST(&sc->sc_mii.mii_phys);
12479 if (child->mii_mpd_rev <= 2)
12480 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
12481 (768 << 5) | 25, 0x0444); /* magic num */
12482 }
12483 break;
12484 default:
12485 break;
12486 }
12487
12488 /* Keep the laser running on fiber adapters */
12489 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
12490 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
12491 reg = CSR_READ(sc, WMREG_CTRL_EXT);
12492 reg |= CTRL_EXT_SWDPIN(3);
12493 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12494 }
12495
12496 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
12497 #if 0 /* for the multicast packet */
12498 reg |= WUFC_MC;
12499 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
12500 #endif
12501
12502 if (sc->sc_type >= WM_T_PCH)
12503 wm_enable_phy_wakeup(sc);
12504 else {
12505 CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
12506 CSR_WRITE(sc, WMREG_WUFC, reg);
12507 }
12508
12509 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
12510 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
12511 || (sc->sc_type == WM_T_PCH2))
12512 && (sc->sc_phytype == WMPHY_IGP_3))
12513 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
12514
12515 /* Request PME */
12516 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
12517 #if 0
12518 /* Disable WOL */
12519 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
12520 #else
12521 /* For WOL */
12522 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
12523 #endif
12524 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
12525 }
12526
/* LPLU (Low Power Link Up) */
12528
12529 static void
12530 wm_lplu_d0_disable(struct wm_softc *sc)
12531 {
12532 uint32_t reg;
12533
12534 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12535 device_xname(sc->sc_dev), __func__));
12536
12537 reg = CSR_READ(sc, WMREG_PHY_CTRL);
12538 reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
12539 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
12540 }
12541
12542 static void
12543 wm_lplu_d0_disable_pch(struct wm_softc *sc)
12544 {
12545 uint32_t reg;
12546
12547 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12548 device_xname(sc->sc_dev), __func__));
12549
12550 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
12551 reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
12552 reg |= HV_OEM_BITS_ANEGNOW;
12553 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
12554 }
12555
12556 /* EEE */
12557
12558 static void
12559 wm_set_eee_i350(struct wm_softc *sc)
12560 {
12561 uint32_t ipcnfg, eeer;
12562
12563 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
12564 eeer = CSR_READ(sc, WMREG_EEER);
12565
12566 if ((sc->sc_flags & WM_F_EEE) != 0) {
12567 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
12568 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
12569 | EEER_LPI_FC);
12570 } else {
12571 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
12572 ipcnfg &= ~IPCNFG_10BASE_TE;
12573 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
12574 | EEER_LPI_FC);
12575 }
12576
12577 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
12578 CSR_WRITE(sc, WMREG_EEER, eeer);
12579 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
12580 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
12581 }
12582
12583 /*
12584 * Workarounds (mainly PHY related).
12585 * Basically, PHY's workarounds are in the PHY drivers.
12586 */
12587
12588 /* Work-around for 82566 Kumeran PCS lock loss */
12589 static void
12590 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
12591 {
12592 #if 0
12593 int miistatus, active, i;
12594 int reg;
12595
12596 miistatus = sc->sc_mii.mii_media_status;
12597
12598 /* If the link is not up, do nothing */
12599 if ((miistatus & IFM_ACTIVE) == 0)
12600 return;
12601
12602 active = sc->sc_mii.mii_media_active;
12603
	/* Nothing to do if the link speed is not 1Gbps */
12605 if (IFM_SUBTYPE(active) != IFM_1000_T)
12606 return;
12607
12608 for (i = 0; i < 10; i++) {
12609 /* read twice */
12610 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
12611 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
12612 if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
12613 goto out; /* GOOD! */
12614
12615 /* Reset the PHY */
12616 wm_gmii_reset(sc);
12617 delay(5*1000);
12618 }
12619
12620 /* Disable GigE link negotiation */
12621 reg = CSR_READ(sc, WMREG_PHY_CTRL);
12622 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
12623 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
12624
12625 /*
12626 * Call gig speed drop workaround on Gig disable before accessing
12627 * any PHY registers.
12628 */
12629 wm_gig_downshift_workaround_ich8lan(sc);
12630
12631 out:
12632 return;
12633 #endif
12634 }
12635
12636 /* WOL from S5 stops working */
12637 static void
12638 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
12639 {
12640 uint16_t kmrn_reg;
12641
12642 /* Only for igp3 */
12643 if (sc->sc_phytype == WMPHY_IGP_3) {
12644 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
12645 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
12646 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
12647 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
12648 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
12649 }
12650 }
12651
12652 /*
12653 * Workaround for pch's PHYs
12654 * XXX should be moved to new PHY driver?
12655 */
12656 static void
12657 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
12658 {
12659
12660 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12661 device_xname(sc->sc_dev), __func__));
12662 KASSERT(sc->sc_type == WM_T_PCH);
12663
12664 if (sc->sc_phytype == WMPHY_82577)
12665 wm_set_mdio_slow_mode_hv(sc);
12666
12667 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
12668
12669 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
12670
12671 /* 82578 */
12672 if (sc->sc_phytype == WMPHY_82578) {
12673 struct mii_softc *child;
12674
12675 /*
12676 * Return registers to default by doing a soft reset then
12677 * writing 0x3140 to the control register
12678 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
12679 */
12680 child = LIST_FIRST(&sc->sc_mii.mii_phys);
12681 if ((child != NULL) && (child->mii_mpd_rev < 2)) {
12682 PHY_RESET(child);
12683 sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
12684 0x3140);
12685 }
12686 }
12687
12688 /* Select page 0 */
12689 sc->phy.acquire(sc);
12690 wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
12691 sc->phy.release(sc);
12692
12693 /*
12694 * Configure the K1 Si workaround during phy reset assuming there is
12695 * link so that it disables K1 if link is in 1Gbps.
12696 */
12697 wm_k1_gig_workaround_hv(sc, 1);
12698 }
12699
12700 static void
12701 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
12702 {
12703
12704 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12705 device_xname(sc->sc_dev), __func__));
12706 KASSERT(sc->sc_type == WM_T_PCH2);
12707
12708 wm_set_mdio_slow_mode_hv(sc);
12709 }
12710
12711 static int
12712 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
12713 {
12714 int k1_enable = sc->sc_nvm_k1_enabled;
12715
12716 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12717 device_xname(sc->sc_dev), __func__));
12718
12719 if (sc->phy.acquire(sc) != 0)
12720 return -1;
12721
12722 if (link) {
12723 k1_enable = 0;
12724
12725 /* Link stall fix for link up */
12726 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
12727 } else {
12728 /* Link stall fix for link down */
12729 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
12730 }
12731
12732 wm_configure_k1_ich8lan(sc, k1_enable);
12733 sc->phy.release(sc);
12734
12735 return 0;
12736 }
12737
12738 static void
12739 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
12740 {
12741 uint32_t reg;
12742
12743 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
12744 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
12745 reg | HV_KMRN_MDIO_SLOW);
12746 }
12747
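/*
 * Enable or disable the K1 power state on the Kumeran interface.  After
 * the K1 configuration is written, the MAC speed is briefly forced and
 * then restored so that the new setting takes effect.
 */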
12748 static void
12749 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
12750 {
12751 uint32_t ctrl, ctrl_ext, tmp;
12752 uint16_t kmrn_reg;
12753
12754 kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
12755
12756 if (k1_enable)
12757 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
12758 else
12759 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
12760
12761 wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
12762
12763 delay(20);
12764
12765 ctrl = CSR_READ(sc, WMREG_CTRL);
12766 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
12767
12768 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
12769 tmp |= CTRL_FRCSPD;
12770
12771 CSR_WRITE(sc, WMREG_CTRL, tmp);
12772 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
12773 CSR_WRITE_FLUSH(sc);
12774 delay(20);
12775
12776 CSR_WRITE(sc, WMREG_CTRL, ctrl);
12777 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
12778 CSR_WRITE_FLUSH(sc);
12779 delay(20);
12780 }
12781
/* Special case: the 82575 needs manual init ... */
12783 static void
12784 wm_reset_init_script_82575(struct wm_softc *sc)
12785 {
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */
12790
12791 /* SerDes configuration via SERDESCTRL */
12792 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
12793 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
12794 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
12795 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
12796
12797 /* CCM configuration via CCMCTL register */
12798 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
12799 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
12800
12801 /* PCIe lanes configuration */
12802 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
12803 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
12804 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
12805 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
12806
12807 /* PCIe PLL Configuration */
12808 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
12809 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
12810 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
12811 }
12812
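/*
 * On SGMII-equipped 82580 parts, restore the MDICNFG destination and
 * shared-MDIO bits from the per-function CFG3 word in the NVM.
 */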
12813 static void
12814 wm_reset_mdicnfg_82580(struct wm_softc *sc)
12815 {
12816 uint32_t reg;
12817 uint16_t nvmword;
12818 int rv;
12819
12820 if ((sc->sc_flags & WM_F_SGMII) == 0)
12821 return;
12822
12823 rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
12824 + NVM_OFF_CFG3_PORTA, 1, &nvmword);
12825 if (rv != 0) {
12826 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
12827 __func__);
12828 return;
12829 }
12830
12831 reg = CSR_READ(sc, WMREG_MDICNFG);
12832 if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
12833 reg |= MDICNFG_DEST;
12834 if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
12835 reg |= MDICNFG_COM_MDIO;
12836 CSR_WRITE(sc, WMREG_MDICNFG, reg);
12837 }
12838
12839 #define MII_INVALIDID(x) (((x) == 0x0000) || ((x) == 0xffff))
12840
12841 static bool
12842 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
12843 {
12844 int i;
12845 uint32_t reg;
12846 uint16_t id1, id2;
12847
12848 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12849 device_xname(sc->sc_dev), __func__));
12850 id1 = id2 = 0xffff;
12851 for (i = 0; i < 2; i++) {
12852 id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
12853 if (MII_INVALIDID(id1))
12854 continue;
12855 id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
12856 if (MII_INVALIDID(id2))
12857 continue;
12858 break;
12859 }
	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;
12863
12864 if (sc->sc_type < WM_T_PCH_LPT) {
12865 sc->phy.release(sc);
12866 wm_set_mdio_slow_mode_hv(sc);
12867 id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
12868 id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
12869 sc->phy.acquire(sc);
12870 }
12871 if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
12872 printf("XXX return with false\n");
12873 return false;
12874 }
12875 out:
12876 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
12877 /* Only unforce SMBus if ME is not active */
12878 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
12879 /* Unforce SMBus mode in PHY */
12880 reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
12881 CV_SMB_CTRL);
12882 reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
12883 wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
12884 CV_SMB_CTRL, reg);
12885
12886 /* Unforce SMBus mode in MAC */
12887 reg = CSR_READ(sc, WMREG_CTRL_EXT);
12888 reg &= ~CTRL_EXT_FORCE_SMBUS;
12889 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12890 }
12891 }
12892 return true;
12893 }
12894
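/*
 * Toggle the LANPHYPC pin value to power-cycle the PHY: program a
 * 50msec PHY configuration counter, pulse LANPHYPC low through the
 * override bits, then wait for the PHY to come back up.
 */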
12895 static void
12896 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
12897 {
12898 uint32_t reg;
12899 int i;
12900
12901 /* Set PHY Config Counter to 50msec */
12902 reg = CSR_READ(sc, WMREG_FEXTNVM3);
12903 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
12904 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
12905 CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
12906
12907 /* Toggle LANPHYPC */
12908 reg = CSR_READ(sc, WMREG_CTRL);
12909 reg |= CTRL_LANPHYPC_OVERRIDE;
12910 reg &= ~CTRL_LANPHYPC_VALUE;
12911 CSR_WRITE(sc, WMREG_CTRL, reg);
12912 CSR_WRITE_FLUSH(sc);
12913 delay(1000);
12914 reg &= ~CTRL_LANPHYPC_OVERRIDE;
12915 CSR_WRITE(sc, WMREG_CTRL, reg);
12916 CSR_WRITE_FLUSH(sc);
12917
12918 if (sc->sc_type < WM_T_PCH_LPT)
12919 delay(50 * 1000);
12920 else {
12921 i = 20;
12922
12923 do {
12924 delay(5 * 1000);
12925 } while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
12926 && i--);
12927
12928 delay(30 * 1000);
12929 }
12930 }
12931
12932 static int
12933 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
12934 {
12935 uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
12936 | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
12937 uint32_t rxa;
12938 uint16_t scale = 0, lat_enc = 0;
12939 int64_t lat_ns, value;
12940
12941 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12942 device_xname(sc->sc_dev), __func__));
12943
12944 if (link) {
12945 pcireg_t preg;
12946 uint16_t max_snoop, max_nosnoop, max_ltr_enc;
12947
12948 rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
12949
12950 /*
12951 * Determine the maximum latency tolerated by the device.
12952 *
12953 * Per the PCIe spec, the tolerated latencies are encoded as
12954 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
12955 * a 10-bit value (0-1023) to provide a range from 1 ns to
12956 * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
12957 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
12958 */
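		/*
		 * Worked example of the encoding above: a scale of 2
		 * selects 2^10 ns units, so an encoded value of 513
		 * represents 513 * 1024 ns ~= 525 usec of tolerated
		 * latency.
		 */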
12959 lat_ns = ((int64_t)rxa * 1024 -
12960 (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
12961 if (lat_ns < 0)
12962 lat_ns = 0;
12963 else {
12964 uint32_t status;
12965 uint16_t speed;
12966
12967 status = CSR_READ(sc, WMREG_STATUS);
12968 switch (__SHIFTOUT(status, STATUS_SPEED)) {
12969 case STATUS_SPEED_10:
12970 speed = 10;
12971 break;
12972 case STATUS_SPEED_100:
12973 speed = 100;
12974 break;
12975 case STATUS_SPEED_1000:
12976 speed = 1000;
12977 break;
12978 default:
12979 printf("%s: Unknown speed (status = %08x)\n",
12980 device_xname(sc->sc_dev), status);
12981 return -1;
12982 }
12983 lat_ns /= speed;
12984 }
12985 value = lat_ns;
12986
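		/*
		 * Each scale step multiplies the unit by 2^5, so keep
		 * dividing the value by 32 (rounding up) until it fits
		 * into the 10-bit LTRV_VALUE field.
		 */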
12987 while (value > LTRV_VALUE) {
			scale++;
12989 value = howmany(value, __BIT(5));
12990 }
12991 if (scale > LTRV_SCALE_MAX) {
12992 printf("%s: Invalid LTR latency scale %d\n",
12993 device_xname(sc->sc_dev), scale);
12994 return -1;
12995 }
12996 lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
12997
12998 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
12999 WM_PCI_LTR_CAP_LPT);
13000 max_snoop = preg & 0xffff;
13001 max_nosnoop = preg >> 16;
13002
13003 max_ltr_enc = MAX(max_snoop, max_nosnoop);
13004
		if (lat_enc > max_ltr_enc)
			lat_enc = max_ltr_enc;
13008 }
	/* Use the same latency value for both Snoop and No-Snoop */
13010 reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
13011 CSR_WRITE(sc, WMREG_LTRV, reg);
13012
13013 return 0;
13014 }
13015
13016 /*
13017 * I210 Errata 25 and I211 Errata 10
13018 * Slow System Clock.
13019 */
13020 static void
13021 wm_pll_workaround_i210(struct wm_softc *sc)
13022 {
13023 uint32_t mdicnfg, wuc;
13024 uint32_t reg;
13025 pcireg_t pcireg;
13026 uint32_t pmreg;
13027 uint16_t nvmword, tmp_nvmword;
13028 int phyval;
13029 bool wa_done = false;
13030 int i;
13031
13032 /* Save WUC and MDICNFG registers */
13033 wuc = CSR_READ(sc, WMREG_WUC);
13034 mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
13035
13036 reg = mdicnfg & ~MDICNFG_DEST;
13037 CSR_WRITE(sc, WMREG_MDICNFG, reg);
13038
13039 if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
13040 nvmword = INVM_DEFAULT_AL;
13041 tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
13042
13043 /* Get Power Management cap offset */
13044 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
13045 &pmreg, NULL) == 0)
13046 return;
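	/*
	 * Retry until the PHY's PLL frequency register no longer reads
	 * back as unconfigured, at most WM_MAX_PLL_TRIES times.
	 */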
13047 for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
13048 phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
13049 GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
13050
		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF)
			break; /* OK */
13054
13055 wa_done = true;
13056 /* Directly reset the internal PHY */
13057 reg = CSR_READ(sc, WMREG_CTRL);
13058 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
13059
13060 reg = CSR_READ(sc, WMREG_CTRL_EXT);
13061 reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
13062 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13063
13064 CSR_WRITE(sc, WMREG_WUC, 0);
13065 reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
13066 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
13067
13068 pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
13069 pmreg + PCI_PMCSR);
13070 pcireg |= PCI_PMCSR_STATE_D3;
13071 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
13072 pmreg + PCI_PMCSR, pcireg);
13073 delay(1000);
13074 pcireg &= ~PCI_PMCSR_STATE_D3;
13075 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
13076 pmreg + PCI_PMCSR, pcireg);
13077
13078 reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
13079 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
13080
13081 /* Restore WUC register */
13082 CSR_WRITE(sc, WMREG_WUC, wuc);
13083 }
13084
13085 /* Restore MDICNFG setting */
13086 CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
13087 if (wa_done)
13088 aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
13089 }
13090