/* $NetBSD: if_wm.c,v 1.412 2016/06/10 13:27:14 ozaki-r Exp $ */

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Advanced Receive Descriptor
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 *	- restructure evcnt
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.412 2016/06/10 13:27:14 ozaki-r Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK	0x01
#define	WM_DEBUG_TX	0x02
#define	WM_DEBUG_RX	0x04
#define	WM_DEBUG_GMII	0x08
#define	WM_DEBUG_MANAGE	0x10
#define	WM_DEBUG_NVM	0x20
#define	WM_DEBUG_INIT	0x40
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define	WM_MPSAFE	1
#endif

/*
 * The maximum number of interrupts this device driver supports.
 */
#define	WM_MAX_NQUEUEINTR	16
#define	WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
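/*
 * Illustrative note (added to this edit, not in the original driver):
 * because txq_num and txq_ndesc must be powers of two, the
 * "& (size - 1)" masks above are cheap modular arithmetic.  For example,
 * with WM_NTXDESC(txq) == 256:
 *
 *	WM_NEXTTX(txq, 254) == 255
 *	WM_NEXTTX(txq, 255) == (256 & 0xff) == 0	(wraps to the start)
 */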

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
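/*
 * Illustrative note (added to this edit, not in the original driver):
 * WM_PREVRX relies on the same power-of-two masking, so stepping back
 * from slot 0 wraps to the end of the ring:
 *
 *	WM_PREVRX(0) == (-1 & WM_NRXDESC_MASK) == 255
 */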

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * x)

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	/* XXX which event counter is required? */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	wiseman_rxdesc_t *rxq_descs;

	/* RX control data structures. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
	size_t rxq_desc_size;		/* control data size */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	/* XXX which event counter is required? */
};

struct wm_queue {
	int wmq_id;		/* index of transmit and receive queues */
	int wmq_intr_idx;	/* index of MSI-X tables */

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN	0x00
#define	WM_MEDIATYPE_FIBER	0x01
#define	WM_MEDIATYPE_COPPER	0x02
#define	WM_MEDIATYPE_SERDES	0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_core_lock;		/* lock for softc operations */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */
};

#define	WM_TX_LOCK(_txq)	if ((_txq)->txq_lock) mutex_enter((_txq)->txq_lock)
#define	WM_TX_TRYLOCK(_txq)	((_txq)->txq_lock == NULL || mutex_tryenter((_txq)->txq_lock))
#define	WM_TX_UNLOCK(_txq)	if ((_txq)->txq_lock) mutex_exit((_txq)->txq_lock)
#define	WM_TX_LOCKED(_txq)	(!(_txq)->txq_lock || mutex_owned((_txq)->txq_lock))
#define	WM_RX_LOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_enter((_rxq)->rxq_lock)
#define	WM_RX_UNLOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_exit((_rxq)->rxq_lock)
#define	WM_RX_LOCKED(_rxq)	(!(_rxq)->rxq_lock || mutex_owned((_rxq)->rxq_lock))
#define	WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define	WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define	WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
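/*
 * Illustrative usage sketch (added to this edit, not in the original
 * driver): the lock macros degrade to no-ops when the lock pointer is
 * NULL (the non-MPSAFE case), so a hypothetical caller brackets its
 * critical section unconditionally:
 *
 *	WM_TX_LOCK(txq);
 *	if (!sc->sc_stopping)
 *		wm_start_locked(ifp);
 *	WM_TX_UNLOCK(txq);
 */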

#ifdef WM_MPSAFE
#define	CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define	CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
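/*
 * Illustrative note (added to this edit, not in the original driver):
 * rxq_tailp always points at the m_next slot to fill, so appending is
 * O(1) with no "empty list" special case.  A hypothetical three-fragment
 * receive does:
 *
 *	WM_RXCHAIN_RESET(rxq);		// tailp = &rxq_head
 *	WM_RXCHAIN_LINK(rxq, m0);	// rxq_head = m0, tailp = &m0->m_next
 *	WM_RXCHAIN_LINK(rxq, m1);	// m0->m_next = m1
 *	WM_RXCHAIN_LINK(rxq, m2);	// m1->m_next = m2
 */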

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
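/*
 * Illustrative note (added to this edit, not in the original driver):
 * descriptor base addresses are programmed into the chip as two 32-bit
 * halves.  For a hypothetical 64-bit DMA address 0x0000000123456789:
 *
 *	WM_CDTXADDR_LO() == 0x23456789
 *	WM_CDTXADDR_HI() == 0x00000001
 *
 * On systems with a 32-bit bus_addr_t the high half is always 0.
 */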

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static inline int	wm_nq_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#ifdef WM_WOL
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t wmp_vendor;
	pci_product_id_t wmp_product;
	const char *wmp_name;
	wm_chip_type wmp_type;
	uint32_t wmp_flags;
#define	WMP_F_UNKNOWN	WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER	WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER	WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES	WM_MEDIATYPE_SERDES
#define	WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571, WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571, WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571, WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571, WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571, WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM2,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT, WMP_F_COPPER },
#endif
	{ 0, 0,
	  NULL,
	  0, 0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

/*
 * Descriptor sync/init functions.
 */
static inline void
wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
{
	struct wm_softc *sc = txq->txq_sc;

	/* If it will wrap around, sync to the end of the ring. */
	if ((start + num) > WM_NTXDESC(txq)) {
		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
		    WM_CDTXOFF(txq, start), txq->txq_descsize *
		    (WM_NTXDESC(txq) - start), ops);
		num -= (WM_NTXDESC(txq) - start);
		start = 0;
	}

	/* Now sync whatever is left. */
	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
}
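/*
 * Illustrative note (added to this edit, not in the original driver):
 * the wrap-around split above turns one logical sync into at most two
 * bus_dmamap_sync() calls.  E.g. with a 256-entry ring, syncing 8
 * descriptors starting at index 252 syncs [252..255] first, then [0..3].
 */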

static inline void
wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
{
	struct wm_softc *sc = rxq->rxq_sc;

	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
}

static inline void
wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
{
	struct wm_softc *sc = rxq->rxq_sc;
	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
	struct mbuf *m = rxs->rxs_mbuf;

	/*
	 * Note: We scoot the packet forward 2 bytes in the buffer
	 * so that the payload after the Ethernet header is aligned
	 * to a 4-byte boundary.
	 *
	 * XXX BRAINDAMAGE ALERT!
	 * The stupid chip uses the same size for every buffer, which
	 * is set in the Receive Control register.  We are using the 2K
	 * size option, but what we REALLY want is (2K - 2)!  For this
	 * reason, we can't "scoot" packets longer than the standard
	 * Ethernet MTU.  On strict-alignment platforms, if the total
	 * size exceeds (2K - 2) we set align_tweak to 0 and let
	 * the upper layer copy the headers.
	 */
	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;

	wm_set_dma_addr(&rxd->wrx_addr,
	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
	rxd->wrx_len = 0;
	rxd->wrx_cksum = 0;
	rxd->wrx_status = 0;
	rxd->wrx_errors = 0;
	rxd->wrx_special = 0;
	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
}
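/*
 * Illustrative note (added to this edit, not in the original driver):
 * the 2-byte "align tweak" works because the Ethernet header is 14
 * bytes long; starting the frame at offset 2 puts the IP header at
 * offset 16, a 4-byte boundary, so the stack can read 32-bit header
 * fields without unaligned accesses.
 */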

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, nvmword;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Disable MSI for Errata:
	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
	 *
	 *  82544: Errata 25
	 *  82540: Errata  6 (easy to reproduce device timeout)
	 *  82545: Errata  4 (easy to reproduce device timeout)
	 *  82546: Errata 26 (easy to reproduce device timeout)
	 *  82541: Errata  7 (easy to reproduce device timeout)
	 *
	 * "Byte Enables 2 and 3 are not set on MSI writes"
	 *
	 *  82571 & 82572: Errata 63
	 */
	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
	    || (sc->sc_type == WM_T_82572))
		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1594 /*
1595	 * Map the device. All devices support memory-mapped access,
1596 * and it is really required for normal operation.
1597 */
1598 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1599 switch (memtype) {
1600 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1601 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1602 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1603 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1604 break;
1605 default:
1606 memh_valid = 0;
1607 break;
1608 }
1609
1610 if (memh_valid) {
1611 sc->sc_st = memt;
1612 sc->sc_sh = memh;
1613 sc->sc_ss = memsize;
1614 } else {
1615 aprint_error_dev(sc->sc_dev,
1616 "unable to map device registers\n");
1617 return;
1618 }
1619
1620 /*
1621 * In addition, i82544 and later support I/O mapped indirect
1622 * register access. It is not desirable (nor supported in
1623 * this driver) to use it for normal operation, though it is
1624 * required to work around bugs in some chip versions.
1625 */
1626 if (sc->sc_type >= WM_T_82544) {
1627 /* First we have to find the I/O BAR. */
1628 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1629 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1630 if (memtype == PCI_MAPREG_TYPE_IO)
1631 break;
1632 if (PCI_MAPREG_MEM_TYPE(memtype) ==
1633 PCI_MAPREG_MEM_TYPE_64BIT)
1634 i += 4; /* skip high bits, too */
1635 }
1636 if (i < PCI_MAPREG_END) {
1637 /*
1638			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1639			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1640			 * That's no problem, because those newer chips don't
1641			 * have this bug anyway.
1642			 *
1643			 * The i8254x apparently doesn't respond when the
1644			 * I/O BAR is 0, which looks somewhat like it hasn't
1645			 * been configured.
1646 */
1647 preg = pci_conf_read(pc, pa->pa_tag, i);
1648 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1649 aprint_error_dev(sc->sc_dev,
1650 "WARNING: I/O BAR at zero.\n");
1651 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1652 0, &sc->sc_iot, &sc->sc_ioh,
1653 NULL, &sc->sc_ios) == 0) {
1654 sc->sc_flags |= WM_F_IOH_VALID;
1655 } else {
1656 aprint_error_dev(sc->sc_dev,
1657 "WARNING: unable to map I/O space\n");
1658 }
1659 }
1660
1661 }
1662
1663 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1664 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1665 preg |= PCI_COMMAND_MASTER_ENABLE;
1666 if (sc->sc_type < WM_T_82542_2_1)
1667 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1668 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1669
1670 /* power up chip */
1671 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1672 NULL)) && error != EOPNOTSUPP) {
1673 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1674 return;
1675 }
1676
1677 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1678
1679 /* Allocation settings */
1680 max_type = PCI_INTR_TYPE_MSIX;
1681 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
1682 counts[PCI_INTR_TYPE_MSI] = 1;
1683 counts[PCI_INTR_TYPE_INTX] = 1;
1684
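	/*
	 * Try interrupt types from most to least capable: MSI-X first
	 * (one vector per queue plus one more for link interrupts),
	 * then plain MSI, then INTx.  If setup fails at one level we
	 * release the vectors, lower max_type and retry here.
	 */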
1685 alloc_retry:
1686 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1687 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1688 return;
1689 }
1690
1691 if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1692 error = wm_setup_msix(sc);
1693 if (error) {
1694 pci_intr_release(pc, sc->sc_intrs,
1695 counts[PCI_INTR_TYPE_MSIX]);
1696
1697 /* Setup for MSI: Disable MSI-X */
1698 max_type = PCI_INTR_TYPE_MSI;
1699 counts[PCI_INTR_TYPE_MSI] = 1;
1700 counts[PCI_INTR_TYPE_INTX] = 1;
1701 goto alloc_retry;
1702 }
1703 } else if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1704 wm_adjust_qnum(sc, 0); /* must not use multiqueue */
1705 error = wm_setup_legacy(sc);
1706 if (error) {
1707 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1708 counts[PCI_INTR_TYPE_MSI]);
1709
1710 /* The next try is for INTx: Disable MSI */
1711 max_type = PCI_INTR_TYPE_INTX;
1712 counts[PCI_INTR_TYPE_INTX] = 1;
1713 goto alloc_retry;
1714 }
1715 } else {
1716 wm_adjust_qnum(sc, 0); /* must not use multiqueue */
1717 error = wm_setup_legacy(sc);
1718 if (error) {
1719 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1720 counts[PCI_INTR_TYPE_INTX]);
1721 return;
1722 }
1723 }
1724
1725 /*
1726 * Check the function ID (unit number of the chip).
1727 */
1728 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1729 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1730 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1731 || (sc->sc_type == WM_T_82580)
1732 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1733 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1734 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1735 else
1736 sc->sc_funcid = 0;
1737
1738 /*
1739 * Determine a few things about the bus we're connected to.
1740 */
1741 if (sc->sc_type < WM_T_82543) {
1742 /* We don't really know the bus characteristics here. */
1743 sc->sc_bus_speed = 33;
1744 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1745 /*
1746		 * CSA (Communication Streaming Architecture) is about as fast
1747		 * as a 32-bit 66MHz PCI bus.
1748 */
1749 sc->sc_flags |= WM_F_CSA;
1750 sc->sc_bus_speed = 66;
1751 aprint_verbose_dev(sc->sc_dev,
1752 "Communication Streaming Architecture\n");
1753 if (sc->sc_type == WM_T_82547) {
1754 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1755 callout_setfunc(&sc->sc_txfifo_ch,
1756 wm_82547_txfifo_stall, sc);
1757 aprint_verbose_dev(sc->sc_dev,
1758 "using 82547 Tx FIFO stall work-around\n");
1759 }
1760 } else if (sc->sc_type >= WM_T_82571) {
1761 sc->sc_flags |= WM_F_PCIE;
1762 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1763 && (sc->sc_type != WM_T_ICH10)
1764 && (sc->sc_type != WM_T_PCH)
1765 && (sc->sc_type != WM_T_PCH2)
1766 && (sc->sc_type != WM_T_PCH_LPT)
1767 && (sc->sc_type != WM_T_PCH_SPT)) {
1768 /* ICH* and PCH* have no PCIe capability registers */
1769 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1770 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1771 NULL) == 0)
1772 aprint_error_dev(sc->sc_dev,
1773 "unable to find PCIe capability\n");
1774 }
1775 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1776 } else {
1777 reg = CSR_READ(sc, WMREG_STATUS);
1778 if (reg & STATUS_BUS64)
1779 sc->sc_flags |= WM_F_BUS64;
1780 if ((reg & STATUS_PCIX_MODE) != 0) {
1781 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1782
1783 sc->sc_flags |= WM_F_PCIX;
1784 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1785 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1786 aprint_error_dev(sc->sc_dev,
1787 "unable to find PCIX capability\n");
1788 else if (sc->sc_type != WM_T_82545_3 &&
1789 sc->sc_type != WM_T_82546_3) {
1790 /*
1791 * Work around a problem caused by the BIOS
1792 * setting the max memory read byte count
1793 * incorrectly.
1794 */
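				/*
				 * MMRBC is encoded as a power of two: a
				 * field value of n means 512 << n bytes
				 * (512..4096).  Clamp the command field
				 * to the maximum the device advertises
				 * in its PCI-X status register.
				 */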
1795 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1796 sc->sc_pcixe_capoff + PCIX_CMD);
1797 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1798 sc->sc_pcixe_capoff + PCIX_STATUS);
1799
1800 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1801 PCIX_CMD_BYTECNT_SHIFT;
1802 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1803 PCIX_STATUS_MAXB_SHIFT;
1804 if (bytecnt > maxb) {
1805 aprint_verbose_dev(sc->sc_dev,
1806 "resetting PCI-X MMRBC: %d -> %d\n",
1807 512 << bytecnt, 512 << maxb);
1808 pcix_cmd = (pcix_cmd &
1809 ~PCIX_CMD_BYTECNT_MASK) |
1810 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1811 pci_conf_write(pa->pa_pc, pa->pa_tag,
1812 sc->sc_pcixe_capoff + PCIX_CMD,
1813 pcix_cmd);
1814 }
1815 }
1816 }
1817 /*
1818 * The quad port adapter is special; it has a PCIX-PCIX
1819 * bridge on the board, and can run the secondary bus at
1820 * a higher speed.
1821 */
1822 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1823 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1824 : 66;
1825 } else if (sc->sc_flags & WM_F_PCIX) {
1826 switch (reg & STATUS_PCIXSPD_MASK) {
1827 case STATUS_PCIXSPD_50_66:
1828 sc->sc_bus_speed = 66;
1829 break;
1830 case STATUS_PCIXSPD_66_100:
1831 sc->sc_bus_speed = 100;
1832 break;
1833 case STATUS_PCIXSPD_100_133:
1834 sc->sc_bus_speed = 133;
1835 break;
1836 default:
1837 aprint_error_dev(sc->sc_dev,
1838 "unknown PCIXSPD %d; assuming 66MHz\n",
1839 reg & STATUS_PCIXSPD_MASK);
1840 sc->sc_bus_speed = 66;
1841 break;
1842 }
1843 } else
1844 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1845 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1846 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1847 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1848 }
1849
1850 /* clear interesting stat counters */
1851 CSR_READ(sc, WMREG_COLC);
1852 CSR_READ(sc, WMREG_RXERRC);
1853
1854 /* get PHY control from SMBus to PCIe */
1855 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1856 || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
1857 wm_smbustopci(sc);
1858
1859 /* Reset the chip to a known state. */
1860 wm_reset(sc);
1861
1862 /* Get some information about the EEPROM. */
1863 switch (sc->sc_type) {
1864 case WM_T_82542_2_0:
1865 case WM_T_82542_2_1:
1866 case WM_T_82543:
1867 case WM_T_82544:
1868 /* Microwire */
1869 sc->sc_nvm_wordsize = 64;
1870 sc->sc_nvm_addrbits = 6;
1871 break;
1872 case WM_T_82540:
1873 case WM_T_82545:
1874 case WM_T_82545_3:
1875 case WM_T_82546:
1876 case WM_T_82546_3:
1877 /* Microwire */
1878 reg = CSR_READ(sc, WMREG_EECD);
1879 if (reg & EECD_EE_SIZE) {
1880 sc->sc_nvm_wordsize = 256;
1881 sc->sc_nvm_addrbits = 8;
1882 } else {
1883 sc->sc_nvm_wordsize = 64;
1884 sc->sc_nvm_addrbits = 6;
1885 }
1886 sc->sc_flags |= WM_F_LOCK_EECD;
1887 break;
1888 case WM_T_82541:
1889 case WM_T_82541_2:
1890 case WM_T_82547:
1891 case WM_T_82547_2:
1892 sc->sc_flags |= WM_F_LOCK_EECD;
1893 reg = CSR_READ(sc, WMREG_EECD);
1894 if (reg & EECD_EE_TYPE) {
1895 /* SPI */
1896 sc->sc_flags |= WM_F_EEPROM_SPI;
1897 wm_nvm_set_addrbits_size_eecd(sc);
1898 } else {
1899 /* Microwire */
1900 if ((reg & EECD_EE_ABITS) != 0) {
1901 sc->sc_nvm_wordsize = 256;
1902 sc->sc_nvm_addrbits = 8;
1903 } else {
1904 sc->sc_nvm_wordsize = 64;
1905 sc->sc_nvm_addrbits = 6;
1906 }
1907 }
1908 break;
1909 case WM_T_82571:
1910 case WM_T_82572:
1911 /* SPI */
1912 sc->sc_flags |= WM_F_EEPROM_SPI;
1913 wm_nvm_set_addrbits_size_eecd(sc);
1914 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1915 break;
1916 case WM_T_82573:
1917 sc->sc_flags |= WM_F_LOCK_SWSM;
1918 /* FALLTHROUGH */
1919 case WM_T_82574:
1920 case WM_T_82583:
1921 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1922 sc->sc_flags |= WM_F_EEPROM_FLASH;
1923 sc->sc_nvm_wordsize = 2048;
1924 } else {
1925 /* SPI */
1926 sc->sc_flags |= WM_F_EEPROM_SPI;
1927 wm_nvm_set_addrbits_size_eecd(sc);
1928 }
1929 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1930 break;
1931 case WM_T_82575:
1932 case WM_T_82576:
1933 case WM_T_82580:
1934 case WM_T_I350:
1935 case WM_T_I354:
1936 case WM_T_80003:
1937 /* SPI */
1938 sc->sc_flags |= WM_F_EEPROM_SPI;
1939 wm_nvm_set_addrbits_size_eecd(sc);
1940 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1941 | WM_F_LOCK_SWSM;
1942 break;
1943 case WM_T_ICH8:
1944 case WM_T_ICH9:
1945 case WM_T_ICH10:
1946 case WM_T_PCH:
1947 case WM_T_PCH2:
1948 case WM_T_PCH_LPT:
1949 /* FLASH */
1950 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1951 sc->sc_nvm_wordsize = 2048;
1952 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
1953 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1954 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
1955 aprint_error_dev(sc->sc_dev,
1956 "can't map FLASH registers\n");
1957 goto out;
1958 }
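		/*
		 * GFPREG holds the flash region bounds in units of
		 * flash sectors: the low field is the first sector and
		 * the field at bit 16 is the last.  The region holds
		 * two NVM banks, and the bank size is converted from
		 * bytes to 16-bit words below.  A sketch, assuming the
		 * usual 4KB ICH_FLASH_SECTOR_SIZE: base 0x01 and limit
		 * 0x1f give 31 sectors = 126976 bytes, i.e. 31744
		 * words per bank.
		 */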
1959 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1960 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1961 ICH_FLASH_SECTOR_SIZE;
1962 sc->sc_ich8_flash_bank_size =
1963 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1964 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
1965 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1966 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1967 sc->sc_flashreg_offset = 0;
1968 break;
1969 case WM_T_PCH_SPT:
1970 /* SPT has no GFPREG; flash registers mapped through BAR0 */
1971 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1972 sc->sc_flasht = sc->sc_st;
1973 sc->sc_flashh = sc->sc_sh;
1974 sc->sc_ich8_flash_base = 0;
1975 sc->sc_nvm_wordsize =
1976 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
1977 * NVM_SIZE_MULTIPLIER;
1978		/* It is the size in bytes; we want words */
1979 sc->sc_nvm_wordsize /= 2;
1980 /* assume 2 banks */
1981 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
1982 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
1983 break;
1984 case WM_T_I210:
1985 case WM_T_I211:
1986 if (wm_nvm_get_flash_presence_i210(sc)) {
1987 wm_nvm_set_addrbits_size_eecd(sc);
1988 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1989 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1990 } else {
1991 sc->sc_nvm_wordsize = INVM_SIZE;
1992 sc->sc_flags |= WM_F_EEPROM_INVM;
1993 sc->sc_flags |= WM_F_LOCK_SWFW;
1994 }
1995 break;
1996 default:
1997 break;
1998 }
1999
2000 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2001 switch (sc->sc_type) {
2002 case WM_T_82571:
2003 case WM_T_82572:
2004 reg = CSR_READ(sc, WMREG_SWSM2);
2005 if ((reg & SWSM2_LOCK) == 0) {
2006 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2007 force_clear_smbi = true;
2008 } else
2009 force_clear_smbi = false;
2010 break;
2011 case WM_T_82573:
2012 case WM_T_82574:
2013 case WM_T_82583:
2014 force_clear_smbi = true;
2015 break;
2016 default:
2017 force_clear_smbi = false;
2018 break;
2019 }
2020 if (force_clear_smbi) {
2021 reg = CSR_READ(sc, WMREG_SWSM);
2022 if ((reg & SWSM_SMBI) != 0)
2023 aprint_error_dev(sc->sc_dev,
2024 "Please update the Bootagent\n");
2025 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2026 }
2027
2028 /*
2029	 * Defer printing the EEPROM type until after verifying the checksum.
2030 * This allows the EEPROM type to be printed correctly in the case
2031 * that no EEPROM is attached.
2032 */
2033 /*
2034 * Validate the EEPROM checksum. If the checksum fails, flag
2035 * this for later, so we can fail future reads from the EEPROM.
2036 */
2037 if (wm_nvm_validate_checksum(sc)) {
2038 /*
2039		 * Check again, because some PCI-e parts fail the
2040		 * first check due to the link being in a sleep state.
2041 */
2042 if (wm_nvm_validate_checksum(sc))
2043 sc->sc_flags |= WM_F_EEPROM_INVALID;
2044 }
2045
2046 /* Set device properties (macflags) */
2047 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2048
2049 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2050 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2051 else {
2052 aprint_verbose_dev(sc->sc_dev, "%u words ",
2053 sc->sc_nvm_wordsize);
2054 if (sc->sc_flags & WM_F_EEPROM_INVM)
2055 aprint_verbose("iNVM");
2056 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2057 aprint_verbose("FLASH(HW)");
2058 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2059 aprint_verbose("FLASH");
2060 else {
2061 if (sc->sc_flags & WM_F_EEPROM_SPI)
2062 eetype = "SPI";
2063 else
2064 eetype = "MicroWire";
2065 aprint_verbose("(%d address bits) %s EEPROM",
2066 sc->sc_nvm_addrbits, eetype);
2067 }
2068 }
2069 wm_nvm_version(sc);
2070 aprint_verbose("\n");
2071
2072 /* Check for I21[01] PLL workaround */
2073 if (sc->sc_type == WM_T_I210)
2074 sc->sc_flags |= WM_F_PLL_WA_I210;
2075 if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2076 /* NVM image release 3.25 has a workaround */
2077 if ((sc->sc_nvm_ver_major < 3)
2078 || ((sc->sc_nvm_ver_major == 3)
2079 && (sc->sc_nvm_ver_minor < 25))) {
2080 aprint_verbose_dev(sc->sc_dev,
2081 "ROM image version %d.%d is older than 3.25\n",
2082 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2083 sc->sc_flags |= WM_F_PLL_WA_I210;
2084 }
2085 }
2086 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2087 wm_pll_workaround_i210(sc);
2088
2089 wm_get_wakeup(sc);
2090 switch (sc->sc_type) {
2091 case WM_T_82571:
2092 case WM_T_82572:
2093 case WM_T_82573:
2094 case WM_T_82574:
2095 case WM_T_82583:
2096 case WM_T_80003:
2097 case WM_T_ICH8:
2098 case WM_T_ICH9:
2099 case WM_T_ICH10:
2100 case WM_T_PCH:
2101 case WM_T_PCH2:
2102 case WM_T_PCH_LPT:
2103 case WM_T_PCH_SPT:
2104 /* Non-AMT based hardware can now take control from firmware */
2105 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2106 wm_get_hw_control(sc);
2107 break;
2108 default:
2109 break;
2110 }
2111
2112 /*
2113	 * Read the Ethernet address from the EEPROM, unless it was
2114	 * first found in the device properties.
2115 */
2116 ea = prop_dictionary_get(dict, "mac-address");
2117 if (ea != NULL) {
2118 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2119 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2120 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2121 } else {
2122 if (wm_read_mac_addr(sc, enaddr) != 0) {
2123 aprint_error_dev(sc->sc_dev,
2124 "unable to read Ethernet address\n");
2125 goto out;
2126 }
2127 }
2128
2129 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2130 ether_sprintf(enaddr));
2131
2132 /*
2133 * Read the config info from the EEPROM, and set up various
2134 * bits in the control registers based on their contents.
2135 */
2136 pn = prop_dictionary_get(dict, "i82543-cfg1");
2137 if (pn != NULL) {
2138 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2139 cfg1 = (uint16_t) prop_number_integer_value(pn);
2140 } else {
2141 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2142 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2143 goto out;
2144 }
2145 }
2146
2147 pn = prop_dictionary_get(dict, "i82543-cfg2");
2148 if (pn != NULL) {
2149 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2150 cfg2 = (uint16_t) prop_number_integer_value(pn);
2151 } else {
2152 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2153 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2154 goto out;
2155 }
2156 }
2157
2158 /* check for WM_F_WOL */
2159 switch (sc->sc_type) {
2160 case WM_T_82542_2_0:
2161 case WM_T_82542_2_1:
2162 case WM_T_82543:
2163 /* dummy? */
2164 eeprom_data = 0;
2165 apme_mask = NVM_CFG3_APME;
2166 break;
2167 case WM_T_82544:
2168 apme_mask = NVM_CFG2_82544_APM_EN;
2169 eeprom_data = cfg2;
2170 break;
2171 case WM_T_82546:
2172 case WM_T_82546_3:
2173 case WM_T_82571:
2174 case WM_T_82572:
2175 case WM_T_82573:
2176 case WM_T_82574:
2177 case WM_T_82583:
2178 case WM_T_80003:
2179 default:
2180 apme_mask = NVM_CFG3_APME;
2181 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2182 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2183 break;
2184 case WM_T_82575:
2185 case WM_T_82576:
2186 case WM_T_82580:
2187 case WM_T_I350:
2188 case WM_T_I354: /* XXX ok? */
2189 case WM_T_ICH8:
2190 case WM_T_ICH9:
2191 case WM_T_ICH10:
2192 case WM_T_PCH:
2193 case WM_T_PCH2:
2194 case WM_T_PCH_LPT:
2195 case WM_T_PCH_SPT:
2196 /* XXX The funcid should be checked on some devices */
2197 apme_mask = WUC_APME;
2198 eeprom_data = CSR_READ(sc, WMREG_WUC);
2199 break;
2200 }
2201
2202	/* Check for the WM_F_WOL flag after the EEPROM settings have been read */
2203 if ((eeprom_data & apme_mask) != 0)
2204 sc->sc_flags |= WM_F_WOL;
2205 #ifdef WM_DEBUG
2206 if ((sc->sc_flags & WM_F_WOL) != 0)
2207 printf("WOL\n");
2208 #endif
2209
2210 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2211 /* Check NVM for autonegotiation */
2212 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2213 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2214 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2215 }
2216 }
2217
2218 /*
2219	 * XXX need special handling for some multiple-port cards
2220	 * to disable a particular port.
2221 */
2222
2223 if (sc->sc_type >= WM_T_82544) {
2224 pn = prop_dictionary_get(dict, "i82543-swdpin");
2225 if (pn != NULL) {
2226 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2227 swdpin = (uint16_t) prop_number_integer_value(pn);
2228 } else {
2229 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2230 aprint_error_dev(sc->sc_dev,
2231 "unable to read SWDPIN\n");
2232 goto out;
2233 }
2234 }
2235 }
2236
2237 if (cfg1 & NVM_CFG1_ILOS)
2238 sc->sc_ctrl |= CTRL_ILOS;
2239
2240 /*
2241 * XXX
2242	 * This code isn't correct because pins 2 and 3 are located
2243	 * in different positions on newer chips. Check all datasheets.
2244	 *
2245	 * Until this problem is resolved, only handle chips up to the 82580.
2246 */
2247 if (sc->sc_type <= WM_T_82580) {
2248 if (sc->sc_type >= WM_T_82544) {
2249 sc->sc_ctrl |=
2250 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2251 CTRL_SWDPIO_SHIFT;
2252 sc->sc_ctrl |=
2253 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2254 CTRL_SWDPINS_SHIFT;
2255 } else {
2256 sc->sc_ctrl |=
2257 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2258 CTRL_SWDPIO_SHIFT;
2259 }
2260 }
2261
2262	/* XXX What about chips other than the 82580? */
2263 if (sc->sc_type == WM_T_82580) {
2264 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2265 if (nvmword & __BIT(13))
2266 sc->sc_ctrl |= CTRL_ILOS;
2267 }
2268
2269 #if 0
2270 if (sc->sc_type >= WM_T_82544) {
2271 if (cfg1 & NVM_CFG1_IPS0)
2272 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2273 if (cfg1 & NVM_CFG1_IPS1)
2274 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2275 sc->sc_ctrl_ext |=
2276 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2277 CTRL_EXT_SWDPIO_SHIFT;
2278 sc->sc_ctrl_ext |=
2279 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2280 CTRL_EXT_SWDPINS_SHIFT;
2281 } else {
2282 sc->sc_ctrl_ext |=
2283 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2284 CTRL_EXT_SWDPIO_SHIFT;
2285 }
2286 #endif
2287
2288 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2289 #if 0
2290 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2291 #endif
2292
2293 if (sc->sc_type == WM_T_PCH) {
2294 uint16_t val;
2295
2296 /* Save the NVM K1 bit setting */
2297 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2298
2299 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2300 sc->sc_nvm_k1_enabled = 1;
2301 else
2302 sc->sc_nvm_k1_enabled = 0;
2303 }
2304
2305 /*
2306	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
2307 * media structures accordingly.
2308 */
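/*
 * The decision below goes roughly as follows: ICH/PCH and the
 * 82573/82574/82583 parts are always handled as GMII; parts older
 * than the 82543, or parts with STATUS_TBIMODE set, are TBI (fiber);
 * and on the 82575 and newer the CTRL_EXT link-mode field selects
 * among 1000BASE-KX, SGMII and SERDES, possibly probing an SFP
 * module to decide.
 */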
2309 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2310 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2311 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2312 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
2313 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2314 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2315 wm_gmii_mediainit(sc, wmp->wmp_product);
2316 } else if (sc->sc_type < WM_T_82543 ||
2317 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2318 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2319 aprint_error_dev(sc->sc_dev,
2320 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2321 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2322 }
2323 wm_tbi_mediainit(sc);
2324 } else {
2325 switch (sc->sc_type) {
2326 case WM_T_82575:
2327 case WM_T_82576:
2328 case WM_T_82580:
2329 case WM_T_I350:
2330 case WM_T_I354:
2331 case WM_T_I210:
2332 case WM_T_I211:
2333 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2334 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2335 switch (link_mode) {
2336 case CTRL_EXT_LINK_MODE_1000KX:
2337 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2338 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2339 break;
2340 case CTRL_EXT_LINK_MODE_SGMII:
2341 if (wm_sgmii_uses_mdio(sc)) {
2342 aprint_verbose_dev(sc->sc_dev,
2343 "SGMII(MDIO)\n");
2344 sc->sc_flags |= WM_F_SGMII;
2345 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2346 break;
2347 }
2348 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2349 /*FALLTHROUGH*/
2350 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2351 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2352 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2353 if (link_mode
2354 == CTRL_EXT_LINK_MODE_SGMII) {
2355 sc->sc_mediatype
2356 = WM_MEDIATYPE_COPPER;
2357 sc->sc_flags |= WM_F_SGMII;
2358 } else {
2359 sc->sc_mediatype
2360 = WM_MEDIATYPE_SERDES;
2361 aprint_verbose_dev(sc->sc_dev,
2362 "SERDES\n");
2363 }
2364 break;
2365 }
2366 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2367 aprint_verbose_dev(sc->sc_dev,
2368 "SERDES\n");
2369
2370 /* Change current link mode setting */
2371 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2372 switch (sc->sc_mediatype) {
2373 case WM_MEDIATYPE_COPPER:
2374 reg |= CTRL_EXT_LINK_MODE_SGMII;
2375 break;
2376 case WM_MEDIATYPE_SERDES:
2377 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2378 break;
2379 default:
2380 break;
2381 }
2382 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2383 break;
2384 case CTRL_EXT_LINK_MODE_GMII:
2385 default:
2386 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2387 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2388 break;
2389 }
2390
2391 reg &= ~CTRL_EXT_I2C_ENA;
2392 if ((sc->sc_flags & WM_F_SGMII) != 0)
2393 reg |= CTRL_EXT_I2C_ENA;
2394 else
2395 reg &= ~CTRL_EXT_I2C_ENA;
2396 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2397
2398 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2399 wm_gmii_mediainit(sc, wmp->wmp_product);
2400 else
2401 wm_tbi_mediainit(sc);
2402 break;
2403 default:
2404 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2405 aprint_error_dev(sc->sc_dev,
2406 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2407 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2408 wm_gmii_mediainit(sc, wmp->wmp_product);
2409 }
2410 }
2411
2412 ifp = &sc->sc_ethercom.ec_if;
2413 xname = device_xname(sc->sc_dev);
2414 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2415 ifp->if_softc = sc;
2416 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2417 ifp->if_ioctl = wm_ioctl;
2418 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2419 ifp->if_start = wm_nq_start;
2420 if (sc->sc_nqueues > 1)
2421 ifp->if_transmit = wm_nq_transmit;
2422 } else
2423 ifp->if_start = wm_start;
2424 ifp->if_watchdog = wm_watchdog;
2425 ifp->if_init = wm_init;
2426 ifp->if_stop = wm_stop;
2427 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2428 IFQ_SET_READY(&ifp->if_snd);
2429
2430 /* Check for jumbo frame */
2431 switch (sc->sc_type) {
2432 case WM_T_82573:
2433 /* XXX limited to 9234 if ASPM is disabled */
2434 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2435 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2436 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2437 break;
2438 case WM_T_82571:
2439 case WM_T_82572:
2440 case WM_T_82574:
2441 case WM_T_82575:
2442 case WM_T_82576:
2443 case WM_T_82580:
2444 case WM_T_I350:
2445 case WM_T_I354: /* XXXX ok? */
2446 case WM_T_I210:
2447 case WM_T_I211:
2448 case WM_T_80003:
2449 case WM_T_ICH9:
2450 case WM_T_ICH10:
2451 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2452 case WM_T_PCH_LPT:
2453 case WM_T_PCH_SPT:
2454 /* XXX limited to 9234 */
2455 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2456 break;
2457 case WM_T_PCH:
2458 /* XXX limited to 4096 */
2459 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2460 break;
2461 case WM_T_82542_2_0:
2462 case WM_T_82542_2_1:
2463 case WM_T_82583:
2464 case WM_T_ICH8:
2465 /* No support for jumbo frame */
2466 break;
2467 default:
2468 /* ETHER_MAX_LEN_JUMBO */
2469 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2470 break;
2471 }
2472
2473	/* If we're an i82543 or greater, we can support VLANs. */
2474 if (sc->sc_type >= WM_T_82543)
2475 sc->sc_ethercom.ec_capabilities |=
2476 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2477
2478 /*
2479	 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
2480 * on i82543 and later.
2481 */
2482 if (sc->sc_type >= WM_T_82543) {
2483 ifp->if_capabilities |=
2484 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2485 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2486 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2487 IFCAP_CSUM_TCPv6_Tx |
2488 IFCAP_CSUM_UDPv6_Tx;
2489 }
2490
2491 /*
2492 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2493 *
2494 * 82541GI (8086:1076) ... no
2495 * 82572EI (8086:10b9) ... yes
2496 */
2497 if (sc->sc_type >= WM_T_82571) {
2498 ifp->if_capabilities |=
2499 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2500 }
2501
2502 /*
2503	 * If we're an i82544 or greater (except i82547), we can do
2504 * TCP segmentation offload.
2505 */
2506 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2507 ifp->if_capabilities |= IFCAP_TSOv4;
2508 }
2509
2510 if (sc->sc_type >= WM_T_82571) {
2511 ifp->if_capabilities |= IFCAP_TSOv6;
2512 }
2513
2514 #ifdef WM_MPSAFE
2515 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2516 #else
2517 sc->sc_core_lock = NULL;
2518 #endif
2519
2520 /* Attach the interface. */
2521 if_initialize(ifp);
2522 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
2523 ether_ifattach(ifp, enaddr);
2524 if_register(ifp);
2525 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2526 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2527 RND_FLAG_DEFAULT);
2528
2529 #ifdef WM_EVENT_COUNTERS
2530 /* Attach event counters. */
2531 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2532 NULL, xname, "txsstall");
2533 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2534 NULL, xname, "txdstall");
2535 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2536 NULL, xname, "txfifo_stall");
2537 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2538 NULL, xname, "txdw");
2539 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2540 NULL, xname, "txqe");
2541 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2542 NULL, xname, "rxintr");
2543 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2544 NULL, xname, "linkintr");
2545
2546 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2547 NULL, xname, "rxipsum");
2548 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2549 NULL, xname, "rxtusum");
2550 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2551 NULL, xname, "txipsum");
2552 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2553 NULL, xname, "txtusum");
2554 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2555 NULL, xname, "txtusum6");
2556
2557 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2558 NULL, xname, "txtso");
2559 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2560 NULL, xname, "txtso6");
2561 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2562 NULL, xname, "txtsopain");
2563
2564 for (i = 0; i < WM_NTXSEGS; i++) {
2565 snprintf(wm_txseg_evcnt_names[i],
2566 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2567 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2568 NULL, xname, wm_txseg_evcnt_names[i]);
2569 }
2570
2571 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2572 NULL, xname, "txdrop");
2573
2574 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2575 NULL, xname, "tu");
2576
2577 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2578 NULL, xname, "tx_xoff");
2579 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2580 NULL, xname, "tx_xon");
2581 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2582 NULL, xname, "rx_xoff");
2583 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2584 NULL, xname, "rx_xon");
2585 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2586 NULL, xname, "rx_macctl");
2587 #endif /* WM_EVENT_COUNTERS */
2588
2589 if (pmf_device_register(self, wm_suspend, wm_resume))
2590 pmf_class_network_register(self, ifp);
2591 else
2592 aprint_error_dev(self, "couldn't establish power handler\n");
2593
2594 sc->sc_flags |= WM_F_ATTACHED;
2595 out:
2596 return;
2597 }
2598
2599 /* The detach function (ca_detach) */
2600 static int
2601 wm_detach(device_t self, int flags __unused)
2602 {
2603 struct wm_softc *sc = device_private(self);
2604 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2605 int i;
2606 #ifndef WM_MPSAFE
2607 int s;
2608 #endif
2609
2610 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2611 return 0;
2612
2613 #ifndef WM_MPSAFE
2614 s = splnet();
2615 #endif
2616 /* Stop the interface. Callouts are stopped in it. */
2617 wm_stop(ifp, 1);
2618
2619 #ifndef WM_MPSAFE
2620 splx(s);
2621 #endif
2622
2623 pmf_device_deregister(self);
2624
2625 /* Tell the firmware about the release */
2626 WM_CORE_LOCK(sc);
2627 wm_release_manageability(sc);
2628 wm_release_hw_control(sc);
2629 WM_CORE_UNLOCK(sc);
2630
2631 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2632
2633 /* Delete all remaining media. */
2634 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2635
2636 ether_ifdetach(ifp);
2637 if_detach(ifp);
2638 if_percpuq_destroy(sc->sc_ipq);
2639
2640 /* Unload RX dmamaps and free mbufs */
2641 for (i = 0; i < sc->sc_nqueues; i++) {
2642 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
2643 WM_RX_LOCK(rxq);
2644 wm_rxdrain(rxq);
2645 WM_RX_UNLOCK(rxq);
2646 }
2647 /* Must unlock here */
2648
2649 /* Disestablish the interrupt handler */
2650 for (i = 0; i < sc->sc_nintrs; i++) {
2651 if (sc->sc_ihs[i] != NULL) {
2652 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2653 sc->sc_ihs[i] = NULL;
2654 }
2655 }
2656 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2657
2658 wm_free_txrx_queues(sc);
2659
2660 /* Unmap the registers */
2661 if (sc->sc_ss) {
2662 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2663 sc->sc_ss = 0;
2664 }
2665 if (sc->sc_ios) {
2666 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2667 sc->sc_ios = 0;
2668 }
2669 if (sc->sc_flashs) {
2670 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2671 sc->sc_flashs = 0;
2672 }
2673
2674 if (sc->sc_core_lock)
2675 mutex_obj_free(sc->sc_core_lock);
2676
2677 return 0;
2678 }
2679
2680 static bool
2681 wm_suspend(device_t self, const pmf_qual_t *qual)
2682 {
2683 struct wm_softc *sc = device_private(self);
2684
2685 wm_release_manageability(sc);
2686 wm_release_hw_control(sc);
2687 #ifdef WM_WOL
2688 wm_enable_wakeup(sc);
2689 #endif
2690
2691 return true;
2692 }
2693
2694 static bool
2695 wm_resume(device_t self, const pmf_qual_t *qual)
2696 {
2697 struct wm_softc *sc = device_private(self);
2698
2699 wm_init_manageability(sc);
2700
2701 return true;
2702 }
2703
2704 /*
2705 * wm_watchdog: [ifnet interface function]
2706 *
2707 * Watchdog timer handler.
2708 */
2709 static void
2710 wm_watchdog(struct ifnet *ifp)
2711 {
2712 int qid;
2713 struct wm_softc *sc = ifp->if_softc;
2714
2715 for (qid = 0; qid < sc->sc_nqueues; qid++) {
2716 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
2717
2718 wm_watchdog_txq(ifp, txq);
2719 }
2720
2721 /* Reset the interface. */
2722 (void) wm_init(ifp);
2723
2724 /*
2725	 * There is still some upper-layer processing that calls
2726	 * ifp->if_start(), e.g. ALTQ.
2727 */
2728 /* Try to get more packets going. */
2729 ifp->if_start(ifp);
2730 }
2731
2732 static void
2733 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
2734 {
2735 struct wm_softc *sc = ifp->if_softc;
2736
2737 /*
2738 * Since we're using delayed interrupts, sweep up
2739 * before we report an error.
2740 */
2741 WM_TX_LOCK(txq);
2742 wm_txeof(sc, txq);
2743 WM_TX_UNLOCK(txq);
2744
2745 if (txq->txq_free != WM_NTXDESC(txq)) {
2746 #ifdef WM_DEBUG
2747 int i, j;
2748 struct wm_txsoft *txs;
2749 #endif
2750 log(LOG_ERR,
2751 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2752 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
2753 txq->txq_next);
2754 ifp->if_oerrors++;
2755 #ifdef WM_DEBUG
2756 for (i = txq->txq_sdirty; i != txq->txq_snext ;
2757 i = WM_NEXTTXS(txq, i)) {
2758 txs = &txq->txq_soft[i];
2759 printf("txs %d tx %d -> %d\n",
2760 i, txs->txs_firstdesc, txs->txs_lastdesc);
2761 for (j = txs->txs_firstdesc; ;
2762 j = WM_NEXTTX(txq, j)) {
2763 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2764 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
2765 printf("\t %#08x%08x\n",
2766 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
2767 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
2768 if (j == txs->txs_lastdesc)
2769 break;
2770 }
2771 }
2772 #endif
2773 }
2774 }
2775
2776 /*
2777 * wm_tick:
2778 *
2779 * One second timer, used to check link status, sweep up
2780 * completed transmit jobs, etc.
2781 */
2782 static void
2783 wm_tick(void *arg)
2784 {
2785 struct wm_softc *sc = arg;
2786 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2787 #ifndef WM_MPSAFE
2788 int s;
2789
2790 s = splnet();
2791 #endif
2792
2793 WM_CORE_LOCK(sc);
2794
2795 if (sc->sc_stopping)
2796 goto out;
2797
2798 if (sc->sc_type >= WM_T_82542_2_1) {
2799 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2800 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2801 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2802 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2803 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2804 }
2805
2806 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2807 ifp->if_ierrors += 0ULL + /* ensure quad_t */
2808 + CSR_READ(sc, WMREG_CRCERRS)
2809 + CSR_READ(sc, WMREG_ALGNERRC)
2810 + CSR_READ(sc, WMREG_SYMERRC)
2811 + CSR_READ(sc, WMREG_RXERRC)
2812 + CSR_READ(sc, WMREG_SEC)
2813 + CSR_READ(sc, WMREG_CEXTERR)
2814 + CSR_READ(sc, WMREG_RLEC);
2815 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2816
2817 if (sc->sc_flags & WM_F_HAS_MII)
2818 mii_tick(&sc->sc_mii);
2819 else if ((sc->sc_type >= WM_T_82575)
2820 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2821 wm_serdes_tick(sc);
2822 else
2823 wm_tbi_tick(sc);
2824
2825 out:
2826 WM_CORE_UNLOCK(sc);
2827 #ifndef WM_MPSAFE
2828 splx(s);
2829 #endif
2830
2831 if (!sc->sc_stopping)
2832 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2833 }
2834
2835 static int
2836 wm_ifflags_cb(struct ethercom *ec)
2837 {
2838 struct ifnet *ifp = &ec->ec_if;
2839 struct wm_softc *sc = ifp->if_softc;
2840 int change = ifp->if_flags ^ sc->sc_if_flags;
2841 int rc = 0;
2842
2843 WM_CORE_LOCK(sc);
2844
2845 if (change != 0)
2846 sc->sc_if_flags = ifp->if_flags;
2847
2848 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
2849 rc = ENETRESET;
2850 goto out;
2851 }
2852
2853 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2854 wm_set_filter(sc);
2855
2856 wm_set_vlan(sc);
2857
2858 out:
2859 WM_CORE_UNLOCK(sc);
2860
2861 return rc;
2862 }
2863
2864 /*
2865 * wm_ioctl: [ifnet interface function]
2866 *
2867 * Handle control requests from the operator.
2868 */
2869 static int
2870 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2871 {
2872 struct wm_softc *sc = ifp->if_softc;
2873 struct ifreq *ifr = (struct ifreq *) data;
2874 struct ifaddr *ifa = (struct ifaddr *)data;
2875 struct sockaddr_dl *sdl;
2876 int s, error;
2877
2878 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
2879 device_xname(sc->sc_dev), __func__));
2880 #ifndef WM_MPSAFE
2881 s = splnet();
2882 #endif
2883 switch (cmd) {
2884 case SIOCSIFMEDIA:
2885 case SIOCGIFMEDIA:
2886 WM_CORE_LOCK(sc);
2887 /* Flow control requires full-duplex mode. */
2888 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2889 (ifr->ifr_media & IFM_FDX) == 0)
2890 ifr->ifr_media &= ~IFM_ETH_FMASK;
2891 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2892 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2893 /* We can do both TXPAUSE and RXPAUSE. */
2894 ifr->ifr_media |=
2895 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2896 }
2897 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2898 }
2899 WM_CORE_UNLOCK(sc);
2900 #ifdef WM_MPSAFE
2901 s = splnet();
2902 #endif
2903 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2904 #ifdef WM_MPSAFE
2905 splx(s);
2906 #endif
2907 break;
2908 case SIOCINITIFADDR:
2909 WM_CORE_LOCK(sc);
2910 if (ifa->ifa_addr->sa_family == AF_LINK) {
2911 sdl = satosdl(ifp->if_dl->ifa_addr);
2912 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2913 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2914 /* unicast address is first multicast entry */
2915 wm_set_filter(sc);
2916 error = 0;
2917 WM_CORE_UNLOCK(sc);
2918 break;
2919 }
2920 WM_CORE_UNLOCK(sc);
2921 /*FALLTHROUGH*/
2922 default:
2923 #ifdef WM_MPSAFE
2924 s = splnet();
2925 #endif
2926 /* It may call wm_start, so unlock here */
2927 error = ether_ioctl(ifp, cmd, data);
2928 #ifdef WM_MPSAFE
2929 splx(s);
2930 #endif
2931 if (error != ENETRESET)
2932 break;
2933
2934 error = 0;
2935
2936 if (cmd == SIOCSIFCAP) {
2937 error = (*ifp->if_init)(ifp);
2938 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2939 ;
2940 else if (ifp->if_flags & IFF_RUNNING) {
2941 /*
2942 * Multicast list has changed; set the hardware filter
2943 * accordingly.
2944 */
2945 WM_CORE_LOCK(sc);
2946 wm_set_filter(sc);
2947 WM_CORE_UNLOCK(sc);
2948 }
2949 break;
2950 }
2951
2952 #ifndef WM_MPSAFE
2953 splx(s);
2954 #endif
2955 return error;
2956 }
2957
2958 /* MAC address related */
2959
2960 /*
2961 * Get the offset of the MAC address and return it.
2962 * If an error occurs, use offset 0.
2963 */
2964 static uint16_t
2965 wm_check_alt_mac_addr(struct wm_softc *sc)
2966 {
2967 uint16_t myea[ETHER_ADDR_LEN / 2];
2968 uint16_t offset = NVM_OFF_MACADDR;
2969
2970 /* Try to read alternative MAC address pointer */
2971 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2972 return 0;
2973
2974	/* Check whether the pointer is valid. */
2975 if ((offset == 0x0000) || (offset == 0xffff))
2976 return 0;
2977
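	/*
	 * The pointer read above is the base of a small per-function
	 * table; NVM_OFF_MACADDR_82571() skips to this function's
	 * entry (presumably three 16-bit words, i.e. one Ethernet
	 * address per function).
	 */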
2978 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
2979 /*
2980	 * Check whether the alternative MAC address is valid.
2981	 * Some cards have a non-0xffff pointer but don't actually
2982	 * use an alternative MAC address.
2983	 *
2984	 * Check whether the multicast (group) bit is set.
2985 */
2986 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2987 if (((myea[0] & 0xff) & 0x01) == 0)
2988 return offset; /* Found */
2989
2990 /* Not found */
2991 return 0;
2992 }
2993
2994 static int
2995 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2996 {
2997 uint16_t myea[ETHER_ADDR_LEN / 2];
2998 uint16_t offset = NVM_OFF_MACADDR;
2999 int do_invert = 0;
3000
3001 switch (sc->sc_type) {
3002 case WM_T_82580:
3003 case WM_T_I350:
3004 case WM_T_I354:
3005 /* EEPROM Top Level Partitioning */
3006 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3007 break;
3008 case WM_T_82571:
3009 case WM_T_82575:
3010 case WM_T_82576:
3011 case WM_T_80003:
3012 case WM_T_I210:
3013 case WM_T_I211:
3014 offset = wm_check_alt_mac_addr(sc);
3015 if (offset == 0)
3016 if ((sc->sc_funcid & 0x01) == 1)
3017 do_invert = 1;
3018 break;
3019 default:
3020 if ((sc->sc_funcid & 0x01) == 1)
3021 do_invert = 1;
3022 break;
3023 }
3024
3025 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
3026 myea) != 0)
3027 goto bad;
3028
3029 enaddr[0] = myea[0] & 0xff;
3030 enaddr[1] = myea[0] >> 8;
3031 enaddr[2] = myea[1] & 0xff;
3032 enaddr[3] = myea[1] >> 8;
3033 enaddr[4] = myea[2] & 0xff;
3034 enaddr[5] = myea[2] >> 8;
3035
3036 /*
3037 * Toggle the LSB of the MAC address on the second port
3038 * of some dual port cards.
3039 */
3040 if (do_invert != 0)
3041 enaddr[5] ^= 1;
3042
3043 return 0;
3044
3045 bad:
3046 return -1;
3047 }
3048
3049 /*
3050 * wm_set_ral:
3051 *
3052 *	Set an entry in the receive address list.
3053 */
3054 static void
3055 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3056 {
3057 uint32_t ral_lo, ral_hi;
3058
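	/*
	 * A receive address entry is a register pair: the low register
	 * holds the first four bytes of the address and the high one
	 * holds the last two plus the RAL_AV "address valid" bit.
	 * Passing a NULL enaddr writes an all-zero (invalid) entry.
	 */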
3059 if (enaddr != NULL) {
3060 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3061 (enaddr[3] << 24);
3062 ral_hi = enaddr[4] | (enaddr[5] << 8);
3063 ral_hi |= RAL_AV;
3064 } else {
3065 ral_lo = 0;
3066 ral_hi = 0;
3067 }
3068
3069 if (sc->sc_type >= WM_T_82544) {
3070 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3071 ral_lo);
3072 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3073 ral_hi);
3074 } else {
3075 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3076 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3077 }
3078 }
3079
3080 /*
3081 * wm_mchash:
3082 *
3083 * Compute the hash of the multicast address for the 4096-bit
3084 * multicast filter.
3085 */
3086 static uint32_t
3087 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3088 {
3089 static const int lo_shift[4] = { 4, 3, 2, 0 };
3090 static const int hi_shift[4] = { 4, 5, 6, 8 };
3091 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3092 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3093 uint32_t hash;
3094
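	/*
	 * The hash is built from the last two bytes of the address;
	 * sc_mchash_type selects which 12-bit window (10-bit on
	 * ICH/PCH) of those bytes is used.  E.g. for type 0 and an
	 * address ending in ab:cd, hash = (0xab >> 4) | (0xcd << 4)
	 * = 0xcda.
	 */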
3095 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3096 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3097 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3098 || (sc->sc_type == WM_T_PCH_SPT)) {
3099 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3100 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3101 return (hash & 0x3ff);
3102 }
3103 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3104 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3105
3106 return (hash & 0xfff);
3107 }
3108
3109 /*
3110 * wm_set_filter:
3111 *
3112 * Set up the receive filter.
3113 */
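/*
 * The flow below: program the broadcast/promiscuous bits in RCTL, set
 * the station address in the first receive address (RAL) slot,
 * invalidate the remaining usable slots, clear the multicast table
 * (MTA), then hash each multicast address into the MTA.  A multicast
 * address range, or promiscuous mode, falls back to accepting all
 * multicasts (RCTL_MPE).
 */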
3114 static void
3115 wm_set_filter(struct wm_softc *sc)
3116 {
3117 struct ethercom *ec = &sc->sc_ethercom;
3118 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3119 struct ether_multi *enm;
3120 struct ether_multistep step;
3121 bus_addr_t mta_reg;
3122 uint32_t hash, reg, bit;
3123 int i, size, ralmax;
3124
3125 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3126 device_xname(sc->sc_dev), __func__));
3127 if (sc->sc_type >= WM_T_82544)
3128 mta_reg = WMREG_CORDOVA_MTA;
3129 else
3130 mta_reg = WMREG_MTA;
3131
3132 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3133
3134 if (ifp->if_flags & IFF_BROADCAST)
3135 sc->sc_rctl |= RCTL_BAM;
3136 if (ifp->if_flags & IFF_PROMISC) {
3137 sc->sc_rctl |= RCTL_UPE;
3138 goto allmulti;
3139 }
3140
3141 /*
3142 * Set the station address in the first RAL slot, and
3143 * clear the remaining slots.
3144 */
3145 if (sc->sc_type == WM_T_ICH8)
3146 size = WM_RAL_TABSIZE_ICH8 -1;
3147 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3148 || (sc->sc_type == WM_T_PCH))
3149 size = WM_RAL_TABSIZE_ICH8;
3150 else if (sc->sc_type == WM_T_PCH2)
3151 size = WM_RAL_TABSIZE_PCH2;
3152 else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
3153 size = WM_RAL_TABSIZE_PCH_LPT;
3154 else if (sc->sc_type == WM_T_82575)
3155 size = WM_RAL_TABSIZE_82575;
3156 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3157 size = WM_RAL_TABSIZE_82576;
3158 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3159 size = WM_RAL_TABSIZE_I350;
3160 else
3161 size = WM_RAL_TABSIZE;
3162 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3163
3164 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
3165 i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3166 switch (i) {
3167 case 0:
3168 /* We can use all entries */
3169 ralmax = size;
3170 break;
3171 case 1:
3172 /* Only RAR[0] */
3173 ralmax = 1;
3174 break;
3175 default:
3176 /* available SHRA + RAR[0] */
3177 ralmax = i + 1;
3178 }
3179 } else
3180 ralmax = size;
3181 for (i = 1; i < size; i++) {
3182 if (i < ralmax)
3183 wm_set_ral(sc, NULL, i);
3184 }
3185
3186 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3187 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3188 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3189 || (sc->sc_type == WM_T_PCH_SPT))
3190 size = WM_ICH8_MC_TABSIZE;
3191 else
3192 size = WM_MC_TABSIZE;
3193 /* Clear out the multicast table. */
3194 for (i = 0; i < size; i++)
3195 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3196
3197 ETHER_FIRST_MULTI(step, ec, enm);
3198 while (enm != NULL) {
3199 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3200 /*
3201 * We must listen to a range of multicast addresses.
3202 * For now, just accept all multicasts, rather than
3203 * trying to set only those filter bits needed to match
3204 * the range. (At this time, the only use of address
3205 * ranges is for IP multicast routing, for which the
3206 * range is big enough to require all bits set.)
3207 */
3208 goto allmulti;
3209 }
3210
3211 hash = wm_mchash(sc, enm->enm_addrlo);
3212
3213 reg = (hash >> 5);
3214 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3215 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3216 || (sc->sc_type == WM_T_PCH2)
3217 || (sc->sc_type == WM_T_PCH_LPT)
3218 || (sc->sc_type == WM_T_PCH_SPT))
3219 reg &= 0x1f;
3220 else
3221 reg &= 0x7f;
3222 bit = hash & 0x1f;
3223
3224 hash = CSR_READ(sc, mta_reg + (reg << 2));
3225 hash |= 1U << bit;
3226
3227 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3228 /*
3229			 * 82544 Errata 9: Certain registers cannot be written
3230 * with particular alignments in PCI-X bus operation
3231 * (FCAH, MTA and VFTA).
3232 */
3233 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3234 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3235 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3236 } else
3237 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3238
3239 ETHER_NEXT_MULTI(step, enm);
3240 }
3241
3242 ifp->if_flags &= ~IFF_ALLMULTI;
3243 goto setit;
3244
3245 allmulti:
3246 ifp->if_flags |= IFF_ALLMULTI;
3247 sc->sc_rctl |= RCTL_MPE;
3248
3249 setit:
3250 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3251 }
3252
3253 /* Reset and init related */
3254
3255 static void
3256 wm_set_vlan(struct wm_softc *sc)
3257 {
3258
3259 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3260 device_xname(sc->sc_dev), __func__));
3261 /* Deal with VLAN enables. */
3262 if (VLAN_ATTACHED(&sc->sc_ethercom))
3263 sc->sc_ctrl |= CTRL_VME;
3264 else
3265 sc->sc_ctrl &= ~CTRL_VME;
3266
3267 /* Write the control registers. */
3268 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3269 }
3270
3271 static void
3272 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3273 {
3274 uint32_t gcr;
3275 pcireg_t ctrl2;
3276
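	/*
	 * Strategy: act only when the completion timeout is still at
	 * its default of 0.  Devices without the v2 capability take a
	 * 10ms timeout directly in GCR; v2-capable devices set a 16ms
	 * timeout through the standard PCIe device control 2 register.
	 * Either way, completion timeout resend is disabled on the
	 * way out.
	 */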
3277 gcr = CSR_READ(sc, WMREG_GCR);
3278
3279 /* Only take action if timeout value is defaulted to 0 */
3280 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3281 goto out;
3282
3283 if ((gcr & GCR_CAP_VER2) == 0) {
3284 gcr |= GCR_CMPL_TMOUT_10MS;
3285 goto out;
3286 }
3287
3288 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3289 sc->sc_pcixe_capoff + PCIE_DCSR2);
3290 ctrl2 |= WM_PCIE_DCSR2_16MS;
3291 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3292 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3293
3294 out:
3295 /* Disable completion timeout resend */
3296 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3297
3298 CSR_WRITE(sc, WMREG_GCR, gcr);
3299 }
3300
3301 void
3302 wm_get_auto_rd_done(struct wm_softc *sc)
3303 {
3304 int i;
3305
3306 /* wait for eeprom to reload */
3307 switch (sc->sc_type) {
3308 case WM_T_82571:
3309 case WM_T_82572:
3310 case WM_T_82573:
3311 case WM_T_82574:
3312 case WM_T_82583:
3313 case WM_T_82575:
3314 case WM_T_82576:
3315 case WM_T_82580:
3316 case WM_T_I350:
3317 case WM_T_I354:
3318 case WM_T_I210:
3319 case WM_T_I211:
3320 case WM_T_80003:
3321 case WM_T_ICH8:
3322 case WM_T_ICH9:
3323 for (i = 0; i < 10; i++) {
3324 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3325 break;
3326 delay(1000);
3327 }
3328 if (i == 10) {
3329 log(LOG_ERR, "%s: auto read from eeprom failed to "
3330 "complete\n", device_xname(sc->sc_dev));
3331 }
3332 break;
3333 default:
3334 break;
3335 }
3336 }
3337
3338 void
3339 wm_lan_init_done(struct wm_softc *sc)
3340 {
3341 uint32_t reg = 0;
3342 int i;
3343
3344 /* wait for eeprom to reload */
3345 switch (sc->sc_type) {
3346 case WM_T_ICH10:
3347 case WM_T_PCH:
3348 case WM_T_PCH2:
3349 case WM_T_PCH_LPT:
3350 case WM_T_PCH_SPT:
3351 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3352 reg = CSR_READ(sc, WMREG_STATUS);
3353 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3354 break;
3355 delay(100);
3356 }
3357 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3358 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3359 "complete\n", device_xname(sc->sc_dev), __func__);
3360 }
3361 break;
3362 default:
3363 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3364 __func__);
3365 break;
3366 }
3367
3368 reg &= ~STATUS_LAN_INIT_DONE;
3369 CSR_WRITE(sc, WMREG_STATUS, reg);
3370 }
3371
3372 void
3373 wm_get_cfg_done(struct wm_softc *sc)
3374 {
3375 int mask;
3376 uint32_t reg;
3377 int i;
3378
3379 /* wait for eeprom to reload */
3380 switch (sc->sc_type) {
3381 case WM_T_82542_2_0:
3382 case WM_T_82542_2_1:
3383 /* null */
3384 break;
3385 case WM_T_82543:
3386 case WM_T_82544:
3387 case WM_T_82540:
3388 case WM_T_82545:
3389 case WM_T_82545_3:
3390 case WM_T_82546:
3391 case WM_T_82546_3:
3392 case WM_T_82541:
3393 case WM_T_82541_2:
3394 case WM_T_82547:
3395 case WM_T_82547_2:
3396 case WM_T_82573:
3397 case WM_T_82574:
3398 case WM_T_82583:
3399 /* generic */
3400 delay(10*1000);
3401 break;
3402 case WM_T_80003:
3403 case WM_T_82571:
3404 case WM_T_82572:
3405 case WM_T_82575:
3406 case WM_T_82576:
3407 case WM_T_82580:
3408 case WM_T_I350:
3409 case WM_T_I354:
3410 case WM_T_I210:
3411 case WM_T_I211:
3412 if (sc->sc_type == WM_T_82571) {
3413 /* Only 82571 shares port 0 */
3414 mask = EEMNGCTL_CFGDONE_0;
3415 } else
3416 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3417 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3418 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3419 break;
3420 delay(1000);
3421 }
3422 if (i >= WM_PHY_CFG_TIMEOUT) {
3423 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3424 device_xname(sc->sc_dev), __func__));
3425 }
3426 break;
3427 case WM_T_ICH8:
3428 case WM_T_ICH9:
3429 case WM_T_ICH10:
3430 case WM_T_PCH:
3431 case WM_T_PCH2:
3432 case WM_T_PCH_LPT:
3433 case WM_T_PCH_SPT:
3434 delay(10*1000);
3435 if (sc->sc_type >= WM_T_ICH10)
3436 wm_lan_init_done(sc);
3437 else
3438 wm_get_auto_rd_done(sc);
3439
3440 reg = CSR_READ(sc, WMREG_STATUS);
3441 if ((reg & STATUS_PHYRA) != 0)
3442 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3443 break;
3444 default:
3445 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3446 __func__);
3447 break;
3448 }
3449 }
3450
3451 /* Init hardware bits */
3452 void
3453 wm_initialize_hardware_bits(struct wm_softc *sc)
3454 {
3455 uint32_t tarc0, tarc1, reg;
3456
3457 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3458 device_xname(sc->sc_dev), __func__));
	/* For 82571 variants, 80003 and ICHs */
3460 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3461 || (sc->sc_type >= WM_T_80003)) {
3462
3463 /* Transmit Descriptor Control 0 */
3464 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3465 reg |= TXDCTL_COUNT_DESC;
3466 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3467
3468 /* Transmit Descriptor Control 1 */
3469 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3470 reg |= TXDCTL_COUNT_DESC;
3471 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3472
3473 /* TARC0 */
3474 tarc0 = CSR_READ(sc, WMREG_TARC0);
3475 switch (sc->sc_type) {
3476 case WM_T_82571:
3477 case WM_T_82572:
3478 case WM_T_82573:
3479 case WM_T_82574:
3480 case WM_T_82583:
3481 case WM_T_80003:
3482 /* Clear bits 30..27 */
3483 tarc0 &= ~__BITS(30, 27);
3484 break;
3485 default:
3486 break;
3487 }
3488
3489 switch (sc->sc_type) {
3490 case WM_T_82571:
3491 case WM_T_82572:
3492 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3493
3494 tarc1 = CSR_READ(sc, WMREG_TARC1);
3495 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3496 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3497 /* 8257[12] Errata No.7 */
			tarc1 |= __BIT(22);	/* TARC1 bit 22 */
3499
3500 /* TARC1 bit 28 */
3501 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3502 tarc1 &= ~__BIT(28);
3503 else
3504 tarc1 |= __BIT(28);
3505 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3506
			/*
			 * 8257[12] Errata No.13
			 * Disable Dynamic Clock Gating.
			 */
3511 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3512 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3513 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3514 break;
3515 case WM_T_82573:
3516 case WM_T_82574:
3517 case WM_T_82583:
3518 if ((sc->sc_type == WM_T_82574)
3519 || (sc->sc_type == WM_T_82583))
3520 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3521
3522 /* Extended Device Control */
3523 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3524 reg &= ~__BIT(23); /* Clear bit 23 */
3525 reg |= __BIT(22); /* Set bit 22 */
3526 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3527
3528 /* Device Control */
3529 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3530 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3531
3532 /* PCIe Control Register */
			/*
			 * 82573 Errata (unknown).
			 *
			 * 82574 Errata 25 and 82583 Errata 12
			 * "Dropped Rx Packets":
			 *   NVM image version 2.1.4 and newer does not
			 *   have this bug.
			 */
3540 reg = CSR_READ(sc, WMREG_GCR);
3541 reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
3542 CSR_WRITE(sc, WMREG_GCR, reg);
3543
3544 if ((sc->sc_type == WM_T_82574)
3545 || (sc->sc_type == WM_T_82583)) {
				/*
				 * The documentation says this bit must be
				 * set for proper operation.
				 */
3550 reg = CSR_READ(sc, WMREG_GCR);
3551 reg |= __BIT(22);
3552 CSR_WRITE(sc, WMREG_GCR, reg);
3553
				/*
				 * Apply a workaround for the hardware
				 * erratum documented in the errata docs.
				 * It fixes an issue where error-prone or
				 * unreliable PCIe completions occur,
				 * particularly with ASPM enabled.  Without
				 * the fix, the issue can cause Tx timeouts.
				 */
3562 reg = CSR_READ(sc, WMREG_GCR2);
3563 reg |= __BIT(0);
3564 CSR_WRITE(sc, WMREG_GCR2, reg);
3565 }
3566 break;
3567 case WM_T_80003:
3568 /* TARC0 */
3569 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3570 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3572
3573 /* TARC1 bit 28 */
3574 tarc1 = CSR_READ(sc, WMREG_TARC1);
3575 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3576 tarc1 &= ~__BIT(28);
3577 else
3578 tarc1 |= __BIT(28);
3579 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3580 break;
3581 case WM_T_ICH8:
3582 case WM_T_ICH9:
3583 case WM_T_ICH10:
3584 case WM_T_PCH:
3585 case WM_T_PCH2:
3586 case WM_T_PCH_LPT:
3587 case WM_T_PCH_SPT:
3588 /* TARC0 */
3589 if ((sc->sc_type == WM_T_ICH8)
3590 || (sc->sc_type == WM_T_PCH_SPT)) {
3591 /* Set TARC0 bits 29 and 28 */
3592 tarc0 |= __BITS(29, 28);
3593 }
3594 /* Set TARC0 bits 23,24,26,27 */
3595 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3596
3597 /* CTRL_EXT */
3598 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3599 reg |= __BIT(22); /* Set bit 22 */
3600 /*
3601 * Enable PHY low-power state when MAC is at D3
3602 * w/o WoL
3603 */
3604 if (sc->sc_type >= WM_T_PCH)
3605 reg |= CTRL_EXT_PHYPDEN;
3606 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3607
3608 /* TARC1 */
3609 tarc1 = CSR_READ(sc, WMREG_TARC1);
3610 /* bit 28 */
3611 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3612 tarc1 &= ~__BIT(28);
3613 else
3614 tarc1 |= __BIT(28);
3615 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3616 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3617
3618 /* Device Status */
3619 if (sc->sc_type == WM_T_ICH8) {
3620 reg = CSR_READ(sc, WMREG_STATUS);
3621 reg &= ~__BIT(31);
3622 CSR_WRITE(sc, WMREG_STATUS, reg);
3623
3624 }
3625
3626 /* IOSFPC */
3627 if (sc->sc_type == WM_T_PCH_SPT) {
3628 reg = CSR_READ(sc, WMREG_IOSFPC);
3629 reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
3630 CSR_WRITE(sc, WMREG_IOSFPC, reg);
3631 }
			/*
			 * To work around a descriptor data corruption
			 * issue during NFS v2 UDP traffic, just disable
			 * the NFS filtering capability.
			 */
3637 reg = CSR_READ(sc, WMREG_RFCTL);
3638 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3639 CSR_WRITE(sc, WMREG_RFCTL, reg);
3640 break;
3641 default:
3642 break;
3643 }
3644 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3645
3646 /*
3647 * 8257[12] Errata No.52 and some others.
3648 * Avoid RSS Hash Value bug.
3649 */
3650 switch (sc->sc_type) {
3651 case WM_T_82571:
3652 case WM_T_82572:
3653 case WM_T_82573:
3654 case WM_T_80003:
3655 case WM_T_ICH8:
3656 reg = CSR_READ(sc, WMREG_RFCTL);
			reg |= WMREG_RFCTL_NEWIPV6EXDIS
			    | WMREG_RFCTL_IPV6EXDIS;
3658 CSR_WRITE(sc, WMREG_RFCTL, reg);
3659 break;
3660 default:
3661 break;
3662 }
3663 }
3664 }
3665
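/*
 * wm_rxpbs_adjust_82580:
 *
 *	Translate the encoded RXPBS packet buffer size of 82580 (and
 *	newer) devices through a lookup table; out-of-range encodings
 *	map to 0.
 */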
3666 static uint32_t
3667 wm_rxpbs_adjust_82580(uint32_t val)
3668 {
3669 uint32_t rv = 0;
3670
3671 if (val < __arraycount(wm_82580_rxpbs_table))
3672 rv = wm_82580_rxpbs_table[val];
3673
3674 return rv;
3675 }
3676
/*
 * wm_reset:
 *
 *	Reset the chip, bringing it to a known state.
 */
3682 static void
3683 wm_reset(struct wm_softc *sc)
3684 {
3685 int phy_reset = 0;
3686 int i, error = 0;
3687 uint32_t reg, mask;
3688
3689 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3690 device_xname(sc->sc_dev), __func__));
3691 /*
3692 * Allocate on-chip memory according to the MTU size.
3693 * The Packet Buffer Allocation register must be written
3694 * before the chip is reset.
3695 */
3696 switch (sc->sc_type) {
3697 case WM_T_82547:
3698 case WM_T_82547_2:
3699 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3700 PBA_22K : PBA_30K;
3701 for (i = 0; i < sc->sc_nqueues; i++) {
3702 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3703 txq->txq_fifo_head = 0;
3704 txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3705 txq->txq_fifo_size =
3706 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3707 txq->txq_fifo_stall = 0;
3708 }
3709 break;
3710 case WM_T_82571:
3711 case WM_T_82572:
	case WM_T_82575:  /* XXX need special handling for jumbo frames */
3713 case WM_T_80003:
3714 sc->sc_pba = PBA_32K;
3715 break;
3716 case WM_T_82573:
3717 sc->sc_pba = PBA_12K;
3718 break;
3719 case WM_T_82574:
3720 case WM_T_82583:
3721 sc->sc_pba = PBA_20K;
3722 break;
3723 case WM_T_82576:
3724 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3725 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3726 break;
3727 case WM_T_82580:
3728 case WM_T_I350:
3729 case WM_T_I354:
3730 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3731 break;
3732 case WM_T_I210:
3733 case WM_T_I211:
3734 sc->sc_pba = PBA_34K;
3735 break;
3736 case WM_T_ICH8:
3737 /* Workaround for a bit corruption issue in FIFO memory */
3738 sc->sc_pba = PBA_8K;
3739 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3740 break;
3741 case WM_T_ICH9:
3742 case WM_T_ICH10:
3743 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3744 PBA_14K : PBA_10K;
3745 break;
3746 case WM_T_PCH:
3747 case WM_T_PCH2:
3748 case WM_T_PCH_LPT:
3749 case WM_T_PCH_SPT:
3750 sc->sc_pba = PBA_26K;
3751 break;
3752 default:
3753 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3754 PBA_40K : PBA_48K;
3755 break;
3756 }
3757 /*
3758 * Only old or non-multiqueue devices have the PBA register
3759 * XXX Need special handling for 82575.
3760 */
3761 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3762 || (sc->sc_type == WM_T_82575))
3763 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3764
3765 /* Prevent the PCI-E bus from sticking */
3766 if (sc->sc_flags & WM_F_PCIE) {
3767 int timeout = 800;
3768
3769 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3770 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3771
3772 while (timeout--) {
3773 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3774 == 0)
3775 break;
3776 delay(100);
3777 }
3778 }
3779
3780 /* Set the completion timeout for interface */
3781 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3782 || (sc->sc_type == WM_T_82580)
3783 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3784 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3785 wm_set_pcie_completion_timeout(sc);
3786
3787 /* Clear interrupt */
3788 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3789 if (sc->sc_nintrs > 1) {
3790 if (sc->sc_type != WM_T_82574) {
3791 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3792 CSR_WRITE(sc, WMREG_EIAC, 0);
3793 } else {
3794 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3795 }
3796 }
3797
3798 /* Stop the transmit and receive processes. */
3799 CSR_WRITE(sc, WMREG_RCTL, 0);
3800 sc->sc_rctl &= ~RCTL_EN;
3801 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3802 CSR_WRITE_FLUSH(sc);
3803
3804 /* XXX set_tbi_sbp_82543() */
3805
3806 delay(10*1000);
3807
3808 /* Must acquire the MDIO ownership before MAC reset */
3809 switch (sc->sc_type) {
3810 case WM_T_82573:
3811 case WM_T_82574:
3812 case WM_T_82583:
3813 error = wm_get_hw_semaphore_82573(sc);
3814 break;
3815 default:
3816 break;
3817 }
3818
3819 /*
3820 * 82541 Errata 29? & 82547 Errata 28?
3821 * See also the description about PHY_RST bit in CTRL register
3822 * in 8254x_GBe_SDM.pdf.
3823 */
3824 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3825 CSR_WRITE(sc, WMREG_CTRL,
3826 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3827 CSR_WRITE_FLUSH(sc);
3828 delay(5000);
3829 }
3830
3831 switch (sc->sc_type) {
3832 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3833 case WM_T_82541:
3834 case WM_T_82541_2:
3835 case WM_T_82547:
3836 case WM_T_82547_2:
3837 /*
3838 * On some chipsets, a reset through a memory-mapped write
3839 * cycle can cause the chip to reset before completing the
3840 * write cycle. This causes major headache that can be
3841 * avoided by issuing the reset via indirect register writes
3842 * through I/O space.
3843 *
3844 * So, if we successfully mapped the I/O BAR at attach time,
3845 * use that. Otherwise, try our luck with a memory-mapped
3846 * reset.
3847 */
3848 if (sc->sc_flags & WM_F_IOH_VALID)
3849 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3850 else
3851 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3852 break;
3853 case WM_T_82545_3:
3854 case WM_T_82546_3:
3855 /* Use the shadow control register on these chips. */
3856 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3857 break;
3858 case WM_T_80003:
3859 mask = swfwphysem[sc->sc_funcid];
3860 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3861 wm_get_swfw_semaphore(sc, mask);
3862 CSR_WRITE(sc, WMREG_CTRL, reg);
3863 wm_put_swfw_semaphore(sc, mask);
3864 break;
3865 case WM_T_ICH8:
3866 case WM_T_ICH9:
3867 case WM_T_ICH10:
3868 case WM_T_PCH:
3869 case WM_T_PCH2:
3870 case WM_T_PCH_LPT:
3871 case WM_T_PCH_SPT:
3872 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3873 if (wm_phy_resetisblocked(sc) == false) {
3874 /*
3875 * Gate automatic PHY configuration by hardware on
3876 * non-managed 82579
3877 */
3878 if ((sc->sc_type == WM_T_PCH2)
3879 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3880 == 0))
3881 wm_gate_hw_phy_config_ich8lan(sc, true);
3882
3883 reg |= CTRL_PHY_RESET;
3884 phy_reset = 1;
3885 } else
3886 printf("XXX reset is blocked!!!\n");
3887 wm_get_swfwhw_semaphore(sc);
3888 CSR_WRITE(sc, WMREG_CTRL, reg);
		/* Don't insert a completion barrier during reset */
3890 delay(20*1000);
3891 wm_put_swfwhw_semaphore(sc);
3892 break;
3893 case WM_T_82580:
3894 case WM_T_I350:
3895 case WM_T_I354:
3896 case WM_T_I210:
3897 case WM_T_I211:
3898 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3899 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3900 CSR_WRITE_FLUSH(sc);
3901 delay(5000);
3902 break;
3903 case WM_T_82542_2_0:
3904 case WM_T_82542_2_1:
3905 case WM_T_82543:
3906 case WM_T_82540:
3907 case WM_T_82545:
3908 case WM_T_82546:
3909 case WM_T_82571:
3910 case WM_T_82572:
3911 case WM_T_82573:
3912 case WM_T_82574:
3913 case WM_T_82575:
3914 case WM_T_82576:
3915 case WM_T_82583:
3916 default:
3917 /* Everything else can safely use the documented method. */
3918 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3919 break;
3920 }
3921
3922 /* Must release the MDIO ownership after MAC reset */
3923 switch (sc->sc_type) {
3924 case WM_T_82573:
3925 case WM_T_82574:
3926 case WM_T_82583:
3927 if (error == 0)
3928 wm_put_hw_semaphore_82573(sc);
3929 break;
3930 default:
3931 break;
3932 }
3933
3934 if (phy_reset != 0)
3935 wm_get_cfg_done(sc);
3936
3937 /* reload EEPROM */
3938 switch (sc->sc_type) {
3939 case WM_T_82542_2_0:
3940 case WM_T_82542_2_1:
3941 case WM_T_82543:
3942 case WM_T_82544:
3943 delay(10);
3944 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3945 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3946 CSR_WRITE_FLUSH(sc);
3947 delay(2000);
3948 break;
3949 case WM_T_82540:
3950 case WM_T_82545:
3951 case WM_T_82545_3:
3952 case WM_T_82546:
3953 case WM_T_82546_3:
3954 delay(5*1000);
3955 /* XXX Disable HW ARPs on ASF enabled adapters */
3956 break;
3957 case WM_T_82541:
3958 case WM_T_82541_2:
3959 case WM_T_82547:
3960 case WM_T_82547_2:
3961 delay(20000);
3962 /* XXX Disable HW ARPs on ASF enabled adapters */
3963 break;
3964 case WM_T_82571:
3965 case WM_T_82572:
3966 case WM_T_82573:
3967 case WM_T_82574:
3968 case WM_T_82583:
3969 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3970 delay(10);
3971 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3972 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3973 CSR_WRITE_FLUSH(sc);
3974 }
3975 /* check EECD_EE_AUTORD */
3976 wm_get_auto_rd_done(sc);
		/*
		 * PHY configuration from the NVM starts only after
		 * EECD_EE_AUTORD is set.
		 */
3981 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3982 || (sc->sc_type == WM_T_82583))
3983 delay(25*1000);
3984 break;
3985 case WM_T_82575:
3986 case WM_T_82576:
3987 case WM_T_82580:
3988 case WM_T_I350:
3989 case WM_T_I354:
3990 case WM_T_I210:
3991 case WM_T_I211:
3992 case WM_T_80003:
3993 /* check EECD_EE_AUTORD */
3994 wm_get_auto_rd_done(sc);
3995 break;
3996 case WM_T_ICH8:
3997 case WM_T_ICH9:
3998 case WM_T_ICH10:
3999 case WM_T_PCH:
4000 case WM_T_PCH2:
4001 case WM_T_PCH_LPT:
4002 case WM_T_PCH_SPT:
4003 break;
4004 default:
4005 panic("%s: unknown type\n", __func__);
4006 }
4007
4008 /* Check whether EEPROM is present or not */
4009 switch (sc->sc_type) {
4010 case WM_T_82575:
4011 case WM_T_82576:
4012 case WM_T_82580:
4013 case WM_T_I350:
4014 case WM_T_I354:
4015 case WM_T_ICH8:
4016 case WM_T_ICH9:
4017 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4018 /* Not found */
4019 sc->sc_flags |= WM_F_EEPROM_INVALID;
4020 if (sc->sc_type == WM_T_82575)
4021 wm_reset_init_script_82575(sc);
4022 }
4023 break;
4024 default:
4025 break;
4026 }
4027
4028 if ((sc->sc_type == WM_T_82580)
4029 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4030 /* clear global device reset status bit */
4031 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4032 }
4033
4034 /* Clear any pending interrupt events. */
4035 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4036 reg = CSR_READ(sc, WMREG_ICR);
4037 if (sc->sc_nintrs > 1) {
4038 if (sc->sc_type != WM_T_82574) {
4039 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4040 CSR_WRITE(sc, WMREG_EIAC, 0);
4041 } else
4042 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4043 }
4044
4045 /* reload sc_ctrl */
4046 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4047
4048 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
4049 wm_set_eee_i350(sc);
4050
4051 /* dummy read from WUC */
4052 if (sc->sc_type == WM_T_PCH)
4053 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4054 /*
4055 * For PCH, this write will make sure that any noise will be detected
4056 * as a CRC error and be dropped rather than show up as a bad packet
	 * to the DMA engine.
4058 */
4059 if (sc->sc_type == WM_T_PCH)
4060 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4061
4062 if (sc->sc_type >= WM_T_82544)
4063 CSR_WRITE(sc, WMREG_WUC, 0);
4064
4065 wm_reset_mdicnfg_82580(sc);
4066
4067 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
4068 wm_pll_workaround_i210(sc);
4069 }
4070
4071 /*
4072 * wm_add_rxbuf:
4073 *
 *	Add a receive buffer to the indicated descriptor.
4075 */
4076 static int
4077 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
4078 {
4079 struct wm_softc *sc = rxq->rxq_sc;
4080 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
4081 struct mbuf *m;
4082 int error;
4083
4084 KASSERT(WM_RX_LOCKED(rxq));
4085
4086 MGETHDR(m, M_DONTWAIT, MT_DATA);
4087 if (m == NULL)
4088 return ENOBUFS;
4089
4090 MCLGET(m, M_DONTWAIT);
4091 if ((m->m_flags & M_EXT) == 0) {
4092 m_freem(m);
4093 return ENOBUFS;
4094 }
4095
4096 if (rxs->rxs_mbuf != NULL)
4097 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4098
4099 rxs->rxs_mbuf = m;
4100
4101 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4102 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4103 BUS_DMA_READ | BUS_DMA_NOWAIT);
4104 if (error) {
4105 /* XXX XXX XXX */
4106 aprint_error_dev(sc->sc_dev,
4107 "unable to load rx DMA map %d, error = %d\n",
4108 idx, error);
4109 panic("wm_add_rxbuf");
4110 }
4111
4112 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4113 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4114
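	/*
	 * On NEWQUEUE (82575 and later) hardware, only (re)write the
	 * descriptor while the receiver is enabled; otherwise leave it
	 * to the init path, which initializes the whole ring at once.
	 */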
4115 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4116 if ((sc->sc_rctl & RCTL_EN) != 0)
4117 wm_init_rxdesc(rxq, idx);
4118 } else
4119 wm_init_rxdesc(rxq, idx);
4120
4121 return 0;
4122 }
4123
4124 /*
4125 * wm_rxdrain:
4126 *
4127 * Drain the receive queue.
4128 */
4129 static void
4130 wm_rxdrain(struct wm_rxqueue *rxq)
4131 {
4132 struct wm_softc *sc = rxq->rxq_sc;
4133 struct wm_rxsoft *rxs;
4134 int i;
4135
4136 KASSERT(WM_RX_LOCKED(rxq));
4137
4138 for (i = 0; i < WM_NRXDESC; i++) {
4139 rxs = &rxq->rxq_soft[i];
4140 if (rxs->rxs_mbuf != NULL) {
4141 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4142 m_freem(rxs->rxs_mbuf);
4143 rxs->rxs_mbuf = NULL;
4144 }
4145 }
4146 }
4147
4148
4149 /*
 * XXX copied from FreeBSD's sys/net/rss_config.c
4151 */
4152 /*
4153 * RSS secret key, intended to prevent attacks on load-balancing. Its
4154 * effectiveness may be limited by algorithm choice and available entropy
4155 * during the boot.
4156 *
4157 * XXXRW: And that we don't randomize it yet!
4158 *
4159 * This is the default Microsoft RSS specification key which is also
4160 * the Chelsio T5 firmware default key.
4161 */
4162 #define RSS_KEYSIZE 40
4163 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
4164 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
4165 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
4166 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
4167 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
4168 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
4169 };
4170
/*
 * The caller must pass an array of size sizeof(wm_rss_key).
 *
 * XXX
 * As if_ixgbe may also use this function, it should not be an
 * if_wm specific function.
 */
4178 static void
4179 wm_rss_getkey(uint8_t *key)
4180 {
4181
4182 memcpy(key, wm_rss_key, sizeof(wm_rss_key));
4183 }
4184
4185 /*
4186 * Setup registers for RSS.
4187 *
4188 * XXX not yet VMDq support
4189 */
4190 static void
4191 wm_init_rss(struct wm_softc *sc)
4192 {
4193 uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4194 int i;
4195
4196 CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
4197
4198 for (i = 0; i < RETA_NUM_ENTRIES; i++) {
4199 int qid, reta_ent;
4200
4201 qid = i % sc->sc_nqueues;
		switch (sc->sc_type) {
4203 case WM_T_82574:
4204 reta_ent = __SHIFTIN(qid,
4205 RETA_ENT_QINDEX_MASK_82574);
4206 break;
4207 case WM_T_82575:
4208 reta_ent = __SHIFTIN(qid,
4209 RETA_ENT_QINDEX1_MASK_82575);
4210 break;
4211 default:
4212 reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
4213 break;
4214 }
4215
4216 reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
4217 reta_reg &= ~RETA_ENTRY_MASK_Q(i);
4218 reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
4219 CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
4220 }
4221
4222 wm_rss_getkey((uint8_t *)rss_key);
4223 for (i = 0; i < RSSRK_NUM_REGS; i++)
4224 CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
4225
4226 if (sc->sc_type == WM_T_82574)
4227 mrqc = MRQC_ENABLE_RSS_MQ_82574;
4228 else
4229 mrqc = MRQC_ENABLE_RSS_MQ;
4230
	/* XXX
	 * The same as FreeBSD's igb.
	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
	 */
4235 mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
4236 mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
4237 mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
4238 mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
4239
4240 CSR_WRITE(sc, WMREG_MRQC, mrqc);
4241 }
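
/*
 * A minimal sketch (not driver code) of how the hardware consumes the
 * registers programmed above: the low bits of the computed Toeplitz
 * hash index the redirection table, and the selected entry names the
 * RX queue.  Since wm_init_rss() fills the table round-robin, the
 * effective mapping is assumed to reduce to a simple modulo.
 */
#if 0
static unsigned int
wm_rss_queue_for_hash(uint32_t hash, unsigned int nqueues)
{
	unsigned int reta_index = hash % RETA_NUM_ENTRIES;

	/* Entry i was programmed to (i % nqueues) above. */
	return reta_index % nqueues;
}
#endif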
4242
/*
 * Adjust the TX and RX queue numbers which the system actually uses.
 *
 * The numbers are affected by the following parameters:
 * - the number of hardware queues
 * - the number of MSI-X vectors (the "nvectors" argument)
 * - ncpu
 */
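/*
 * Example (hypothetical numbers): an 82576 (16 hardware queues) given
 * nvectors = 5 on an 8-CPU machine ends up with
 * sc_nqueues = min(16, 5 - 1, 8) = 4, since one MSI-X vector is
 * reserved for the link interrupt and the rest serve one queue pair
 * each.
 */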
4251 static void
4252 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
4253 {
4254 int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
4255
4256 if (nvectors < 2) {
4257 sc->sc_nqueues = 1;
4258 return;
4259 }
4260
	switch (sc->sc_type) {
4262 case WM_T_82572:
4263 hw_ntxqueues = 2;
4264 hw_nrxqueues = 2;
4265 break;
4266 case WM_T_82574:
4267 hw_ntxqueues = 2;
4268 hw_nrxqueues = 2;
4269 break;
4270 case WM_T_82575:
4271 hw_ntxqueues = 4;
4272 hw_nrxqueues = 4;
4273 break;
4274 case WM_T_82576:
4275 hw_ntxqueues = 16;
4276 hw_nrxqueues = 16;
4277 break;
4278 case WM_T_82580:
4279 case WM_T_I350:
4280 case WM_T_I354:
4281 hw_ntxqueues = 8;
4282 hw_nrxqueues = 8;
4283 break;
4284 case WM_T_I210:
4285 hw_ntxqueues = 4;
4286 hw_nrxqueues = 4;
4287 break;
4288 case WM_T_I211:
4289 hw_ntxqueues = 2;
4290 hw_nrxqueues = 2;
4291 break;
	/*
	 * The Ethernet controllers below do not support MSI-X,
	 * so this driver does not use multiqueue on them:
	 * - WM_T_80003
	 * - WM_T_ICH8
	 * - WM_T_ICH9
	 * - WM_T_ICH10
	 * - WM_T_PCH
	 * - WM_T_PCH2
	 * - WM_T_PCH_LPT
	 */
4303 default:
4304 hw_ntxqueues = 1;
4305 hw_nrxqueues = 1;
4306 break;
4307 }
4308
4309 hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
4310
	/*
	 * Since more queues than MSI-X vectors cannot improve scaling,
	 * limit the number of queues actually used.
	 */
4315 if (nvectors < hw_nqueues + 1) {
4316 sc->sc_nqueues = nvectors - 1;
4317 } else {
4318 sc->sc_nqueues = hw_nqueues;
4319 }
4320
	/*
	 * Since more queues than CPUs cannot improve scaling, limit
	 * the number of queues actually used.
	 */
4325 if (ncpu < sc->sc_nqueues)
4326 sc->sc_nqueues = ncpu;
4327 }
4328
4329 /*
4330 * Both single interrupt MSI and INTx can use this function.
4331 */
4332 static int
4333 wm_setup_legacy(struct wm_softc *sc)
4334 {
4335 pci_chipset_tag_t pc = sc->sc_pc;
4336 const char *intrstr = NULL;
4337 char intrbuf[PCI_INTRSTR_LEN];
4338 int error;
4339
4340 error = wm_alloc_txrx_queues(sc);
4341 if (error) {
4342 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4343 error);
4344 return ENOMEM;
4345 }
4346 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
4347 sizeof(intrbuf));
4348 #ifdef WM_MPSAFE
4349 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
4350 #endif
4351 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
4352 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
4353 if (sc->sc_ihs[0] == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
4355 (pci_intr_type(sc->sc_intrs[0])
4356 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
4357 return ENOMEM;
4358 }
4359
4360 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4361 sc->sc_nintrs = 1;
4362 return 0;
4363 }
4364
4365 static int
4366 wm_setup_msix(struct wm_softc *sc)
4367 {
4368 void *vih;
4369 kcpuset_t *affinity;
4370 int qidx, error, intr_idx, txrx_established;
4371 pci_chipset_tag_t pc = sc->sc_pc;
4372 const char *intrstr = NULL;
4373 char intrbuf[PCI_INTRSTR_LEN];
4374 char intr_xname[INTRDEVNAMEBUF];
4375
4376 if (sc->sc_nqueues < ncpu) {
		/*
		 * To avoid clashing with other devices' interrupts, the
		 * affinity of Tx/Rx interrupts starts from CPU#1.
		 */
4381 sc->sc_affinity_offset = 1;
4382 } else {
		/*
		 * In this case, this device uses all CPUs.  So, we unify
		 * the affinitized cpu_index with the MSI-X vector number
		 * for readability.
		 */
4387 sc->sc_affinity_offset = 0;
4388 }
4389
4390 error = wm_alloc_txrx_queues(sc);
4391 if (error) {
4392 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4393 error);
4394 return ENOMEM;
4395 }
4396
4397 kcpuset_create(&affinity, false);
4398 intr_idx = 0;
4399
4400 /*
4401 * TX and RX
4402 */
4403 txrx_established = 0;
4404 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
4405 struct wm_queue *wmq = &sc->sc_queue[qidx];
4406 int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
4407
4408 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4409 sizeof(intrbuf));
4410 #ifdef WM_MPSAFE
4411 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4412 PCI_INTR_MPSAFE, true);
4413 #endif
4414 memset(intr_xname, 0, sizeof(intr_xname));
4415 snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
4416 device_xname(sc->sc_dev), qidx);
4417 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4418 IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
4419 if (vih == NULL) {
4420 aprint_error_dev(sc->sc_dev,
4421 "unable to establish MSI-X(for TX and RX)%s%s\n",
4422 intrstr ? " at " : "",
4423 intrstr ? intrstr : "");
4424
4425 goto fail;
4426 }
4427 kcpuset_zero(affinity);
4428 /* Round-robin affinity */
4429 kcpuset_set(affinity, affinity_to);
4430 error = interrupt_distribute(vih, affinity, NULL);
4431 if (error == 0) {
4432 aprint_normal_dev(sc->sc_dev,
4433 "for TX and RX interrupting at %s affinity to %u\n",
4434 intrstr, affinity_to);
4435 } else {
4436 aprint_normal_dev(sc->sc_dev,
4437 "for TX and RX interrupting at %s\n", intrstr);
4438 }
4439 sc->sc_ihs[intr_idx] = vih;
		wmq->wmq_id = qidx;
4441 wmq->wmq_intr_idx = intr_idx;
4442
4443 txrx_established++;
4444 intr_idx++;
4445 }
4446
4447 /*
4448 * LINK
4449 */
4450 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4451 sizeof(intrbuf));
4452 #ifdef WM_MPSAFE
4453 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
4454 #endif
4455 memset(intr_xname, 0, sizeof(intr_xname));
4456 snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
4457 device_xname(sc->sc_dev));
4458 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4459 IPL_NET, wm_linkintr_msix, sc, intr_xname);
4460 if (vih == NULL) {
4461 aprint_error_dev(sc->sc_dev,
4462 "unable to establish MSI-X(for LINK)%s%s\n",
4463 intrstr ? " at " : "",
4464 intrstr ? intrstr : "");
4465
4466 goto fail;
4467 }
4468 /* keep default affinity to LINK interrupt */
4469 aprint_normal_dev(sc->sc_dev,
4470 "for LINK interrupting at %s\n", intrstr);
4471 sc->sc_ihs[intr_idx] = vih;
4472 sc->sc_link_intr_idx = intr_idx;
4473
4474 sc->sc_nintrs = sc->sc_nqueues + 1;
4475 kcpuset_destroy(affinity);
4476 return 0;
4477
4478 fail:
4479 for (qidx = 0; qidx < txrx_established; qidx++) {
4480 struct wm_queue *wmq = &sc->sc_queue[qidx];
4481 pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
4482 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
4483 }
4484
4485 kcpuset_destroy(affinity);
4486 return ENOMEM;
4487 }
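
/*
 * On success, the MSI-X vector layout established by wm_setup_msix()
 * above is: vectors 0 .. sc_nqueues-1 handle the TX/RX queue pairs and
 * vector sc_nqueues handles link status, which is why sc_nintrs is set
 * to sc_nqueues + 1.
 */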
4488
4489 /*
4490 * wm_init: [ifnet interface function]
4491 *
4492 * Initialize the interface.
4493 */
4494 static int
4495 wm_init(struct ifnet *ifp)
4496 {
4497 struct wm_softc *sc = ifp->if_softc;
4498 int ret;
4499
4500 WM_CORE_LOCK(sc);
4501 ret = wm_init_locked(ifp);
4502 WM_CORE_UNLOCK(sc);
4503
4504 return ret;
4505 }
4506
4507 static int
4508 wm_init_locked(struct ifnet *ifp)
4509 {
4510 struct wm_softc *sc = ifp->if_softc;
4511 int i, j, trynum, error = 0;
4512 uint32_t reg;
4513
4514 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4515 device_xname(sc->sc_dev), __func__));
4516 KASSERT(WM_CORE_LOCKED(sc));
	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal MTU,
	 * on such platforms.  One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
	 */
4528 #ifdef __NO_STRICT_ALIGNMENT
4529 sc->sc_align_tweak = 0;
4530 #else
4531 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4532 sc->sc_align_tweak = 0;
4533 else
4534 sc->sc_align_tweak = 2;
4535 #endif /* __NO_STRICT_ALIGNMENT */
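
	/*
	 * With a 14-byte Ethernet header, offsetting the receive buffer
	 * by 2 bytes puts the IP header on a 4-byte boundary, which is
	 * what the tweak above buys on strict-alignment platforms.
	 */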
4536
4537 /* Cancel any pending I/O. */
4538 wm_stop_locked(ifp, 0);
4539
4540 /* update statistics before reset */
4541 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4542 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4543
4544 /* Reset the chip to a known state. */
4545 wm_reset(sc);
4546
4547 switch (sc->sc_type) {
4548 case WM_T_82571:
4549 case WM_T_82572:
4550 case WM_T_82573:
4551 case WM_T_82574:
4552 case WM_T_82583:
4553 case WM_T_80003:
4554 case WM_T_ICH8:
4555 case WM_T_ICH9:
4556 case WM_T_ICH10:
4557 case WM_T_PCH:
4558 case WM_T_PCH2:
4559 case WM_T_PCH_LPT:
4560 case WM_T_PCH_SPT:
4561 /* AMT based hardware can now take control from firmware */
4562 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
4563 wm_get_hw_control(sc);
4564 break;
4565 default:
4566 break;
4567 }
4568
4569 /* Init hardware bits */
4570 wm_initialize_hardware_bits(sc);
4571
4572 /* Reset the PHY. */
4573 if (sc->sc_flags & WM_F_HAS_MII)
4574 wm_gmii_reset(sc);
4575
4576 /* Calculate (E)ITR value */
4577 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4578 sc->sc_itr = 450; /* For EITR */
4579 } else if (sc->sc_type >= WM_T_82543) {
		/*
		 * Set up the interrupt throttling register (units of 256ns).
		 * Note that a footnote in Intel's documentation says this
		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
		 * or 10Mbit mode.  Empirically, it appears that this is also
		 * true for the 1024ns units of the other interrupt-related
		 * timer registers -- so, really, we ought to divide this
		 * value by 4 when the link speed is low.
		 *
		 * XXX implement this division at link speed change!
		 */
4591
4592 /*
4593 * For N interrupts/sec, set this value to:
4594 * 1000000000 / (N * 256). Note that we set the
4595 * absolute and packet timer values to this value
4596 * divided by 4 to get "simple timer" behavior.
4597 */
4598
4599 sc->sc_itr = 1500; /* 2604 ints/sec */
4600 }
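
	/*
	 * A minimal sketch (not driver code) of the conversion described
	 * above, mapping a target interrupt rate to an ITR value in
	 * 256ns units; e.g. 1000000000 / (2604 * 256) ~= 1500, the value
	 * used here.
	 */
#if 0
	sc->sc_itr = 1000000000U / (2604U * 256U);	/* = 1500 */
#endif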
4601
4602 error = wm_init_txrx_queues(sc);
4603 if (error)
4604 goto out;
4605
4606 /*
4607 * Clear out the VLAN table -- we don't use it (yet).
4608 */
4609 CSR_WRITE(sc, WMREG_VET, 0);
4610 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4611 trynum = 10; /* Due to hw errata */
4612 else
4613 trynum = 1;
4614 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4615 for (j = 0; j < trynum; j++)
4616 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4617
4618 /*
4619 * Set up flow-control parameters.
4620 *
4621 * XXX Values could probably stand some tuning.
4622 */
4623 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4624 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4625 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
4626 && (sc->sc_type != WM_T_PCH_SPT)) {
4627 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4628 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4629 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4630 }
4631
4632 sc->sc_fcrtl = FCRTL_DFLT;
4633 if (sc->sc_type < WM_T_82543) {
4634 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4635 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4636 } else {
4637 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4638 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4639 }
4640
4641 if (sc->sc_type == WM_T_80003)
4642 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4643 else
4644 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4645
4646 /* Writes the control register. */
4647 wm_set_vlan(sc);
4648
4649 if (sc->sc_flags & WM_F_HAS_MII) {
4650 int val;
4651
4652 switch (sc->sc_type) {
4653 case WM_T_80003:
4654 case WM_T_ICH8:
4655 case WM_T_ICH9:
4656 case WM_T_ICH10:
4657 case WM_T_PCH:
4658 case WM_T_PCH2:
4659 case WM_T_PCH_LPT:
4660 case WM_T_PCH_SPT:
4661 /*
4662 * Set the mac to wait the maximum time between each
4663 * iteration and increase the max iterations when
4664 * polling the phy; this fixes erroneous timeouts at
4665 * 10Mbps.
4666 */
4667 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4668 0xFFFF);
4669 val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
4670 val |= 0x3F;
4671 wm_kmrn_writereg(sc,
4672 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4673 break;
4674 default:
4675 break;
4676 }
4677
4678 if (sc->sc_type == WM_T_80003) {
4679 val = CSR_READ(sc, WMREG_CTRL_EXT);
4680 val &= ~CTRL_EXT_LINK_MODE_MASK;
4681 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4682
4683 /* Bypass RX and TX FIFO's */
4684 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4685 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4686 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4687 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4688 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4689 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4690 }
4691 }
4692 #if 0
4693 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4694 #endif
4695
4696 /* Set up checksum offload parameters. */
4697 reg = CSR_READ(sc, WMREG_RXCSUM);
4698 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4699 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4700 reg |= RXCSUM_IPOFL;
4701 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4702 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4703 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4704 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4705 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4706
4707 /* Set up MSI-X */
4708 if (sc->sc_nintrs > 1) {
4709 uint32_t ivar;
4710 struct wm_queue *wmq;
4711 int qid, qintr_idx;
4712
4713 if (sc->sc_type == WM_T_82575) {
4714 /* Interrupt control */
4715 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4716 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4717 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4718
4719 /* TX and RX */
4720 for (i = 0; i < sc->sc_nqueues; i++) {
4721 wmq = &sc->sc_queue[i];
4722 CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
4723 EITR_TX_QUEUE(wmq->wmq_id)
4724 | EITR_RX_QUEUE(wmq->wmq_id));
4725 }
4726 /* Link status */
4727 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
4728 EITR_OTHER);
4729 } else if (sc->sc_type == WM_T_82574) {
4730 /* Interrupt control */
4731 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4732 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4733 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4734
4735 ivar = 0;
4736 /* TX and RX */
4737 for (i = 0; i < sc->sc_nqueues; i++) {
4738 wmq = &sc->sc_queue[i];
4739 qid = wmq->wmq_id;
4740 qintr_idx = wmq->wmq_intr_idx;
4741
4742 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
4743 IVAR_TX_MASK_Q_82574(qid));
4744 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
4745 IVAR_RX_MASK_Q_82574(qid));
4746 }
4747 /* Link status */
4748 ivar |= __SHIFTIN((IVAR_VALID_82574
4749 | sc->sc_link_intr_idx), IVAR_OTHER_MASK);
4750 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4751 } else {
4752 /* Interrupt control */
4753 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
4754 | GPIE_EIAME | GPIE_PBA);
4755
4756 switch (sc->sc_type) {
4757 case WM_T_82580:
4758 case WM_T_I350:
4759 case WM_T_I354:
4760 case WM_T_I210:
4761 case WM_T_I211:
4762 /* TX and RX */
4763 for (i = 0; i < sc->sc_nqueues; i++) {
4764 wmq = &sc->sc_queue[i];
4765 qid = wmq->wmq_id;
4766 qintr_idx = wmq->wmq_intr_idx;
4767
4768 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4769 ivar &= ~IVAR_TX_MASK_Q(qid);
4770 ivar |= __SHIFTIN((qintr_idx
4771 | IVAR_VALID),
4772 IVAR_TX_MASK_Q(qid));
4773 ivar &= ~IVAR_RX_MASK_Q(qid);
4774 ivar |= __SHIFTIN((qintr_idx
4775 | IVAR_VALID),
4776 IVAR_RX_MASK_Q(qid));
4777 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
4778 }
4779 break;
4780 case WM_T_82576:
4781 /* TX and RX */
4782 for (i = 0; i < sc->sc_nqueues; i++) {
4783 wmq = &sc->sc_queue[i];
4784 qid = wmq->wmq_id;
4785 qintr_idx = wmq->wmq_intr_idx;
4786
4787 ivar = CSR_READ(sc,
4788 WMREG_IVAR_Q_82576(qid));
4789 ivar &= ~IVAR_TX_MASK_Q_82576(qid);
4790 ivar |= __SHIFTIN((qintr_idx
4791 | IVAR_VALID),
4792 IVAR_TX_MASK_Q_82576(qid));
4793 ivar &= ~IVAR_RX_MASK_Q_82576(qid);
4794 ivar |= __SHIFTIN((qintr_idx
4795 | IVAR_VALID),
4796 IVAR_RX_MASK_Q_82576(qid));
4797 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
4798 ivar);
4799 }
4800 break;
4801 default:
4802 break;
4803 }
4804
4805 /* Link status */
4806 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
4807 IVAR_MISC_OTHER);
4808 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
4809 }
4810
4811 if (sc->sc_nqueues > 1) {
4812 wm_init_rss(sc);
4813
			/*
			 * NOTE: Receive Full-Packet Checksum Offload
			 * is mutually exclusive with Multiqueue.  However,
			 * this is not the same as TCP/IP checksums, which
			 * still work.
			 */
4820 reg = CSR_READ(sc, WMREG_RXCSUM);
4821 reg |= RXCSUM_PCSD;
4822 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4823 }
4824 }
4825
4826 /* Set up the interrupt registers. */
4827 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4828 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4829 ICR_RXO | ICR_RXT0;
4830 if (sc->sc_nintrs > 1) {
4831 uint32_t mask;
4832 struct wm_queue *wmq;
4833
4834 switch (sc->sc_type) {
4835 case WM_T_82574:
4836 CSR_WRITE(sc, WMREG_EIAC_82574,
4837 WMREG_EIAC_82574_MSIX_MASK);
4838 sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
4839 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4840 break;
4841 default:
4842 if (sc->sc_type == WM_T_82575) {
4843 mask = 0;
4844 for (i = 0; i < sc->sc_nqueues; i++) {
4845 wmq = &sc->sc_queue[i];
4846 mask |= EITR_TX_QUEUE(wmq->wmq_id);
4847 mask |= EITR_RX_QUEUE(wmq->wmq_id);
4848 }
4849 mask |= EITR_OTHER;
4850 } else {
4851 mask = 0;
4852 for (i = 0; i < sc->sc_nqueues; i++) {
4853 wmq = &sc->sc_queue[i];
4854 mask |= 1 << wmq->wmq_intr_idx;
4855 }
4856 mask |= 1 << sc->sc_link_intr_idx;
4857 }
4858 CSR_WRITE(sc, WMREG_EIAC, mask);
4859 CSR_WRITE(sc, WMREG_EIAM, mask);
4860 CSR_WRITE(sc, WMREG_EIMS, mask);
4861 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
4862 break;
4863 }
4864 } else
4865 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4866
4867 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4868 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4869 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4870 || (sc->sc_type == WM_T_PCH_SPT)) {
4871 reg = CSR_READ(sc, WMREG_KABGTXD);
4872 reg |= KABGTXD_BGSQLBIAS;
4873 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4874 }
4875
4876 /* Set up the inter-packet gap. */
4877 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4878
4879 if (sc->sc_type >= WM_T_82543) {
4880 /*
		 * XXX 82574 has both ITR and EITR.  Set EITR when we use
		 * the multiqueue function with MSI-X.
4883 */
4884 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4885 int qidx;
4886 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
4887 struct wm_queue *wmq = &sc->sc_queue[qidx];
4888 CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
4889 sc->sc_itr);
4890 }
			/*
			 * Link interrupts occur much less frequently than
			 * TX and RX interrupts, so we don't tune the
			 * EITR(WM_MSIX_LINKINTR_IDX) value as FreeBSD's
			 * if_igb does.
			 */
4897 } else
4898 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4899 }
4900
4901 /* Set the VLAN ethernetype. */
4902 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4903
	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
4909 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4910 | TCTL_CT(TX_COLLISION_THRESHOLD)
4911 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4912 if (sc->sc_type >= WM_T_82571)
4913 sc->sc_tctl |= TCTL_MULR;
4914 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4915
4916 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4917 /* Write TDT after TCTL.EN is set. See the document. */
4918 CSR_WRITE(sc, WMREG_TDT(0), 0);
4919 }
4920
4921 if (sc->sc_type == WM_T_80003) {
4922 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4923 reg &= ~TCTL_EXT_GCEX_MASK;
4924 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4925 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4926 }
4927
4928 /* Set the media. */
4929 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4930 goto out;
4931
4932 /* Configure for OS presence */
4933 wm_init_manageability(sc);
4934
4935 /*
4936 * Set up the receive control register; we actually program
4937 * the register when we set the receive filter. Use multicast
4938 * address offset type 0.
4939 *
4940 * Only the i82544 has the ability to strip the incoming
4941 * CRC, so we don't enable that feature.
4942 */
4943 sc->sc_mchash_type = 0;
4944 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4945 | RCTL_MO(sc->sc_mchash_type);
4946
	/*
	 * The I350 has a bug where it always strips the CRC whether
	 * asked to or not.  So ask for the stripped CRC here and cope
	 * with it in rxeof.
	 */
4951 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4952 || (sc->sc_type == WM_T_I210))
4953 sc->sc_rctl |= RCTL_SECRC;
4954
4955 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4956 && (ifp->if_mtu > ETHERMTU)) {
4957 sc->sc_rctl |= RCTL_LPE;
4958 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4959 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4960 }
4961
4962 if (MCLBYTES == 2048) {
4963 sc->sc_rctl |= RCTL_2k;
4964 } else {
4965 if (sc->sc_type >= WM_T_82543) {
4966 switch (MCLBYTES) {
4967 case 4096:
4968 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4969 break;
4970 case 8192:
4971 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4972 break;
4973 case 16384:
4974 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4975 break;
4976 default:
4977 panic("wm_init: MCLBYTES %d unsupported",
4978 MCLBYTES);
4979 break;
4980 }
4981 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4982 }
4983
4984 /* Set the receive filter. */
4985 wm_set_filter(sc);
4986
4987 /* Enable ECC */
4988 switch (sc->sc_type) {
4989 case WM_T_82571:
4990 reg = CSR_READ(sc, WMREG_PBA_ECC);
4991 reg |= PBA_ECC_CORR_EN;
4992 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4993 break;
4994 case WM_T_PCH_LPT:
4995 case WM_T_PCH_SPT:
4996 reg = CSR_READ(sc, WMREG_PBECCSTS);
4997 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4998 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4999
5000 reg = CSR_READ(sc, WMREG_CTRL);
5001 reg |= CTRL_MEHE;
5002 CSR_WRITE(sc, WMREG_CTRL, reg);
5003 break;
5004 default:
5005 break;
5006 }
5007
	/* On 82575 and later, set RDT only if RX is enabled */
5009 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5010 int qidx;
5011 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5012 struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
5013 for (i = 0; i < WM_NRXDESC; i++) {
5014 WM_RX_LOCK(rxq);
5015 wm_init_rxdesc(rxq, i);
				WM_RX_UNLOCK(rxq);
			}
5019 }
5020 }
5021
5022 sc->sc_stopping = false;
5023
5024 /* Start the one second link check clock. */
5025 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
5026
5027 /* ...all done! */
5028 ifp->if_flags |= IFF_RUNNING;
5029 ifp->if_flags &= ~IFF_OACTIVE;
5030
5031 out:
5032 sc->sc_if_flags = ifp->if_flags;
5033 if (error)
5034 log(LOG_ERR, "%s: interface not running\n",
5035 device_xname(sc->sc_dev));
5036 return error;
5037 }
5038
5039 /*
5040 * wm_stop: [ifnet interface function]
5041 *
5042 * Stop transmission on the interface.
5043 */
5044 static void
5045 wm_stop(struct ifnet *ifp, int disable)
5046 {
5047 struct wm_softc *sc = ifp->if_softc;
5048
5049 WM_CORE_LOCK(sc);
5050 wm_stop_locked(ifp, disable);
5051 WM_CORE_UNLOCK(sc);
5052 }
5053
5054 static void
5055 wm_stop_locked(struct ifnet *ifp, int disable)
5056 {
5057 struct wm_softc *sc = ifp->if_softc;
5058 struct wm_txsoft *txs;
5059 int i, qidx;
5060
5061 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5062 device_xname(sc->sc_dev), __func__));
5063 KASSERT(WM_CORE_LOCKED(sc));
5064
5065 sc->sc_stopping = true;
5066
5067 /* Stop the one second clock. */
5068 callout_stop(&sc->sc_tick_ch);
5069
5070 /* Stop the 82547 Tx FIFO stall check timer. */
5071 if (sc->sc_type == WM_T_82547)
5072 callout_stop(&sc->sc_txfifo_ch);
5073
5074 if (sc->sc_flags & WM_F_HAS_MII) {
5075 /* Down the MII. */
5076 mii_down(&sc->sc_mii);
5077 } else {
5078 #if 0
5079 /* Should we clear PHY's status properly? */
5080 wm_reset(sc);
5081 #endif
5082 }
5083
5084 /* Stop the transmit and receive processes. */
5085 CSR_WRITE(sc, WMREG_TCTL, 0);
5086 CSR_WRITE(sc, WMREG_RCTL, 0);
5087 sc->sc_rctl &= ~RCTL_EN;
5088
5089 /*
5090 * Clear the interrupt mask to ensure the device cannot assert its
5091 * interrupt line.
5092 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
5093 * service any currently pending or shared interrupt.
5094 */
5095 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5096 sc->sc_icr = 0;
5097 if (sc->sc_nintrs > 1) {
5098 if (sc->sc_type != WM_T_82574) {
5099 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5100 CSR_WRITE(sc, WMREG_EIAC, 0);
5101 } else
5102 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5103 }
5104
5105 /* Release any queued transmit buffers. */
5106 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5107 struct wm_queue *wmq = &sc->sc_queue[qidx];
5108 struct wm_txqueue *txq = &wmq->wmq_txq;
5109 WM_TX_LOCK(txq);
5110 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5111 txs = &txq->txq_soft[i];
5112 if (txs->txs_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5114 m_freem(txs->txs_mbuf);
5115 txs->txs_mbuf = NULL;
5116 }
5117 }
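		/*
		 * I219 (PCH_SPT) requires the descriptor rings to be
		 * flushed before reset when the chipset reports a pending
		 * flush request: the TX ring is flushed by queueing one
		 * dummy 512-byte descriptor with the MULR fix disabled;
		 * a needed RX flush is detected below but not yet
		 * implemented.
		 */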
5118 if (sc->sc_type == WM_T_PCH_SPT) {
5119 pcireg_t preg;
5120 uint32_t reg;
5121 int nexttx;
5122
5123 /* First, disable MULR fix in FEXTNVM11 */
5124 reg = CSR_READ(sc, WMREG_FEXTNVM11);
5125 reg |= FEXTNVM11_DIS_MULRFIX;
5126 CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
5127
5128 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
5129 WM_PCI_DESCRING_STATUS);
5130 reg = CSR_READ(sc, WMREG_TDLEN(0));
5131 printf("XXX RST: FLUSH = %08x, len = %u\n",
5132 (uint32_t)(preg & DESCRING_STATUS_FLUSH_REQ), reg);
5133 if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0)
5134 && (reg != 0)) {
5135 /* TX */
5136 printf("XXX need TX flush (reg = %08x)\n",
5137 preg);
5138 wm_init_tx_descs(sc, txq);
5139 wm_init_tx_regs(sc, wmq, txq);
5140 nexttx = txq->txq_next;
5141 wm_set_dma_addr(
5142 &txq->txq_descs[nexttx].wtx_addr,
5143 WM_CDTXADDR(txq, nexttx));
5144 txq->txq_descs[nexttx].wtx_cmdlen
5145 = htole32(WTX_CMD_IFCS | 512);
5146 wm_cdtxsync(txq, nexttx, 1,
5147 BUS_DMASYNC_PREREAD |BUS_DMASYNC_PREWRITE);
5148 CSR_WRITE(sc, WMREG_TCTL, TCTL_EN);
5149 CSR_WRITE(sc, WMREG_TDT(0), nexttx);
5150 CSR_WRITE_FLUSH(sc);
5151 delay(250);
5152 CSR_WRITE(sc, WMREG_TCTL, 0);
5153 }
5154 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
5155 WM_PCI_DESCRING_STATUS);
5156 if (preg & DESCRING_STATUS_FLUSH_REQ) {
5157 /* RX */
5158 printf("XXX need RX flush\n");
5159 }
5160 }
5161 WM_TX_UNLOCK(txq);
5162 }
5163
5164 /* Mark the interface as down and cancel the watchdog timer. */
5165 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5166 ifp->if_timer = 0;
5167
5168 if (disable) {
5169 for (i = 0; i < sc->sc_nqueues; i++) {
5170 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5171 WM_RX_LOCK(rxq);
5172 wm_rxdrain(rxq);
5173 WM_RX_UNLOCK(rxq);
5174 }
5175 }
5176
5177 #if 0 /* notyet */
5178 if (sc->sc_type >= WM_T_82544)
5179 CSR_WRITE(sc, WMREG_WUC, 0);
5180 #endif
5181 }
5182
5183 static void
5184 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5185 {
5186 struct mbuf *m;
5187 int i;
5188
5189 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5190 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5191 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5192 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5193 m->m_data, m->m_len, m->m_flags);
5194 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5195 i, i == 1 ? "" : "s");
5196 }
5197
5198 /*
5199 * wm_82547_txfifo_stall:
5200 *
5201 * Callout used to wait for the 82547 Tx FIFO to drain,
5202 * reset the FIFO pointers, and restart packet transmission.
5203 */
5204 static void
5205 wm_82547_txfifo_stall(void *arg)
5206 {
5207 struct wm_softc *sc = arg;
5208 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5209 #ifndef WM_MPSAFE
5210 int s;
5211
5212 s = splnet();
5213 #endif
5214 WM_TX_LOCK(txq);
5215
5216 if (sc->sc_stopping)
5217 goto out;
5218
5219 if (txq->txq_fifo_stall) {
5220 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
5221 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5222 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5223 /*
5224 * Packets have drained. Stop transmitter, reset
5225 * FIFO pointers, restart transmitter, and kick
5226 * the packet queue.
5227 */
5228 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5229 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5230 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
5231 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
5232 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
5233 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
5234 CSR_WRITE(sc, WMREG_TCTL, tctl);
5235 CSR_WRITE_FLUSH(sc);
5236
5237 txq->txq_fifo_head = 0;
5238 txq->txq_fifo_stall = 0;
5239 wm_start_locked(&sc->sc_ethercom.ec_if);
5240 } else {
5241 /*
5242 * Still waiting for packets to drain; try again in
5243 * another tick.
5244 */
5245 callout_schedule(&sc->sc_txfifo_ch, 1);
5246 }
5247 }
5248
5249 out:
5250 WM_TX_UNLOCK(txq);
5251 #ifndef WM_MPSAFE
5252 splx(s);
5253 #endif
5254 }
5255
5256 /*
5257 * wm_82547_txfifo_bugchk:
5258 *
5259 * Check for bug condition in the 82547 Tx FIFO. We need to
5260 * prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
5262 *
5263 * We do this by checking the amount of space before the end
5264 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
5265 * the Tx FIFO, wait for all remaining packets to drain, reset
5266 * the internal FIFO pointers to the beginning, and restart
5267 * transmission on the interface.
5268 */
5269 #define WM_FIFO_HDR 0x10
5270 #define WM_82547_PAD_LEN 0x3e0
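/*
 * Worked example (hypothetical numbers): with txq_fifo_size = 0x3000
 * and txq_fifo_head = 0x2e80, space = 0x180.  A 1500-byte frame rounds
 * up to len = 0x5f0, and 0x5f0 >= WM_82547_PAD_LEN + 0x180 (= 0x560),
 * so the packet is held back and the stall callout runs until the FIFO
 * drains and the pointers are reset.
 */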
5271 static int
5272 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5273 {
5274 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5275 int space = txq->txq_fifo_size - txq->txq_fifo_head;
5276 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
5277
5278 /* Just return if already stalled. */
5279 if (txq->txq_fifo_stall)
5280 return 1;
5281
5282 if (sc->sc_mii.mii_media_active & IFM_FDX) {
5283 /* Stall only occurs in half-duplex mode. */
5284 goto send_packet;
5285 }
5286
5287 if (len >= WM_82547_PAD_LEN + space) {
5288 txq->txq_fifo_stall = 1;
5289 callout_schedule(&sc->sc_txfifo_ch, 1);
5290 return 1;
5291 }
5292
5293 send_packet:
5294 txq->txq_fifo_head += len;
5295 if (txq->txq_fifo_head >= txq->txq_fifo_size)
5296 txq->txq_fifo_head -= txq->txq_fifo_size;
5297
5298 return 0;
5299 }
5300
5301 static int
5302 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5303 {
5304 int error;
5305
5306 /*
5307 * Allocate the control data structures, and create and load the
5308 * DMA map for it.
5309 *
5310 * NOTE: All Tx descriptors must be in the same 4G segment of
5311 * memory. So must Rx descriptors. We simplify by allocating
5312 * both sets within the same 4G segment.
5313 */
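	/*
	 * The 4G constraint is enforced below by passing 0x100000000 as
	 * the boundary argument to bus_dmamem_alloc().
	 */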
5314 if (sc->sc_type < WM_T_82544)
5315 WM_NTXDESC(txq) = WM_NTXDESC_82542;
5316 else
5317 WM_NTXDESC(txq) = WM_NTXDESC_82544;
5318 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5319 txq->txq_descsize = sizeof(nq_txdesc_t);
5320 else
5321 txq->txq_descsize = sizeof(wiseman_txdesc_t);
5322
5323 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
5324 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
5325 1, &txq->txq_desc_rseg, 0)) != 0) {
5326 aprint_error_dev(sc->sc_dev,
5327 "unable to allocate TX control data, error = %d\n",
5328 error);
5329 goto fail_0;
5330 }
5331
5332 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
5333 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
5334 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
5335 aprint_error_dev(sc->sc_dev,
5336 "unable to map TX control data, error = %d\n", error);
5337 goto fail_1;
5338 }
5339
5340 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
5341 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
5342 aprint_error_dev(sc->sc_dev,
5343 "unable to create TX control data DMA map, error = %d\n",
5344 error);
5345 goto fail_2;
5346 }
5347
5348 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
5349 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
5350 aprint_error_dev(sc->sc_dev,
5351 "unable to load TX control data DMA map, error = %d\n",
5352 error);
5353 goto fail_3;
5354 }
5355
5356 return 0;
5357
5358 fail_3:
5359 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5360 fail_2:
5361 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5362 WM_TXDESCS_SIZE(txq));
5363 fail_1:
5364 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5365 fail_0:
5366 return error;
5367 }
5368
5369 static void
5370 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5371 {
5372
5373 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
5374 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5375 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5376 WM_TXDESCS_SIZE(txq));
5377 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5378 }
5379
5380 static int
5381 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5382 {
5383 int error;
5384
5385 /*
5386 * Allocate the control data structures, and create and load the
5387 * DMA map for it.
5388 *
5389 * NOTE: All Tx descriptors must be in the same 4G segment of
5390 * memory. So must Rx descriptors. We simplify by allocating
5391 * both sets within the same 4G segment.
5392 */
5393 rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
5394 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
5395 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
5396 1, &rxq->rxq_desc_rseg, 0)) != 0) {
5397 aprint_error_dev(sc->sc_dev,
5398 "unable to allocate RX control data, error = %d\n",
5399 error);
5400 goto fail_0;
5401 }
5402
5403 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
5404 rxq->rxq_desc_rseg, rxq->rxq_desc_size,
5405 (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
5406 aprint_error_dev(sc->sc_dev,
5407 "unable to map RX control data, error = %d\n", error);
5408 goto fail_1;
5409 }
5410
5411 if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
5412 rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
5413 aprint_error_dev(sc->sc_dev,
5414 "unable to create RX control data DMA map, error = %d\n",
5415 error);
5416 goto fail_2;
5417 }
5418
5419 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
5420 rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
5421 aprint_error_dev(sc->sc_dev,
5422 "unable to load RX control data DMA map, error = %d\n",
5423 error);
5424 goto fail_3;
5425 }
5426
5427 return 0;
5428
5429 fail_3:
5430 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5431 fail_2:
5432 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5433 rxq->rxq_desc_size);
5434 fail_1:
5435 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5436 fail_0:
5437 return error;
5438 }
5439
5440 static void
5441 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5442 {
5443
5444 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
5445 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5446 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5447 rxq->rxq_desc_size);
5448 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5449 }
5450
5451
5452 static int
5453 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5454 {
5455 int i, error;
5456
5457 /* Create the transmit buffer DMA maps. */
5458 WM_TXQUEUELEN(txq) =
5459 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
5460 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
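	/*
	 * Note: the 82547 gets a smaller job queue, presumably to
	 * bound the amount of data in flight for the Tx FIFO bug
	 * workaround (see wm_82547_txfifo_bugchk()).
	 */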
5461 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5462 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
5463 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
5464 &txq->txq_soft[i].txs_dmamap)) != 0) {
5465 aprint_error_dev(sc->sc_dev,
5466 "unable to create Tx DMA map %d, error = %d\n",
5467 i, error);
5468 goto fail;
5469 }
5470 }
5471
5472 return 0;
5473
5474 fail:
5475 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5476 if (txq->txq_soft[i].txs_dmamap != NULL)
5477 bus_dmamap_destroy(sc->sc_dmat,
5478 txq->txq_soft[i].txs_dmamap);
5479 }
5480 return error;
5481 }
5482
5483 static void
5484 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5485 {
5486 int i;
5487
5488 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5489 if (txq->txq_soft[i].txs_dmamap != NULL)
5490 bus_dmamap_destroy(sc->sc_dmat,
5491 txq->txq_soft[i].txs_dmamap);
5492 }
5493 }
5494
5495 static int
5496 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5497 {
5498 int i, error;
5499
5500 /* Create the receive buffer DMA maps. */
5501 for (i = 0; i < WM_NRXDESC; i++) {
5502 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
5503 MCLBYTES, 0, 0,
5504 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
5505 aprint_error_dev(sc->sc_dev,
5506 			    "unable to create Rx DMA map %d, error = %d\n",
5507 i, error);
5508 goto fail;
5509 }
5510 rxq->rxq_soft[i].rxs_mbuf = NULL;
5511 }
5512
5513 return 0;
5514
5515 fail:
5516 for (i = 0; i < WM_NRXDESC; i++) {
5517 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5518 bus_dmamap_destroy(sc->sc_dmat,
5519 rxq->rxq_soft[i].rxs_dmamap);
5520 }
5521 return error;
5522 }
5523
5524 static void
5525 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5526 {
5527 int i;
5528
5529 for (i = 0; i < WM_NRXDESC; i++) {
5530 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5531 bus_dmamap_destroy(sc->sc_dmat,
5532 rxq->rxq_soft[i].rxs_dmamap);
5533 }
5534 }
5535
5536 /*
5537  * wm_alloc_txrx_queues:
5538  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
5539 */
5540 static int
5541 wm_alloc_txrx_queues(struct wm_softc *sc)
5542 {
5543 int i, error, tx_done, rx_done;
5544
5545 sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
5546 KM_SLEEP);
5547 if (sc->sc_queue == NULL) {
5548 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
5549 error = ENOMEM;
5550 goto fail_0;
5551 }
5552
5553 /*
5554 * For transmission
5555 */
5556 error = 0;
5557 tx_done = 0;
5558 for (i = 0; i < sc->sc_nqueues; i++) {
5559 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5560 txq->txq_sc = sc;
5561 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5562
5563 error = wm_alloc_tx_descs(sc, txq);
5564 if (error)
5565 break;
5566 error = wm_alloc_tx_buffer(sc, txq);
5567 if (error) {
5568 wm_free_tx_descs(sc, txq);
5569 break;
5570 }
5571 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
5572 if (txq->txq_interq == NULL) {
5573 wm_free_tx_descs(sc, txq);
5574 wm_free_tx_buffer(sc, txq);
5575 error = ENOMEM;
5576 break;
5577 }
5578 tx_done++;
5579 }
5580 if (error)
5581 goto fail_1;
5582
5583 /*
5584 	 * For receive
5585 */
5586 error = 0;
5587 rx_done = 0;
5588 for (i = 0; i < sc->sc_nqueues; i++) {
5589 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5590 rxq->rxq_sc = sc;
5591 #ifdef WM_MPSAFE
5592 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5593 #else
5594 rxq->rxq_lock = NULL;
5595 #endif
5596 error = wm_alloc_rx_descs(sc, rxq);
5597 if (error)
5598 break;
5599
5600 error = wm_alloc_rx_buffer(sc, rxq);
5601 if (error) {
5602 wm_free_rx_descs(sc, rxq);
5603 break;
5604 }
5605
5606 rx_done++;
5607 }
5608 if (error)
5609 goto fail_2;
5610
5611 return 0;
5612
5613 fail_2:
5614 for (i = 0; i < rx_done; i++) {
5615 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5616 wm_free_rx_buffer(sc, rxq);
5617 wm_free_rx_descs(sc, rxq);
5618 if (rxq->rxq_lock)
5619 mutex_obj_free(rxq->rxq_lock);
5620 }
5621 fail_1:
5622 for (i = 0; i < tx_done; i++) {
5623 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5624 pcq_destroy(txq->txq_interq);
5625 wm_free_tx_buffer(sc, txq);
5626 wm_free_tx_descs(sc, txq);
5627 if (txq->txq_lock)
5628 mutex_obj_free(txq->txq_lock);
5629 }
5630
5631 kmem_free(sc->sc_queue,
5632 sizeof(struct wm_queue) * sc->sc_nqueues);
5633 fail_0:
5634 return error;
5635 }
5636
5637 /*
5638  * wm_free_txrx_queues:
5639  *	Free {tx,rx} descriptors and {tx,rx} buffers.
5640 */
5641 static void
5642 wm_free_txrx_queues(struct wm_softc *sc)
5643 {
5644 int i;
5645
5646 for (i = 0; i < sc->sc_nqueues; i++) {
5647 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5648 wm_free_rx_buffer(sc, rxq);
5649 wm_free_rx_descs(sc, rxq);
5650 if (rxq->rxq_lock)
5651 mutex_obj_free(rxq->rxq_lock);
5652 }
5653
5654 for (i = 0; i < sc->sc_nqueues; i++) {
5655 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5656 wm_free_tx_buffer(sc, txq);
5657 wm_free_tx_descs(sc, txq);
5658 if (txq->txq_lock)
5659 mutex_obj_free(txq->txq_lock);
5660 }
5661
5662 kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
5663 }
5664
5665 static void
5666 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5667 {
5668
5669 KASSERT(WM_TX_LOCKED(txq));
5670
5671 /* Initialize the transmit descriptor ring. */
5672 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
5673 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5674 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5675 txq->txq_free = WM_NTXDESC(txq);
5676 txq->txq_next = 0;
5677 }
5678
5679 static void
5680 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
5681 struct wm_txqueue *txq)
5682 {
5683
5684 KASSERT(WM_TX_LOCKED(txq));
5685
5686 if (sc->sc_type < WM_T_82543) {
5687 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
5688 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
5689 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
5690 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
5691 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
5692 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
5693 } else {
5694 int qid = wmq->wmq_id;
5695
5696 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
5697 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
5698 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
5699 CSR_WRITE(sc, WMREG_TDH(qid), 0);
5700
5701 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5702 /*
5703 * Don't write TDT before TCTL.EN is set.
5704 			 * See the datasheet.
5705 */
5706 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
5707 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
5708 | TXDCTL_WTHRESH(0));
5709 else {
5710 /* ITR / 4 */
5711 CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
5712 if (sc->sc_type >= WM_T_82540) {
5713 				/* Should be the same as TIDV */
5714 CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
5715 }
5716
5717 CSR_WRITE(sc, WMREG_TDT(qid), 0);
5718 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
5719 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
5720 }
5721 }
5722 }
5723
5724 static void
5725 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5726 {
5727 int i;
5728
5729 KASSERT(WM_TX_LOCKED(txq));
5730
5731 /* Initialize the transmit job descriptors. */
5732 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
5733 txq->txq_soft[i].txs_mbuf = NULL;
5734 txq->txq_sfree = WM_TXQUEUELEN(txq);
5735 txq->txq_snext = 0;
5736 txq->txq_sdirty = 0;
5737 }
5738
5739 static void
5740 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
5741 struct wm_txqueue *txq)
5742 {
5743
5744 KASSERT(WM_TX_LOCKED(txq));
5745
5746 /*
5747 * Set up some register offsets that are different between
5748 * the i82542 and the i82543 and later chips.
5749 */
5750 if (sc->sc_type < WM_T_82543)
5751 txq->txq_tdt_reg = WMREG_OLD_TDT;
5752 else
5753 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
5754
5755 wm_init_tx_descs(sc, txq);
5756 wm_init_tx_regs(sc, wmq, txq);
5757 wm_init_tx_buffer(sc, txq);
5758 }
5759
5760 static void
5761 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
5762 struct wm_rxqueue *rxq)
5763 {
5764
5765 KASSERT(WM_RX_LOCKED(rxq));
5766
5767 /*
5768 * Initialize the receive descriptor and receive job
5769 * descriptor rings.
5770 */
5771 if (sc->sc_type < WM_T_82543) {
5772 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
5773 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
5774 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
5775 sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
5776 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
5777 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
5778 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
5779
5780 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
5781 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
5782 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
5783 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
5784 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
5785 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
5786 } else {
5787 int qid = wmq->wmq_id;
5788
5789 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
5790 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
5791 CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
5792
5793 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5794 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
5795 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
5796 CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
5797 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
5798 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
5799 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
5800 | RXDCTL_WTHRESH(1));
5801 CSR_WRITE(sc, WMREG_RDH(qid), 0);
5802 CSR_WRITE(sc, WMREG_RDT(qid), 0);
5803 } else {
5804 CSR_WRITE(sc, WMREG_RDH(qid), 0);
5805 CSR_WRITE(sc, WMREG_RDT(qid), 0);
5806 /* ITR / 4 */
5807 CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
5808 			/* MUST be the same as RDTR */
5809 CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
5810 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
5811 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
5812 }
5813 }
5814 }
5815
5816 static int
5817 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5818 {
5819 struct wm_rxsoft *rxs;
5820 int error, i;
5821
5822 KASSERT(WM_RX_LOCKED(rxq));
5823
5824 for (i = 0; i < WM_NRXDESC; i++) {
5825 rxs = &rxq->rxq_soft[i];
5826 if (rxs->rxs_mbuf == NULL) {
5827 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
5828 log(LOG_ERR, "%s: unable to allocate or map "
5829 "rx buffer %d, error = %d\n",
5830 device_xname(sc->sc_dev), i, error);
5831 /*
5832 * XXX Should attempt to run with fewer receive
5833 * XXX buffers instead of just failing.
5834 */
5835 wm_rxdrain(rxq);
5836 return ENOMEM;
5837 }
5838 } else {
5839 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5840 wm_init_rxdesc(rxq, i);
5841 /*
5842 			 * On 82575 and newer devices, the RX descriptors
5843 			 * must be initialized after RCTL.EN is set in
5844 			 * wm_set_filter().
5845 */
5846 }
5847 }
5848 rxq->rxq_ptr = 0;
5849 rxq->rxq_discard = 0;
5850 WM_RXCHAIN_RESET(rxq);
5851
5852 return 0;
5853 }
5854
5855 static int
5856 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
5857 struct wm_rxqueue *rxq)
5858 {
5859
5860 KASSERT(WM_RX_LOCKED(rxq));
5861
5862 /*
5863 * Set up some register offsets that are different between
5864 * the i82542 and the i82543 and later chips.
5865 */
5866 if (sc->sc_type < WM_T_82543)
5867 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
5868 else
5869 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
5870
5871 wm_init_rx_regs(sc, wmq, rxq);
5872 return wm_init_rx_buffer(sc, rxq);
5873 }
5874
5875 /*
5876  * wm_init_txrx_queues:
5877  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
5878 */
5879 static int
5880 wm_init_txrx_queues(struct wm_softc *sc)
5881 {
5882 int i, error = 0;
5883
5884 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5885 device_xname(sc->sc_dev), __func__));
5886 for (i = 0; i < sc->sc_nqueues; i++) {
5887 struct wm_queue *wmq = &sc->sc_queue[i];
5888 struct wm_txqueue *txq = &wmq->wmq_txq;
5889 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
5890
5891 WM_TX_LOCK(txq);
5892 wm_init_tx_queue(sc, wmq, txq);
5893 WM_TX_UNLOCK(txq);
5894
5895 WM_RX_LOCK(rxq);
5896 error = wm_init_rx_queue(sc, wmq, rxq);
5897 WM_RX_UNLOCK(rxq);
5898 if (error)
5899 break;
5900 }
5901
5902 return error;
5903 }
5904
5905 /*
5906 * wm_tx_offload:
5907 *
5908 * Set up TCP/IP checksumming parameters for the
5909 * specified packet.
5910 */
5911 static int
5912 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
5913 uint8_t *fieldsp)
5914 {
5915 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5916 struct mbuf *m0 = txs->txs_mbuf;
5917 struct livengood_tcpip_ctxdesc *t;
5918 uint32_t ipcs, tucs, cmd, cmdlen, seg;
5919 uint32_t ipcse;
5920 struct ether_header *eh;
5921 int offset, iphl;
5922 uint8_t fields;
5923
5924 /*
5925 * XXX It would be nice if the mbuf pkthdr had offset
5926 * fields for the protocol headers.
5927 */
5928
5929 eh = mtod(m0, struct ether_header *);
5930 switch (htons(eh->ether_type)) {
5931 case ETHERTYPE_IP:
5932 case ETHERTYPE_IPV6:
5933 offset = ETHER_HDR_LEN;
5934 break;
5935
5936 case ETHERTYPE_VLAN:
5937 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5938 break;
5939
5940 default:
5941 /*
5942 * Don't support this protocol or encapsulation.
5943 */
5944 *fieldsp = 0;
5945 *cmdp = 0;
5946 return 0;
5947 }
5948
5949 if ((m0->m_pkthdr.csum_flags &
5950 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
5951 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5952 } else {
5953 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5954 }
5955 ipcse = offset + iphl - 1;
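	/*
	 * IPCSE is the inclusive offset of the last byte covered by
	 * the IP checksum, hence the -1: e.g. with a 14-byte Ethernet
	 * header and a 20-byte IP header, ipcse = 14 + 20 - 1 = 33.
	 */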
5956
5957 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
5958 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
5959 seg = 0;
5960 fields = 0;
5961
5962 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5963 int hlen = offset + iphl;
5964 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5965
5966 if (__predict_false(m0->m_len <
5967 (hlen + sizeof(struct tcphdr)))) {
5968 /*
5969 * TCP/IP headers are not in the first mbuf; we need
5970 * to do this the slow and painful way. Let's just
5971 * hope this doesn't happen very often.
5972 */
5973 struct tcphdr th;
5974
5975 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5976
5977 m_copydata(m0, hlen, sizeof(th), &th);
5978 if (v4) {
5979 struct ip ip;
5980
5981 m_copydata(m0, offset, sizeof(ip), &ip);
5982 ip.ip_len = 0;
5983 m_copyback(m0,
5984 offset + offsetof(struct ip, ip_len),
5985 sizeof(ip.ip_len), &ip.ip_len);
5986 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5987 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5988 } else {
5989 struct ip6_hdr ip6;
5990
5991 m_copydata(m0, offset, sizeof(ip6), &ip6);
5992 ip6.ip6_plen = 0;
5993 m_copyback(m0,
5994 offset + offsetof(struct ip6_hdr, ip6_plen),
5995 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5996 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5997 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5998 }
5999 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6000 sizeof(th.th_sum), &th.th_sum);
6001
6002 hlen += th.th_off << 2;
6003 } else {
6004 /*
6005 * TCP/IP headers are in the first mbuf; we can do
6006 * this the easy way.
6007 */
6008 struct tcphdr *th;
6009
6010 if (v4) {
6011 struct ip *ip =
6012 (void *)(mtod(m0, char *) + offset);
6013 th = (void *)(mtod(m0, char *) + hlen);
6014
6015 ip->ip_len = 0;
6016 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6017 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6018 } else {
6019 struct ip6_hdr *ip6 =
6020 (void *)(mtod(m0, char *) + offset);
6021 th = (void *)(mtod(m0, char *) + hlen);
6022
6023 ip6->ip6_plen = 0;
6024 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6025 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6026 }
6027 hlen += th->th_off << 2;
6028 }
6029
6030 if (v4) {
6031 WM_EVCNT_INCR(&sc->sc_ev_txtso);
6032 cmdlen |= WTX_TCPIP_CMD_IP;
6033 } else {
6034 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
6035 ipcse = 0;
6036 }
6037 cmd |= WTX_TCPIP_CMD_TSE;
6038 cmdlen |= WTX_TCPIP_CMD_TSE |
6039 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
6040 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
6041 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
6042 }
6043
6044 /*
6045 * NOTE: Even if we're not using the IP or TCP/UDP checksum
6046 * offload feature, if we load the context descriptor, we
6047 * MUST provide valid values for IPCSS and TUCSS fields.
6048 */
6049
6050 ipcs = WTX_TCPIP_IPCSS(offset) |
6051 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
6052 WTX_TCPIP_IPCSE(ipcse);
6053 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
6054 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
6055 fields |= WTX_IXSM;
6056 }
6057
6058 offset += iphl;
6059
6060 if (m0->m_pkthdr.csum_flags &
6061 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
6062 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
6063 fields |= WTX_TXSM;
6064 tucs = WTX_TCPIP_TUCSS(offset) |
6065 WTX_TCPIP_TUCSO(offset +
6066 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
6067 WTX_TCPIP_TUCSE(0) /* rest of packet */;
6068 } else if ((m0->m_pkthdr.csum_flags &
6069 (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
6070 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
6071 fields |= WTX_TXSM;
6072 tucs = WTX_TCPIP_TUCSS(offset) |
6073 WTX_TCPIP_TUCSO(offset +
6074 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
6075 WTX_TCPIP_TUCSE(0) /* rest of packet */;
6076 } else {
6077 /* Just initialize it to a valid TCP context. */
6078 tucs = WTX_TCPIP_TUCSS(offset) |
6079 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
6080 WTX_TCPIP_TUCSE(0) /* rest of packet */;
6081 }
6082
6083 /* Fill in the context descriptor. */
6084 t = (struct livengood_tcpip_ctxdesc *)
6085 &txq->txq_descs[txq->txq_next];
6086 t->tcpip_ipcs = htole32(ipcs);
6087 t->tcpip_tucs = htole32(tucs);
6088 t->tcpip_cmdlen = htole32(cmdlen);
6089 t->tcpip_seg = htole32(seg);
6090 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6091
6092 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6093 txs->txs_ndesc++;
6094
6095 *cmdp = cmd;
6096 *fieldsp = fields;
6097
6098 return 0;
6099 }
6100
6101 /*
6102 * wm_start: [ifnet interface function]
6103 *
6104 * Start packet transmission on the interface.
6105 */
6106 static void
6107 wm_start(struct ifnet *ifp)
6108 {
6109 struct wm_softc *sc = ifp->if_softc;
6110 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6111
6112 WM_TX_LOCK(txq);
6113 if (!sc->sc_stopping)
6114 wm_start_locked(ifp);
6115 WM_TX_UNLOCK(txq);
6116 }
6117
6118 static void
6119 wm_start_locked(struct ifnet *ifp)
6120 {
6121 struct wm_softc *sc = ifp->if_softc;
6122 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6123 struct mbuf *m0;
6124 struct m_tag *mtag;
6125 struct wm_txsoft *txs;
6126 bus_dmamap_t dmamap;
6127 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
6128 bus_addr_t curaddr;
6129 bus_size_t seglen, curlen;
6130 uint32_t cksumcmd;
6131 uint8_t cksumfields;
6132
6133 KASSERT(WM_TX_LOCKED(txq));
6134
6135 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6136 return;
6137
6138 /* Remember the previous number of free descriptors. */
6139 ofree = txq->txq_free;
6140
6141 /*
6142 * Loop through the send queue, setting up transmit descriptors
6143 * until we drain the queue, or use up all available transmit
6144 * descriptors.
6145 */
6146 for (;;) {
6147 m0 = NULL;
6148
6149 /* Get a work queue entry. */
6150 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6151 wm_txeof(sc, txq);
6152 if (txq->txq_sfree == 0) {
6153 DPRINTF(WM_DEBUG_TX,
6154 ("%s: TX: no free job descriptors\n",
6155 device_xname(sc->sc_dev)));
6156 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
6157 break;
6158 }
6159 }
6160
6161 /* Grab a packet off the queue. */
6162 IFQ_DEQUEUE(&ifp->if_snd, m0);
6163 if (m0 == NULL)
6164 break;
6165
6166 DPRINTF(WM_DEBUG_TX,
6167 ("%s: TX: have packet to transmit: %p\n",
6168 device_xname(sc->sc_dev), m0));
6169
6170 txs = &txq->txq_soft[txq->txq_snext];
6171 dmamap = txs->txs_dmamap;
6172
6173 use_tso = (m0->m_pkthdr.csum_flags &
6174 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
6175
6176 /*
6177 * So says the Linux driver:
6178 * The controller does a simple calculation to make sure
6179 * there is enough room in the FIFO before initiating the
6180 * DMA for each buffer. The calc is:
6181 * 4 = ceil(buffer len / MSS)
6182 * To make sure we don't overrun the FIFO, adjust the max
6183 * buffer len if the MSS drops.
6184 */
6185 dmamap->dm_maxsegsz =
6186 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
6187 ? m0->m_pkthdr.segsz << 2
6188 : WTX_MAX_LEN;
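		/*
		 * For example, with an MSS of 1448 each DMA segment is
		 * capped at 4 * 1448 = 5792 bytes, matching the FIFO
		 * headroom calculation quoted above.
		 */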
6189
6190 /*
6191 * Load the DMA map. If this fails, the packet either
6192 * didn't fit in the allotted number of segments, or we
6193 * were short on resources. For the too-many-segments
6194 * case, we simply report an error and drop the packet,
6195 * since we can't sanely copy a jumbo packet to a single
6196 * buffer.
6197 */
6198 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6199 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6200 if (error) {
6201 if (error == EFBIG) {
6202 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6203 log(LOG_ERR, "%s: Tx packet consumes too many "
6204 "DMA segments, dropping...\n",
6205 device_xname(sc->sc_dev));
6206 wm_dump_mbuf_chain(sc, m0);
6207 m_freem(m0);
6208 continue;
6209 }
6210 /* Short on resources, just stop for now. */
6211 DPRINTF(WM_DEBUG_TX,
6212 ("%s: TX: dmamap load failed: %d\n",
6213 device_xname(sc->sc_dev), error));
6214 break;
6215 }
6216
6217 segs_needed = dmamap->dm_nsegs;
6218 if (use_tso) {
6219 /* For sentinel descriptor; see below. */
6220 segs_needed++;
6221 }
6222
6223 /*
6224 * Ensure we have enough descriptors free to describe
6225 * the packet. Note, we always reserve one descriptor
6226 * at the end of the ring due to the semantics of the
6227 * TDT register, plus one more in the event we need
6228 * to load offload context.
6229 */
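		/*
		 * For example, with txq_free == 8 a packet needing 7
		 * segments is deferred, since one descriptor is reserved
		 * for the TDT semantics and one for a possible offload
		 * context descriptor.
		 */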
6230 if (segs_needed > txq->txq_free - 2) {
6231 /*
6232 * Not enough free descriptors to transmit this
6233 * packet. We haven't committed anything yet,
6234 * so just unload the DMA map, put the packet
6235 			 * back on the queue, and punt.  Notify the upper
6236 * layer that there are no more slots left.
6237 */
6238 DPRINTF(WM_DEBUG_TX,
6239 ("%s: TX: need %d (%d) descriptors, have %d\n",
6240 device_xname(sc->sc_dev), dmamap->dm_nsegs,
6241 segs_needed, txq->txq_free - 1));
6242 ifp->if_flags |= IFF_OACTIVE;
6243 bus_dmamap_unload(sc->sc_dmat, dmamap);
6244 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
6245 break;
6246 }
6247
6248 /*
6249 * Check for 82547 Tx FIFO bug. We need to do this
6250 * once we know we can transmit the packet, since we
6251 * do some internal FIFO space accounting here.
6252 */
6253 if (sc->sc_type == WM_T_82547 &&
6254 wm_82547_txfifo_bugchk(sc, m0)) {
6255 DPRINTF(WM_DEBUG_TX,
6256 ("%s: TX: 82547 Tx FIFO bug detected\n",
6257 device_xname(sc->sc_dev)));
6258 ifp->if_flags |= IFF_OACTIVE;
6259 bus_dmamap_unload(sc->sc_dmat, dmamap);
6260 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
6261 break;
6262 }
6263
6264 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6265
6266 DPRINTF(WM_DEBUG_TX,
6267 ("%s: TX: packet has %d (%d) DMA segments\n",
6268 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6269
6270 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
6271
6272 /*
6273 * Store a pointer to the packet so that we can free it
6274 * later.
6275 *
6276 * Initially, we consider the number of descriptors the
6277 * packet uses the number of DMA segments. This may be
6278 * incremented by 1 if we do checksum offload (a descriptor
6279 * is used to set the checksum context).
6280 */
6281 txs->txs_mbuf = m0;
6282 txs->txs_firstdesc = txq->txq_next;
6283 txs->txs_ndesc = segs_needed;
6284
6285 /* Set up offload parameters for this packet. */
6286 if (m0->m_pkthdr.csum_flags &
6287 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6288 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6289 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6290 if (wm_tx_offload(sc, txs, &cksumcmd,
6291 &cksumfields) != 0) {
6292 /* Error message already displayed. */
6293 bus_dmamap_unload(sc->sc_dmat, dmamap);
6294 continue;
6295 }
6296 } else {
6297 cksumcmd = 0;
6298 cksumfields = 0;
6299 }
6300
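		/*
		 * IFCS makes the chip append the FCS to every frame;
		 * IDE enables the transmit interrupt delay so that
		 * completions can be batched.
		 */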
6301 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
6302
6303 /* Sync the DMA map. */
6304 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6305 BUS_DMASYNC_PREWRITE);
6306
6307 /* Initialize the transmit descriptor. */
6308 for (nexttx = txq->txq_next, seg = 0;
6309 seg < dmamap->dm_nsegs; seg++) {
6310 for (seglen = dmamap->dm_segs[seg].ds_len,
6311 curaddr = dmamap->dm_segs[seg].ds_addr;
6312 seglen != 0;
6313 curaddr += curlen, seglen -= curlen,
6314 nexttx = WM_NEXTTX(txq, nexttx)) {
6315 curlen = seglen;
6316
6317 /*
6318 * So says the Linux driver:
6319 * Work around for premature descriptor
6320 * write-backs in TSO mode. Append a
6321 * 4-byte sentinel descriptor.
6322 */
6323 if (use_tso && seg == dmamap->dm_nsegs - 1 &&
6324 curlen > 8)
6325 curlen -= 4;
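				/*
				 * Trimming 4 bytes here makes the inner
				 * loop go around once more, emitting the
				 * 4-byte sentinel descriptor that
				 * segs_needed reserved room for above.
				 */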
6326
6327 wm_set_dma_addr(
6328 &txq->txq_descs[nexttx].wtx_addr, curaddr);
6329 txq->txq_descs[nexttx].wtx_cmdlen
6330 = htole32(cksumcmd | curlen);
6331 txq->txq_descs[nexttx].wtx_fields.wtxu_status
6332 = 0;
6333 txq->txq_descs[nexttx].wtx_fields.wtxu_options
6334 = cksumfields;
6335 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6336 lasttx = nexttx;
6337
6338 DPRINTF(WM_DEBUG_TX,
6339 ("%s: TX: desc %d: low %#" PRIx64 ", "
6340 "len %#04zx\n",
6341 device_xname(sc->sc_dev), nexttx,
6342 (uint64_t)curaddr, curlen));
6343 }
6344 }
6345
6346 KASSERT(lasttx != -1);
6347
6348 /*
6349 * Set up the command byte on the last descriptor of
6350 * the packet. If we're in the interrupt delay window,
6351 * delay the interrupt.
6352 */
6353 txq->txq_descs[lasttx].wtx_cmdlen |=
6354 htole32(WTX_CMD_EOP | WTX_CMD_RS);
6355
6356 /*
6357 * If VLANs are enabled and the packet has a VLAN tag, set
6358 * up the descriptor to encapsulate the packet for us.
6359 *
6360 * This is only valid on the last descriptor of the packet.
6361 */
6362 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6363 txq->txq_descs[lasttx].wtx_cmdlen |=
6364 htole32(WTX_CMD_VLE);
6365 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
6366 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6367 }
6368
6369 txs->txs_lastdesc = lasttx;
6370
6371 DPRINTF(WM_DEBUG_TX,
6372 ("%s: TX: desc %d: cmdlen 0x%08x\n",
6373 device_xname(sc->sc_dev),
6374 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6375
6376 /* Sync the descriptors we're using. */
6377 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6378 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6379
6380 /* Give the packet to the chip. */
6381 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6382
6383 DPRINTF(WM_DEBUG_TX,
6384 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6385
6386 DPRINTF(WM_DEBUG_TX,
6387 ("%s: TX: finished transmitting packet, job %d\n",
6388 device_xname(sc->sc_dev), txq->txq_snext));
6389
6390 /* Advance the tx pointer. */
6391 txq->txq_free -= txs->txs_ndesc;
6392 txq->txq_next = nexttx;
6393
6394 txq->txq_sfree--;
6395 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6396
6397 /* Pass the packet to any BPF listeners. */
6398 bpf_mtap(ifp, m0);
6399 }
6400
6401 if (m0 != NULL) {
6402 ifp->if_flags |= IFF_OACTIVE;
6403 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6404 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
6405 __func__));
6406 m_freem(m0);
6407 }
6408
6409 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6410 /* No more slots; notify upper layer. */
6411 ifp->if_flags |= IFF_OACTIVE;
6412 }
6413
6414 if (txq->txq_free != ofree) {
6415 /* Set a watchdog timer in case the chip flakes out. */
6416 ifp->if_timer = 5;
6417 }
6418 }
6419
6420 /*
6421 * wm_nq_tx_offload:
6422 *
6423 * Set up TCP/IP checksumming parameters for the
6424 * specified packet, for NEWQUEUE devices
6425 */
6426 static int
6427 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
6428 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
6429 {
6430 struct mbuf *m0 = txs->txs_mbuf;
6431 struct m_tag *mtag;
6432 uint32_t vl_len, mssidx, cmdc;
6433 struct ether_header *eh;
6434 int offset, iphl;
6435
6436 /*
6437 * XXX It would be nice if the mbuf pkthdr had offset
6438 * fields for the protocol headers.
6439 */
6440 *cmdlenp = 0;
6441 *fieldsp = 0;
6442
6443 eh = mtod(m0, struct ether_header *);
6444 switch (htons(eh->ether_type)) {
6445 case ETHERTYPE_IP:
6446 case ETHERTYPE_IPV6:
6447 offset = ETHER_HDR_LEN;
6448 break;
6449
6450 case ETHERTYPE_VLAN:
6451 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6452 break;
6453
6454 default:
6455 /* Don't support this protocol or encapsulation. */
6456 *do_csum = false;
6457 return 0;
6458 }
6459 *do_csum = true;
6460 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
6461 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
6462
6463 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
6464 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
6465
6466 if ((m0->m_pkthdr.csum_flags &
6467 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
6468 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6469 } else {
6470 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6471 }
6472 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
6473 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
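	/*
	 * vl_len now carries the MAC and IP header lengths for the
	 * advanced context descriptor; the VLAN tag, if any, is OR'd
	 * in just below.
	 */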
6474
6475 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6476 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
6477 << NQTXC_VLLEN_VLAN_SHIFT);
6478 *cmdlenp |= NQTX_CMD_VLE;
6479 }
6480
6481 mssidx = 0;
6482
6483 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6484 int hlen = offset + iphl;
6485 int tcp_hlen;
6486 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6487
6488 if (__predict_false(m0->m_len <
6489 (hlen + sizeof(struct tcphdr)))) {
6490 /*
6491 * TCP/IP headers are not in the first mbuf; we need
6492 * to do this the slow and painful way. Let's just
6493 * hope this doesn't happen very often.
6494 */
6495 struct tcphdr th;
6496
6497 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
6498
6499 m_copydata(m0, hlen, sizeof(th), &th);
6500 if (v4) {
6501 struct ip ip;
6502
6503 m_copydata(m0, offset, sizeof(ip), &ip);
6504 ip.ip_len = 0;
6505 m_copyback(m0,
6506 offset + offsetof(struct ip, ip_len),
6507 sizeof(ip.ip_len), &ip.ip_len);
6508 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6509 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6510 } else {
6511 struct ip6_hdr ip6;
6512
6513 m_copydata(m0, offset, sizeof(ip6), &ip6);
6514 ip6.ip6_plen = 0;
6515 m_copyback(m0,
6516 offset + offsetof(struct ip6_hdr, ip6_plen),
6517 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6518 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6519 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6520 }
6521 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6522 sizeof(th.th_sum), &th.th_sum);
6523
6524 tcp_hlen = th.th_off << 2;
6525 } else {
6526 /*
6527 * TCP/IP headers are in the first mbuf; we can do
6528 * this the easy way.
6529 */
6530 struct tcphdr *th;
6531
6532 if (v4) {
6533 struct ip *ip =
6534 (void *)(mtod(m0, char *) + offset);
6535 th = (void *)(mtod(m0, char *) + hlen);
6536
6537 ip->ip_len = 0;
6538 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6539 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6540 } else {
6541 struct ip6_hdr *ip6 =
6542 (void *)(mtod(m0, char *) + offset);
6543 th = (void *)(mtod(m0, char *) + hlen);
6544
6545 ip6->ip6_plen = 0;
6546 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6547 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6548 }
6549 tcp_hlen = th->th_off << 2;
6550 }
6551 hlen += tcp_hlen;
6552 *cmdlenp |= NQTX_CMD_TSE;
6553
6554 if (v4) {
6555 WM_EVCNT_INCR(&sc->sc_ev_txtso);
6556 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
6557 } else {
6558 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
6559 *fieldsp |= NQTXD_FIELDS_TUXSM;
6560 }
6561 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
6562 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6563 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
6564 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
6565 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
6566 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
6567 } else {
6568 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
6569 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6570 }
6571
6572 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
6573 *fieldsp |= NQTXD_FIELDS_IXSM;
6574 cmdc |= NQTXC_CMD_IP4;
6575 }
6576
6577 if (m0->m_pkthdr.csum_flags &
6578 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6579 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
6580 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6581 cmdc |= NQTXC_CMD_TCP;
6582 } else {
6583 cmdc |= NQTXC_CMD_UDP;
6584 }
6585 cmdc |= NQTXC_CMD_IP4;
6586 *fieldsp |= NQTXD_FIELDS_TUXSM;
6587 }
6588 if (m0->m_pkthdr.csum_flags &
6589 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6590 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
6591 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6592 cmdc |= NQTXC_CMD_TCP;
6593 } else {
6594 cmdc |= NQTXC_CMD_UDP;
6595 }
6596 cmdc |= NQTXC_CMD_IP6;
6597 *fieldsp |= NQTXD_FIELDS_TUXSM;
6598 }
6599
6600 /* Fill in the context descriptor. */
6601 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
6602 htole32(vl_len);
6603 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
6604 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
6605 htole32(cmdc);
6606 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
6607 htole32(mssidx);
6608 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6609 DPRINTF(WM_DEBUG_TX,
6610 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
6611 txq->txq_next, 0, vl_len));
6612 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
6613 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6614 txs->txs_ndesc++;
6615 return 0;
6616 }
6617
6618 /*
6619 * wm_nq_start: [ifnet interface function]
6620 *
6621 * Start packet transmission on the interface for NEWQUEUE devices
6622 */
6623 static void
6624 wm_nq_start(struct ifnet *ifp)
6625 {
6626 struct wm_softc *sc = ifp->if_softc;
6627 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6628
6629 WM_TX_LOCK(txq);
6630 if (!sc->sc_stopping)
6631 wm_nq_start_locked(ifp);
6632 WM_TX_UNLOCK(txq);
6633 }
6634
6635 static void
6636 wm_nq_start_locked(struct ifnet *ifp)
6637 {
6638 struct wm_softc *sc = ifp->if_softc;
6639 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6640
6641 wm_nq_send_common_locked(ifp, txq, false);
6642 }
6643
6644 static inline int
6645 wm_nq_select_txqueue(struct ifnet *ifp, struct mbuf *m)
6646 {
6647 struct wm_softc *sc = ifp->if_softc;
6648 u_int cpuid = cpu_index(curcpu());
6649
6650 /*
6651 	 * Currently, use a simple CPU-based distribution strategy.
6652 	 * TODO:
6653 	 * distribute by flow ID (RSS hash value).
6654 */
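	/*
	 * For example, with sc_nqueues == 4 and sc_affinity_offset == 1,
	 * the CPU with index 2 maps to Tx queue (2 + 1) % 4 == 3.
	 */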
6655 return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
6656 }
6657
6658 static int
6659 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
6660 {
6661 int qid;
6662 struct wm_softc *sc = ifp->if_softc;
6663 struct wm_txqueue *txq;
6664
6665 qid = wm_nq_select_txqueue(ifp, m);
6666 txq = &sc->sc_queue[qid].wmq_txq;
6667
6668 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
6669 m_freem(m);
6670 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6671 return ENOBUFS;
6672 }
6673
6674 if (WM_TX_TRYLOCK(txq)) {
6675 /* XXXX should be per TX queue */
6676 ifp->if_obytes += m->m_pkthdr.len;
6677 if (m->m_flags & M_MCAST)
6678 ifp->if_omcasts++;
6679
6680 if (!sc->sc_stopping)
6681 wm_nq_transmit_locked(ifp, txq);
6682 WM_TX_UNLOCK(txq);
6683 }
6684
6685 return 0;
6686 }
6687
6688 static void
6689 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
6690 {
6691
6692 wm_nq_send_common_locked(ifp, txq, true);
6693 }
6694
6695 static void
6696 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
6697 bool is_transmit)
6698 {
6699 struct wm_softc *sc = ifp->if_softc;
6700 struct mbuf *m0;
6701 struct m_tag *mtag;
6702 struct wm_txsoft *txs;
6703 bus_dmamap_t dmamap;
6704 int error, nexttx, lasttx = -1, seg, segs_needed;
6705 bool do_csum, sent;
6706
6707 KASSERT(WM_TX_LOCKED(txq));
6708
6709 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6710 return;
6711 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
6712 return;
6713
6714 sent = false;
6715
6716 /*
6717 * Loop through the send queue, setting up transmit descriptors
6718 * until we drain the queue, or use up all available transmit
6719 * descriptors.
6720 */
6721 for (;;) {
6722 m0 = NULL;
6723
6724 /* Get a work queue entry. */
6725 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6726 wm_txeof(sc, txq);
6727 if (txq->txq_sfree == 0) {
6728 DPRINTF(WM_DEBUG_TX,
6729 ("%s: TX: no free job descriptors\n",
6730 device_xname(sc->sc_dev)));
6731 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
6732 break;
6733 }
6734 }
6735
6736 /* Grab a packet off the queue. */
6737 if (is_transmit)
6738 m0 = pcq_get(txq->txq_interq);
6739 else
6740 IFQ_DEQUEUE(&ifp->if_snd, m0);
6741 if (m0 == NULL)
6742 break;
6743
6744 DPRINTF(WM_DEBUG_TX,
6745 ("%s: TX: have packet to transmit: %p\n",
6746 device_xname(sc->sc_dev), m0));
6747
6748 txs = &txq->txq_soft[txq->txq_snext];
6749 dmamap = txs->txs_dmamap;
6750
6751 /*
6752 * Load the DMA map. If this fails, the packet either
6753 * didn't fit in the allotted number of segments, or we
6754 * were short on resources. For the too-many-segments
6755 * case, we simply report an error and drop the packet,
6756 * since we can't sanely copy a jumbo packet to a single
6757 * buffer.
6758 */
6759 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6760 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6761 if (error) {
6762 if (error == EFBIG) {
6763 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6764 log(LOG_ERR, "%s: Tx packet consumes too many "
6765 "DMA segments, dropping...\n",
6766 device_xname(sc->sc_dev));
6767 wm_dump_mbuf_chain(sc, m0);
6768 m_freem(m0);
6769 continue;
6770 }
6771 /* Short on resources, just stop for now. */
6772 DPRINTF(WM_DEBUG_TX,
6773 ("%s: TX: dmamap load failed: %d\n",
6774 device_xname(sc->sc_dev), error));
6775 break;
6776 }
6777
6778 segs_needed = dmamap->dm_nsegs;
6779
6780 /*
6781 * Ensure we have enough descriptors free to describe
6782 * the packet. Note, we always reserve one descriptor
6783 * at the end of the ring due to the semantics of the
6784 * TDT register, plus one more in the event we need
6785 * to load offload context.
6786 */
6787 if (segs_needed > txq->txq_free - 2) {
6788 /*
6789 * Not enough free descriptors to transmit this
6790 * packet. We haven't committed anything yet,
6791 * so just unload the DMA map, put the packet
6792 			 * back on the queue, and punt.  Notify the upper
6793 * layer that there are no more slots left.
6794 */
6795 DPRINTF(WM_DEBUG_TX,
6796 ("%s: TX: need %d (%d) descriptors, have %d\n",
6797 device_xname(sc->sc_dev), dmamap->dm_nsegs,
6798 segs_needed, txq->txq_free - 1));
6799 txq->txq_flags |= WM_TXQ_NO_SPACE;
6800 bus_dmamap_unload(sc->sc_dmat, dmamap);
6801 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
6802 break;
6803 }
6804
6805 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6806
6807 DPRINTF(WM_DEBUG_TX,
6808 ("%s: TX: packet has %d (%d) DMA segments\n",
6809 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6810
6811 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
6812
6813 /*
6814 * Store a pointer to the packet so that we can free it
6815 * later.
6816 *
6817 * Initially, we consider the number of descriptors the
6818 * packet uses the number of DMA segments. This may be
6819 * incremented by 1 if we do checksum offload (a descriptor
6820 * is used to set the checksum context).
6821 */
6822 txs->txs_mbuf = m0;
6823 txs->txs_firstdesc = txq->txq_next;
6824 txs->txs_ndesc = segs_needed;
6825
6826 /* Set up offload parameters for this packet. */
6827 uint32_t cmdlen, fields, dcmdlen;
6828 if (m0->m_pkthdr.csum_flags &
6829 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6830 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6831 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6832 if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
6833 &do_csum) != 0) {
6834 /* Error message already displayed. */
6835 bus_dmamap_unload(sc->sc_dmat, dmamap);
6836 continue;
6837 }
6838 } else {
6839 do_csum = false;
6840 cmdlen = 0;
6841 fields = 0;
6842 }
6843
6844 /* Sync the DMA map. */
6845 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6846 BUS_DMASYNC_PREWRITE);
6847
6848 /* Initialize the first transmit descriptor. */
6849 nexttx = txq->txq_next;
6850 if (!do_csum) {
6851 /* setup a legacy descriptor */
6852 wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
6853 dmamap->dm_segs[0].ds_addr);
6854 txq->txq_descs[nexttx].wtx_cmdlen =
6855 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
6856 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
6857 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
6858 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
6859 NULL) {
6860 txq->txq_descs[nexttx].wtx_cmdlen |=
6861 htole32(WTX_CMD_VLE);
6862 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
6863 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6864 } else {
6865 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6866 }
6867 dcmdlen = 0;
6868 } else {
6869 /* setup an advanced data descriptor */
6870 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6871 htole64(dmamap->dm_segs[0].ds_addr);
6872 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
6873 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
6874 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
6875 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
6876 htole32(fields);
6877 DPRINTF(WM_DEBUG_TX,
6878 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
6879 device_xname(sc->sc_dev), nexttx,
6880 (uint64_t)dmamap->dm_segs[0].ds_addr));
6881 DPRINTF(WM_DEBUG_TX,
6882 ("\t 0x%08x%08x\n", fields,
6883 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
6884 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
6885 }
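		/*
		 * dcmdlen carries the per-descriptor flags for the
		 * remaining segments: zero leaves them as legacy
		 * descriptors, while NQTX_DTYP_D | NQTX_CMD_DEXT marks
		 * them as advanced data descriptors.
		 */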
6886
6887 lasttx = nexttx;
6888 nexttx = WM_NEXTTX(txq, nexttx);
6889 /*
6890 		 * Fill in the next descriptors.  The legacy and advanced
6891 		 * formats are laid out the same from here on.
6892 */
6893 for (seg = 1; seg < dmamap->dm_nsegs;
6894 seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
6895 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6896 htole64(dmamap->dm_segs[seg].ds_addr);
6897 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
6898 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
6899 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
6900 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
6901 lasttx = nexttx;
6902
6903 DPRINTF(WM_DEBUG_TX,
6904 ("%s: TX: desc %d: %#" PRIx64 ", "
6905 "len %#04zx\n",
6906 device_xname(sc->sc_dev), nexttx,
6907 (uint64_t)dmamap->dm_segs[seg].ds_addr,
6908 dmamap->dm_segs[seg].ds_len));
6909 }
6910
6911 KASSERT(lasttx != -1);
6912
6913 /*
6914 * Set up the command byte on the last descriptor of
6915 * the packet. If we're in the interrupt delay window,
6916 * delay the interrupt.
6917 */
6918 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
6919 (NQTX_CMD_EOP | NQTX_CMD_RS));
6920 txq->txq_descs[lasttx].wtx_cmdlen |=
6921 htole32(WTX_CMD_EOP | WTX_CMD_RS);
6922
6923 txs->txs_lastdesc = lasttx;
6924
6925 DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
6926 device_xname(sc->sc_dev),
6927 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6928
6929 /* Sync the descriptors we're using. */
6930 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6931 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6932
6933 /* Give the packet to the chip. */
6934 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6935 sent = true;
6936
6937 DPRINTF(WM_DEBUG_TX,
6938 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6939
6940 DPRINTF(WM_DEBUG_TX,
6941 ("%s: TX: finished transmitting packet, job %d\n",
6942 device_xname(sc->sc_dev), txq->txq_snext));
6943
6944 /* Advance the tx pointer. */
6945 txq->txq_free -= txs->txs_ndesc;
6946 txq->txq_next = nexttx;
6947
6948 txq->txq_sfree--;
6949 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6950
6951 /* Pass the packet to any BPF listeners. */
6952 bpf_mtap(ifp, m0);
6953 }
6954
6955 if (m0 != NULL) {
6956 txq->txq_flags |= WM_TXQ_NO_SPACE;
6957 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6958 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
6959 __func__));
6960 m_freem(m0);
6961 }
6962
6963 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6964 /* No more slots; notify upper layer. */
6965 txq->txq_flags |= WM_TXQ_NO_SPACE;
6966 }
6967
6968 if (sent) {
6969 /* Set a watchdog timer in case the chip flakes out. */
6970 ifp->if_timer = 5;
6971 }
6972 }
6973
6974 /* Interrupt */
6975
6976 /*
6977 * wm_txeof:
6978 *
6979 * Helper; handle transmit interrupts.
6980 */
6981 static int
6982 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
6983 {
6984 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6985 struct wm_txsoft *txs;
6986 bool processed = false;
6987 int count = 0;
6988 int i;
6989 uint8_t status;
6990
6991 KASSERT(WM_TX_LOCKED(txq));
6992
6993 if (sc->sc_stopping)
6994 return 0;
6995
6996 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6997 txq->txq_flags &= ~WM_TXQ_NO_SPACE;
6998 else
6999 ifp->if_flags &= ~IFF_OACTIVE;
7000
7001 /*
7002 * Go through the Tx list and free mbufs for those
7003 * frames which have been transmitted.
7004 */
7005 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
7006 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
7007 txs = &txq->txq_soft[i];
7008
7009 DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
7010 device_xname(sc->sc_dev), i));
7011
7012 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
7013 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
7014
7015 status =
7016 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
7017 if ((status & WTX_ST_DD) == 0) {
7018 wm_cdtxsync(txq, txs->txs_lastdesc, 1,
7019 BUS_DMASYNC_PREREAD);
7020 break;
7021 }
7022
7023 processed = true;
7024 count++;
7025 DPRINTF(WM_DEBUG_TX,
7026 ("%s: TX: job %d done: descs %d..%d\n",
7027 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
7028 txs->txs_lastdesc));
7029
7030 /*
7031 * XXX We should probably be using the statistics
7032 * XXX registers, but I don't know if they exist
7033 * XXX on chips before the i82544.
7034 */
7035
7036 #ifdef WM_EVENT_COUNTERS
7037 if (status & WTX_ST_TU)
7038 WM_EVCNT_INCR(&sc->sc_ev_tu);
7039 #endif /* WM_EVENT_COUNTERS */
7040
7041 if (status & (WTX_ST_EC | WTX_ST_LC)) {
7042 ifp->if_oerrors++;
7043 if (status & WTX_ST_LC)
7044 log(LOG_WARNING, "%s: late collision\n",
7045 device_xname(sc->sc_dev));
7046 else if (status & WTX_ST_EC) {
7047 ifp->if_collisions += 16;
7048 log(LOG_WARNING, "%s: excessive collisions\n",
7049 device_xname(sc->sc_dev));
7050 }
7051 } else
7052 ifp->if_opackets++;
7053
7054 txq->txq_free += txs->txs_ndesc;
7055 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
7056 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
7057 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
7058 m_freem(txs->txs_mbuf);
7059 txs->txs_mbuf = NULL;
7060 }
7061
7062 /* Update the dirty transmit buffer pointer. */
7063 txq->txq_sdirty = i;
7064 DPRINTF(WM_DEBUG_TX,
7065 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
7066
7067 if (count != 0)
7068 rnd_add_uint32(&sc->rnd_source, count);
7069
7070 /*
7071 * If there are no more pending transmissions, cancel the watchdog
7072 * timer.
7073 */
7074 if (txq->txq_sfree == WM_TXQUEUELEN(txq))
7075 ifp->if_timer = 0;
7076
7077 return processed;
7078 }
7079
7080 /*
7081 * wm_rxeof:
7082 *
7083 * Helper; handle receive interrupts.
7084 */
7085 static void
7086 wm_rxeof(struct wm_rxqueue *rxq)
7087 {
7088 struct wm_softc *sc = rxq->rxq_sc;
7089 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7090 struct wm_rxsoft *rxs;
7091 struct mbuf *m;
7092 int i, len;
7093 int count = 0;
7094 uint8_t status, errors;
7095 uint16_t vlantag;
7096
7097 KASSERT(WM_RX_LOCKED(rxq));
7098
7099 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
7100 rxs = &rxq->rxq_soft[i];
7101
7102 DPRINTF(WM_DEBUG_RX,
7103 ("%s: RX: checking descriptor %d\n",
7104 device_xname(sc->sc_dev), i));
7105
7106 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
7107
7108 status = rxq->rxq_descs[i].wrx_status;
7109 errors = rxq->rxq_descs[i].wrx_errors;
7110 len = le16toh(rxq->rxq_descs[i].wrx_len);
7111 vlantag = rxq->rxq_descs[i].wrx_special;
7112
7113 if ((status & WRX_ST_DD) == 0) {
7114 /* We have processed all of the receive descriptors. */
7115 wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
7116 break;
7117 }
7118
7119 count++;
7120 if (__predict_false(rxq->rxq_discard)) {
7121 DPRINTF(WM_DEBUG_RX,
7122 ("%s: RX: discarding contents of descriptor %d\n",
7123 device_xname(sc->sc_dev), i));
7124 wm_init_rxdesc(rxq, i);
7125 if (status & WRX_ST_EOP) {
7126 /* Reset our state. */
7127 DPRINTF(WM_DEBUG_RX,
7128 ("%s: RX: resetting rxdiscard -> 0\n",
7129 device_xname(sc->sc_dev)));
7130 rxq->rxq_discard = 0;
7131 }
7132 continue;
7133 }
7134
7135 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7136 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
7137
7138 m = rxs->rxs_mbuf;
7139
7140 /*
7141 * Add a new receive buffer to the ring, unless of
7142 * course the length is zero. Treat the latter as a
7143 * failed mapping.
7144 */
7145 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
7146 /*
7147 * Failed, throw away what we've done so
7148 * far, and discard the rest of the packet.
7149 */
7150 ifp->if_ierrors++;
7151 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7152 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
7153 wm_init_rxdesc(rxq, i);
7154 if ((status & WRX_ST_EOP) == 0)
7155 rxq->rxq_discard = 1;
7156 if (rxq->rxq_head != NULL)
7157 m_freem(rxq->rxq_head);
7158 WM_RXCHAIN_RESET(rxq);
7159 DPRINTF(WM_DEBUG_RX,
7160 ("%s: RX: Rx buffer allocation failed, "
7161 "dropping packet%s\n", device_xname(sc->sc_dev),
7162 rxq->rxq_discard ? " (discard)" : ""));
7163 continue;
7164 }
7165
7166 m->m_len = len;
7167 rxq->rxq_len += len;
7168 DPRINTF(WM_DEBUG_RX,
7169 ("%s: RX: buffer at %p len %d\n",
7170 device_xname(sc->sc_dev), m->m_data, len));
7171
7172 /* If this is not the end of the packet, keep looking. */
7173 if ((status & WRX_ST_EOP) == 0) {
7174 WM_RXCHAIN_LINK(rxq, m);
7175 DPRINTF(WM_DEBUG_RX,
7176 ("%s: RX: not yet EOP, rxlen -> %d\n",
7177 device_xname(sc->sc_dev), rxq->rxq_len));
7178 continue;
7179 }
7180
7181 		/*
7182 		 * Okay, we have the entire packet now.  The chip is
7183 		 * configured to include the FCS except on I350, I354
7184 		 * and I21[01] (not all chips can be configured to
7185 		 * strip it), so we need to trim it here.  We may also
7186 		 * need to adjust the length of the previous mbuf in
7187 		 * the chain if the current mbuf is too short.  Due to
7188 		 * an erratum, the RCTL_SECRC bit is always set on
7189 		 * I350, so the FCS is already stripped there.
7190 		 */
7191 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
7192 && (sc->sc_type != WM_T_I210)
7193 && (sc->sc_type != WM_T_I211)) {
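			/*
			 * For example, if the final mbuf holds only 2
			 * bytes, those are FCS bytes and the remaining
			 * ETHER_CRC_LEN - 2 bytes of FCS must come off
			 * the previous mbuf in the chain.
			 */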
7194 if (m->m_len < ETHER_CRC_LEN) {
7195 rxq->rxq_tail->m_len
7196 -= (ETHER_CRC_LEN - m->m_len);
7197 m->m_len = 0;
7198 } else
7199 m->m_len -= ETHER_CRC_LEN;
7200 len = rxq->rxq_len - ETHER_CRC_LEN;
7201 } else
7202 len = rxq->rxq_len;
7203
7204 WM_RXCHAIN_LINK(rxq, m);
7205
7206 *rxq->rxq_tailp = NULL;
7207 m = rxq->rxq_head;
7208
7209 WM_RXCHAIN_RESET(rxq);
7210
7211 DPRINTF(WM_DEBUG_RX,
7212 ("%s: RX: have entire packet, len -> %d\n",
7213 device_xname(sc->sc_dev), len));
7214
7215 /* If an error occurred, update stats and drop the packet. */
7216 if (errors &
7217 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
7218 if (errors & WRX_ER_SE)
7219 log(LOG_WARNING, "%s: symbol error\n",
7220 device_xname(sc->sc_dev));
7221 else if (errors & WRX_ER_SEQ)
7222 log(LOG_WARNING, "%s: receive sequence error\n",
7223 device_xname(sc->sc_dev));
7224 else if (errors & WRX_ER_CE)
7225 log(LOG_WARNING, "%s: CRC error\n",
7226 device_xname(sc->sc_dev));
7227 m_freem(m);
7228 continue;
7229 }
7230
7231 /* No errors. Receive the packet. */
7232 m_set_rcvif(m, ifp);
7233 m->m_pkthdr.len = len;
7234
7235 /*
7236 * If VLANs are enabled, VLAN packets have been unwrapped
7237 * for us. Associate the tag with the packet.
7238 */
7239 /* XXXX should check for i350 and i354 */
7240 if ((status & WRX_ST_VP) != 0) {
7241 VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
7242 }
7243
7244 /* Set up checksum info for this packet. */
7245 if ((status & WRX_ST_IXSM) == 0) {
7246 if (status & WRX_ST_IPCS) {
7247 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
7248 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
7249 if (errors & WRX_ER_IPE)
7250 m->m_pkthdr.csum_flags |=
7251 M_CSUM_IPv4_BAD;
7252 }
7253 if (status & WRX_ST_TCPCS) {
7254 /*
7255 * Note: we don't know if this was TCP or UDP,
7256 * so we just set both bits, and expect the
7257 * upper layers to deal.
7258 */
7259 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
7260 m->m_pkthdr.csum_flags |=
7261 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7262 M_CSUM_TCPv6 | M_CSUM_UDPv6;
7263 if (errors & WRX_ER_TCPE)
7264 m->m_pkthdr.csum_flags |=
7265 M_CSUM_TCP_UDP_BAD;
7266 }
7267 }
7268
7269 ifp->if_ipackets++;
7270
7271 WM_RX_UNLOCK(rxq);
7272
7273 /* Pass this up to any BPF listeners. */
7274 bpf_mtap(ifp, m);
7275
7276 /* Pass it on. */
7277 if_percpuq_enqueue(sc->sc_ipq, m);
7278
7279 WM_RX_LOCK(rxq);
7280
7281 if (sc->sc_stopping)
7282 break;
7283 }
7284
7285 /* Update the receive pointer. */
7286 rxq->rxq_ptr = i;
7287 if (count != 0)
7288 rnd_add_uint32(&sc->rnd_source, count);
7289
7290 DPRINTF(WM_DEBUG_RX,
7291 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
7292 }
7293
7294 /*
7295 * wm_linkintr_gmii:
7296 *
7297 * Helper; handle link interrupts for GMII.
7298 */
7299 static void
7300 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
7301 {
7302
7303 KASSERT(WM_CORE_LOCKED(sc));
7304
7305 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7306 __func__));
7307
7308 if (icr & ICR_LSC) {
7309 uint32_t status = CSR_READ(sc, WMREG_STATUS);
7310
7311 if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
7312 wm_gig_downshift_workaround_ich8lan(sc);
7313
7314 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
7315 device_xname(sc->sc_dev)));
7316 mii_pollstat(&sc->sc_mii);
7317 if (sc->sc_type == WM_T_82543) {
7318 int miistatus, active;
7319
7320 /*
7321 * With 82543, we need to force speed and
7322 * duplex on the MAC equal to what the PHY
7323 * speed and duplex configuration is.
7324 */
7325 miistatus = sc->sc_mii.mii_media_status;
7326
7327 if (miistatus & IFM_ACTIVE) {
7328 active = sc->sc_mii.mii_media_active;
7329 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7330 switch (IFM_SUBTYPE(active)) {
7331 case IFM_10_T:
7332 sc->sc_ctrl |= CTRL_SPEED_10;
7333 break;
7334 case IFM_100_TX:
7335 sc->sc_ctrl |= CTRL_SPEED_100;
7336 break;
7337 case IFM_1000_T:
7338 sc->sc_ctrl |= CTRL_SPEED_1000;
7339 break;
7340 default:
7341 /*
7342 					 * Fiber?
7343 					 * Should not enter here.
7344 */
7345 printf("unknown media (%x)\n", active);
7346 break;
7347 }
7348 if (active & IFM_FDX)
7349 sc->sc_ctrl |= CTRL_FD;
7350 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7351 }
7352 } else if ((sc->sc_type == WM_T_ICH8)
7353 && (sc->sc_phytype == WMPHY_IGP_3)) {
7354 wm_kmrn_lock_loss_workaround_ich8lan(sc);
7355 } else if (sc->sc_type == WM_T_PCH) {
7356 wm_k1_gig_workaround_hv(sc,
7357 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
7358 }
7359
7360 if ((sc->sc_phytype == WMPHY_82578)
7361 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
7362 == IFM_1000_T)) {
7363
7364 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
7365 delay(200*1000); /* XXX too big */
7366
7367 /* Link stall fix for link up */
7368 wm_gmii_hv_writereg(sc->sc_dev, 1,
7369 HV_MUX_DATA_CTRL,
7370 HV_MUX_DATA_CTRL_GEN_TO_MAC
7371 | HV_MUX_DATA_CTRL_FORCE_SPEED);
7372 wm_gmii_hv_writereg(sc->sc_dev, 1,
7373 HV_MUX_DATA_CTRL,
7374 HV_MUX_DATA_CTRL_GEN_TO_MAC);
7375 }
7376 }
7377 } else if (icr & ICR_RXSEQ) {
7378 DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
7379 device_xname(sc->sc_dev)));
7380 }
7381 }
7382
7383 /*
7384 * wm_linkintr_tbi:
7385 *
7386 * Helper; handle link interrupts for TBI mode.
7387 */
7388 static void
7389 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
7390 {
7391 uint32_t status;
7392
7393 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7394 __func__));
7395
7396 status = CSR_READ(sc, WMREG_STATUS);
7397 if (icr & ICR_LSC) {
7398 if (status & STATUS_LU) {
7399 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
7400 device_xname(sc->sc_dev),
7401 (status & STATUS_FD) ? "FDX" : "HDX"));
7402 /*
7403 * NOTE: CTRL will update TFCE and RFCE automatically,
7404 * so we should update sc->sc_ctrl
7405 */
7406
7407 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7408 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7409 sc->sc_fcrtl &= ~FCRTL_XONE;
7410 if (status & STATUS_FD)
7411 sc->sc_tctl |=
7412 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7413 else
7414 sc->sc_tctl |=
7415 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7416 if (sc->sc_ctrl & CTRL_TFCE)
7417 sc->sc_fcrtl |= FCRTL_XONE;
7418 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7419 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7420 WMREG_OLD_FCRTL : WMREG_FCRTL,
7421 sc->sc_fcrtl);
7422 sc->sc_tbi_linkup = 1;
7423 } else {
7424 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
7425 device_xname(sc->sc_dev)));
7426 sc->sc_tbi_linkup = 0;
7427 }
7428 /* Update LED */
7429 wm_tbi_serdes_set_linkled(sc);
7430 } else if (icr & ICR_RXSEQ) {
7431 DPRINTF(WM_DEBUG_LINK,
7432 ("%s: LINK: Receive sequence error\n",
7433 device_xname(sc->sc_dev)));
7434 }
7435 }
7436
7437 /*
7438 * wm_linkintr_serdes:
7439 *
7440 * Helper; handle link interrupts for SERDES mode.
7441 */
7442 static void
7443 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
7444 {
7445 struct mii_data *mii = &sc->sc_mii;
7446 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7447 uint32_t pcs_adv, pcs_lpab, reg;
7448
7449 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7450 __func__));
7451
7452 if (icr & ICR_LSC) {
7453 /* Check PCS */
7454 reg = CSR_READ(sc, WMREG_PCS_LSTS);
7455 if ((reg & PCS_LSTS_LINKOK) != 0) {
7456 mii->mii_media_status |= IFM_ACTIVE;
7457 sc->sc_tbi_linkup = 1;
7458 } else {
7459 mii->mii_media_status |= IFM_NONE;
7460 sc->sc_tbi_linkup = 0;
7461 wm_tbi_serdes_set_linkled(sc);
7462 return;
7463 }
7464 mii->mii_media_active |= IFM_1000_SX;
7465 if ((reg & PCS_LSTS_FDX) != 0)
7466 mii->mii_media_active |= IFM_FDX;
7467 else
7468 mii->mii_media_active |= IFM_HDX;
7469 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7470 /* Check flow */
7471 reg = CSR_READ(sc, WMREG_PCS_LSTS);
7472 if ((reg & PCS_LSTS_AN_COMP) == 0) {
7473 DPRINTF(WM_DEBUG_LINK,
7474 ("XXX LINKOK but not ACOMP\n"));
7475 return;
7476 }
7477 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
7478 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
7479 DPRINTF(WM_DEBUG_LINK,
7480 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
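			/*
			 * Resolve the pause configuration (802.3 annex 28B
			 * style): symmetric pause advertised by both sides
			 * enables flow control in both directions; otherwise
			 * the asymmetric pause bits select the one direction
			 * that is enabled.
			 */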
7481 if ((pcs_adv & TXCW_SYM_PAUSE)
7482 && (pcs_lpab & TXCW_SYM_PAUSE)) {
7483 mii->mii_media_active |= IFM_FLOW
7484 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
7485 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
7486 && (pcs_adv & TXCW_ASYM_PAUSE)
7487 && (pcs_lpab & TXCW_SYM_PAUSE)
7488 && (pcs_lpab & TXCW_ASYM_PAUSE))
7489 mii->mii_media_active |= IFM_FLOW
7490 | IFM_ETH_TXPAUSE;
7491 else if ((pcs_adv & TXCW_SYM_PAUSE)
7492 && (pcs_adv & TXCW_ASYM_PAUSE)
7493 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
7494 && (pcs_lpab & TXCW_ASYM_PAUSE))
7495 mii->mii_media_active |= IFM_FLOW
7496 | IFM_ETH_RXPAUSE;
7497 }
7498 /* Update LED */
7499 wm_tbi_serdes_set_linkled(sc);
7500 } else {
7501 DPRINTF(WM_DEBUG_LINK,
7502 ("%s: LINK: Receive sequence error\n",
7503 device_xname(sc->sc_dev)));
7504 }
7505 }
7506
7507 /*
7508 * wm_linkintr:
7509 *
7510 * Helper; handle link interrupts.
7511 */
7512 static void
7513 wm_linkintr(struct wm_softc *sc, uint32_t icr)
7514 {
7515
7516 KASSERT(WM_CORE_LOCKED(sc));
7517
7518 if (sc->sc_flags & WM_F_HAS_MII)
7519 wm_linkintr_gmii(sc, icr);
7520 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7521 && (sc->sc_type >= WM_T_82575))
7522 wm_linkintr_serdes(sc, icr);
7523 else
7524 wm_linkintr_tbi(sc, icr);
7525 }
7526
7527 /*
7528 * wm_intr_legacy:
7529 *
7530 * Interrupt service routine for INTx and MSI.
7531 */
7532 static int
7533 wm_intr_legacy(void *arg)
7534 {
7535 struct wm_softc *sc = arg;
7536 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7537 struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
7538 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7539 uint32_t icr, rndval = 0;
7540 int handled = 0;
7541
7542 DPRINTF(WM_DEBUG_TX,
7543 ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
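	/*
	 * Reading ICR acknowledges (clears) the asserted bits, so keep
	 * looping until no interrupt cause we care about remains.
	 */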
7544 while (1 /* CONSTCOND */) {
7545 icr = CSR_READ(sc, WMREG_ICR);
7546 if ((icr & sc->sc_icr) == 0)
7547 break;
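		/* Remember the first ICR value for the entropy pool */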
7548 if (rndval == 0)
7549 rndval = icr;
7550
7551 WM_RX_LOCK(rxq);
7552
7553 if (sc->sc_stopping) {
7554 WM_RX_UNLOCK(rxq);
7555 break;
7556 }
7557
7558 handled = 1;
7559
7560 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7561 if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
7562 DPRINTF(WM_DEBUG_RX,
7563 ("%s: RX: got Rx intr 0x%08x\n",
7564 device_xname(sc->sc_dev),
7565 icr & (ICR_RXDMT0 | ICR_RXT0)));
7566 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
7567 }
7568 #endif
7569 wm_rxeof(rxq);
7570
7571 WM_RX_UNLOCK(rxq);
7572 WM_TX_LOCK(txq);
7573
7574 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7575 if (icr & ICR_TXDW) {
7576 DPRINTF(WM_DEBUG_TX,
7577 ("%s: TX: got TXDW interrupt\n",
7578 device_xname(sc->sc_dev)));
7579 WM_EVCNT_INCR(&sc->sc_ev_txdw);
7580 }
7581 #endif
7582 wm_txeof(sc, txq);
7583
7584 WM_TX_UNLOCK(txq);
7585 WM_CORE_LOCK(sc);
7586
7587 if (icr & (ICR_LSC | ICR_RXSEQ)) {
7588 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7589 wm_linkintr(sc, icr);
7590 }
7591
7592 WM_CORE_UNLOCK(sc);
7593
7594 if (icr & ICR_RXO) {
7595 #if defined(WM_DEBUG)
7596 log(LOG_WARNING, "%s: Receive overrun\n",
7597 device_xname(sc->sc_dev));
7598 #endif /* defined(WM_DEBUG) */
7599 }
7600 }
7601
7602 rnd_add_uint32(&sc->rnd_source, rndval);
7603
7604 if (handled) {
7605 /* Try to get more packets going. */
7606 ifp->if_start(ifp);
7607 }
7608
7609 return handled;
7610 }
7611
7612 static int
7613 wm_txrxintr_msix(void *arg)
7614 {
7615 struct wm_queue *wmq = arg;
7616 struct wm_txqueue *txq = &wmq->wmq_txq;
7617 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
7618 struct wm_softc *sc = txq->txq_sc;
7619 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7620
7621 KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
7622
7623 DPRINTF(WM_DEBUG_TX,
7624 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
7625
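	/*
	 * Mask this queue's interrupt while servicing it: via IMC on the
	 * 82574, via the EIMC queue bits on the 82575 and via the
	 * per-vector EIMC bit on newer chips. It's re-enabled below.
	 */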
7626 if (sc->sc_type == WM_T_82574)
7627 CSR_WRITE(sc, WMREG_IMC,
    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
7628 else if (sc->sc_type == WM_T_82575)
7629 CSR_WRITE(sc, WMREG_EIMC,
    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
7630 else
7631 CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
7632
7633 if (!sc->sc_stopping) {
7634 WM_TX_LOCK(txq);
7635
7636 WM_EVCNT_INCR(&sc->sc_ev_txdw);
7637 wm_txeof(sc, txq);
7638
7639 /* Try to get more packets going. */
7640 if (pcq_peek(txq->txq_interq) != NULL)
7641 wm_nq_transmit_locked(ifp, txq);
7642 /*
7643 * Some upper layer processing, e.g. ALTQ, still calls
7644 * ifp->if_start(), so service the legacy send queue here too.
7645 */
7646 if (wmq->wmq_id == 0) {
7647 if (!IFQ_IS_EMPTY(&ifp->if_snd))
7648 wm_nq_start_locked(ifp);
7649 }
7650 WM_TX_UNLOCK(txq);
7651 }
7652
7653 DPRINTF(WM_DEBUG_RX,
7654 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
7655
7656 if (!sc->sc_stopping) {
7657 WM_RX_LOCK(rxq);
7658 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
7659 wm_rxeof(rxq);
7660 WM_RX_UNLOCK(rxq);
7661 }
7662
7663 if (sc->sc_type == WM_T_82574)
7664 CSR_WRITE(sc, WMREG_IMS,
    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
7665 else if (sc->sc_type == WM_T_82575)
7666 CSR_WRITE(sc, WMREG_EIMS,
    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
7667 else
7668 CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
7669
7670 return 1;
7671 }
7672
7673 /*
7674 * wm_linkintr_msix:
7675 *
7676 * Interrupt service routine for link status change for MSI-X.
7677 */
7678 static int
7679 wm_linkintr_msix(void *arg)
7680 {
7681 struct wm_softc *sc = arg;
7682 uint32_t reg;
7683
7684 DPRINTF(WM_DEBUG_LINK,
7685 ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
7686
7687 reg = CSR_READ(sc, WMREG_ICR);
7688 WM_CORE_LOCK(sc);
7689 if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
7690 goto out;
7691
7692 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7693 wm_linkintr(sc, ICR_LSC);
7694
7695 out:
7696 WM_CORE_UNLOCK(sc);
7697
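	/* Re-enable the link interrupt: IMS on the 82574, EIMS otherwise */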
7698 if (sc->sc_type == WM_T_82574)
7699 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
7700 else if (sc->sc_type == WM_T_82575)
7701 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
7702 else
7703 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
7704
7705 return 1;
7706 }
7707
7708 /*
7709 * Media related.
7710 * GMII, SGMII, TBI (and SERDES)
7711 */
7712
7713 /* Common */
7714
7715 /*
7716 * wm_tbi_serdes_set_linkled:
7717 *
7718 * Update the link LED on TBI and SERDES devices.
7719 */
7720 static void
7721 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
7722 {
7723
7724 if (sc->sc_tbi_linkup)
7725 sc->sc_ctrl |= CTRL_SWDPIN(0);
7726 else
7727 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7728
7729 /* 82540 or newer devices are active low */
7730 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7731
7732 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7733 }
7734
7735 /* GMII related */
7736
7737 /*
7738 * wm_gmii_reset:
7739 *
7740 * Reset the PHY.
7741 */
7742 static void
7743 wm_gmii_reset(struct wm_softc *sc)
7744 {
7745 uint32_t reg;
7746 int rv;
7747
7748 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
7749 device_xname(sc->sc_dev), __func__));
7750 /* get phy semaphore */
7751 switch (sc->sc_type) {
7752 case WM_T_82571:
7753 case WM_T_82572:
7754 case WM_T_82573:
7755 case WM_T_82574:
7756 case WM_T_82583:
7757 /* XXX should get sw semaphore, too */
7758 rv = wm_get_swsm_semaphore(sc);
7759 break;
7760 case WM_T_82575:
7761 case WM_T_82576:
7762 case WM_T_82580:
7763 case WM_T_I350:
7764 case WM_T_I354:
7765 case WM_T_I210:
7766 case WM_T_I211:
7767 case WM_T_80003:
7768 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7769 break;
7770 case WM_T_ICH8:
7771 case WM_T_ICH9:
7772 case WM_T_ICH10:
7773 case WM_T_PCH:
7774 case WM_T_PCH2:
7775 case WM_T_PCH_LPT:
7776 case WM_T_PCH_SPT:
7777 rv = wm_get_swfwhw_semaphore(sc);
7778 break;
7779 default:
7780 /* nothing to do */
7781 rv = 0;
7782 break;
7783 }
7784 if (rv != 0) {
7785 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7786 __func__);
7787 return;
7788 }
7789
7790 switch (sc->sc_type) {
7791 case WM_T_82542_2_0:
7792 case WM_T_82542_2_1:
7793 /* null */
7794 break;
7795 case WM_T_82543:
7796 /*
7797 * With 82543, we need to force the MAC's speed and duplex
7798 * to match the PHY's speed and duplex configuration.
7799 * In addition, we need to perform a hardware reset on the PHY
7800 * to take it out of reset.
7801 */
7802 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
7803 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7804
7805 /* The PHY reset pin is active-low. */
7806 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7807 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
7808 CTRL_EXT_SWDPIN(4));
7809 reg |= CTRL_EXT_SWDPIO(4);
7810
7811 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7812 CSR_WRITE_FLUSH(sc);
7813 delay(10*1000);
7814
7815 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
7816 CSR_WRITE_FLUSH(sc);
7817 delay(150);
7818 #if 0
7819 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
7820 #endif
7821 delay(20*1000); /* XXX extra delay to get PHY ID? */
7822 break;
7823 case WM_T_82544: /* reset 10000us */
7824 case WM_T_82540:
7825 case WM_T_82545:
7826 case WM_T_82545_3:
7827 case WM_T_82546:
7828 case WM_T_82546_3:
7829 case WM_T_82541:
7830 case WM_T_82541_2:
7831 case WM_T_82547:
7832 case WM_T_82547_2:
7833 case WM_T_82571: /* reset 100us */
7834 case WM_T_82572:
7835 case WM_T_82573:
7836 case WM_T_82574:
7837 case WM_T_82575:
7838 case WM_T_82576:
7839 case WM_T_82580:
7840 case WM_T_I350:
7841 case WM_T_I354:
7842 case WM_T_I210:
7843 case WM_T_I211:
7844 case WM_T_82583:
7845 case WM_T_80003:
7846 /* generic reset */
7847 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7848 CSR_WRITE_FLUSH(sc);
7849 delay(20000);
7850 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7851 CSR_WRITE_FLUSH(sc);
7852 delay(20000);
7853
7854 if ((sc->sc_type == WM_T_82541)
7855 || (sc->sc_type == WM_T_82541_2)
7856 || (sc->sc_type == WM_T_82547)
7857 || (sc->sc_type == WM_T_82547_2)) {
7858 /* workarounds for IGP are done in igp_reset() */
7859 /* XXX add code to set LED after phy reset */
7860 }
7861 break;
7862 case WM_T_ICH8:
7863 case WM_T_ICH9:
7864 case WM_T_ICH10:
7865 case WM_T_PCH:
7866 case WM_T_PCH2:
7867 case WM_T_PCH_LPT:
7868 case WM_T_PCH_SPT:
7869 /* generic reset */
7870 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7871 CSR_WRITE_FLUSH(sc);
7872 delay(100);
7873 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7874 CSR_WRITE_FLUSH(sc);
7875 delay(150);
7876 break;
7877 default:
7878 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
7879 __func__);
7880 break;
7881 }
7882
7883 /* release PHY semaphore */
7884 switch (sc->sc_type) {
7885 case WM_T_82571:
7886 case WM_T_82572:
7887 case WM_T_82573:
7888 case WM_T_82574:
7889 case WM_T_82583:
7890 /* XXX should put sw semaphore, too */
7891 wm_put_swsm_semaphore(sc);
7892 break;
7893 case WM_T_82575:
7894 case WM_T_82576:
7895 case WM_T_82580:
7896 case WM_T_I350:
7897 case WM_T_I354:
7898 case WM_T_I210:
7899 case WM_T_I211:
7900 case WM_T_80003:
7901 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7902 break;
7903 case WM_T_ICH8:
7904 case WM_T_ICH9:
7905 case WM_T_ICH10:
7906 case WM_T_PCH:
7907 case WM_T_PCH2:
7908 case WM_T_PCH_LPT:
7909 case WM_T_PCH_SPT:
7910 wm_put_swfwhw_semaphore(sc);
7911 break;
7912 default:
7913 /* nothing to do */
7914 rv = 0;
7915 break;
7916 }
7917
7918 /* get_cfg_done */
7919 wm_get_cfg_done(sc);
7920
7921 /* extra setup */
7922 switch (sc->sc_type) {
7923 case WM_T_82542_2_0:
7924 case WM_T_82542_2_1:
7925 case WM_T_82543:
7926 case WM_T_82544:
7927 case WM_T_82540:
7928 case WM_T_82545:
7929 case WM_T_82545_3:
7930 case WM_T_82546:
7931 case WM_T_82546_3:
7932 case WM_T_82541_2:
7933 case WM_T_82547_2:
7934 case WM_T_82571:
7935 case WM_T_82572:
7936 case WM_T_82573:
7937 case WM_T_82575:
7938 case WM_T_82576:
7939 case WM_T_82580:
7940 case WM_T_I350:
7941 case WM_T_I354:
7942 case WM_T_I210:
7943 case WM_T_I211:
7944 case WM_T_80003:
7945 /* null */
7946 break;
7947 case WM_T_82574:
7948 case WM_T_82583:
7949 wm_lplu_d0_disable(sc);
7950 break;
7951 case WM_T_82541:
7952 case WM_T_82547:
7953 /* XXX Actively configure the LED after PHY reset */
7954 break;
7955 case WM_T_ICH8:
7956 case WM_T_ICH9:
7957 case WM_T_ICH10:
7958 case WM_T_PCH:
7959 case WM_T_PCH2:
7960 case WM_T_PCH_LPT:
7961 case WM_T_PCH_SPT:
7962 /* Allow time for h/w to get to a quiescent state after reset */
7963 delay(10*1000);
7964
7965 if (sc->sc_type == WM_T_PCH)
7966 wm_hv_phy_workaround_ich8lan(sc);
7967
7968 if (sc->sc_type == WM_T_PCH2)
7969 wm_lv_phy_workaround_ich8lan(sc);
7970
7971 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
7972 /*
7973 * dummy read to clear the phy wakeup bit after lcd
7974 * reset
7975 */
7976 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
7977 }
7978
7979 /*
7980 * XXX Configure the LCD with the extended configuration region
7981 * in NVM
7982 */
7983
7984 /* Disable D0 LPLU. */
7985 if (sc->sc_type >= WM_T_PCH) /* PCH* */
7986 wm_lplu_d0_disable_pch(sc);
7987 else
7988 wm_lplu_d0_disable(sc); /* ICH* */
7989 break;
7990 default:
7991 panic("%s: unknown type\n", __func__);
7992 break;
7993 }
7994 }
7995
7996 /*
7997 * wm_get_phy_id_82575:
7998 *
7999 * Return PHY ID. Return -1 if it failed.
8000 */
8001 static int
8002 wm_get_phy_id_82575(struct wm_softc *sc)
8003 {
8004 uint32_t reg;
8005 int phyid = -1;
8006
8007 /* XXX */
8008 if ((sc->sc_flags & WM_F_SGMII) == 0)
8009 return -1;
8010
8011 if (wm_sgmii_uses_mdio(sc)) {
8012 switch (sc->sc_type) {
8013 case WM_T_82575:
8014 case WM_T_82576:
8015 reg = CSR_READ(sc, WMREG_MDIC);
8016 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
8017 break;
8018 case WM_T_82580:
8019 case WM_T_I350:
8020 case WM_T_I354:
8021 case WM_T_I210:
8022 case WM_T_I211:
8023 reg = CSR_READ(sc, WMREG_MDICNFG);
8024 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
8025 break;
8026 default:
8027 return -1;
8028 }
8029 }
8030
8031 return phyid;
8032 }
8033
8035 /*
8036 * wm_gmii_mediainit:
8037 *
8038 * Initialize media for use on 1000BASE-T devices.
8039 */
8040 static void
8041 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
8042 {
8043 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8044 struct mii_data *mii = &sc->sc_mii;
8045 uint32_t reg;
8046
8047 /* We have GMII. */
8048 sc->sc_flags |= WM_F_HAS_MII;
8049
8050 if (sc->sc_type == WM_T_80003)
8051 sc->sc_tipg = TIPG_1000T_80003_DFLT;
8052 else
8053 sc->sc_tipg = TIPG_1000T_DFLT;
8054
8055 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
8056 if ((sc->sc_type == WM_T_82580)
8057 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
8058 || (sc->sc_type == WM_T_I211)) {
8059 reg = CSR_READ(sc, WMREG_PHPM);
8060 reg &= ~PHPM_GO_LINK_D;
8061 CSR_WRITE(sc, WMREG_PHPM, reg);
8062 }
8063
8064 /*
8065 * Let the chip set speed/duplex on its own based on
8066 * signals from the PHY.
8067 * XXXbouyer - I'm not sure this is right for the 80003,
8068 * the em driver only sets CTRL_SLU here - but it seems to work.
8069 */
8070 sc->sc_ctrl |= CTRL_SLU;
8071 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8072
8073 /* Initialize our media structures and probe the GMII. */
8074 mii->mii_ifp = ifp;
8075
8076 /*
8077 * Determine the PHY access method.
8078 *
8079 * For SGMII, use SGMII specific method.
8080 *
8081 * For some devices, we can determine the PHY access method
8082 * from sc_type.
8083 *
8084 * For ICH and PCH variants, it's difficult to determine the PHY
8085 * access method by sc_type, so use the PCI product ID for some
8086 * devices.
8087 * For other ICH8 variants, try the IGP method first. If the PHY
8088 * can't be detected, fall back to the BM method.
8089 */
8090 switch (prodid) {
8091 case PCI_PRODUCT_INTEL_PCH_M_LM:
8092 case PCI_PRODUCT_INTEL_PCH_M_LC:
8093 /* 82577 */
8094 sc->sc_phytype = WMPHY_82577;
8095 break;
8096 case PCI_PRODUCT_INTEL_PCH_D_DM:
8097 case PCI_PRODUCT_INTEL_PCH_D_DC:
8098 /* 82578 */
8099 sc->sc_phytype = WMPHY_82578;
8100 break;
8101 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
8102 case PCI_PRODUCT_INTEL_PCH2_LV_V:
8103 /* 82579 */
8104 sc->sc_phytype = WMPHY_82579;
8105 break;
8106 case PCI_PRODUCT_INTEL_82801I_BM:
8107 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
8108 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
8109 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
8110 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
8111 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
8112 /* 82567 */
8113 sc->sc_phytype = WMPHY_BM;
8114 mii->mii_readreg = wm_gmii_bm_readreg;
8115 mii->mii_writereg = wm_gmii_bm_writereg;
8116 break;
8117 default:
8118 if (((sc->sc_flags & WM_F_SGMII) != 0)
8119 && !wm_sgmii_uses_mdio(sc)) {
8120 /* SGMII */
8121 mii->mii_readreg = wm_sgmii_readreg;
8122 mii->mii_writereg = wm_sgmii_writereg;
8123 } else if (sc->sc_type >= WM_T_80003) {
8124 /* 80003 */
8125 mii->mii_readreg = wm_gmii_i80003_readreg;
8126 mii->mii_writereg = wm_gmii_i80003_writereg;
8127 } else if (sc->sc_type >= WM_T_I210) {
8128 /* I210 and I211 */
8129 mii->mii_readreg = wm_gmii_gs40g_readreg;
8130 mii->mii_writereg = wm_gmii_gs40g_writereg;
8131 } else if (sc->sc_type >= WM_T_82580) {
8132 /* 82580, I350 and I354 */
8133 sc->sc_phytype = WMPHY_82580;
8134 mii->mii_readreg = wm_gmii_82580_readreg;
8135 mii->mii_writereg = wm_gmii_82580_writereg;
8136 } else if (sc->sc_type >= WM_T_82544) {
8137 /* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
8138 mii->mii_readreg = wm_gmii_i82544_readreg;
8139 mii->mii_writereg = wm_gmii_i82544_writereg;
8140 } else {
8141 mii->mii_readreg = wm_gmii_i82543_readreg;
8142 mii->mii_writereg = wm_gmii_i82543_writereg;
8143 }
8144 break;
8145 }
8146 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
8147 /* All PCH* use _hv_ */
8148 mii->mii_readreg = wm_gmii_hv_readreg;
8149 mii->mii_writereg = wm_gmii_hv_writereg;
8150 }
8151 mii->mii_statchg = wm_gmii_statchg;
8152
8153 wm_gmii_reset(sc);
8154
8155 sc->sc_ethercom.ec_mii = &sc->sc_mii;
8156 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
8157 wm_gmii_mediastatus);
8158
8159 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
8160 || (sc->sc_type == WM_T_82580)
8161 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
8162 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
8163 if ((sc->sc_flags & WM_F_SGMII) == 0) {
8164 /* Attach only one port */
8165 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
8166 MII_OFFSET_ANY, MIIF_DOPAUSE);
8167 } else {
8168 int i, id;
8169 uint32_t ctrl_ext;
8170
8171 id = wm_get_phy_id_82575(sc);
8172 if (id != -1) {
8173 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
8174 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
8175 }
8176 if ((id == -1)
8177 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
8178 /* Power on sgmii phy if it is disabled */
8179 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8180 CSR_WRITE(sc, WMREG_CTRL_EXT,
8181 ctrl_ext & ~CTRL_EXT_SWDPIN(3));
8182 CSR_WRITE_FLUSH(sc);
8183 delay(300*1000); /* XXX too long */
8184
8185 /* from 1 to 8 */
8186 for (i = 1; i < 8; i++)
8187 mii_attach(sc->sc_dev, &sc->sc_mii,
8188 0xffffffff, i, MII_OFFSET_ANY,
8189 MIIF_DOPAUSE);
8190
8191 /* restore previous sfp cage power state */
8192 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8193 }
8194 }
8195 } else {
8196 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8197 MII_OFFSET_ANY, MIIF_DOPAUSE);
8198 }
8199
8200 /*
8201 * If the MAC is PCH2 or PCH_LPT and it failed to detect an MII PHY, call
8202 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
8203 */
8204 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
8205 (LIST_FIRST(&mii->mii_phys) == NULL)) {
8206 wm_set_mdio_slow_mode_hv(sc);
8207 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8208 MII_OFFSET_ANY, MIIF_DOPAUSE);
8209 }
8210
8211 /*
8212 * (For ICH8 variants)
8213 * If PHY detection failed, use BM's r/w function and retry.
8214 */
8215 if (LIST_FIRST(&mii->mii_phys) == NULL) {
8216 /* if failed, retry with *_bm_* */
8217 mii->mii_readreg = wm_gmii_bm_readreg;
8218 mii->mii_writereg = wm_gmii_bm_writereg;
8219
8220 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8221 MII_OFFSET_ANY, MIIF_DOPAUSE);
8222 }
8223
8224 if (LIST_FIRST(&mii->mii_phys) == NULL) {
8225 /* No PHY was found */
8226 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
8227 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
8228 sc->sc_phytype = WMPHY_NONE;
8229 } else {
8230 /*
8231 * PHY Found!
8232 * Check PHY type.
8233 */
8234 uint32_t model;
8235 struct mii_softc *child;
8236
8237 child = LIST_FIRST(&mii->mii_phys);
8238 model = child->mii_mpd_model;
8239 if (model == MII_MODEL_yyINTEL_I82566)
8240 sc->sc_phytype = WMPHY_IGP_3;
8241
8242 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
8243 }
8244 }
8245
8246 /*
8247 * wm_gmii_mediachange: [ifmedia interface function]
8248 *
8249 * Set hardware to newly-selected media on a 1000BASE-T device.
8250 */
8251 static int
8252 wm_gmii_mediachange(struct ifnet *ifp)
8253 {
8254 struct wm_softc *sc = ifp->if_softc;
8255 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8256 int rc;
8257
8258 if ((ifp->if_flags & IFF_UP) == 0)
8259 return 0;
8260
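	/*
	 * For autoselect, or on chips newer than the 82543, the MAC takes
	 * speed and duplex from the PHY; otherwise they must be forced by
	 * hand below.
	 */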
8261 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
8262 sc->sc_ctrl |= CTRL_SLU;
8263 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8264 || (sc->sc_type > WM_T_82543)) {
8265 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
8266 } else {
8267 sc->sc_ctrl &= ~CTRL_ASDE;
8268 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
8269 if (ife->ifm_media & IFM_FDX)
8270 sc->sc_ctrl |= CTRL_FD;
8271 switch (IFM_SUBTYPE(ife->ifm_media)) {
8272 case IFM_10_T:
8273 sc->sc_ctrl |= CTRL_SPEED_10;
8274 break;
8275 case IFM_100_TX:
8276 sc->sc_ctrl |= CTRL_SPEED_100;
8277 break;
8278 case IFM_1000_T:
8279 sc->sc_ctrl |= CTRL_SPEED_1000;
8280 break;
8281 default:
8282 panic("wm_gmii_mediachange: bad media 0x%x",
8283 ife->ifm_media);
8284 }
8285 }
8286 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8287 if (sc->sc_type <= WM_T_82543)
8288 wm_gmii_reset(sc);
8289
8290 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
8291 return 0;
8292 return rc;
8293 }
8294
8295 /*
8296 * wm_gmii_mediastatus: [ifmedia interface function]
8297 *
8298 * Get the current interface media status on a 1000BASE-T device.
8299 */
8300 static void
8301 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8302 {
8303 struct wm_softc *sc = ifp->if_softc;
8304
8305 ether_mediastatus(ifp, ifmr);
8306 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8307 | sc->sc_flowflags;
8308 }
8309
8310 #define MDI_IO CTRL_SWDPIN(2)
8311 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
8312 #define MDI_CLK CTRL_SWDPIN(3)
8313
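/*
 * wm_i82543_mii_sendbits:
 *
 *	Bit-bang data out the 82543's software-definable pins, MSB first:
 *	MDI_IO (SWDPIN 2) carries each bit while MDI_CLK (SWDPIN 3) is
 *	pulsed to clock it into the PHY.
 */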
8314 static void
8315 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
8316 {
8317 uint32_t i, v;
8318
8319 v = CSR_READ(sc, WMREG_CTRL);
8320 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8321 v |= MDI_DIR | CTRL_SWDPIO(3);
8322
8323 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
8324 if (data & i)
8325 v |= MDI_IO;
8326 else
8327 v &= ~MDI_IO;
8328 CSR_WRITE(sc, WMREG_CTRL, v);
8329 CSR_WRITE_FLUSH(sc);
8330 delay(10);
8331 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8332 CSR_WRITE_FLUSH(sc);
8333 delay(10);
8334 CSR_WRITE(sc, WMREG_CTRL, v);
8335 CSR_WRITE_FLUSH(sc);
8336 delay(10);
8337 }
8338 }
8339
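/*
 * wm_i82543_mii_recvbits:
 *
 *	Clock 16 bits in from the PHY over the software-definable pins,
 *	MSB first; MDI_IO is sampled while MDI_CLK is high, with extra
 *	turnaround clocks before and after the data.
 */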
8340 static uint32_t
8341 wm_i82543_mii_recvbits(struct wm_softc *sc)
8342 {
8343 uint32_t v, i, data = 0;
8344
8345 v = CSR_READ(sc, WMREG_CTRL);
8346 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8347 v |= CTRL_SWDPIO(3);
8348
8349 CSR_WRITE(sc, WMREG_CTRL, v);
8350 CSR_WRITE_FLUSH(sc);
8351 delay(10);
8352 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8353 CSR_WRITE_FLUSH(sc);
8354 delay(10);
8355 CSR_WRITE(sc, WMREG_CTRL, v);
8356 CSR_WRITE_FLUSH(sc);
8357 delay(10);
8358
8359 for (i = 0; i < 16; i++) {
8360 data <<= 1;
8361 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8362 CSR_WRITE_FLUSH(sc);
8363 delay(10);
8364 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
8365 data |= 1;
8366 CSR_WRITE(sc, WMREG_CTRL, v);
8367 CSR_WRITE_FLUSH(sc);
8368 delay(10);
8369 }
8370
8371 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8372 CSR_WRITE_FLUSH(sc);
8373 delay(10);
8374 CSR_WRITE(sc, WMREG_CTRL, v);
8375 CSR_WRITE_FLUSH(sc);
8376 delay(10);
8377
8378 return data;
8379 }
8380
8381 #undef MDI_IO
8382 #undef MDI_DIR
8383 #undef MDI_CLK
8384
8385 /*
8386 * wm_gmii_i82543_readreg: [mii interface function]
8387 *
8388 * Read a PHY register on the GMII (i82543 version).
8389 */
8390 static int
8391 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
8392 {
8393 struct wm_softc *sc = device_private(self);
8394 int rv;
8395
8396 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8397 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
8398 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
8399 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
8400
8401 DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
8402 device_xname(sc->sc_dev), phy, reg, rv));
8403
8404 return rv;
8405 }
8406
8407 /*
8408 * wm_gmii_i82543_writereg: [mii interface function]
8409 *
8410 * Write a PHY register on the GMII (i82543 version).
8411 */
8412 static void
8413 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
8414 {
8415 struct wm_softc *sc = device_private(self);
8416
8417 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8418 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
8419 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
8420 (MII_COMMAND_START << 30), 32);
8421 }
8422
8423 /*
8424 * wm_gmii_i82544_readreg: [mii interface function]
8425 *
8426 * Read a PHY register on the GMII.
8427 */
8428 static int
8429 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
8430 {
8431 struct wm_softc *sc = device_private(self);
8432 uint32_t mdic = 0;
8433 int i, rv;
8434
8435 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
8436 MDIC_REGADD(reg));
8437
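	/* Poll for completion (MDIC_READY); each iteration waits 50us */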
8438 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8439 mdic = CSR_READ(sc, WMREG_MDIC);
8440 if (mdic & MDIC_READY)
8441 break;
8442 delay(50);
8443 }
8444
8445 if ((mdic & MDIC_READY) == 0) {
8446 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
8447 device_xname(sc->sc_dev), phy, reg);
8448 rv = 0;
8449 } else if (mdic & MDIC_E) {
8450 #if 0 /* This is normal if no PHY is present. */
8451 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
8452 device_xname(sc->sc_dev), phy, reg);
8453 #endif
8454 rv = 0;
8455 } else {
8456 rv = MDIC_DATA(mdic);
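		/* An all-ones readback usually means no PHY answered */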
8457 if (rv == 0xffff)
8458 rv = 0;
8459 }
8460
8461 return rv;
8462 }
8463
8464 /*
8465 * wm_gmii_i82544_writereg: [mii interface function]
8466 *
8467 * Write a PHY register on the GMII.
8468 */
8469 static void
8470 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
8471 {
8472 struct wm_softc *sc = device_private(self);
8473 uint32_t mdic = 0;
8474 int i;
8475
8476 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
8477 MDIC_REGADD(reg) | MDIC_DATA(val));
8478
8479 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8480 mdic = CSR_READ(sc, WMREG_MDIC);
8481 if (mdic & MDIC_READY)
8482 break;
8483 delay(50);
8484 }
8485
8486 if ((mdic & MDIC_READY) == 0)
8487 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
8488 device_xname(sc->sc_dev), phy, reg);
8489 else if (mdic & MDIC_E)
8490 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
8491 device_xname(sc->sc_dev), phy, reg);
8492 }
8493
8494 /*
8495 * wm_gmii_i80003_readreg: [mii interface function]
8496 *
8497 * Read a PHY register on the Kumeran interface.
8498 * This could be handled by the PHY layer if we didn't have to lock the
8499 * resource ...
8500 */
8501 static int
8502 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
8503 {
8504 struct wm_softc *sc = device_private(self);
8505 int sem;
8506 int rv;
8507
8508 if (phy != 1) /* only one PHY on kumeran bus */
8509 return 0;
8510
8511 sem = swfwphysem[sc->sc_funcid];
8512 if (wm_get_swfw_semaphore(sc, sem)) {
8513 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8514 __func__);
8515 return 0;
8516 }
8517
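	/*
	 * The GG82563 has two page select registers; registers at or above
	 * GG82563_MIN_ALT_REG must be paged via the alternate one.
	 */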
8518 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8519 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8520 reg >> GG82563_PAGE_SHIFT);
8521 } else {
8522 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8523 reg >> GG82563_PAGE_SHIFT);
8524 }
8525 /* Wait another 200us to work around the MDIC ready bit bug */
8526 delay(200);
8527 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8528 delay(200);
8529
8530 wm_put_swfw_semaphore(sc, sem);
8531 return rv;
8532 }
8533
8534 /*
8535 * wm_gmii_i80003_writereg: [mii interface function]
8536 *
8537 * Write a PHY register on the Kumeran interface.
8538 * This could be handled by the PHY layer if we didn't have to lock the
8539 * resource ...
8540 */
8541 static void
8542 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
8543 {
8544 struct wm_softc *sc = device_private(self);
8545 int sem;
8546
8547 if (phy != 1) /* only one PHY on kumeran bus */
8548 return;
8549
8550 sem = swfwphysem[sc->sc_funcid];
8551 if (wm_get_swfw_semaphore(sc, sem)) {
8552 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8553 __func__);
8554 return;
8555 }
8556
8557 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8558 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8559 reg >> GG82563_PAGE_SHIFT);
8560 } else {
8561 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8562 reg >> GG82563_PAGE_SHIFT);
8563 }
8564 /* Wait another 200us to work around the MDIC ready bit bug */
8565 delay(200);
8566 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8567 delay(200);
8568
8569 wm_put_swfw_semaphore(sc, sem);
8570 }
8571
8572 /*
8573 * wm_gmii_bm_readreg: [mii interface function]
8574 *
8575 * Read a PHY register on the BM PHYs (e.g. 82567).
8576 * This could be handled by the PHY layer if we didn't have to lock the
8577 * resource ...
8578 */
8579 static int
8580 wm_gmii_bm_readreg(device_t self, int phy, int reg)
8581 {
8582 struct wm_softc *sc = device_private(self);
8583 int sem;
8584 int rv;
8585
8586 sem = swfwphysem[sc->sc_funcid];
8587 if (wm_get_swfw_semaphore(sc, sem)) {
8588 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8589 __func__);
8590 return 0;
8591 }
8592
8593 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8594 if (phy == 1)
8595 wm_gmii_i82544_writereg(self, phy,
8596 MII_IGPHY_PAGE_SELECT, reg);
8597 else
8598 wm_gmii_i82544_writereg(self, phy,
8599 GG82563_PHY_PAGE_SELECT,
8600 reg >> GG82563_PAGE_SHIFT);
8601 }
8602
8603 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8604 wm_put_swfw_semaphore(sc, sem);
8605 return rv;
8606 }
8607
8608 /*
8609 * wm_gmii_bm_writereg: [mii interface function]
8610 *
8611 * Write a PHY register on the BM PHYs (e.g. 82567).
8612 * This could be handled by the PHY layer if we didn't have to lock the
8613 * resource ...
8614 */
8615 static void
8616 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
8617 {
8618 struct wm_softc *sc = device_private(self);
8619 int sem;
8620
8621 sem = swfwphysem[sc->sc_funcid];
8622 if (wm_get_swfw_semaphore(sc, sem)) {
8623 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8624 __func__);
8625 return;
8626 }
8627
8628 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8629 if (phy == 1)
8630 wm_gmii_i82544_writereg(self, phy,
8631 MII_IGPHY_PAGE_SELECT, reg);
8632 else
8633 wm_gmii_i82544_writereg(self, phy,
8634 GG82563_PHY_PAGE_SELECT,
8635 reg >> GG82563_PAGE_SHIFT);
8636 }
8637
8638 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8639 wm_put_swfw_semaphore(sc, sem);
8640 }
8641
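/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register (page 800). Page 769 is
 *	selected first to set the WUC enable bit (with the host wakeup bit
 *	cleared), then the address/data opcode registers on page 800 are
 *	used, and finally the original page 769 enable value is restored.
 */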
8642 static void
8643 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
8644 {
8645 struct wm_softc *sc = device_private(self);
8646 uint16_t regnum = BM_PHY_REG_NUM(offset);
8647 uint16_t wuce;
8648
8649 /* XXX Gig must be disabled for MDIO accesses to page 800 */
8650 if (sc->sc_type == WM_T_PCH) {
8651 /* XXX the e1000 driver does nothing here... why? */
8652 }
8653
8654 /* Set page 769 */
8655 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8656 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8657
8658 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
8659
8660 wuce &= ~BM_WUC_HOST_WU_BIT;
8661 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
8662 wuce | BM_WUC_ENABLE_BIT);
8663
8664 /* Select page 800 */
8665 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8666 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
8667
8668 /* Write the page 800 register address */
8669 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
8670
8671 if (rd)
8672 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
8673 else
8674 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
8675
8676 /* Set page 769 */
8677 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8678 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8679
8680 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
8681 }
8682
8683 /*
8684 * wm_gmii_hv_readreg: [mii interface function]
8685 *
8686 * Read a PHY register on the HV (PCH and newer) PHYs.
8687 * This could be handled by the PHY layer if we didn't have to lock the
8688 * resource ...
8689 */
8690 static int
8691 wm_gmii_hv_readreg(device_t self, int phy, int reg)
8692 {
8693 struct wm_softc *sc = device_private(self);
8694 uint16_t page = BM_PHY_REG_PAGE(reg);
8695 uint16_t regnum = BM_PHY_REG_NUM(reg);
8696 uint16_t val;
8697 int rv;
8698
8699 if (wm_get_swfwhw_semaphore(sc)) {
8700 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8701 __func__);
8702 return 0;
8703 }
8704
8705 /* XXX Workaround failure in MDIO access while cable is disconnected */
8706 if (sc->sc_phytype == WMPHY_82577) {
8707 /* XXX must write */
8708 }
8709
8710 /* Page 800 works differently than the rest so it has its own func */
8711 if (page == BM_WUC_PAGE) {
8712 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
8713 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
wm_put_swfwhw_semaphore(sc);
8714 return val;
8715
8716 /*
8717 * Pages lower than 768 work differently than the rest, so they
8718 * would need their own function (not implemented yet).
8719 */
8720 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8721 printf("gmii_hv_readreg!!!\n");
wm_put_swfwhw_semaphore(sc);
8722 return 0;
8723 }
8724
8725 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8726 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8727 page << BME1000_PAGE_SHIFT);
8728 }
8729
8730 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
8731 wm_put_swfwhw_semaphore(sc);
8732 return rv;
8733 }
8734
8735 /*
8736 * wm_gmii_hv_writereg: [mii interface function]
8737 *
8738 * Write a PHY register on the HV (PCH and newer) PHYs.
8739 * This could be handled by the PHY layer if we didn't have to lock the
8740 * resource ...
8741 */
8742 static void
8743 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
8744 {
8745 struct wm_softc *sc = device_private(self);
8746 uint16_t page = BM_PHY_REG_PAGE(reg);
8747 uint16_t regnum = BM_PHY_REG_NUM(reg);
8748
8749 if (wm_get_swfwhw_semaphore(sc)) {
8750 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8751 __func__);
8752 return;
8753 }
8754
8755 /* XXX Workaround failure in MDIO access while cable is disconnected */
8756
8757 /* Page 800 works differently than the rest so it has its own func */
8758 if (page == BM_WUC_PAGE) {
8759 uint16_t tmp;
8760
8761 tmp = val;
8762 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
wm_put_swfwhw_semaphore(sc);
8763 return;
8764 }
8765
8766 /*
8767 * Pages lower than 768 work differently than the rest, so they
8768 * would need their own function (not implemented yet).
8769 */
8770 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8771 printf("gmii_hv_writereg!!!\n");
wm_put_swfwhw_semaphore(sc);
8772 return;
8773 }
8774
8775 /*
8776 * XXX Workaround MDIO accesses being disabled after entering IEEE
8777 * Power Down (whenever bit 11 of the PHY control register is set)
8778 */
8779
8780 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8781 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8782 page << BME1000_PAGE_SHIFT);
8783 }
8784
8785 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
8786 wm_put_swfwhw_semaphore(sc);
8787 }
8788
8789 /*
8790 * wm_gmii_82580_readreg: [mii interface function]
8791 *
8792 * Read a PHY register on the 82580 and I350.
8793 * This could be handled by the PHY layer if we didn't have to lock the
8794 * resource ...
8795 */
8796 static int
8797 wm_gmii_82580_readreg(device_t self, int phy, int reg)
8798 {
8799 struct wm_softc *sc = device_private(self);
8800 int sem;
8801 int rv;
8802
8803 sem = swfwphysem[sc->sc_funcid];
8804 if (wm_get_swfw_semaphore(sc, sem)) {
8805 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8806 __func__);
8807 return 0;
8808 }
8809
8810 rv = wm_gmii_i82544_readreg(self, phy, reg);
8811
8812 wm_put_swfw_semaphore(sc, sem);
8813 return rv;
8814 }
8815
8816 /*
8817 * wm_gmii_82580_writereg: [mii interface function]
8818 *
8819 * Write a PHY register on the 82580 and I350.
8820 * This could be handled by the PHY layer if we didn't have to lock the
8821 * resource ...
8822 */
8823 static void
8824 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
8825 {
8826 struct wm_softc *sc = device_private(self);
8827 int sem;
8828
8829 sem = swfwphysem[sc->sc_funcid];
8830 if (wm_get_swfw_semaphore(sc, sem)) {
8831 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8832 __func__);
8833 return;
8834 }
8835
8836 wm_gmii_i82544_writereg(self, phy, reg, val);
8837
8838 wm_put_swfw_semaphore(sc, sem);
8839 }
8840
8841 /*
8842 * wm_gmii_gs40g_readreg: [mii interface function]
8843 *
8844 * Read a PHY register on the I210 and I211.
8845 * This could be handled by the PHY layer if we didn't have to lock the
8846 * resource ...
8847 */
8848 static int
8849 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
8850 {
8851 struct wm_softc *sc = device_private(self);
8852 int sem;
8853 int page, offset;
8854 int rv;
8855
8856 /* Acquire semaphore */
8857 sem = swfwphysem[sc->sc_funcid];
8858 if (wm_get_swfw_semaphore(sc, sem)) {
8859 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8860 __func__);
8861 return 0;
8862 }
8863
8864 /* Page select */
8865 page = reg >> GS40G_PAGE_SHIFT;
8866 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8867
8868 /* Read reg */
8869 offset = reg & GS40G_OFFSET_MASK;
8870 rv = wm_gmii_i82544_readreg(self, phy, offset);
8871
8872 wm_put_swfw_semaphore(sc, sem);
8873 return rv;
8874 }
8875
8876 /*
8877 * wm_gmii_gs40g_writereg: [mii interface function]
8878 *
8879 * Write a PHY register on the I210 and I211.
8880 * This could be handled by the PHY layer if we didn't have to lock the
8881 * resource ...
8882 */
8883 static void
8884 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
8885 {
8886 struct wm_softc *sc = device_private(self);
8887 int sem;
8888 int page, offset;
8889
8890 /* Acquire semaphore */
8891 sem = swfwphysem[sc->sc_funcid];
8892 if (wm_get_swfw_semaphore(sc, sem)) {
8893 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8894 __func__);
8895 return;
8896 }
8897
8898 /* Page select */
8899 page = reg >> GS40G_PAGE_SHIFT;
8900 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8901
8902 /* Write reg */
8903 offset = reg & GS40G_OFFSET_MASK;
8904 wm_gmii_i82544_writereg(self, phy, offset, val);
8905
8906 /* Release semaphore */
8907 wm_put_swfw_semaphore(sc, sem);
8908 }
8909
8910 /*
8911 * wm_gmii_statchg: [mii interface function]
8912 *
8913 * Callback from MII layer when media changes.
8914 */
8915 static void
8916 wm_gmii_statchg(struct ifnet *ifp)
8917 {
8918 struct wm_softc *sc = ifp->if_softc;
8919 struct mii_data *mii = &sc->sc_mii;
8920
8921 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
8922 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8923 sc->sc_fcrtl &= ~FCRTL_XONE;
8924
8925 /*
8926 * Get flow control negotiation result.
8927 */
8928 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
8929 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
8930 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
8931 mii->mii_media_active &= ~IFM_ETH_FMASK;
8932 }
8933
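	/*
	 * Program the negotiated pause settings: TX pause enables TFCE and
	 * the XON frame, RX pause enables RFCE.
	 */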
8934 if (sc->sc_flowflags & IFM_FLOW) {
8935 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
8936 sc->sc_ctrl |= CTRL_TFCE;
8937 sc->sc_fcrtl |= FCRTL_XONE;
8938 }
8939 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
8940 sc->sc_ctrl |= CTRL_RFCE;
8941 }
8942
8943 if (sc->sc_mii.mii_media_active & IFM_FDX) {
8944 DPRINTF(WM_DEBUG_LINK,
8945 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
8946 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8947 } else {
8948 DPRINTF(WM_DEBUG_LINK,
8949 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
8950 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8951 }
8952
8953 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8954 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8955 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
8956 : WMREG_FCRTL, sc->sc_fcrtl);
8957 if (sc->sc_type == WM_T_80003) {
8958 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
8959 case IFM_1000_T:
8960 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8961 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
8962 sc->sc_tipg = TIPG_1000T_80003_DFLT;
8963 break;
8964 default:
8965 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8966 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
8967 sc->sc_tipg = TIPG_10_100_80003_DFLT;
8968 break;
8969 }
8970 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
8971 }
8972 }
8973
8974 /*
8975 * wm_kmrn_readreg:
8976 *
8977 * Read a kumeran register
8978 */
8979 static int
8980 wm_kmrn_readreg(struct wm_softc *sc, int reg)
8981 {
8982 int rv;
8983
8984 if (sc->sc_flags & WM_F_LOCK_SWFW) {
8985 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8986 aprint_error_dev(sc->sc_dev,
8987 "%s: failed to get semaphore\n", __func__);
8988 return 0;
8989 }
8990 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8991 if (wm_get_swfwhw_semaphore(sc)) {
8992 aprint_error_dev(sc->sc_dev,
8993 "%s: failed to get semaphore\n", __func__);
8994 return 0;
8995 }
8996 }
8997
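	/*
	 * Write the register offset with the read-enable (REN) bit set,
	 * then read the data back from the same KUMCTRLSTA register.
	 */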
8998 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8999 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
9000 KUMCTRLSTA_REN);
9001 CSR_WRITE_FLUSH(sc);
9002 delay(2);
9003
9004 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
9005
9006 if (sc->sc_flags & WM_F_LOCK_SWFW)
9007 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9008 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9009 wm_put_swfwhw_semaphore(sc);
9010
9011 return rv;
9012 }
9013
9014 /*
9015 * wm_kmrn_writereg:
9016 *
9017 * Write a kumeran register
9018 */
9019 static void
9020 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
9021 {
9022
9023 if (sc->sc_flags & WM_F_LOCK_SWFW) {
9024 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
9025 aprint_error_dev(sc->sc_dev,
9026 "%s: failed to get semaphore\n", __func__);
9027 return;
9028 }
9029 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
9030 if (wm_get_swfwhw_semaphore(sc)) {
9031 aprint_error_dev(sc->sc_dev,
9032 "%s: failed to get semaphore\n", __func__);
9033 return;
9034 }
9035 }
9036
9037 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
9038 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
9039 (val & KUMCTRLSTA_MASK));
9040
9041 if (sc->sc_flags & WM_F_LOCK_SWFW)
9042 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9043 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9044 wm_put_swfwhw_semaphore(sc);
9045 }
9046
9047 /* SGMII related */
9048
9049 /*
9050 * wm_sgmii_uses_mdio:
9051 *
9052 * Check whether the transaction is to the internal PHY or the external
9053 * MDIO interface. Return true if it's MDIO.
9054 */
9055 static bool
9056 wm_sgmii_uses_mdio(struct wm_softc *sc)
9057 {
9058 uint32_t reg;
9059 bool ismdio = false;
9060
9061 switch (sc->sc_type) {
9062 case WM_T_82575:
9063 case WM_T_82576:
9064 reg = CSR_READ(sc, WMREG_MDIC);
9065 ismdio = ((reg & MDIC_DEST) != 0);
9066 break;
9067 case WM_T_82580:
9068 case WM_T_I350:
9069 case WM_T_I354:
9070 case WM_T_I210:
9071 case WM_T_I211:
9072 reg = CSR_READ(sc, WMREG_MDICNFG);
9073 ismdio = ((reg & MDICNFG_DEST) != 0);
9074 break;
9075 default:
9076 break;
9077 }
9078
9079 return ismdio;
9080 }
9081
9082 /*
9083 * wm_sgmii_readreg: [mii interface function]
9084 *
9085 * Read a PHY register on the SGMII
9086 * This could be handled by the PHY layer if we didn't have to lock the
9087 * resource ...
9088 */
9089 static int
9090 wm_sgmii_readreg(device_t self, int phy, int reg)
9091 {
9092 struct wm_softc *sc = device_private(self);
9093 uint32_t i2ccmd;
9094 int i, rv;
9095
9096 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
9097 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9098 __func__);
9099 return 0;
9100 }
9101
9102 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9103 | (phy << I2CCMD_PHY_ADDR_SHIFT)
9104 | I2CCMD_OPCODE_READ;
9105 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9106
9107 /* Poll the ready bit */
9108 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9109 delay(50);
9110 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9111 if (i2ccmd & I2CCMD_READY)
9112 break;
9113 }
9114 if ((i2ccmd & I2CCMD_READY) == 0)
9115 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
9116 if ((i2ccmd & I2CCMD_ERROR) != 0)
9117 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9118
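	/* Swap the data bytes back from the I2C interface order */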
9119 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
9120
9121 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
9122 return rv;
9123 }
9124
9125 /*
9126 * wm_sgmii_writereg: [mii interface function]
9127 *
9128 * Write a PHY register on the SGMII.
9129 * This could be handled by the PHY layer if we didn't have to lock the
9130 * resource ...
9131 */
9132 static void
9133 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
9134 {
9135 struct wm_softc *sc = device_private(self);
9136 uint32_t i2ccmd;
9137 int i;
9138 int val_swapped;
9139
9140 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
9141 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9142 __func__);
9143 return;
9144 }
9145 /* Swap the data bytes for the I2C interface */
9146 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
9147 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9148 | (phy << I2CCMD_PHY_ADDR_SHIFT)
9149 | I2CCMD_OPCODE_WRITE | val_swapped;
9150 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9151
9152 /* Poll the ready bit */
9153 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9154 delay(50);
9155 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9156 if (i2ccmd & I2CCMD_READY)
9157 break;
9158 }
9159 if ((i2ccmd & I2CCMD_READY) == 0)
9160 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
9161 if ((i2ccmd & I2CCMD_ERROR) != 0)
9162 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9163
9164 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
9165 }
9166
9167 /* TBI related */
9168
9169 /*
9170 * wm_tbi_mediainit:
9171 *
9172 * Initialize media for use on 1000BASE-X devices.
9173 */
9174 static void
9175 wm_tbi_mediainit(struct wm_softc *sc)
9176 {
9177 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9178 const char *sep = "";
9179
9180 if (sc->sc_type < WM_T_82543)
9181 sc->sc_tipg = TIPG_WM_DFLT;
9182 else
9183 sc->sc_tipg = TIPG_LG_DFLT;
9184
9185 sc->sc_tbi_serdes_anegticks = 5;
9186
9187 /* Initialize our media structures */
9188 sc->sc_mii.mii_ifp = ifp;
9189 sc->sc_ethercom.ec_mii = &sc->sc_mii;
9190
9191 if ((sc->sc_type >= WM_T_82575)
9192 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
9193 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9194 wm_serdes_mediachange, wm_serdes_mediastatus);
9195 else
9196 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9197 wm_tbi_mediachange, wm_tbi_mediastatus);
9198
9199 /*
9200 * SWD Pins:
9201 *
9202 * 0 = Link LED (output)
9203 * 1 = Loss Of Signal (input)
9204 */
9205 sc->sc_ctrl |= CTRL_SWDPIO(0);
9206
9207 /* XXX Perhaps this is only for TBI */
9208 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9209 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
9210
9211 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9212 sc->sc_ctrl &= ~CTRL_LRST;
9213
9214 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9215
9216 #define ADD(ss, mm, dd) \
9217 do { \
9218 aprint_normal("%s%s", sep, ss); \
9219 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
9220 sep = ", "; \
9221 } while (/*CONSTCOND*/0)
9222
9223 aprint_normal_dev(sc->sc_dev, "");
9224
9225 /* Only 82545 is LX */
9226 if (sc->sc_type == WM_T_82545) {
9227 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
9228 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
9229 } else {
9230 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
9231 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
9232 }
9233 ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
9234 aprint_normal("\n");
9235
9236 #undef ADD
9237
9238 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
9239 }
9240
9241 /*
9242 * wm_tbi_mediachange: [ifmedia interface function]
9243 *
9244 * Set hardware to newly-selected media on a 1000BASE-X device.
9245 */
9246 static int
9247 wm_tbi_mediachange(struct ifnet *ifp)
9248 {
9249 struct wm_softc *sc = ifp->if_softc;
9250 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9251 uint32_t status;
9252 int i;
9253
9254 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9255 /* XXX need some work for >= 82571 and < 82575 */
9256 if (sc->sc_type < WM_T_82575)
9257 return 0;
9258 }
9259
9260 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9261 || (sc->sc_type >= WM_T_82575))
9262 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9263
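	/*
	 * Take the link out of reset and build the TXCW autonegotiation
	 * advertisement (duplex and pause bits) from the selected media.
	 */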
9264 sc->sc_ctrl &= ~CTRL_LRST;
9265 sc->sc_txcw = TXCW_ANE;
9266 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9267 sc->sc_txcw |= TXCW_FD | TXCW_HD;
9268 else if (ife->ifm_media & IFM_FDX)
9269 sc->sc_txcw |= TXCW_FD;
9270 else
9271 sc->sc_txcw |= TXCW_HD;
9272
9273 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
9274 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
9275
9276 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
9277 device_xname(sc->sc_dev), sc->sc_txcw));
9278 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9279 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9280 CSR_WRITE_FLUSH(sc);
9281 delay(1000);
9282
9283 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
9284 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
9285
9286 /*
9287 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit is set when
9288 * the optics detect a signal; on older chips the sense is inverted.
9289 */
9290 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
9291 /* Have signal; wait for the link to come up. */
9292 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
9293 delay(10000);
9294 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
9295 break;
9296 }
9297
9298 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
9299 device_xname(sc->sc_dev),i));
9300
9301 status = CSR_READ(sc, WMREG_STATUS);
9302 DPRINTF(WM_DEBUG_LINK,
9303 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
9304 device_xname(sc->sc_dev),status, STATUS_LU));
9305 if (status & STATUS_LU) {
9306 /* Link is up. */
9307 DPRINTF(WM_DEBUG_LINK,
9308 ("%s: LINK: set media -> link up %s\n",
9309 device_xname(sc->sc_dev),
9310 (status & STATUS_FD) ? "FDX" : "HDX"));
9311
9312 /*
9313 * NOTE: the hardware updates TFCE and RFCE in CTRL
9314 * automatically, so we re-read it into sc->sc_ctrl.
9315 */
9316 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9317 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9318 sc->sc_fcrtl &= ~FCRTL_XONE;
9319 if (status & STATUS_FD)
9320 sc->sc_tctl |=
9321 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9322 else
9323 sc->sc_tctl |=
9324 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9325 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
9326 sc->sc_fcrtl |= FCRTL_XONE;
9327 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9328 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9329 WMREG_OLD_FCRTL : WMREG_FCRTL,
9330 sc->sc_fcrtl);
9331 sc->sc_tbi_linkup = 1;
9332 } else {
9333 if (i == WM_LINKUP_TIMEOUT)
9334 wm_check_for_link(sc);
9335 /* Link is down. */
9336 DPRINTF(WM_DEBUG_LINK,
9337 ("%s: LINK: set media -> link down\n",
9338 device_xname(sc->sc_dev)));
9339 sc->sc_tbi_linkup = 0;
9340 }
9341 } else {
9342 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
9343 device_xname(sc->sc_dev)));
9344 sc->sc_tbi_linkup = 0;
9345 }
9346
9347 wm_tbi_serdes_set_linkled(sc);
9348
9349 return 0;
9350 }
9351
9352 /*
9353 * wm_tbi_mediastatus: [ifmedia interface function]
9354 *
9355 * Get the current interface media status on a 1000BASE-X device.
9356 */
9357 static void
9358 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9359 {
9360 struct wm_softc *sc = ifp->if_softc;
9361 uint32_t ctrl, status;
9362
9363 ifmr->ifm_status = IFM_AVALID;
9364 ifmr->ifm_active = IFM_ETHER;
9365
9366 status = CSR_READ(sc, WMREG_STATUS);
9367 if ((status & STATUS_LU) == 0) {
9368 ifmr->ifm_active |= IFM_NONE;
9369 return;
9370 }
9371
9372 ifmr->ifm_status |= IFM_ACTIVE;
9373 /* Only 82545 is LX */
9374 if (sc->sc_type == WM_T_82545)
9375 ifmr->ifm_active |= IFM_1000_LX;
9376 else
9377 ifmr->ifm_active |= IFM_1000_SX;
9378 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
9379 ifmr->ifm_active |= IFM_FDX;
9380 else
9381 ifmr->ifm_active |= IFM_HDX;
9382 ctrl = CSR_READ(sc, WMREG_CTRL);
9383 if (ctrl & CTRL_RFCE)
9384 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
9385 if (ctrl & CTRL_TFCE)
9386 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
9387 }
9388
9389 /* XXX TBI only */
9390 static int
9391 wm_check_for_link(struct wm_softc *sc)
9392 {
9393 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9394 uint32_t rxcw;
9395 uint32_t ctrl;
9396 uint32_t status;
9397 uint32_t sig;
9398
9399 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9400 /* XXX need some work for >= 82571 */
9401 if (sc->sc_type >= WM_T_82571) {
9402 sc->sc_tbi_linkup = 1;
9403 return 0;
9404 }
9405 }
9406
9407 rxcw = CSR_READ(sc, WMREG_RXCW);
9408 ctrl = CSR_READ(sc, WMREG_CTRL);
9409 status = CSR_READ(sc, WMREG_STATUS);
9410
9411 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
9412
9413 DPRINTF(WM_DEBUG_LINK,
9414 ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
9415 device_xname(sc->sc_dev), __func__,
9416 ((ctrl & CTRL_SWDPIN(1)) == sig),
9417 ((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
9418
9419 /*
9420 * SWDPIN LU RXCW
9421 * 0 0 0
9422 * 0 0 1 (should not happen)
9423 * 0 1 0 (should not happen)
9424 * 0 1 1 (should not happen)
9425 * 1 0 0 Disable autonego and force linkup
9426 * 1 0 1 got /C/ but not linkup yet
9427 * 1 1 0 (linkup)
9428 * 1 1 1 If IFM_AUTO, back to autonego
9429 *
9430 */
9431 if (((ctrl & CTRL_SWDPIN(1)) == sig)
9432 && ((status & STATUS_LU) == 0)
9433 && ((rxcw & RXCW_C) == 0)) {
9434 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
9435 __func__));
9436 sc->sc_tbi_linkup = 0;
9437 /* Disable auto-negotiation in the TXCW register */
9438 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
9439
9440 /*
9441 * Force link-up and also force full-duplex.
9442 *
9443 * NOTE: the hardware updates TFCE and RFCE in CTRL
9444 * automatically, so we refresh sc->sc_ctrl from it.
9445 */
9446 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
9447 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9448 } else if (((status & STATUS_LU) != 0)
9449 && ((rxcw & RXCW_C) != 0)
9450 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
9451 sc->sc_tbi_linkup = 1;
9452 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
9453 __func__));
9454 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9455 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
9456 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
9457 && ((rxcw & RXCW_C) != 0)) {
9458 DPRINTF(WM_DEBUG_LINK, ("/C/"));
9459 } else {
9460 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
9461 status));
9462 }
9463
9464 return 0;
9465 }
9466
9467 /*
9468 * wm_tbi_tick:
9469 *
9470 * Check the link on TBI devices.
9471 * This function acts as mii_tick().
9472 */
9473 static void
9474 wm_tbi_tick(struct wm_softc *sc)
9475 {
9476 struct mii_data *mii = &sc->sc_mii;
9477 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9478 uint32_t status;
9479
9480 KASSERT(WM_CORE_LOCKED(sc));
9481
9482 status = CSR_READ(sc, WMREG_STATUS);
9483
9484 /* XXX is this needed? */
9485 (void)CSR_READ(sc, WMREG_RXCW);
9486 (void)CSR_READ(sc, WMREG_CTRL);
9487
9488 /* set link status */
9489 if ((status & STATUS_LU) == 0) {
9490 DPRINTF(WM_DEBUG_LINK,
9491 ("%s: LINK: checklink -> down\n",
9492 device_xname(sc->sc_dev)));
9493 sc->sc_tbi_linkup = 0;
9494 } else if (sc->sc_tbi_linkup == 0) {
9495 DPRINTF(WM_DEBUG_LINK,
9496 ("%s: LINK: checklink -> up %s\n",
9497 device_xname(sc->sc_dev),
9498 (status & STATUS_FD) ? "FDX" : "HDX"));
9499 sc->sc_tbi_linkup = 1;
9500 sc->sc_tbi_serdes_ticks = 0;
9501 }
9502
9503 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
9504 goto setled;
9505
9506 if ((status & STATUS_LU) == 0) {
9507 sc->sc_tbi_linkup = 0;
9508 /* If the timer expired, retry autonegotiation */
9509 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9510 && (++sc->sc_tbi_serdes_ticks
9511 >= sc->sc_tbi_serdes_anegticks)) {
9512 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9513 sc->sc_tbi_serdes_ticks = 0;
9514 /*
9515 * Reset the link, and let autonegotiation do
9516 * its thing
9517 */
9518 sc->sc_ctrl |= CTRL_LRST;
9519 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9520 CSR_WRITE_FLUSH(sc);
9521 delay(1000);
9522 sc->sc_ctrl &= ~CTRL_LRST;
9523 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9524 CSR_WRITE_FLUSH(sc);
9525 delay(1000);
9526 CSR_WRITE(sc, WMREG_TXCW,
9527 sc->sc_txcw & ~TXCW_ANE);
9528 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9529 }
9530 }
9531
9532 setled:
9533 wm_tbi_serdes_set_linkled(sc);
9534 }
9535
9536 /* SERDES related */
9537 static void
9538 wm_serdes_power_up_link_82575(struct wm_softc *sc)
9539 {
9540 uint32_t reg;
9541
9542 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9543 && ((sc->sc_flags & WM_F_SGMII) == 0))
9544 return;
9545
9546 reg = CSR_READ(sc, WMREG_PCS_CFG);
9547 reg |= PCS_CFG_PCS_EN;
9548 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
9549
9550 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9551 reg &= ~CTRL_EXT_SWDPIN(3);
9552 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9553 CSR_WRITE_FLUSH(sc);
9554 }
9555
9556 static int
9557 wm_serdes_mediachange(struct ifnet *ifp)
9558 {
9559 struct wm_softc *sc = ifp->if_softc;
9560 bool pcs_autoneg = true; /* XXX */
9561 uint32_t ctrl_ext, pcs_lctl, reg;
9562
9563 /* XXX Currently, this function is not called on 8257[12] */
9564 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9565 || (sc->sc_type >= WM_T_82575))
9566 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9567
9568 wm_serdes_power_up_link_82575(sc);
9569
9570 sc->sc_ctrl |= CTRL_SLU;
9571
9572 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
9573 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
9574
9575 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9576 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
9577 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
9578 case CTRL_EXT_LINK_MODE_SGMII:
9579 pcs_autoneg = true;
9580 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
9581 break;
9582 case CTRL_EXT_LINK_MODE_1000KX:
9583 pcs_autoneg = false;
9584 /* FALLTHROUGH */
9585 default:
9586 if ((sc->sc_type == WM_T_82575)
9587 || (sc->sc_type == WM_T_82576)) {
9588 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
9589 pcs_autoneg = false;
9590 }
9591 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
9592 | CTRL_FRCFDX;
9593 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
9594 }
9595 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9596
9597 if (pcs_autoneg) {
9598 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
9599 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
9600
9601 reg = CSR_READ(sc, WMREG_PCS_ANADV);
9602 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
9603 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
9604 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
9605 } else
9606 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
9607
9608 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
9609
9611 return 0;
9612 }
9613
9614 static void
9615 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9616 {
9617 struct wm_softc *sc = ifp->if_softc;
9618 struct mii_data *mii = &sc->sc_mii;
9619 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9620 uint32_t pcs_adv, pcs_lpab, reg;
9621
9622 ifmr->ifm_status = IFM_AVALID;
9623 ifmr->ifm_active = IFM_ETHER;
9624
9625 /* Check PCS */
9626 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9627 if ((reg & PCS_LSTS_LINKOK) == 0) {
9628 ifmr->ifm_active |= IFM_NONE;
9629 sc->sc_tbi_linkup = 0;
9630 goto setled;
9631 }
9632
9633 sc->sc_tbi_linkup = 1;
9634 ifmr->ifm_status |= IFM_ACTIVE;
9635 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
9636 if ((reg & PCS_LSTS_FDX) != 0)
9637 ifmr->ifm_active |= IFM_FDX;
9638 else
9639 ifmr->ifm_active |= IFM_HDX;
9640 mii->mii_media_active &= ~IFM_ETH_FMASK;
9641 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
9642 /* Check flow */
9643 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9644 if ((reg & PCS_LSTS_AN_COMP) == 0) {
9645 DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
9646 goto setled;
9647 }
9648 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
9649 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
9650 DPRINTF(WM_DEBUG_LINK,
9651 ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
9652 if ((pcs_adv & TXCW_SYM_PAUSE)
9653 && (pcs_lpab & TXCW_SYM_PAUSE)) {
9654 mii->mii_media_active |= IFM_FLOW
9655 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
9656 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
9657 && (pcs_adv & TXCW_ASYM_PAUSE)
9658 && (pcs_lpab & TXCW_SYM_PAUSE)
9659 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9660 mii->mii_media_active |= IFM_FLOW
9661 | IFM_ETH_TXPAUSE;
9662 } else if ((pcs_adv & TXCW_SYM_PAUSE)
9663 && (pcs_adv & TXCW_ASYM_PAUSE)
9664 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
9665 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9666 mii->mii_media_active |= IFM_FLOW
9667 | IFM_ETH_RXPAUSE;
9668 }
9670 }
9671 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
9672 | (mii->mii_media_active & IFM_ETH_FMASK);
9673 setled:
9674 wm_tbi_serdes_set_linkled(sc);
9675 }
9676
9677 /*
9678 * wm_serdes_tick:
9679 *
9680 * Check the link on serdes devices.
9681 */
9682 static void
9683 wm_serdes_tick(struct wm_softc *sc)
9684 {
9685 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9686 struct mii_data *mii = &sc->sc_mii;
9687 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9688 uint32_t reg;
9689
9690 KASSERT(WM_CORE_LOCKED(sc));
9691
9692 mii->mii_media_status = IFM_AVALID;
9693 mii->mii_media_active = IFM_ETHER;
9694
9695 /* Check PCS */
9696 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9697 if ((reg & PCS_LSTS_LINKOK) != 0) {
9698 mii->mii_media_status |= IFM_ACTIVE;
9699 sc->sc_tbi_linkup = 1;
9700 sc->sc_tbi_serdes_ticks = 0;
9701 mii->mii_media_active |= IFM_1000_SX; /* XXX */
9702 if ((reg & PCS_LSTS_FDX) != 0)
9703 mii->mii_media_active |= IFM_FDX;
9704 else
9705 mii->mii_media_active |= IFM_HDX;
9706 } else {
9707 mii->mii_media_status |= IFM_NONE;
9708 sc->sc_tbi_linkup = 0;
9709 /* If the timer expired, retry autonegotiation */
9710 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9711 && (++sc->sc_tbi_serdes_ticks
9712 >= sc->sc_tbi_serdes_anegticks)) {
9713 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9714 sc->sc_tbi_serdes_ticks = 0;
9715 /* XXX */
9716 wm_serdes_mediachange(ifp);
9717 }
9718 }
9719
9720 wm_tbi_serdes_set_linkled(sc);
9721 }
9722
9723 /* SFP related */
9724
9725 static int
9726 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
9727 {
9728 uint32_t i2ccmd;
9729 int i;
9730
9731 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
9732 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9733
9734 /* Poll the ready bit */
9735 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9736 delay(50);
9737 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9738 if (i2ccmd & I2CCMD_READY)
9739 break;
9740 }
9741 if ((i2ccmd & I2CCMD_READY) == 0)
9742 return -1;
9743 if ((i2ccmd & I2CCMD_ERROR) != 0)
9744 return -1;
9745
9746 *data = i2ccmd & 0x00ff;
9747
9748 return 0;
9749 }
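
/*
 * A sketch of the I2CCMD handshake above: the SFP register offset is
 * placed in the REG_ADDR field together with I2CCMD_OPCODE_READ, and
 * once I2CCMD_READY is polled the low byte of I2CCMD holds the data.
 */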
9750
9751 static uint32_t
9752 wm_sfp_get_media_type(struct wm_softc *sc)
9753 {
9754 uint32_t ctrl_ext;
9755 uint8_t val = 0;
9756 int timeout = 3;
9757 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
9758 int rv = -1;
9759
9760 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9761 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
9762 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
9763 CSR_WRITE_FLUSH(sc);
9764
9765 /* Read SFP module data */
9766 while (timeout) {
9767 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
9768 if (rv == 0)
9769 break;
9770 delay(100*1000); /* XXX too big */
9771 timeout--;
9772 }
9773 if (rv != 0)
9774 goto out;
9775 switch (val) {
9776 case SFF_SFP_ID_SFF:
9777 aprint_normal_dev(sc->sc_dev,
9778 "Module/Connector soldered to board\n");
9779 break;
9780 case SFF_SFP_ID_SFP:
9781 aprint_normal_dev(sc->sc_dev, "SFP\n");
9782 break;
9783 case SFF_SFP_ID_UNKNOWN:
9784 goto out;
9785 default:
9786 break;
9787 }
9788
9789 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
9790 if (rv != 0) {
9791 goto out;
9792 }
9793
9794 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
9795 mediatype = WM_MEDIATYPE_SERDES;
9796 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
9797 sc->sc_flags |= WM_F_SGMII;
9798 mediatype = WM_MEDIATYPE_COPPER;
9799 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
9800 sc->sc_flags |= WM_F_SGMII;
9801 mediatype = WM_MEDIATYPE_SERDES;
9802 }
9803
9804 out:
9805 /* Restore I2C interface setting */
9806 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9807
9808 return mediatype;
9809 }
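
/*
 * Mapping sketch, per the flag tests above: 1000BASE-SX/LX modules
 * are treated as SERDES media, 1000BASE-T modules as copper over
 * SGMII, and 100BASE-FX modules as SERDES with SGMII enabled.
 */
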
9810 /*
9811 * NVM related.
9812 * Microwire, SPI (w/wo EERD) and Flash.
9813 */
9814
9815 /* Both spi and uwire */
9816
9817 /*
9818 * wm_eeprom_sendbits:
9819 *
9820 * Send a series of bits to the EEPROM.
9821 */
9822 static void
9823 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
9824 {
9825 uint32_t reg;
9826 int x;
9827
9828 reg = CSR_READ(sc, WMREG_EECD);
9829
9830 for (x = nbits; x > 0; x--) {
9831 if (bits & (1U << (x - 1)))
9832 reg |= EECD_DI;
9833 else
9834 reg &= ~EECD_DI;
9835 CSR_WRITE(sc, WMREG_EECD, reg);
9836 CSR_WRITE_FLUSH(sc);
9837 delay(2);
9838 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9839 CSR_WRITE_FLUSH(sc);
9840 delay(2);
9841 CSR_WRITE(sc, WMREG_EECD, reg);
9842 CSR_WRITE_FLUSH(sc);
9843 delay(2);
9844 }
9845 }
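
/*
 * A rough sketch of the waveform the loop above produces:
 * wm_eeprom_sendbits(sc, 0x6, 3) drives DI = 1, 1, 0 (MSB first),
 * raising and lowering SK around each bit.
 */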
9846
9847 /*
9848 * wm_eeprom_recvbits:
9849 *
9850 * Receive a series of bits from the EEPROM.
9851 */
9852 static void
9853 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
9854 {
9855 uint32_t reg, val;
9856 int x;
9857
9858 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
9859
9860 val = 0;
9861 for (x = nbits; x > 0; x--) {
9862 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9863 CSR_WRITE_FLUSH(sc);
9864 delay(2);
9865 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
9866 val |= (1U << (x - 1));
9867 CSR_WRITE(sc, WMREG_EECD, reg);
9868 CSR_WRITE_FLUSH(sc);
9869 delay(2);
9870 }
9871 *valp = val;
9872 }
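
/*
 * The receive path mirrors the transmit path: e.g.,
 * wm_eeprom_recvbits(sc, &val, 16) raises SK, samples DO, lowers SK,
 * and accumulates the sampled bits into val, MSB first.
 */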
9873
9874 /* Microwire */
9875
9876 /*
9877 * wm_nvm_read_uwire:
9878 *
9879 * Read a word from the EEPROM using the MicroWire protocol.
9880 */
9881 static int
9882 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9883 {
9884 uint32_t reg, val;
9885 int i;
9886
9887 for (i = 0; i < wordcnt; i++) {
9888 /* Clear SK and DI. */
9889 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
9890 CSR_WRITE(sc, WMREG_EECD, reg);
9891
9892 /*
9893 * XXX: workaround for a bug in qemu-0.12.x and prior
9894 * and Xen.
9895 *
9896 * We use this workaround only for the 82540, because qemu's
9897 * e1000 acts as an 82540.
9898 */
9899 if (sc->sc_type == WM_T_82540) {
9900 reg |= EECD_SK;
9901 CSR_WRITE(sc, WMREG_EECD, reg);
9902 reg &= ~EECD_SK;
9903 CSR_WRITE(sc, WMREG_EECD, reg);
9904 CSR_WRITE_FLUSH(sc);
9905 delay(2);
9906 }
9907 /* XXX: end of workaround */
9908
9909 /* Set CHIP SELECT. */
9910 reg |= EECD_CS;
9911 CSR_WRITE(sc, WMREG_EECD, reg);
9912 CSR_WRITE_FLUSH(sc);
9913 delay(2);
9914
9915 /* Shift in the READ command. */
9916 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
9917
9918 /* Shift in address. */
9919 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
9920
9921 /* Shift out the data. */
9922 wm_eeprom_recvbits(sc, &val, 16);
9923 data[i] = val & 0xffff;
9924
9925 /* Clear CHIP SELECT. */
9926 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
9927 CSR_WRITE(sc, WMREG_EECD, reg);
9928 CSR_WRITE_FLUSH(sc);
9929 delay(2);
9930 }
9931
9932 return 0;
9933 }
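
/*
 * Putting the helpers together, one Microwire word read above is,
 * roughly: assert CS, shift out UWIRE_OPC_READ (3 bits) and the word
 * address (sc_nvm_addrbits bits), then shift in 16 data bits. E.g.,
 * a hypothetical read of word 5 on a 6-address-bit part clocks out
 * the opcode followed by 000101 and clocks back one 16-bit word.
 */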
9934
9935 /* SPI */
9936
9937 /*
9938 * Set SPI and FLASH related information from the EECD register.
9939 * For 82541 and 82547, the word size is taken from EEPROM.
9940 */
9941 static int
9942 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
9943 {
9944 int size;
9945 uint32_t reg;
9946 uint16_t data;
9947
9948 reg = CSR_READ(sc, WMREG_EECD);
9949 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
9950
9951 /* Read the size of NVM from EECD by default */
9952 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9953 switch (sc->sc_type) {
9954 case WM_T_82541:
9955 case WM_T_82541_2:
9956 case WM_T_82547:
9957 case WM_T_82547_2:
9958 /* Set dummy value to access EEPROM */
9959 sc->sc_nvm_wordsize = 64;
9960 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
9961 reg = data;
9962 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9963 if (size == 0)
9964 size = 6; /* 64 word size */
9965 else
9966 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
9967 break;
9968 case WM_T_80003:
9969 case WM_T_82571:
9970 case WM_T_82572:
9971 case WM_T_82573: /* SPI case */
9972 case WM_T_82574: /* SPI case */
9973 case WM_T_82583: /* SPI case */
9974 size += NVM_WORD_SIZE_BASE_SHIFT;
9975 if (size > 14)
9976 size = 14;
9977 break;
9978 case WM_T_82575:
9979 case WM_T_82576:
9980 case WM_T_82580:
9981 case WM_T_I350:
9982 case WM_T_I354:
9983 case WM_T_I210:
9984 case WM_T_I211:
9985 size += NVM_WORD_SIZE_BASE_SHIFT;
9986 if (size > 15)
9987 size = 15;
9988 break;
9989 default:
9990 aprint_error_dev(sc->sc_dev,
9991 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
9992 return -1;
9994 }
9995
9996 sc->sc_nvm_wordsize = 1 << size;
9997
9998 return 0;
9999 }
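
/*
 * Worked example (a sketch; the shift constant lives in the register
 * headers): with NVM_WORD_SIZE_BASE_SHIFT == 6, an EECD size field
 * of 2 gives size = 2 + 6 = 8, so sc_nvm_wordsize = 1 << 8 = 256
 * words.
 */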
10000
10001 /*
10002 * wm_nvm_ready_spi:
10003 *
10004 * Wait for a SPI EEPROM to be ready for commands.
10005 */
10006 static int
10007 wm_nvm_ready_spi(struct wm_softc *sc)
10008 {
10009 uint32_t val;
10010 int usec;
10011
10012 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
10013 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
10014 wm_eeprom_recvbits(sc, &val, 8);
10015 if ((val & SPI_SR_RDY) == 0)
10016 break;
10017 }
10018 if (usec >= SPI_MAX_RETRIES) {
10019 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
10020 return 1;
10021 }
10022 return 0;
10023 }
10024
10025 /*
10026 * wm_nvm_read_spi:
10027 *
10028 * Read a word from the EEPROM using the SPI protocol.
10029 */
10030 static int
10031 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10032 {
10033 uint32_t reg, val;
10034 int i;
10035 uint8_t opc;
10036
10037 /* Clear SK and CS. */
10038 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
10039 CSR_WRITE(sc, WMREG_EECD, reg);
10040 CSR_WRITE_FLUSH(sc);
10041 delay(2);
10042
10043 if (wm_nvm_ready_spi(sc))
10044 return 1;
10045
10046 /* Toggle CS to flush commands. */
10047 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
10048 CSR_WRITE_FLUSH(sc);
10049 delay(2);
10050 CSR_WRITE(sc, WMREG_EECD, reg);
10051 CSR_WRITE_FLUSH(sc);
10052 delay(2);
10053
10054 opc = SPI_OPC_READ;
10055 if (sc->sc_nvm_addrbits == 8 && word >= 128)
10056 opc |= SPI_OPC_A8;
10057
10058 wm_eeprom_sendbits(sc, opc, 8);
10059 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
10060
10061 for (i = 0; i < wordcnt; i++) {
10062 wm_eeprom_recvbits(sc, &val, 16);
10063 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
10064 }
10065
10066 /* Raise CS and clear SK. */
10067 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
10068 CSR_WRITE(sc, WMREG_EECD, reg);
10069 CSR_WRITE_FLUSH(sc);
10070 delay(2);
10071
10072 return 0;
10073 }
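
/*
 * Note on the addressing above: SPI EEPROMs are byte addressed, so
 * the word index is shifted left by one. With 8 address bits, words
 * 128 and up need a ninth address bit, which SPI_OPC_A8 supplies in
 * the opcode; e.g. word 0x90 becomes byte address 0x120 with A8 set.
 */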
10074
10075 /* Reads using the EERD register */
10076
10077 static int
10078 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
10079 {
10080 uint32_t attempts = 100000;
10081 uint32_t i, reg = 0;
10082 int32_t done = -1;
10083
10084 for (i = 0; i < attempts; i++) {
10085 reg = CSR_READ(sc, rw);
10086
10087 if (reg & EERD_DONE) {
10088 done = 0;
10089 break;
10090 }
10091 delay(5);
10092 }
10093
10094 return done;
10095 }
10096
10097 static int
10098 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
10099 uint16_t *data)
10100 {
10101 int i, eerd = 0;
10102 int error = 0;
10103
10104 for (i = 0; i < wordcnt; i++) {
10105 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
10106
10107 CSR_WRITE(sc, WMREG_EERD, eerd);
10108 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
10109 if (error != 0)
10110 break;
10111
10112 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
10113 }
10114
10115 return error;
10116 }
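
/*
 * Example use (a sketch): the three Ethernet address words could be
 * fetched through this path as
 *
 *	uint16_t ea[3];
 *	(void)wm_nvm_read_eerd(sc, NVM_OFF_MACADDR, 3, ea);
 *
 * with each word valid only once EERD_DONE has been polled above.
 */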
10117
10118 /* Flash */
10119
10120 static int
10121 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
10122 {
10123 uint32_t eecd;
10124 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
10125 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
10126 uint8_t sig_byte = 0;
10127
10128 switch (sc->sc_type) {
10129 case WM_T_PCH_SPT:
10130 /*
10131 * In SPT, read from the CTRL_EXT reg instead of accessing the
10132 * sector valid bits from the NVM.
10133 */
10134 *bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
10135 if ((*bank == 0) || (*bank == 1)) {
10136 aprint_error_dev(sc->sc_dev,
10137 "%s: no valid NVM bank present\n",
10138 __func__);
10139 return -1;
10140 } else {
10141 *bank = *bank - 2;
10142 return 0;
10143 }
10144 case WM_T_ICH8:
10145 case WM_T_ICH9:
10146 eecd = CSR_READ(sc, WMREG_EECD);
10147 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
10148 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
10149 return 0;
10150 }
10151 /* FALLTHROUGH */
10152 default:
10153 /* Default to 0 */
10154 *bank = 0;
10155
10156 /* Check bank 0 */
10157 wm_read_ich8_byte(sc, act_offset, &sig_byte);
10158 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10159 *bank = 0;
10160 return 0;
10161 }
10162
10163 /* Check bank 1 */
10164 wm_read_ich8_byte(sc, act_offset + bank1_offset,
10165 &sig_byte);
10166 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10167 *bank = 1;
10168 return 0;
10169 }
10170 }
10171
10172 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
10173 device_xname(sc->sc_dev)));
10174 return -1;
10175 }
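
/*
 * The signature probe above works, roughly, like this: a bank is
 * valid when the byte at word ICH_NVM_SIG_WORD, masked with
 * ICH_NVM_VALID_SIG_MASK, equals ICH_NVM_SIG_VALUE; bank 1 simply
 * lives one flash bank size (in bytes) above bank 0.
 */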
10176
10177 /******************************************************************************
10178 * This function does initial flash setup so that a new read/write/erase cycle
10179 * can be started.
10180 *
10181 * sc - The pointer to the hw structure
10182 ****************************************************************************/
10183 static int32_t
10184 wm_ich8_cycle_init(struct wm_softc *sc)
10185 {
10186 uint16_t hsfsts;
10187 int32_t error = 1;
10188 int32_t i = 0;
10189
10190 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10191
10192 /* Check that the Flash Descriptor Valid bit is set in HW status */
10193 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
10194 return error;
10195 }
10196
10197 /* Clear FCERR and DAEL in HW status by writing 1s */
10199 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
10200
10201 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10202
10203 /*
10204 * Either we should have a hardware SPI cycle-in-progress bit to
10205 * check against in order to start a new cycle, or the FDONE bit
10206 * should be changed in the hardware so that it reads 1 after a
10207 * hardware reset, which could then indicate whether a cycle is in
10208 * progress or has completed. We should also have some software
10209 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
10210 * so that accesses by two threads are serialized, or some way to
10211 * keep two threads from starting a cycle at the same time.
10212 */
10213
10214 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10215 /*
10216 * There is no cycle running at present, so we can start a
10217 * cycle
10218 */
10219
10220 /* Begin by setting Flash Cycle Done. */
10221 hsfsts |= HSFSTS_DONE;
10222 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10223 error = 0;
10224 } else {
10225 /*
10226 * Otherwise, poll for some time so the current cycle has a
10227 * chance to end before giving up.
10228 */
10229 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
10230 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10231 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10232 error = 0;
10233 break;
10234 }
10235 delay(1);
10236 }
10237 if (error == 0) {
10238 /*
10239 * The previous cycle ended in time; now set the
10240 * Flash Cycle Done bit.
10241 */
10242 hsfsts |= HSFSTS_DONE;
10243 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10244 }
10245 }
10246 return error;
10247 }
10248
10249 /******************************************************************************
10250 * This function starts a flash cycle and waits for its completion
10251 *
10252 * sc - The pointer to the hw structure
10253 ****************************************************************************/
10254 static int32_t
10255 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
10256 {
10257 uint16_t hsflctl;
10258 uint16_t hsfsts;
10259 int32_t error = 1;
10260 uint32_t i = 0;
10261
10262 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
10263 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10264 hsflctl |= HSFCTL_GO;
10265 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10266
10267 /* Wait till FDONE bit is set to 1 */
10268 do {
10269 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10270 if (hsfsts & HSFSTS_DONE)
10271 break;
10272 delay(1);
10273 i++;
10274 } while (i < timeout);
10275 if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
10276 error = 0;
10277
10278 return error;
10279 }
10280
10281 /******************************************************************************
10282 * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
10283 *
10284 * sc - The pointer to the hw structure
10285 * index - The index of the byte or word to read.
10286 * size - Size of data to read, 1=byte 2=word, 4=dword
10287 * data - Pointer to the word to store the value read.
10288 *****************************************************************************/
10289 static int32_t
10290 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
10291 uint32_t size, uint32_t *data)
10292 {
10293 uint16_t hsfsts;
10294 uint16_t hsflctl;
10295 uint32_t flash_linear_address;
10296 uint32_t flash_data = 0;
10297 int32_t error = 1;
10298 int32_t count = 0;
10299
10300 if (size < 1 || size > 4 || data == NULL ||
10301 index > ICH_FLASH_LINEAR_ADDR_MASK)
10302 return error;
10303
10304 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
10305 sc->sc_ich8_flash_base;
10306
10307 do {
10308 delay(1);
10309 /* Steps */
10310 error = wm_ich8_cycle_init(sc);
10311 if (error)
10312 break;
10313
10314 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10315 /* BCOUNT holds size - 1: 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes */
10316 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
10317 & HSFCTL_BCOUNT_MASK;
10318 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
10319 if (sc->sc_type == WM_T_PCH_SPT) {
10320 /*
10321 * In SPT, this register is in the LAN memory space, not
10322 * flash. Therefore, only 32-bit access is supported.
10323 */
10324 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
10325 (uint32_t)hsflctl);
10326 } else
10327 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10328
10329 /*
10330 * Write the last 24 bits of index into Flash Linear address
10331 * field in Flash Address
10332 */
10333 /* TODO: TBD maybe check the index against the size of flash */
10334
10335 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
10336
10337 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
10338
10339 /*
10340 * Check if FCERR is set to 1. If so, clear it and retry the
10341 * whole sequence a few more times; otherwise read the data out
10342 * of Flash Data0, least significant byte first.
10344 */
10345 if (error == 0) {
10346 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
10347 if (size == 1)
10348 *data = (uint8_t)(flash_data & 0x000000FF);
10349 else if (size == 2)
10350 *data = (uint16_t)(flash_data & 0x0000FFFF);
10351 else if (size == 4)
10352 *data = (uint32_t)flash_data;
10353 break;
10354 } else {
10355 /*
10356 * If we've gotten here, then things are probably
10357 * completely hosed, but if the error condition is
10358 * detected, it won't hurt to give it another try...
10359 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
10360 */
10361 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10362 if (hsfsts & HSFSTS_ERR) {
10363 /* Repeat for some time before giving up. */
10364 continue;
10365 } else if ((hsfsts & HSFSTS_DONE) == 0)
10366 break;
10367 }
10368 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
10369
10370 return error;
10371 }
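
/*
 * Worked example (a sketch): reading a 16-bit word at flash linear
 * index 0x10 sets BCOUNT to size - 1 = 1 and the cycle type to
 * ICH_CYCLE_READ, writes sc_ich8_flash_base + 0x10 to FADDR, runs
 * the cycle, and on success takes the low 16 bits of FDATA0.
 */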
10372
10373 /******************************************************************************
10374 * Reads a single byte from the NVM using the ICH8 flash access registers.
10375 *
10376 * sc - pointer to wm_hw structure
10377 * index - The index of the byte to read.
10378 * data - Pointer to a byte to store the value read.
10379 *****************************************************************************/
10380 static int32_t
10381 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
10382 {
10383 int32_t status;
10384 uint32_t word = 0;
10385
10386 status = wm_read_ich8_data(sc, index, 1, &word);
10387 if (status == 0)
10388 *data = (uint8_t)word;
10389 else
10390 *data = 0;
10391
10392 return status;
10393 }
10394
10395 /******************************************************************************
10396 * Reads a word from the NVM using the ICH8 flash access registers.
10397 *
10398 * sc - pointer to wm_hw structure
10399 * index - The starting byte index of the word to read.
10400 * data - Pointer to a word to store the value read.
10401 *****************************************************************************/
10402 static int32_t
10403 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
10404 {
10405 int32_t status;
10406 uint32_t word = 0;
10407
10408 status = wm_read_ich8_data(sc, index, 2, &word);
10409 if (status == 0)
10410 *data = (uint16_t)word;
10411 else
10412 *data = 0;
10413
10414 return status;
10415 }
10416
10417 /******************************************************************************
10418 * Reads a dword from the NVM using the ICH8 flash access registers.
10419 *
10420 * sc - pointer to wm_hw structure
10421 * index - The starting byte index of the word to read.
10422 * data - Pointer to a word to store the value read.
10423 *****************************************************************************/
10424 static int32_t
10425 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
10426 {
10427 int32_t status;
10428
10429 status = wm_read_ich8_data(sc, index, 4, data);
10430 return status;
10431 }
10432
10433 /******************************************************************************
10434 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
10435 * register.
10436 *
10437 * sc - Struct containing variables accessed by shared code
10438 * offset - offset of word in the EEPROM to read
10439 * data - word read from the EEPROM
10440 * words - number of words to read
10441 *****************************************************************************/
10442 static int
10443 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
10444 {
10445 int32_t error = 0;
10446 uint32_t flash_bank = 0;
10447 uint32_t act_offset = 0;
10448 uint32_t bank_offset = 0;
10449 uint16_t word = 0;
10450 uint16_t i = 0;
10451
10452 /*
10453 * We need to know which is the valid flash bank. In the event
10454 * that we didn't allocate eeprom_shadow_ram, we may not be
10455 * managing flash_bank. So it cannot be trusted and needs
10456 * to be updated with each read.
10457 */
10458 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10459 if (error) {
10460 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10461 device_xname(sc->sc_dev)));
10462 flash_bank = 0;
10463 }
10464
10465 /*
10466 * If we're on bank 1, adjust the offset by the bank size,
10467 * converted from words to bytes
10468 */
10469 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10470
10471 error = wm_get_swfwhw_semaphore(sc);
10472 if (error) {
10473 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10474 __func__);
10475 return error;
10476 }
10477
10478 for (i = 0; i < words; i++) {
10479 /* The NVM part needs a byte offset, hence * 2 */
10480 act_offset = bank_offset + ((offset + i) * 2);
10481 error = wm_read_ich8_word(sc, act_offset, &word);
10482 if (error) {
10483 aprint_error_dev(sc->sc_dev,
10484 "%s: failed to read NVM\n", __func__);
10485 break;
10486 }
10487 data[i] = word;
10488 }
10489
10490 wm_put_swfwhw_semaphore(sc);
10491 return error;
10492 }
10493
10494 /******************************************************************************
10495 * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
10496 * register.
10497 *
10498 * sc - Struct containing variables accessed by shared code
10499 * offset - offset of word in the EEPROM to read
10500 * data - word read from the EEPROM
10501 * words - number of words to read
10502 *****************************************************************************/
10503 static int
10504 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
10505 {
10506 int32_t error = 0;
10507 uint32_t flash_bank = 0;
10508 uint32_t act_offset = 0;
10509 uint32_t bank_offset = 0;
10510 uint32_t dword = 0;
10511 uint16_t i = 0;
10512
10513 /*
10514 * We need to know which is the valid flash bank. In the event
10515 * that we didn't allocate eeprom_shadow_ram, we may not be
10516 * managing flash_bank. So it cannot be trusted and needs
10517 * to be updated with each read.
10518 */
10519 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10520 if (error) {
10521 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10522 device_xname(sc->sc_dev)));
10523 flash_bank = 0;
10524 }
10525
10526 /*
10527 * If we're on bank 1, adjust the offset by the bank size,
10528 * converted from words to bytes
10529 */
10530 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10531
10532 error = wm_get_swfwhw_semaphore(sc);
10533 if (error) {
10534 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10535 __func__);
10536 return error;
10537 }
10538
10539 for (i = 0; i < words; i++) {
10540 /* The NVM part needs a byte offset, hence * 2 */
10541 act_offset = bank_offset + ((offset + i) * 2);
10542 /* but we must read dword aligned, so mask ... */
10543 error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
10544 if (error) {
10545 aprint_error_dev(sc->sc_dev,
10546 "%s: failed to read NVM\n", __func__);
10547 break;
10548 }
10549 /* ... and pick out low or high word */
10550 if ((act_offset & 0x2) == 0)
10551 data[i] = (uint16_t)(dword & 0xFFFF);
10552 else
10553 data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
10554 }
10555
10556 wm_put_swfwhw_semaphore(sc);
10557 return error;
10558 }
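
/*
 * Example of the alignment dance above (a sketch): for act_offset
 * 0x36, the dword at byte offset 0x34 is read and, since bit 1 of
 * the offset is set, the high 16 bits are returned as the word.
 */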
10559
10560 /* iNVM */
10561
10562 static int
10563 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
10564 {
10565 int32_t rv = 0;
10566 uint32_t invm_dword;
10567 uint16_t i;
10568 uint8_t record_type, word_address;
10569
10570 for (i = 0; i < INVM_SIZE; i++) {
10571 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
10572 /* Get record type */
10573 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
10574 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
10575 break;
10576 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
10577 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
10578 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
10579 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
10580 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
10581 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
10582 if (word_address == address) {
10583 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
10584 rv = 0;
10585 break;
10586 }
10587 }
10588 }
10589
10590 return rv;
10591 }
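
/*
 * Layout sketch, per the decoding above: the iNVM is a flat array of
 * autoload dwords; each record's type decides how many extra dwords
 * to skip, and a WORD_AUTOLOAD record whose embedded word address
 * matches the requested one carries the data word itself.
 */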
10592
10593 static int
10594 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
10595 {
10596 int rv = 0;
10597 int i;
10598
10599 for (i = 0; i < words; i++) {
10600 switch (offset + i) {
10601 case NVM_OFF_MACADDR:
10602 case NVM_OFF_MACADDR1:
10603 case NVM_OFF_MACADDR2:
10604 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
10605 if (rv != 0) {
10606 data[i] = 0xffff;
10607 rv = -1;
10608 }
10609 break;
10610 case NVM_OFF_CFG2:
10611 rv = wm_nvm_read_word_invm(sc, offset, data);
10612 if (rv != 0) {
10613 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
10614 rv = 0;
10615 }
10616 break;
10617 case NVM_OFF_CFG4:
10618 rv = wm_nvm_read_word_invm(sc, offset, data);
10619 if (rv != 0) {
10620 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
10621 rv = 0;
10622 }
10623 break;
10624 case NVM_OFF_LED_1_CFG:
10625 rv = wm_nvm_read_word_invm(sc, offset, data);
10626 if (rv != 0) {
10627 *data = NVM_LED_1_CFG_DEFAULT_I211;
10628 rv = 0;
10629 }
10630 break;
10631 case NVM_OFF_LED_0_2_CFG:
10632 rv = wm_nvm_read_word_invm(sc, offset, data);
10633 if (rv != 0) {
10634 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
10635 rv = 0;
10636 }
10637 break;
10638 case NVM_OFF_ID_LED_SETTINGS:
10639 rv = wm_nvm_read_word_invm(sc, offset, data);
10640 if (rv != 0) {
10641 *data = ID_LED_RESERVED_FFFF;
10642 rv = 0;
10643 }
10644 break;
10645 default:
10646 DPRINTF(WM_DEBUG_NVM,
10647 ("NVM word 0x%02x is not mapped.\n", offset));
10648 *data = NVM_RESERVED_WORD;
10649 break;
10650 }
10651 }
10652
10653 return rv;
10654 }
10655
10656 /* Locking, NVM type detection, checksum/version checks, and reading */
10657
10658 /*
10659 * wm_nvm_acquire:
10660 *
10661 * Perform the EEPROM handshake required on some chips.
10662 */
10663 static int
10664 wm_nvm_acquire(struct wm_softc *sc)
10665 {
10666 uint32_t reg;
10667 int x;
10668 int ret = 0;
10669
10670 /* Flash-based NVM needs no handshake; always succeed */
10671 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
10672 return 0;
10673
10674 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
10675 ret = wm_get_swfwhw_semaphore(sc);
10676 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
10677 /* This will also do wm_get_swsm_semaphore() if needed */
10678 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
10679 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
10680 ret = wm_get_swsm_semaphore(sc);
10681 }
10682
10683 if (ret) {
10684 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10685 __func__);
10686 return 1;
10687 }
10688
10689 if (sc->sc_flags & WM_F_LOCK_EECD) {
10690 reg = CSR_READ(sc, WMREG_EECD);
10691
10692 /* Request EEPROM access. */
10693 reg |= EECD_EE_REQ;
10694 CSR_WRITE(sc, WMREG_EECD, reg);
10695
10696 /* ..and wait for it to be granted. */
10697 for (x = 0; x < 1000; x++) {
10698 reg = CSR_READ(sc, WMREG_EECD);
10699 if (reg & EECD_EE_GNT)
10700 break;
10701 delay(5);
10702 }
10703 if ((reg & EECD_EE_GNT) == 0) {
10704 aprint_error_dev(sc->sc_dev,
10705 "could not acquire EEPROM GNT\n");
10706 reg &= ~EECD_EE_REQ;
10707 CSR_WRITE(sc, WMREG_EECD, reg);
10708 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10709 wm_put_swfwhw_semaphore(sc);
10710 if (sc->sc_flags & WM_F_LOCK_SWFW)
10711 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10712 else if (sc->sc_flags & WM_F_LOCK_SWSM)
10713 wm_put_swsm_semaphore(sc);
10714 return 1;
10715 }
10716 }
10717
10718 return 0;
10719 }
10720
10721 /*
10722 * wm_nvm_release:
10723 *
10724 * Release the EEPROM mutex.
10725 */
10726 static void
10727 wm_nvm_release(struct wm_softc *sc)
10728 {
10729 uint32_t reg;
10730
10731 /* Flash-based NVM needs no handshake; nothing to release */
10732 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
10733 return;
10734
10735 if (sc->sc_flags & WM_F_LOCK_EECD) {
10736 reg = CSR_READ(sc, WMREG_EECD);
10737 reg &= ~EECD_EE_REQ;
10738 CSR_WRITE(sc, WMREG_EECD, reg);
10739 }
10740
10741 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10742 wm_put_swfwhw_semaphore(sc);
10743 if (sc->sc_flags & WM_F_LOCK_SWFW)
10744 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10745 else if (sc->sc_flags & WM_F_LOCK_SWSM)
10746 wm_put_swsm_semaphore(sc);
10747 }
10748
10749 static int
10750 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
10751 {
10752 uint32_t eecd = 0;
10753
10754 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
10755 || sc->sc_type == WM_T_82583) {
10756 eecd = CSR_READ(sc, WMREG_EECD);
10757
10758 /* Isolate bits 15 & 16 */
10759 eecd = ((eecd >> 15) & 0x03);
10760
10761 /* If both bits are set, device is Flash type */
10762 if (eecd == 0x03)
10763 return 0;
10764 }
10765 return 1;
10766 }
10767
10768 static int
10769 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
10770 {
10771 uint32_t eec;
10772
10773 eec = CSR_READ(sc, WMREG_EEC);
10774 if ((eec & EEC_FLASH_DETECTED) != 0)
10775 return 1;
10776
10777 return 0;
10778 }
10779
10780 /*
10781 * wm_nvm_validate_checksum
10782 *
10783 * The checksum is defined as the sum of the first 64 (16 bit) words.
10784 */
10785 static int
10786 wm_nvm_validate_checksum(struct wm_softc *sc)
10787 {
10788 uint16_t checksum;
10789 uint16_t eeprom_data;
10790 #ifdef WM_DEBUG
10791 uint16_t csum_wordaddr, valid_checksum;
10792 #endif
10793 int i;
10794
10795 checksum = 0;
10796
10797 /* Don't check for I211 */
10798 if (sc->sc_type == WM_T_I211)
10799 return 0;
10800
10801 #ifdef WM_DEBUG
10802 if (sc->sc_type == WM_T_PCH_LPT) {
10803 csum_wordaddr = NVM_OFF_COMPAT;
10804 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
10805 } else {
10806 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
10807 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
10808 }
10809
10810 /* Dump EEPROM image for debug */
10811 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10812 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10813 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
10814 /* XXX PCH_SPT? */
10815 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
10816 if ((eeprom_data & valid_checksum) == 0) {
10817 DPRINTF(WM_DEBUG_NVM,
10818 ("%s: NVM need to be updated (%04x != %04x)\n",
10819 device_xname(sc->sc_dev), eeprom_data,
10820 valid_checksum));
10821 }
10822 }
10823
10824 if ((wm_debug & WM_DEBUG_NVM) != 0) {
10825 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
10826 for (i = 0; i < NVM_SIZE; i++) {
10827 if (wm_nvm_read(sc, i, 1, &eeprom_data))
10828 printf("XXXX ");
10829 else
10830 printf("%04hx ", eeprom_data);
10831 if (i % 8 == 7)
10832 printf("\n");
10833 }
10834 }
10835
10836 #endif /* WM_DEBUG */
10837
10838 for (i = 0; i < NVM_SIZE; i++) {
10839 if (wm_nvm_read(sc, i, 1, &eeprom_data))
10840 return 1;
10841 checksum += eeprom_data;
10842 }
10843
10844 if (checksum != (uint16_t) NVM_CHECKSUM) {
10845 #ifdef WM_DEBUG
10846 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
10847 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
10848 #endif
10849 }
10850
10851 return 0;
10852 }
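
/*
 * In other words, the words (including the checksum word itself)
 * must sum, modulo 2^16, to the fixed NVM_CHECKSUM constant; e.g. if
 * the other words sum to S, a correctly provisioned image stores
 * (uint16_t)(NVM_CHECKSUM - S) in the checksum word. (A sketch;
 * note that a mismatch above is only logged, never treated as fatal.)
 */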
10853
10854 static void
10855 wm_nvm_version_invm(struct wm_softc *sc)
10856 {
10857 uint32_t dword;
10858
10859 /*
10860 * Linux's code to decode the version is very strange, so we don't
10861 * follow that algorithm and just use word 61 as the document
10862 * describes. Perhaps it's not perfect, though...
10863 *
10864 * Example:
10865 *
10866 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
10867 */
10868 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
10869 dword = __SHIFTOUT(dword, INVM_VER_1);
10870 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
10871 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
10872 }
10873
10874 static void
10875 wm_nvm_version(struct wm_softc *sc)
10876 {
10877 uint16_t major, minor, build, patch;
10878 uint16_t uid0, uid1;
10879 uint16_t nvm_data;
10880 uint16_t off;
10881 bool check_version = false;
10882 bool check_optionrom = false;
10883 bool have_build = false;
10884
10885 /*
10886 * Version format:
10887 *
10888 * XYYZ
10889 * X0YZ
10890 * X0YY
10891 *
10892 * Example:
10893 *
10894 * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10)
10895 * 82571 0x50a6 5.10.6?
10896 * 82572 0x506a 5.6.10?
10897 * 82572EI 0x5069 5.6.9?
10898 * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4)
10899 * 0x2013 2.1.3?
10900 * 82583 0x10a0 1.10.0? (document says it's the default value)
10901 */
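/*
 * Worked decode (a sketch): 0x50a2 with a build digit present splits
 * into major 0x5, minor 0x0a, build 0x2; the decimal fixup below
 * turns minor 0x0a into 10, giving "5.10.2".
 */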
10902 wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
10903 switch (sc->sc_type) {
10904 case WM_T_82571:
10905 case WM_T_82572:
10906 case WM_T_82574:
10907 case WM_T_82583:
10908 check_version = true;
10909 check_optionrom = true;
10910 have_build = true;
10911 break;
10912 case WM_T_82575:
10913 case WM_T_82576:
10914 case WM_T_82580:
10915 if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
10916 check_version = true;
10917 break;
10918 case WM_T_I211:
10919 wm_nvm_version_invm(sc);
10920 goto printver;
10921 case WM_T_I210:
10922 if (!wm_nvm_get_flash_presence_i210(sc)) {
10923 wm_nvm_version_invm(sc);
10924 goto printver;
10925 }
10926 /* FALLTHROUGH */
10927 case WM_T_I350:
10928 case WM_T_I354:
10929 check_version = true;
10930 check_optionrom = true;
10931 break;
10932 default:
10933 return;
10934 }
10935 if (check_version) {
10936 wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
10937 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
10938 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
10939 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
10940 build = nvm_data & NVM_BUILD_MASK;
10941 have_build = true;
10942 } else
10943 minor = nvm_data & 0x00ff;
10944
10945 /* Decimal */
10946 minor = (minor / 16) * 10 + (minor % 16);
10947 sc->sc_nvm_ver_major = major;
10948 sc->sc_nvm_ver_minor = minor;
10949
10950 printver:
10951 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
10952 sc->sc_nvm_ver_minor);
10953 if (have_build) {
10954 sc->sc_nvm_ver_build = build;
10955 aprint_verbose(".%d", build);
10956 }
10957 }
10958 if (check_optionrom) {
10959 wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
10960 /* Option ROM Version */
10961 if ((off != 0x0000) && (off != 0xffff)) {
10962 off += NVM_COMBO_VER_OFF;
10963 wm_nvm_read(sc, off + 1, 1, &uid1);
10964 wm_nvm_read(sc, off, 1, &uid0);
10965 if ((uid0 != 0) && (uid0 != 0xffff)
10966 && (uid1 != 0) && (uid1 != 0xffff)) {
10967 /* 16bits */
10968 major = uid0 >> 8;
10969 build = (uid0 << 8) | (uid1 >> 8);
10970 patch = uid1 & 0x00ff;
10971 aprint_verbose(", option ROM Version %d.%d.%d",
10972 major, build, patch);
10973 }
10974 }
10975 }
10976
10977 wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
10978 aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
10979 }
10980
10981 /*
10982 * wm_nvm_read:
10983 *
10984 * Read data from the serial EEPROM.
10985 */
10986 static int
10987 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10988 {
10989 int rv;
10990
10991 if (sc->sc_flags & WM_F_EEPROM_INVALID)
10992 return 1;
10993
10994 if (wm_nvm_acquire(sc))
10995 return 1;
10996
10997 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10998 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10999 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
11000 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
11001 else if (sc->sc_type == WM_T_PCH_SPT)
11002 rv = wm_nvm_read_spt(sc, word, wordcnt, data);
11003 else if (sc->sc_flags & WM_F_EEPROM_INVM)
11004 rv = wm_nvm_read_invm(sc, word, wordcnt, data);
11005 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
11006 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
11007 else if (sc->sc_flags & WM_F_EEPROM_SPI)
11008 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
11009 else
11010 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
11011
11012 wm_nvm_release(sc);
11013 return rv;
11014 }
11015
11016 /*
11017 * Hardware semaphores.
11018 * Very complex...
11019 */
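
/*
 * A rough map of which flag takes which lock, per the code below:
 *
 *	WM_F_LOCK_EECD   -> EECD EE_REQ/EE_GNT handshake
 *	WM_F_LOCK_SWSM   -> SWSM SMBI, then SWESMBI
 *	WM_F_LOCK_SWFW   -> SW_FW_SYNC bits, guarded by the SWSM lock
 *	WM_F_LOCK_EXTCNF -> EXTCNFCTR MDIO software ownership bit
 */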
11020
11021 static int
11022 wm_get_swsm_semaphore(struct wm_softc *sc)
11023 {
11024 int32_t timeout;
11025 uint32_t swsm;
11026
11027 if (sc->sc_flags & WM_F_LOCK_SWSM) {
11028 /* Get the SW semaphore. */
11029 timeout = sc->sc_nvm_wordsize + 1;
11030 while (timeout) {
11031 swsm = CSR_READ(sc, WMREG_SWSM);
11032
11033 if ((swsm & SWSM_SMBI) == 0)
11034 break;
11035
11036 delay(50);
11037 timeout--;
11038 }
11039
11040 if (timeout == 0) {
11041 aprint_error_dev(sc->sc_dev,
11042 "could not acquire SWSM SMBI\n");
11043 return 1;
11044 }
11045 }
11046
11047 /* Get the FW semaphore. */
11048 timeout = sc->sc_nvm_wordsize + 1;
11049 while (timeout) {
11050 swsm = CSR_READ(sc, WMREG_SWSM);
11051 swsm |= SWSM_SWESMBI;
11052 CSR_WRITE(sc, WMREG_SWSM, swsm);
11053 /* If we managed to set the bit we got the semaphore. */
11054 swsm = CSR_READ(sc, WMREG_SWSM);
11055 if (swsm & SWSM_SWESMBI)
11056 break;
11057
11058 delay(50);
11059 timeout--;
11060 }
11061
11062 if (timeout == 0) {
11063 aprint_error_dev(sc->sc_dev,
11064 "could not acquire SWSM SWESMBI\n");
11065 /* Release semaphores */
11066 wm_put_swsm_semaphore(sc);
11067 return 1;
11068 }
11069 return 0;
11070 }
11071
11072 static void
11073 wm_put_swsm_semaphore(struct wm_softc *sc)
11074 {
11075 uint32_t swsm;
11076
11077 swsm = CSR_READ(sc, WMREG_SWSM);
11078 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
11079 CSR_WRITE(sc, WMREG_SWSM, swsm);
11080 }
11081
11082 static int
11083 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
11084 {
11085 uint32_t swfw_sync;
11086 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
11087 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
11088 int timeout;
11089
11090 for (timeout = 0; timeout < 200; timeout++) {
11091 if (sc->sc_flags & WM_F_LOCK_SWSM) {
11092 if (wm_get_swsm_semaphore(sc)) {
11093 aprint_error_dev(sc->sc_dev,
11094 "%s: failed to get semaphore\n",
11095 __func__);
11096 return 1;
11097 }
11098 }
11099 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
11100 if ((swfw_sync & (swmask | fwmask)) == 0) {
11101 swfw_sync |= swmask;
11102 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
11103 if (sc->sc_flags & WM_F_LOCK_SWSM)
11104 wm_put_swsm_semaphore(sc);
11105 return 0;
11106 }
11107 if (sc->sc_flags & WM_F_LOCK_SWSM)
11108 wm_put_swsm_semaphore(sc);
11109 delay(5000);
11110 }
11111 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
11112 device_xname(sc->sc_dev), mask, swfw_sync);
11113 return 1;
11114 }
11115
11116 static void
11117 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
11118 {
11119 uint32_t swfw_sync;
11120
11121 if (sc->sc_flags & WM_F_LOCK_SWSM) {
11122 while (wm_get_swsm_semaphore(sc) != 0)
11123 continue;
11124 }
11125 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
11126 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
11127 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
11128 if (sc->sc_flags & WM_F_LOCK_SWSM)
11129 wm_put_swsm_semaphore(sc);
11130 }
11131
11132 static int
11133 wm_get_swfwhw_semaphore(struct wm_softc *sc)
11134 {
11135 uint32_t ext_ctrl;
11136 int timeout;
11137
11138 for (timeout = 0; timeout < 200; timeout++) {
11139 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11140 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
11141 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11142
11143 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11144 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
11145 return 0;
11146 delay(5000);
11147 }
11148 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
11149 device_xname(sc->sc_dev), ext_ctrl);
11150 return 1;
11151 }
11152
11153 static void
11154 wm_put_swfwhw_semaphore(struct wm_softc *sc)
11155 {
11156 uint32_t ext_ctrl;
11157
11158 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11159 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11160 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11161 }
11162
11163 static int
11164 wm_get_hw_semaphore_82573(struct wm_softc *sc)
11165 {
11166 int i = 0;
11167 uint32_t reg;
11168
11169 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11170 do {
11171 CSR_WRITE(sc, WMREG_EXTCNFCTR,
11172 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
11173 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11174 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
11175 break;
11176 delay(2*1000);
11177 i++;
11178 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
11179
11180 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
11181 wm_put_hw_semaphore_82573(sc);
11182 log(LOG_ERR, "%s: Driver can't access the PHY\n",
11183 device_xname(sc->sc_dev));
11184 return -1;
11185 }
11186
11187 return 0;
11188 }
11189
11190 static void
11191 wm_put_hw_semaphore_82573(struct wm_softc *sc)
11192 {
11193 uint32_t reg;
11194
11195 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11196 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11197 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11198 }
11199
11200 /*
11201 * Management mode and power management related subroutines.
11202 * BMC, AMT, suspend/resume and EEE.
11203 */
11204
11205 #ifdef WM_WOL
11206 static int
11207 wm_check_mng_mode(struct wm_softc *sc)
11208 {
11209 int rv;
11210
11211 switch (sc->sc_type) {
11212 case WM_T_ICH8:
11213 case WM_T_ICH9:
11214 case WM_T_ICH10:
11215 case WM_T_PCH:
11216 case WM_T_PCH2:
11217 case WM_T_PCH_LPT:
11218 case WM_T_PCH_SPT:
11219 rv = wm_check_mng_mode_ich8lan(sc);
11220 break;
11221 case WM_T_82574:
11222 case WM_T_82583:
11223 rv = wm_check_mng_mode_82574(sc);
11224 break;
11225 case WM_T_82571:
11226 case WM_T_82572:
11227 case WM_T_82573:
11228 case WM_T_80003:
11229 rv = wm_check_mng_mode_generic(sc);
11230 break;
11231 default:
11232 /* nothing to do */
11233 rv = 0;
11234 break;
11235 }
11236
11237 return rv;
11238 }
11239
11240 static int
11241 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
11242 {
11243 uint32_t fwsm;
11244
11245 fwsm = CSR_READ(sc, WMREG_FWSM);
11246
11247 if (((fwsm & FWSM_FW_VALID) != 0)
11248 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11249 return 1;
11250
11251 return 0;
11252 }
11253
11254 static int
11255 wm_check_mng_mode_82574(struct wm_softc *sc)
11256 {
11257 uint16_t data;
11258
11259 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11260
11261 if ((data & NVM_CFG2_MNGM_MASK) != 0)
11262 return 1;
11263
11264 return 0;
11265 }
11266
11267 static int
11268 wm_check_mng_mode_generic(struct wm_softc *sc)
11269 {
11270 uint32_t fwsm;
11271
11272 fwsm = CSR_READ(sc, WMREG_FWSM);
11273
11274 if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
11275 return 1;
11276
11277 return 0;
11278 }
11279 #endif /* WM_WOL */
11280
11281 static int
11282 wm_enable_mng_pass_thru(struct wm_softc *sc)
11283 {
11284 uint32_t manc, fwsm, factps;
11285
11286 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
11287 return 0;
11288
11289 manc = CSR_READ(sc, WMREG_MANC);
11290
11291 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
11292 device_xname(sc->sc_dev), manc));
11293 if ((manc & MANC_RECV_TCO_EN) == 0)
11294 return 0;
11295
11296 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
11297 fwsm = CSR_READ(sc, WMREG_FWSM);
11298 factps = CSR_READ(sc, WMREG_FACTPS);
11299 if (((factps & FACTPS_MNGCG) == 0)
11300 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11301 return 1;
11302 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
11303 uint16_t data;
11304
11305 factps = CSR_READ(sc, WMREG_FACTPS);
11306 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11307 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
11308 device_xname(sc->sc_dev), factps, data));
11309 if (((factps & FACTPS_MNGCG) == 0)
11310 && ((data & NVM_CFG2_MNGM_MASK)
11311 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
11312 return 1;
11313 } else if (((manc & MANC_SMBUS_EN) != 0)
11314 && ((manc & MANC_ASF_EN) == 0))
11315 return 1;
11316
11317 return 0;
11318 }
11319
11320 static bool
11321 wm_phy_resetisblocked(struct wm_softc *sc)
11322 {
11323 bool blocked = false;
11324 uint32_t reg;
11325 int i = 0;
11326
11327 switch (sc->sc_type) {
11328 case WM_T_ICH8:
11329 case WM_T_ICH9:
11330 case WM_T_ICH10:
11331 case WM_T_PCH:
11332 case WM_T_PCH2:
11333 case WM_T_PCH_LPT:
11334 case WM_T_PCH_SPT:
11335 do {
11336 reg = CSR_READ(sc, WMREG_FWSM);
11337 if ((reg & FWSM_RSPCIPHY) == 0) {
11338 blocked = true;
11339 delay(10*1000);
11340 continue;
11341 }
11342 blocked = false;
11343 } while (blocked && (i++ < 10));
11344 return blocked;
11346 case WM_T_82571:
11347 case WM_T_82572:
11348 case WM_T_82573:
11349 case WM_T_82574:
11350 case WM_T_82583:
11351 case WM_T_80003:
11352 reg = CSR_READ(sc, WMREG_MANC);
11353 return ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0);
11358 default:
11359 /* no problem */
11360 break;
11361 }
11362
11363 return false;
11364 }
11365
11366 static void
11367 wm_get_hw_control(struct wm_softc *sc)
11368 {
11369 uint32_t reg;
11370
11371 switch (sc->sc_type) {
11372 case WM_T_82573:
11373 reg = CSR_READ(sc, WMREG_SWSM);
11374 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
11375 break;
11376 case WM_T_82571:
11377 case WM_T_82572:
11378 case WM_T_82574:
11379 case WM_T_82583:
11380 case WM_T_80003:
11381 case WM_T_ICH8:
11382 case WM_T_ICH9:
11383 case WM_T_ICH10:
11384 case WM_T_PCH:
11385 case WM_T_PCH2:
11386 case WM_T_PCH_LPT:
11387 case WM_T_PCH_SPT:
11388 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11389 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
11390 break;
11391 default:
11392 break;
11393 }
11394 }
11395
11396 static void
11397 wm_release_hw_control(struct wm_softc *sc)
11398 {
11399 uint32_t reg;
11400
11401 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
11402 return;
11403
11404 if (sc->sc_type == WM_T_82573) {
11405 reg = CSR_READ(sc, WMREG_SWSM);
11406 reg &= ~SWSM_DRV_LOAD;
11407 CSR_WRITE(sc, WMREG_SWSM, reg);
11408 } else {
11409 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11410 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
11411 }
11412 }
11413
11414 static void
11415 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
11416 {
11417 uint32_t reg;
11418
11419 if (sc->sc_type < WM_T_PCH2)
11420 return;
11421
11422 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11423
11424 if (gate)
11425 reg |= EXTCNFCTR_GATE_PHY_CFG;
11426 else
11427 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
11428
11429 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11430 }
11431
11432 static void
11433 wm_smbustopci(struct wm_softc *sc)
11434 {
11435 uint32_t fwsm, reg;
11436
11437 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
11438 wm_gate_hw_phy_config_ich8lan(sc, true);
11439
11440 /* Acquire semaphore */
11441 wm_get_swfwhw_semaphore(sc);
11442
11443 fwsm = CSR_READ(sc, WMREG_FWSM);
11444 if (((fwsm & FWSM_FW_VALID) == 0)
11445 && ((wm_phy_resetisblocked(sc) == false))) {
11446 if (sc->sc_type >= WM_T_PCH_LPT) {
11447 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11448 reg |= CTRL_EXT_FORCE_SMBUS;
11449 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11450 CSR_WRITE_FLUSH(sc);
11451 delay(50*1000);
11452 }
11453
11454 /* Toggle LANPHYPC */
11455 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
11456 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
11457 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11458 CSR_WRITE_FLUSH(sc);
11459 delay(10);
11460 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
11461 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11462 CSR_WRITE_FLUSH(sc);
11463 delay(50*1000);
11464
11465 if (sc->sc_type >= WM_T_PCH_LPT) {
11466 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11467 reg &= ~CTRL_EXT_FORCE_SMBUS;
11468 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11469 }
11470 }
11471
11472 /* Release semaphore */
11473 wm_put_swfwhw_semaphore(sc);
11474
11475 /*
11476 * Ungate automatic PHY configuration by hardware on non-managed 82579
11477 */
11478 if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0))
11479 wm_gate_hw_phy_config_ich8lan(sc, false);
11480 }
11481
11482 static void
11483 wm_init_manageability(struct wm_softc *sc)
11484 {
11485
11486 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
11487 device_xname(sc->sc_dev), __func__));
11488 if (sc->sc_flags & WM_F_HAS_MANAGE) {
11489 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
11490 uint32_t manc = CSR_READ(sc, WMREG_MANC);
11491
11492 /* Disable hardware interception of ARP */
11493 manc &= ~MANC_ARP_EN;
11494
11495 /* Enable receiving management packets to the host */
11496 if (sc->sc_type >= WM_T_82571) {
11497 manc |= MANC_EN_MNG2HOST;
11498 manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
11499 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
11500 }
11501
11502 CSR_WRITE(sc, WMREG_MANC, manc);
11503 }
11504 }
11505
static void
wm_release_manageability(struct wm_softc *sc)
{

	if (sc->sc_flags & WM_F_HAS_MANAGE) {
		uint32_t manc = CSR_READ(sc, WMREG_MANC);

		manc |= MANC_ARP_EN;
		if (sc->sc_type >= WM_T_82571)
			manc &= ~MANC_EN_MNG2HOST;

		CSR_WRITE(sc, WMREG_MANC, manc);
	}
}

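/*
 * Record the chip's manageability/wakeup capabilities (AMT, ARC
 * subsystem, ASF firmware, management pass-through) in sc_flags so
 * the reset and WOL code can test for them later.
 */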
static void
wm_get_wakeup(struct wm_softc *sc)
{

	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82583:
		sc->sc_flags |= WM_F_HAS_AMT;
		/* FALLTHROUGH */
	case WM_T_80003:
	case WM_T_82541:
	case WM_T_82547:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_I350:
	case WM_T_I354:
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
		sc->sc_flags |= WM_F_HAS_AMT;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	default:
		break;
	}

	/* 1: HAS_MANAGE */
	if (wm_enable_mng_pass_thru(sc) != 0)
		sc->sc_flags |= WM_F_HAS_MANAGE;

#ifdef WM_DEBUG
	printf("\n");
	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
		printf("HAS_AMT,");
	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
		printf("ARC_SUBSYS_VALID,");
	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
		printf("ASF_FIRMWARE_PRES,");
	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
		printf("HAS_MANAGE,");
	printf("\n");
#endif
	/*
	 * Note that the WOL flag is set only after the EEPROM machinery
	 * has been reset.
	 */
}

#ifdef WM_WOL
/* WOL in the newer chipset interfaces (pchlan) */
static void
wm_enable_phy_wakeup(struct wm_softc *sc)
{
	/* XXX Not implemented yet; the disabled block lists the steps. */
#if 0
	uint16_t preg;

	/* Copy MAC RARs to PHY RARs */

	/* Copy MAC MTA to PHY MTA */

	/* Configure PHY Rx Control register */

	/* Enable PHY wakeup in MAC register */

	/* Configure and enable PHY wakeup in PHY registers */

	/* Activate PHY wakeup */

	/* XXX */
#endif
}

/* Power down workaround on D3 */
static void
wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	for (i = 0; i < 2; i++) {
		/* Disable link */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (sc->sc_type == WM_T_ICH8)
			wm_gig_downshift_workaround_ich8lan(sc);

		/* Write VR power-down enable */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);

		/* Read it back and test */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
			break;

		/* Issue PHY reset and repeat at most one more time */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	}
}

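/*
 * Arm the chip for wake-on-LAN: advertise the wakeup capability,
 * apply the ICH/PCH quirks (no gigabit during WOL), enable the magic
 * packet filter and finally set PME_EN in the PCI power management
 * capability.
 */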
static void
wm_enable_wakeup(struct wm_softc *sc)
{
	uint32_t reg, pmreg;
	pcireg_t pmode;

	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;

	/* Advertise the wakeup capability */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
	    | CTRL_SWDPIN(3));
	CSR_WRITE(sc, WMREG_WUC, WUC_APME);

	/* ICH workaround */
	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
		/* Disable gig during WOL */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
		if (sc->sc_type == WM_T_PCH)
			wm_gmii_reset(sc);

		/* Power down workaround */
		if (sc->sc_phytype == WMPHY_82577) {
			struct mii_softc *child;

			/* Assume that the PHY is copper */
			child = LIST_FIRST(&sc->sc_mii.mii_phys);
			if (child->mii_mpd_rev <= 2)
				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
				    (768 << 5) | 25, 0x0444); /* magic num */
		}
		break;
	default:
		break;
	}

	/* Keep the laser running on fiber adapters */
	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_SWDPIN(3);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
	}

	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
#if 0 /* for the multicast packet */
	reg |= WUFC_MC;
	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
#endif

	if (sc->sc_type == WM_T_PCH) {
		wm_enable_phy_wakeup(sc);
	} else {
		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
		CSR_WRITE(sc, WMREG_WUFC, reg);
	}

	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2))
	    && (sc->sc_phytype == WMPHY_IGP_3))
		wm_igp3_phy_powerdown_workaround_ich8lan(sc);

	/* Request PME */
	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
#if 0
	/* Disable WOL */
	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
#else
	/* For WOL */
	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
#endif
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
}
#endif /* WM_WOL */

/* LPLU (Low Power Link Up) */

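/*
 * Disable LPLU while in D0 so the link is not renegotiated down to a
 * lower speed just to save power.  The generic variant clears the bits
 * in the MAC's PHY_CTRL register; on PCH the setting lives in the HV
 * PHY's OEM bits instead.
 */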
static void
wm_lplu_d0_disable(struct wm_softc *sc)
{
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_PHY_CTRL);
	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
}

static void
wm_lplu_d0_disable_pch(struct wm_softc *sc)
{
	uint32_t reg;

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
	reg |= HV_OEM_BITS_ANEGNOW;
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
}

/* EEE (Energy Efficient Ethernet, IEEE 802.3az) */

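/*
 * Enable or disable EEE on I350-class chips according to WM_F_EEE:
 * IPCNFG controls which speeds advertise EEE, EEER turns the TX/RX
 * low-power-idle machinery (and flow control on LPI) on or off.
 */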
static void
wm_set_eee_i350(struct wm_softc *sc)
{
	uint32_t ipcnfg, eeer;

	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
	eeer = CSR_READ(sc, WMREG_EEER);

	if ((sc->sc_flags & WM_F_EEE) != 0) {
		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC);
	} else {
		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		ipcnfg &= ~IPCNFG_10BASE_TE;
		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC);
	}

	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
	CSR_WRITE(sc, WMREG_EEER, eeer);
	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
}

/*
 * Workarounds (mainly PHY related).
 * Most of the PHY workarounds live in the PHY drivers themselves.
 */

/* Workaround for 82566 Kumeran PCS lock loss */
static void
wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
{
#if 0
	int miistatus, active, i;
	int reg;

	miistatus = sc->sc_mii.mii_media_status;

	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
		return;

	active = sc->sc_mii.mii_media_active;

	/* Nothing to do unless the link is running at 1Gbps */
	if (IFM_SUBTYPE(active) != IFM_1000_T)
		return;

	for (i = 0; i < 10; i++) {
		/* Read twice */
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
			goto out; /* GOOD! */

		/* Reset the PHY */
		wm_gmii_reset(sc);
		delay(5*1000);
	}

	/* Disable GigE link negotiation */
	reg = CSR_READ(sc, WMREG_PHY_CTRL);
	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

	/*
	 * Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers.
	 */
	wm_gig_downshift_workaround_ich8lan(sc);

out:
	return;
#endif
}

/* Workaround for WOL from S5 not working */
static void
wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
{
	uint16_t kmrn_reg;

	/* Only for igp3 */
	if (sc->sc_phytype == WMPHY_IGP_3) {
		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
	}
}

/*
 * Workaround for pch's PHYs
 * XXX should be moved to new PHY driver?
 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{
	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */

	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		/* PCH rev. < 3 */
		if (sc->sc_rev < 3) {
			/* XXX 6 bit shift? Why? Is it page2? */
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
			    0x66c0);
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
			    0xffff);
		}

		/* XXX phy rev. < 2 */
	}

	/* Select page 0 */

	/* XXX acquire semaphore */
	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	/* XXX release semaphore */

	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is link, so that K1 is disabled if the link runs at 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}

static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	wm_set_mdio_slow_mode_hv(sc);
}

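/*
 * K1 is a power-save state of the MAC/PHY (Kumeran) interconnect.
 * Force it off while a gigabit link is up, applying the link stall
 * fix, and restore the NVM-configured setting once the link is down.
 * XXX description inferred from the code and Intel's e1000 drivers.
 */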
static void
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	/* XXX acquire semaphore */

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);

	/* XXX release semaphore */
}

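/*
 * Put the HV (82577/82578) PHY's MDIO interface into slow mode
 * (HV_KMRN_MDIO_SLOW); the PHY workarounds above start by doing this
 * before touching other PHY registers.
 */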
static void
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	uint32_t reg;

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

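/*
 * Write the new K1 setting to the Kumeran K1 config register, then
 * briefly force the MAC speed (CTRL_FRCSPD plus CTRL_EXT_SPD_BYPS)
 * so the change takes effect before restoring CTRL and CTRL_EXT.
 */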
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);
}

/* Special case: the 82575 needs a manual init script after reset */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * It is the same setup as mentioned in the FreeBSD driver for the
	 * i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

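/*
 * For 82580 ports in SGMII mode, restore the external/shared MDIO
 * routing bits of MDICNFG from the port's CFG3 NVM word after reset.
 */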
static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
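/*
 * If the internal PHY's PLL comes up unconfigured (GS40G_PHY_PLL_UNCONF),
 * reset the PHY, bounce the device through D3 and back with a patched
 * iNVM autoload word in EEARBC, and retry up to WM_MAX_PLL_TRIES times.
 */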
static void
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	int phyval;
	bool wa_done = false;
	int i;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;
	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			break; /* OK */
		}

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
}