/*	$NetBSD: if_wm.c,v 1.373 2015/10/22 07:00:05 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- LPLU other than PCH*
 *	- TX Multi queue
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.373 2015/10/22 07:00:05 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define	WM_MPSAFE	1
#endif

#ifdef __HAVE_PCI_MSI_MSIX
#define	WM_MSI_MSIX	1 /* Enable by default */
#endif
/*
 * The maximum number of interrupts this device driver uses.
 */
#define	WM_MAX_NTXINTR		16
#define	WM_MAX_NRXINTR		16
#define	WM_MAX_NINTR		(WM_MAX_NTXINTR + WM_MAX_NRXINTR + 1)

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCSIZE(txq)	(WM_NTXDESC(txq) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */
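
/*
 * Because the ring and job-queue sizes above are powers of two,
 * WM_NEXTTX()/WM_NEXTTXS() can wrap an index with a cheap AND instead
 * of a modulo.  A minimal sketch of the index arithmetic (illustrative
 * only, not part of the driver; "txq" is any queue whose txq_ndesc is
 * a power of two, here 256):
 */
#if 0 /* example */
	int idx = 254;
	idx = WM_NEXTTX(txq, idx);	/* (254 + 1) & 255 == 255 */
	idx = WM_NEXTTX(txq, idx);	/* (255 + 1) & 255 wraps to 0 */
#endif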

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
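
/*
 * Worked numbers for the comment above (assuming a maximum jumbo frame
 * of ETHER_MAX_LEN_JUMBO, 9018 bytes): 9018 / 2048 (MCLBYTES) rounds
 * up to 5 buffers per jumbo packet, and 256 / 5 = 51, i.e. roughly 50
 * jumbo packets in flight.
 */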

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

#define	WM_CDTXOFF(x)	(sizeof(wiseman_txdesc_t) * (x))
#define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;

	int txq_id;			/* index of this transmit queue */
	int txq_intr_idx;		/* index into the MSI-X table */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
	size_t txq_desc_size;		/* control data size */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/* XXX which event counter is required? */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;

	int rxq_id;			/* index of this receive queue */
	int rxq_intr_idx;		/* index into the MSI-X table */

	/* Software state for the receive descriptors. */
	wiseman_rxdesc_t *rxq_descs;

	/* RX control data structures. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
	size_t rxq_desc_size;		/* control data size */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx descriptor/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	/* XXX which event counter is required? */
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */
	int sc_link_intr_idx;		/* index into the MSI-X table */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_ntxqueues;
	struct wm_txqueue *sc_txq;

	int sc_nrxqueues;
	struct wm_rxqueue *sc_rxq;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
};

#define	WM_TX_LOCK(_txq)	if ((_txq)->txq_lock) mutex_enter((_txq)->txq_lock)
#define	WM_TX_UNLOCK(_txq)	if ((_txq)->txq_lock) mutex_exit((_txq)->txq_lock)
#define	WM_TX_LOCKED(_txq)	(!(_txq)->txq_lock || mutex_owned((_txq)->txq_lock))
#define	WM_RX_LOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_enter((_rxq)->rxq_lock)
#define	WM_RX_UNLOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_exit((_rxq)->rxq_lock)
#define	WM_RX_LOCKED(_rxq)	(!(_rxq)->rxq_lock || mutex_owned((_rxq)->rxq_lock))
#define	WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define	WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define	WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
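
/*
 * The macros above deliberately tolerate a NULL mutex, so that the
 * same code paths work whether or not WM_MPSAFE is set.  A minimal
 * usage sketch (illustrative only, not part of the driver):
 */
#if 0 /* example */
static void
example_locked_tx_op(struct wm_txqueue *txq)
{

	WM_TX_LOCK(txq);		/* no-op when txq_lock is NULL */
	KASSERT(WM_TX_LOCKED(txq));	/* trivially true in that case */
	/* ... touch txq_free, txq_next, etc. ... */
	WM_TX_UNLOCK(txq);
}
#endif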

#ifdef WM_MPSAFE
#define	CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define	CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
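
/*
 * WM_RXCHAIN_LINK() appends through the rxq_tailp indirection, so
 * adding a buffer to the chain is O(1).  A sketch of assembling a
 * multi-buffer (e.g. jumbo) packet (illustrative only, not part of
 * the driver):
 */
#if 0 /* example */
static void
example_rx_chain(struct wm_rxqueue *rxq, struct mbuf *m, int len,
    bool lastbuf)
{

	WM_RXCHAIN_LINK(rxq, m);	/* *tailp = m; tailp = &m->m_next */
	rxq->rxq_len += len;
	if (lastbuf) {
		/* ... pass rxq->rxq_head up the stack, then ... */
		WM_RXCHAIN_RESET(rxq);
	}
}
#endif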

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
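
/*
 * CSR_WRITE_FLUSH() reads the side-effect-free STATUS register to
 * force any posted PCI write out to the chip before the driver goes
 * on -- the usual "write register, flush, then delay()" ordering
 * pattern.
 */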

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
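
/*
 * The *_LO()/*_HI() pairs above split a ring's bus address for the
 * 32-bit base-address register pairs (TDBAL/TDBAH, RDBAL/RDBAH); with
 * a 32-bit bus_addr_t the high half constant-folds to 0.  A sketch of
 * the intended use (illustrative only):
 */
#if 0 /* example */
	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(txq, 0));
	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(txq, 0));
#endif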

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
#ifdef WM_MSI_MSIX
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
#endif
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
	uint32_t *, uint8_t *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
	uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
#ifdef WM_MSI_MSIX
static int	wm_txintr_msix(void *);
static int	wm_rxintr_msix(void *);
static int	wm_linkintr_msix(void *);
#endif

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
	unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Locking, NVM-type detection, checksum validation and reading */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define	WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
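
/*
 * Both helpers above go through the indirect access window of the
 * I/O BAR: offset 0 (IOADDR) latches the target register offset and
 * offset 4 (IODATA) is the data window.
 */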

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

/*
 * Descriptor sync/init functions.
 */
static inline void
wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
{
	struct wm_softc *sc = txq->txq_sc;

	/* If it will wrap around, sync to the end of the ring. */
	if ((start + num) > WM_NTXDESC(txq)) {
		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
		    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) *
		    (WM_NTXDESC(txq) - start), ops);
		num -= (WM_NTXDESC(txq) - start);
		start = 0;
	}

	/* Now sync whatever is left. */
	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
	    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) * num, ops);
}
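
/*
 * Example of the wrap-around case above: on a 256-entry ring,
 * wm_cdtxsync(txq, 250, 10, ops) first syncs descriptors 250-255,
 * then (with start reset to 0) descriptors 0-3.
 */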

static inline void
wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
{
	struct wm_softc *sc = rxq->rxq_sc;

	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
}

static inline void
wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
{
	struct wm_softc *sc = rxq->rxq_sc;
	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
	struct mbuf *m = rxs->rxs_mbuf;

	/*
	 * Note: We scoot the packet forward 2 bytes in the buffer
	 * so that the payload after the Ethernet header is aligned
	 * to a 4-byte boundary.
	 *
	 * XXX BRAINDAMAGE ALERT!
	 * The stupid chip uses the same size for every buffer, which
	 * is set in the Receive Control register.  We are using the 2K
	 * size option, but what we REALLY want is (2K - 2)!  For this
	 * reason, we can't "scoot" packets longer than the standard
	 * Ethernet MTU.  On strict-alignment platforms, if the total
	 * size exceeds (2K - 2) we set align_tweak to 0 and let
	 * the upper layer copy the headers.
	 */
	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;

	wm_set_dma_addr(&rxd->wrx_addr,
	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
	rxd->wrx_len = 0;
	rxd->wrx_cksum = 0;
	rxd->wrx_status = 0;
	rxd->wrx_errors = 0;
	rxd->wrx_special = 0;
	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
}

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
#ifndef WM_MSI_MSIX
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	char intrbuf[PCI_INTRSTR_LEN];
#else
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
#endif
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, nvmword;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Disable MSI for Errata:
	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
	 *
	 *  82544: Errata 25
	 *  82540: Errata  6 (easy to reproduce device timeout)
	 *  82545: Errata  4 (easy to reproduce device timeout)
	 *  82546: Errata 26 (easy to reproduce device timeout)
	 *  82541: Errata  7 (easy to reproduce device timeout)
	 *
	 * "Byte Enables 2 and 3 are not set on MSI writes"
	 *
	 *  82571 & 82572: Errata 63
	 */
	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
	    || (sc->sc_type == WM_T_82572))
		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

1548 /*
1549	 * Map the device. All devices support memory-mapped access,
1550 * and it is really required for normal operation.
1551 */
1552 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1553 switch (memtype) {
1554 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1555 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1556 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1557 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1558 break;
1559 default:
1560 memh_valid = 0;
1561 break;
1562 }
1563
1564 if (memh_valid) {
1565 sc->sc_st = memt;
1566 sc->sc_sh = memh;
1567 sc->sc_ss = memsize;
1568 } else {
1569 aprint_error_dev(sc->sc_dev,
1570 "unable to map device registers\n");
1571 return;
1572 }
1573
1574 /*
1575 * In addition, i82544 and later support I/O mapped indirect
1576 * register access. It is not desirable (nor supported in
1577 * this driver) to use it for normal operation, though it is
1578 * required to work around bugs in some chip versions.
1579 */
1580 if (sc->sc_type >= WM_T_82544) {
1581 /* First we have to find the I/O BAR. */
1582 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1583 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1584 if (memtype == PCI_MAPREG_TYPE_IO)
1585 break;
1586 if (PCI_MAPREG_MEM_TYPE(memtype) ==
1587 PCI_MAPREG_MEM_TYPE_64BIT)
1588 i += 4; /* skip high bits, too */
1589 }
1590 if (i < PCI_MAPREG_END) {
1591 /*
1592			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1593			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1594			 * That's no problem, because the newer chips don't
1595			 * have this bug.
1596			 *
1597			 * The i8254x apparently doesn't respond when the
1598			 * I/O BAR is 0, which looks somewhat like it hasn't
1599			 * been configured.
1600 */
1601 preg = pci_conf_read(pc, pa->pa_tag, i);
1602 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1603 aprint_error_dev(sc->sc_dev,
1604 "WARNING: I/O BAR at zero.\n");
1605 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1606 0, &sc->sc_iot, &sc->sc_ioh,
1607 NULL, &sc->sc_ios) == 0) {
1608 sc->sc_flags |= WM_F_IOH_VALID;
1609 } else {
1610 aprint_error_dev(sc->sc_dev,
1611 "WARNING: unable to map I/O space\n");
1612 }
1613 }
1614
1615 }
1616
1617 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1618 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1619 preg |= PCI_COMMAND_MASTER_ENABLE;
1620 if (sc->sc_type < WM_T_82542_2_1)
1621 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1622 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1623
1624 /* power up chip */
1625 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1626 NULL)) && error != EOPNOTSUPP) {
1627 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1628 return;
1629 }
1630
1631 #ifndef WM_MSI_MSIX
1632 sc->sc_ntxqueues = 1;
1633 sc->sc_nrxqueues = 1;
1634 error = wm_alloc_txrx_queues(sc);
1635 if (error) {
1636 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
1637 error);
1638 return;
1639 }
1640
1641 /*
1642 * Map and establish our interrupt.
1643 */
1644 if (pci_intr_map(pa, &ih)) {
1645 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1646 return;
1647 }
1648 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
1649 #ifdef WM_MPSAFE
1650 pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
1651 #endif
1652 sc->sc_ihs[0] = pci_intr_establish_xname(pc, ih, IPL_NET,
1653 wm_intr_legacy, sc, device_xname(sc->sc_dev));
1654 if (sc->sc_ihs[0] == NULL) {
1655 aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1656 if (intrstr != NULL)
1657 aprint_error(" at %s", intrstr);
1658 aprint_error("\n");
1659 return;
1660 }
1661 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1662 sc->sc_nintrs = 1;
1663 #else /* WM_MSI_MSIX */
1664 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1665 error = wm_alloc_txrx_queues(sc);
1666 if (error) {
1667 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
1668 error);
1669 return;
1670 }
1671
1672 /* Allocation settings */
1673 max_type = PCI_INTR_TYPE_MSIX;
1674 counts[PCI_INTR_TYPE_MSIX] = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
1675 counts[PCI_INTR_TYPE_MSI] = 1;
1676 counts[PCI_INTR_TYPE_INTX] = 1;
1677
1678 alloc_retry:
1679 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1680 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1681 return;
1682 }
1683
1684 if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1685 error = wm_setup_msix(sc);
1686 if (error) {
1687 pci_intr_release(pc, sc->sc_intrs,
1688 counts[PCI_INTR_TYPE_MSIX]);
1689
1690 /* Setup for MSI: Disable MSI-X */
1691 max_type = PCI_INTR_TYPE_MSI;
1692 counts[PCI_INTR_TYPE_MSI] = 1;
1693 counts[PCI_INTR_TYPE_INTX] = 1;
1694 goto alloc_retry;
1695 }
1696 } else if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1697 error = wm_setup_legacy(sc);
1698 if (error) {
1699 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1700 counts[PCI_INTR_TYPE_MSI]);
1701
1702 /* The next try is for INTx: Disable MSI */
1703 max_type = PCI_INTR_TYPE_INTX;
1704 counts[PCI_INTR_TYPE_INTX] = 1;
1705 goto alloc_retry;
1706 }
1707 } else {
1708 error = wm_setup_legacy(sc);
1709 if (error) {
1710 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1711 counts[PCI_INTR_TYPE_INTX]);
1712 return;
1713 }
1714 }
1715 #endif /* WM_MSI_MSIX */
1716
1717 /*
1718 * Check the function ID (unit number of the chip).
1719 */
1720 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1721 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1722 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1723 || (sc->sc_type == WM_T_82580)
1724 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1725 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1726 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1727 else
1728 sc->sc_funcid = 0;
1729
1730 /*
1731 * Determine a few things about the bus we're connected to.
1732 */
1733 if (sc->sc_type < WM_T_82543) {
1734 /* We don't really know the bus characteristics here. */
1735 sc->sc_bus_speed = 33;
1736 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1737 /*
1738 * CSA (Communication Streaming Architecture) is about as fast
1739		 * as a 32-bit 66MHz PCI bus.
1740 */
1741 sc->sc_flags |= WM_F_CSA;
1742 sc->sc_bus_speed = 66;
1743 aprint_verbose_dev(sc->sc_dev,
1744 "Communication Streaming Architecture\n");
1745 if (sc->sc_type == WM_T_82547) {
1746 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1747 callout_setfunc(&sc->sc_txfifo_ch,
1748 wm_82547_txfifo_stall, sc);
1749 aprint_verbose_dev(sc->sc_dev,
1750 "using 82547 Tx FIFO stall work-around\n");
1751 }
1752 } else if (sc->sc_type >= WM_T_82571) {
1753 sc->sc_flags |= WM_F_PCIE;
1754 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1755 && (sc->sc_type != WM_T_ICH10)
1756 && (sc->sc_type != WM_T_PCH)
1757 && (sc->sc_type != WM_T_PCH2)
1758 && (sc->sc_type != WM_T_PCH_LPT)) {
1759 /* ICH* and PCH* have no PCIe capability registers */
1760 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1761 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1762 NULL) == 0)
1763 aprint_error_dev(sc->sc_dev,
1764 "unable to find PCIe capability\n");
1765 }
1766 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1767 } else {
1768 reg = CSR_READ(sc, WMREG_STATUS);
1769 if (reg & STATUS_BUS64)
1770 sc->sc_flags |= WM_F_BUS64;
1771 if ((reg & STATUS_PCIX_MODE) != 0) {
1772 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1773
1774 sc->sc_flags |= WM_F_PCIX;
1775 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1776 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1777 aprint_error_dev(sc->sc_dev,
1778 "unable to find PCIX capability\n");
1779 else if (sc->sc_type != WM_T_82545_3 &&
1780 sc->sc_type != WM_T_82546_3) {
1781 /*
1782 * Work around a problem caused by the BIOS
1783 * setting the max memory read byte count
1784 * incorrectly.
1785 */
1786 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1787 sc->sc_pcixe_capoff + PCIX_CMD);
1788 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1789 sc->sc_pcixe_capoff + PCIX_STATUS);
1790
1791 bytecnt =
1792 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1793 PCIX_CMD_BYTECNT_SHIFT;
1794 maxb =
1795 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1796 PCIX_STATUS_MAXB_SHIFT;
1797 if (bytecnt > maxb) {
1798 aprint_verbose_dev(sc->sc_dev,
1799 "resetting PCI-X MMRBC: %d -> %d\n",
1800 512 << bytecnt, 512 << maxb);
1801 pcix_cmd = (pcix_cmd &
1802 ~PCIX_CMD_BYTECNT_MASK) |
1803 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1804 pci_conf_write(pa->pa_pc, pa->pa_tag,
1805 sc->sc_pcixe_capoff + PCIX_CMD,
1806 pcix_cmd);
1807 }
1808 }
1809 }
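		/*
		 * Worked example for the MMRBC clamp above (illustrative
		 * values, assuming the usual PCI-X encoding in which the
		 * maximum memory read byte count is 512 << field): if the
		 * BIOS left bytecnt = 3 (512 << 3 = 4096 bytes) but the
		 * status register only advertises maxb = 2 (2048 bytes),
		 * the code rewrites the command register with bytecnt = 2
		 * and prints "resetting PCI-X MMRBC: 4096 -> 2048".
		 */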
1810 /*
1811 * The quad port adapter is special; it has a PCIX-PCIX
1812 * bridge on the board, and can run the secondary bus at
1813 * a higher speed.
1814 */
1815 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1816 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1817 : 66;
1818 } else if (sc->sc_flags & WM_F_PCIX) {
1819 switch (reg & STATUS_PCIXSPD_MASK) {
1820 case STATUS_PCIXSPD_50_66:
1821 sc->sc_bus_speed = 66;
1822 break;
1823 case STATUS_PCIXSPD_66_100:
1824 sc->sc_bus_speed = 100;
1825 break;
1826 case STATUS_PCIXSPD_100_133:
1827 sc->sc_bus_speed = 133;
1828 break;
1829 default:
1830 aprint_error_dev(sc->sc_dev,
1831 "unknown PCIXSPD %d; assuming 66MHz\n",
1832 reg & STATUS_PCIXSPD_MASK);
1833 sc->sc_bus_speed = 66;
1834 break;
1835 }
1836 } else
1837 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1838 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1839 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1840 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1841 }
1842
1843 /* clear interesting stat counters */
1844 CSR_READ(sc, WMREG_COLC);
1845 CSR_READ(sc, WMREG_RXERRC);
1846
1847 /* get PHY control from SMBus to PCIe */
1848 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1849 || (sc->sc_type == WM_T_PCH_LPT))
1850 wm_smbustopci(sc);
1851
1852 /* Reset the chip to a known state. */
1853 wm_reset(sc);
1854
1855 /* Get some information about the EEPROM. */
1856 switch (sc->sc_type) {
1857 case WM_T_82542_2_0:
1858 case WM_T_82542_2_1:
1859 case WM_T_82543:
1860 case WM_T_82544:
1861 /* Microwire */
1862 sc->sc_nvm_wordsize = 64;
1863 sc->sc_nvm_addrbits = 6;
1864 break;
1865 case WM_T_82540:
1866 case WM_T_82545:
1867 case WM_T_82545_3:
1868 case WM_T_82546:
1869 case WM_T_82546_3:
1870 /* Microwire */
1871 reg = CSR_READ(sc, WMREG_EECD);
1872 if (reg & EECD_EE_SIZE) {
1873 sc->sc_nvm_wordsize = 256;
1874 sc->sc_nvm_addrbits = 8;
1875 } else {
1876 sc->sc_nvm_wordsize = 64;
1877 sc->sc_nvm_addrbits = 6;
1878 }
1879 sc->sc_flags |= WM_F_LOCK_EECD;
1880 break;
1881 case WM_T_82541:
1882 case WM_T_82541_2:
1883 case WM_T_82547:
1884 case WM_T_82547_2:
1885 sc->sc_flags |= WM_F_LOCK_EECD;
1886 reg = CSR_READ(sc, WMREG_EECD);
1887 if (reg & EECD_EE_TYPE) {
1888 /* SPI */
1889 sc->sc_flags |= WM_F_EEPROM_SPI;
1890 wm_nvm_set_addrbits_size_eecd(sc);
1891 } else {
1892 /* Microwire */
1893 if ((reg & EECD_EE_ABITS) != 0) {
1894 sc->sc_nvm_wordsize = 256;
1895 sc->sc_nvm_addrbits = 8;
1896 } else {
1897 sc->sc_nvm_wordsize = 64;
1898 sc->sc_nvm_addrbits = 6;
1899 }
1900 }
1901 break;
1902 case WM_T_82571:
1903 case WM_T_82572:
1904 /* SPI */
1905 sc->sc_flags |= WM_F_EEPROM_SPI;
1906 wm_nvm_set_addrbits_size_eecd(sc);
1907 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1908 break;
1909 case WM_T_82573:
1910 sc->sc_flags |= WM_F_LOCK_SWSM;
1911 /* FALLTHROUGH */
1912 case WM_T_82574:
1913 case WM_T_82583:
1914 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1915 sc->sc_flags |= WM_F_EEPROM_FLASH;
1916 sc->sc_nvm_wordsize = 2048;
1917 } else {
1918 /* SPI */
1919 sc->sc_flags |= WM_F_EEPROM_SPI;
1920 wm_nvm_set_addrbits_size_eecd(sc);
1921 }
1922 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1923 break;
1924 case WM_T_82575:
1925 case WM_T_82576:
1926 case WM_T_82580:
1927 case WM_T_I350:
1928 case WM_T_I354:
1929 case WM_T_80003:
1930 /* SPI */
1931 sc->sc_flags |= WM_F_EEPROM_SPI;
1932 wm_nvm_set_addrbits_size_eecd(sc);
1933 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1934 | WM_F_LOCK_SWSM;
1935 break;
1936 case WM_T_ICH8:
1937 case WM_T_ICH9:
1938 case WM_T_ICH10:
1939 case WM_T_PCH:
1940 case WM_T_PCH2:
1941 case WM_T_PCH_LPT:
1942 /* FLASH */
1943 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1944 sc->sc_nvm_wordsize = 2048;
1945 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1946 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1947 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
1948 aprint_error_dev(sc->sc_dev,
1949 "can't map FLASH registers\n");
1950 goto out;
1951 }
1952 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1953 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1954 ICH_FLASH_SECTOR_SIZE;
1955 sc->sc_ich8_flash_bank_size =
1956 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1957 sc->sc_ich8_flash_bank_size -=
1958 (reg & ICH_GFPREG_BASE_MASK);
1959 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1960 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1961 break;
1962 case WM_T_I210:
1963 case WM_T_I211:
1964 if (wm_nvm_get_flash_presence_i210(sc)) {
1965 wm_nvm_set_addrbits_size_eecd(sc);
1966 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1967 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1968 } else {
1969 sc->sc_nvm_wordsize = INVM_SIZE;
1970 sc->sc_flags |= WM_F_EEPROM_INVM;
1971 sc->sc_flags |= WM_F_LOCK_SWFW;
1972 }
1973 break;
1974 default:
1975 break;
1976 }
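	/*
	 * Worked example for the ICH/PCH flash geometry above
	 * (illustrative register value, assuming 4KB (0x1000) flash
	 * sectors and a 13-bit base mask): if GFPREG reads 0x00200001,
	 * the base field is 1 and the limit field is 0x20, so
	 * sc_ich8_flash_base is 0x1000 bytes and the two banks together
	 * span (0x20 + 1 - 1) = 0x20 sectors, i.e. 0x20000 bytes; after
	 * the divide by 2 * sizeof(uint16_t), each bank holds 0x8000
	 * 16-bit words.
	 */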
1977
1978 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1979 switch (sc->sc_type) {
1980 case WM_T_82571:
1981 case WM_T_82572:
1982 reg = CSR_READ(sc, WMREG_SWSM2);
1983 if ((reg & SWSM2_LOCK) == 0) {
1984 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1985 force_clear_smbi = true;
1986 } else
1987 force_clear_smbi = false;
1988 break;
1989 case WM_T_82573:
1990 case WM_T_82574:
1991 case WM_T_82583:
1992 force_clear_smbi = true;
1993 break;
1994 default:
1995 force_clear_smbi = false;
1996 break;
1997 }
1998 if (force_clear_smbi) {
1999 reg = CSR_READ(sc, WMREG_SWSM);
2000 if ((reg & SWSM_SMBI) != 0)
2001 aprint_error_dev(sc->sc_dev,
2002 "Please update the Bootagent\n");
2003 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2004 }
2005
2006 /*
2007	 * Defer printing the EEPROM type until after verifying the checksum.
2008 * This allows the EEPROM type to be printed correctly in the case
2009 * that no EEPROM is attached.
2010 */
2011 /*
2012 * Validate the EEPROM checksum. If the checksum fails, flag
2013 * this for later, so we can fail future reads from the EEPROM.
2014 */
2015 if (wm_nvm_validate_checksum(sc)) {
2016 /*
2017		 * Try reading again, because some PCI-e parts fail the
2018 * first check due to the link being in sleep state.
2019 */
2020 if (wm_nvm_validate_checksum(sc))
2021 sc->sc_flags |= WM_F_EEPROM_INVALID;
2022 }
2023
2024 /* Set device properties (macflags) */
2025 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2026
2027 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2028 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2029 else {
2030 aprint_verbose_dev(sc->sc_dev, "%u words ",
2031 sc->sc_nvm_wordsize);
2032 if (sc->sc_flags & WM_F_EEPROM_INVM)
2033 aprint_verbose("iNVM");
2034 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2035 aprint_verbose("FLASH(HW)");
2036 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2037 aprint_verbose("FLASH");
2038 else {
2039 if (sc->sc_flags & WM_F_EEPROM_SPI)
2040 eetype = "SPI";
2041 else
2042 eetype = "MicroWire";
2043 aprint_verbose("(%d address bits) %s EEPROM",
2044 sc->sc_nvm_addrbits, eetype);
2045 }
2046 }
2047 wm_nvm_version(sc);
2048 aprint_verbose("\n");
2049
2050 /* Check for I21[01] PLL workaround */
2051 if (sc->sc_type == WM_T_I210)
2052 sc->sc_flags |= WM_F_PLL_WA_I210;
2053 if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2054 /* NVM image release 3.25 has a workaround */
2055 if ((sc->sc_nvm_ver_major < 3)
2056 || ((sc->sc_nvm_ver_major == 3)
2057 && (sc->sc_nvm_ver_minor < 25))) {
2058 aprint_verbose_dev(sc->sc_dev,
2059 "ROM image version %d.%d is older than 3.25\n",
2060 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2061 sc->sc_flags |= WM_F_PLL_WA_I210;
2062 }
2063 }
2064 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2065 wm_pll_workaround_i210(sc);
2066
2067 switch (sc->sc_type) {
2068 case WM_T_82571:
2069 case WM_T_82572:
2070 case WM_T_82573:
2071 case WM_T_82574:
2072 case WM_T_82583:
2073 case WM_T_80003:
2074 case WM_T_ICH8:
2075 case WM_T_ICH9:
2076 case WM_T_ICH10:
2077 case WM_T_PCH:
2078 case WM_T_PCH2:
2079 case WM_T_PCH_LPT:
2080 if (wm_check_mng_mode(sc) != 0)
2081 wm_get_hw_control(sc);
2082 break;
2083 default:
2084 break;
2085 }
2086 wm_get_wakeup(sc);
2087 /*
2088	 * Read the Ethernet address from the EEPROM if it isn't found
2089	 * first in the device properties.
2090 */
2091 ea = prop_dictionary_get(dict, "mac-address");
2092 if (ea != NULL) {
2093 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2094 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2095 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2096 } else {
2097 if (wm_read_mac_addr(sc, enaddr) != 0) {
2098 aprint_error_dev(sc->sc_dev,
2099 "unable to read Ethernet address\n");
2100 goto out;
2101 }
2102 }
2103
2104 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2105 ether_sprintf(enaddr));
2106
2107 /*
2108 * Read the config info from the EEPROM, and set up various
2109 * bits in the control registers based on their contents.
2110 */
2111 pn = prop_dictionary_get(dict, "i82543-cfg1");
2112 if (pn != NULL) {
2113 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2114 cfg1 = (uint16_t) prop_number_integer_value(pn);
2115 } else {
2116 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2117 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2118 goto out;
2119 }
2120 }
2121
2122 pn = prop_dictionary_get(dict, "i82543-cfg2");
2123 if (pn != NULL) {
2124 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2125 cfg2 = (uint16_t) prop_number_integer_value(pn);
2126 } else {
2127 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2128 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2129 goto out;
2130 }
2131 }
2132
2133 /* check for WM_F_WOL */
2134 switch (sc->sc_type) {
2135 case WM_T_82542_2_0:
2136 case WM_T_82542_2_1:
2137 case WM_T_82543:
2138 /* dummy? */
2139 eeprom_data = 0;
2140 apme_mask = NVM_CFG3_APME;
2141 break;
2142 case WM_T_82544:
2143 apme_mask = NVM_CFG2_82544_APM_EN;
2144 eeprom_data = cfg2;
2145 break;
2146 case WM_T_82546:
2147 case WM_T_82546_3:
2148 case WM_T_82571:
2149 case WM_T_82572:
2150 case WM_T_82573:
2151 case WM_T_82574:
2152 case WM_T_82583:
2153 case WM_T_80003:
2154 default:
2155 apme_mask = NVM_CFG3_APME;
2156 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2157 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2158 break;
2159 case WM_T_82575:
2160 case WM_T_82576:
2161 case WM_T_82580:
2162 case WM_T_I350:
2163 case WM_T_I354: /* XXX ok? */
2164 case WM_T_ICH8:
2165 case WM_T_ICH9:
2166 case WM_T_ICH10:
2167 case WM_T_PCH:
2168 case WM_T_PCH2:
2169 case WM_T_PCH_LPT:
2170 /* XXX The funcid should be checked on some devices */
2171 apme_mask = WUC_APME;
2172 eeprom_data = CSR_READ(sc, WMREG_WUC);
2173 break;
2174 }
2175
2176 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2177 if ((eeprom_data & apme_mask) != 0)
2178 sc->sc_flags |= WM_F_WOL;
2179 #ifdef WM_DEBUG
2180 if ((sc->sc_flags & WM_F_WOL) != 0)
2181 printf("WOL\n");
2182 #endif
2183
2184 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2185 /* Check NVM for autonegotiation */
2186 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2187 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2188 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2189 }
2190 }
2191
2192 /*
2193 * XXX need special handling for some multiple port cards
2194	 * to disable a particular port.
2195 */
2196
2197 if (sc->sc_type >= WM_T_82544) {
2198 pn = prop_dictionary_get(dict, "i82543-swdpin");
2199 if (pn != NULL) {
2200 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2201 swdpin = (uint16_t) prop_number_integer_value(pn);
2202 } else {
2203 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2204 aprint_error_dev(sc->sc_dev,
2205 "unable to read SWDPIN\n");
2206 goto out;
2207 }
2208 }
2209 }
2210
2211 if (cfg1 & NVM_CFG1_ILOS)
2212 sc->sc_ctrl |= CTRL_ILOS;
2213
2214 /*
2215 * XXX
2216	 * This code isn't correct because pins 2 and 3 are located
2217	 * at different positions on newer chips. Check all datasheets.
2218	 *
2219	 * Until this problem is resolved, only do this on chips < 82580.
2220 */
2221 if (sc->sc_type <= WM_T_82580) {
2222 if (sc->sc_type >= WM_T_82544) {
2223 sc->sc_ctrl |=
2224 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2225 CTRL_SWDPIO_SHIFT;
2226 sc->sc_ctrl |=
2227 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2228 CTRL_SWDPINS_SHIFT;
2229 } else {
2230 sc->sc_ctrl |=
2231 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2232 CTRL_SWDPIO_SHIFT;
2233 }
2234 }
2235
2236 /* XXX For other than 82580? */
2237 if (sc->sc_type == WM_T_82580) {
2238 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2239 printf("CFG3 = %08x\n", (uint32_t)nvmword);
2240 if (nvmword & __BIT(13)) {
2241 printf("SET ILOS\n");
2242 sc->sc_ctrl |= CTRL_ILOS;
2243 }
2244 }
2245
2246 #if 0
2247 if (sc->sc_type >= WM_T_82544) {
2248 if (cfg1 & NVM_CFG1_IPS0)
2249 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2250 if (cfg1 & NVM_CFG1_IPS1)
2251 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2252 sc->sc_ctrl_ext |=
2253 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2254 CTRL_EXT_SWDPIO_SHIFT;
2255 sc->sc_ctrl_ext |=
2256 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2257 CTRL_EXT_SWDPINS_SHIFT;
2258 } else {
2259 sc->sc_ctrl_ext |=
2260 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2261 CTRL_EXT_SWDPIO_SHIFT;
2262 }
2263 #endif
2264
2265 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2266 #if 0
2267 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2268 #endif
2269
2270 if (sc->sc_type == WM_T_PCH) {
2271 uint16_t val;
2272
2273 /* Save the NVM K1 bit setting */
2274 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2275
2276 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2277 sc->sc_nvm_k1_enabled = 1;
2278 else
2279 sc->sc_nvm_k1_enabled = 0;
2280 }
2281
2282 /*
2283	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
2284 * media structures accordingly.
2285 */
2286 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2287 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2288 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2289 || sc->sc_type == WM_T_82573
2290 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2291 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2292 wm_gmii_mediainit(sc, wmp->wmp_product);
2293 } else if (sc->sc_type < WM_T_82543 ||
2294 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2295 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2296 aprint_error_dev(sc->sc_dev,
2297 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2298 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2299 }
2300 wm_tbi_mediainit(sc);
2301 } else {
2302 switch (sc->sc_type) {
2303 case WM_T_82575:
2304 case WM_T_82576:
2305 case WM_T_82580:
2306 case WM_T_I350:
2307 case WM_T_I354:
2308 case WM_T_I210:
2309 case WM_T_I211:
2310 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2311 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2312 switch (link_mode) {
2313 case CTRL_EXT_LINK_MODE_1000KX:
2314 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2315 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2316 break;
2317 case CTRL_EXT_LINK_MODE_SGMII:
2318 if (wm_sgmii_uses_mdio(sc)) {
2319 aprint_verbose_dev(sc->sc_dev,
2320 "SGMII(MDIO)\n");
2321 sc->sc_flags |= WM_F_SGMII;
2322 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2323 break;
2324 }
2325 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2326 /*FALLTHROUGH*/
2327 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2328 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2329 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2330 if (link_mode
2331 == CTRL_EXT_LINK_MODE_SGMII) {
2332 sc->sc_mediatype
2333 = WM_MEDIATYPE_COPPER;
2334 sc->sc_flags |= WM_F_SGMII;
2335 } else {
2336 sc->sc_mediatype
2337 = WM_MEDIATYPE_SERDES;
2338 aprint_verbose_dev(sc->sc_dev,
2339 "SERDES\n");
2340 }
2341 break;
2342 }
2343 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2344 aprint_verbose_dev(sc->sc_dev,
2345 "SERDES\n");
2346
2347 /* Change current link mode setting */
2348 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2349 switch (sc->sc_mediatype) {
2350 case WM_MEDIATYPE_COPPER:
2351 reg |= CTRL_EXT_LINK_MODE_SGMII;
2352 break;
2353 case WM_MEDIATYPE_SERDES:
2354 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2355 break;
2356 default:
2357 break;
2358 }
2359 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2360 break;
2361 case CTRL_EXT_LINK_MODE_GMII:
2362 default:
2363 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2364 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2365 break;
2366 }
2367
2369 if ((sc->sc_flags & WM_F_SGMII) != 0)
2370 reg |= CTRL_EXT_I2C_ENA;
2371 else
2372 reg &= ~CTRL_EXT_I2C_ENA;
2373 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2374
2375 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2376 wm_gmii_mediainit(sc, wmp->wmp_product);
2377 else
2378 wm_tbi_mediainit(sc);
2379 break;
2380 default:
2381 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2382 aprint_error_dev(sc->sc_dev,
2383 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2384 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2385 wm_gmii_mediainit(sc, wmp->wmp_product);
2386 }
2387 }
2388
2389 ifp = &sc->sc_ethercom.ec_if;
2390 xname = device_xname(sc->sc_dev);
2391 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2392 ifp->if_softc = sc;
2393 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2394 ifp->if_ioctl = wm_ioctl;
2395 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2396 ifp->if_start = wm_nq_start;
2397 else
2398 ifp->if_start = wm_start;
2399 ifp->if_watchdog = wm_watchdog;
2400 ifp->if_init = wm_init;
2401 ifp->if_stop = wm_stop;
2402 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2403 IFQ_SET_READY(&ifp->if_snd);
2404
2405 /* Check for jumbo frame */
2406 switch (sc->sc_type) {
2407 case WM_T_82573:
2408 /* XXX limited to 9234 if ASPM is disabled */
2409 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2410 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2411 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2412 break;
2413 case WM_T_82571:
2414 case WM_T_82572:
2415 case WM_T_82574:
2416 case WM_T_82575:
2417 case WM_T_82576:
2418 case WM_T_82580:
2419 case WM_T_I350:
2420 case WM_T_I354: /* XXXX ok? */
2421 case WM_T_I210:
2422 case WM_T_I211:
2423 case WM_T_80003:
2424 case WM_T_ICH9:
2425 case WM_T_ICH10:
2426 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2427 case WM_T_PCH_LPT:
2428 /* XXX limited to 9234 */
2429 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2430 break;
2431 case WM_T_PCH:
2432 /* XXX limited to 4096 */
2433 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2434 break;
2435 case WM_T_82542_2_0:
2436 case WM_T_82542_2_1:
2437 case WM_T_82583:
2438 case WM_T_ICH8:
2439 /* No support for jumbo frame */
2440 break;
2441 default:
2442 /* ETHER_MAX_LEN_JUMBO */
2443 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2444 break;
2445 }
2446
2447	/* If we're an i82543 or greater, we can support VLANs. */
2448 if (sc->sc_type >= WM_T_82543)
2449 sc->sc_ethercom.ec_capabilities |=
2450 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2451
2452 /*
2453	 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
2454 * on i82543 and later.
2455 */
2456 if (sc->sc_type >= WM_T_82543) {
2457 ifp->if_capabilities |=
2458 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2459 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2460 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2461 IFCAP_CSUM_TCPv6_Tx |
2462 IFCAP_CSUM_UDPv6_Tx;
2463 }
2464
2465 /*
2466	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
2467 *
2468 * 82541GI (8086:1076) ... no
2469 * 82572EI (8086:10b9) ... yes
2470 */
2471 if (sc->sc_type >= WM_T_82571) {
2472 ifp->if_capabilities |=
2473 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2474 }
2475
2476 /*
2477	 * If we're an i82544 or greater (except the i82547), we can do
2478 * TCP segmentation offload.
2479 */
2480 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2481 ifp->if_capabilities |= IFCAP_TSOv4;
2482 }
2483
2484 if (sc->sc_type >= WM_T_82571) {
2485 ifp->if_capabilities |= IFCAP_TSOv6;
2486 }
2487
2488 #ifdef WM_MPSAFE
2489 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2490 #else
2491 sc->sc_core_lock = NULL;
2492 #endif
2493
2494 /* Attach the interface. */
2495 if_attach(ifp);
2496 ether_ifattach(ifp, enaddr);
2497 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2498 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2499 RND_FLAG_DEFAULT);
2500
2501 #ifdef WM_EVENT_COUNTERS
2502 /* Attach event counters. */
2503 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2504 NULL, xname, "txsstall");
2505 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2506 NULL, xname, "txdstall");
2507 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2508 NULL, xname, "txfifo_stall");
2509 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2510 NULL, xname, "txdw");
2511 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2512 NULL, xname, "txqe");
2513 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2514 NULL, xname, "rxintr");
2515 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2516 NULL, xname, "linkintr");
2517
2518 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2519 NULL, xname, "rxipsum");
2520 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2521 NULL, xname, "rxtusum");
2522 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2523 NULL, xname, "txipsum");
2524 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2525 NULL, xname, "txtusum");
2526 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2527 NULL, xname, "txtusum6");
2528
2529 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2530 NULL, xname, "txtso");
2531 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2532 NULL, xname, "txtso6");
2533 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2534 NULL, xname, "txtsopain");
2535
2536 for (i = 0; i < WM_NTXSEGS; i++) {
2537 snprintf(wm_txseg_evcnt_names[i],
2538 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2539 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2540 NULL, xname, wm_txseg_evcnt_names[i]);
2541 }
2542
2543 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2544 NULL, xname, "txdrop");
2545
2546 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2547 NULL, xname, "tu");
2548
2549 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2550 NULL, xname, "tx_xoff");
2551 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2552 NULL, xname, "tx_xon");
2553 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2554 NULL, xname, "rx_xoff");
2555 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2556 NULL, xname, "rx_xon");
2557 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2558 NULL, xname, "rx_macctl");
2559 #endif /* WM_EVENT_COUNTERS */
2560
2561 if (pmf_device_register(self, wm_suspend, wm_resume))
2562 pmf_class_network_register(self, ifp);
2563 else
2564 aprint_error_dev(self, "couldn't establish power handler\n");
2565
2566 sc->sc_flags |= WM_F_ATTACHED;
2567 out:
2568 return;
2569 }
2570
2571 /* The detach function (ca_detach) */
2572 static int
2573 wm_detach(device_t self, int flags __unused)
2574 {
2575 struct wm_softc *sc = device_private(self);
2576 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2577 int i;
2578 #ifndef WM_MPSAFE
2579 int s;
2580 #endif
2581
2582 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2583 return 0;
2584
2585 #ifndef WM_MPSAFE
2586 s = splnet();
2587 #endif
2588	/* Stop the interface. Callouts are stopped in wm_stop(). */
2589 wm_stop(ifp, 1);
2590
2591 #ifndef WM_MPSAFE
2592 splx(s);
2593 #endif
2594
2595 pmf_device_deregister(self);
2596
2597 /* Tell the firmware about the release */
2598 WM_CORE_LOCK(sc);
2599 wm_release_manageability(sc);
2600 wm_release_hw_control(sc);
2601 WM_CORE_UNLOCK(sc);
2602
2603 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2604
2605 /* Delete all remaining media. */
2606 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2607
2608 ether_ifdetach(ifp);
2609 if_detach(ifp);
2610
2611
2612 /* Unload RX dmamaps and free mbufs */
2613 for (i = 0; i < sc->sc_nrxqueues; i++) {
2614 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
2615 WM_RX_LOCK(rxq);
2616 wm_rxdrain(rxq);
2617 WM_RX_UNLOCK(rxq);
2618 }
2619 /* Must unlock here */
2620
2621 wm_free_txrx_queues(sc);
2622
2623 /* Disestablish the interrupt handler */
2624 for (i = 0; i < sc->sc_nintrs; i++) {
2625 if (sc->sc_ihs[i] != NULL) {
2626 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2627 sc->sc_ihs[i] = NULL;
2628 }
2629 }
2630 #ifdef WM_MSI_MSIX
2631 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2632 #endif /* WM_MSI_MSIX */
2633
2634 /* Unmap the registers */
2635 if (sc->sc_ss) {
2636 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2637 sc->sc_ss = 0;
2638 }
2639 if (sc->sc_ios) {
2640 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2641 sc->sc_ios = 0;
2642 }
2643 if (sc->sc_flashs) {
2644 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2645 sc->sc_flashs = 0;
2646 }
2647
2648 if (sc->sc_core_lock)
2649 mutex_obj_free(sc->sc_core_lock);
2650
2651 return 0;
2652 }
2653
2654 static bool
2655 wm_suspend(device_t self, const pmf_qual_t *qual)
2656 {
2657 struct wm_softc *sc = device_private(self);
2658
2659 wm_release_manageability(sc);
2660 wm_release_hw_control(sc);
2661 #ifdef WM_WOL
2662 wm_enable_wakeup(sc);
2663 #endif
2664
2665 return true;
2666 }
2667
2668 static bool
2669 wm_resume(device_t self, const pmf_qual_t *qual)
2670 {
2671 struct wm_softc *sc = device_private(self);
2672
2673 wm_init_manageability(sc);
2674
2675 return true;
2676 }
2677
2678 /*
2679 * wm_watchdog: [ifnet interface function]
2680 *
2681 * Watchdog timer handler.
2682 */
2683 static void
2684 wm_watchdog(struct ifnet *ifp)
2685 {
2686 struct wm_softc *sc = ifp->if_softc;
2687 struct wm_txqueue *txq = &sc->sc_txq[0];
2688
2689 /*
2690 * Since we're using delayed interrupts, sweep up
2691 * before we report an error.
2692 */
2693 WM_TX_LOCK(txq);
2694 wm_txeof(sc);
2695 WM_TX_UNLOCK(txq);
2696
2697 if (txq->txq_free != WM_NTXDESC(txq)) {
2698 #ifdef WM_DEBUG
2699 int i, j;
2700 struct wm_txsoft *txs;
2701 #endif
2702 log(LOG_ERR,
2703 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2704 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
2705 txq->txq_next);
2706 ifp->if_oerrors++;
2707 #ifdef WM_DEBUG
2708 for (i = txq->txq_sdirty; i != txq->txq_snext ;
2709 i = WM_NEXTTXS(txq, i)) {
2710 txs = &txq->txq_soft[i];
2711 printf("txs %d tx %d -> %d\n",
2712 i, txs->txs_firstdesc, txs->txs_lastdesc);
2713 for (j = txs->txs_firstdesc; ;
2714 j = WM_NEXTTX(txq, j)) {
2715 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2716 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
2717 printf("\t %#08x%08x\n",
2718 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
2719 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
2720 if (j == txs->txs_lastdesc)
2721 break;
2722 }
2723 }
2724 #endif
2725 /* Reset the interface. */
2726 (void) wm_init(ifp);
2727 }
2728
2729 /* Try to get more packets going. */
2730 ifp->if_start(ifp);
2731 }
2732
2733 /*
2734 * wm_tick:
2735 *
2736 * One second timer, used to check link status, sweep up
2737 * completed transmit jobs, etc.
2738 */
2739 static void
2740 wm_tick(void *arg)
2741 {
2742 struct wm_softc *sc = arg;
2743 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2744 #ifndef WM_MPSAFE
2745 int s;
2746
2747 s = splnet();
2748 #endif
2749
2750 WM_CORE_LOCK(sc);
2751
2752 if (sc->sc_stopping)
2753 goto out;
2754
2755 if (sc->sc_type >= WM_T_82542_2_1) {
2756 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2757 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2758 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2759 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2760 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2761 }
2762
2763 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2764	ifp->if_ierrors += 0ULL /* ensure quad_t */
2765 + CSR_READ(sc, WMREG_CRCERRS)
2766 + CSR_READ(sc, WMREG_ALGNERRC)
2767 + CSR_READ(sc, WMREG_SYMERRC)
2768 + CSR_READ(sc, WMREG_RXERRC)
2769 + CSR_READ(sc, WMREG_SEC)
2770 + CSR_READ(sc, WMREG_CEXTERR)
2771 + CSR_READ(sc, WMREG_RLEC);
2772 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2773
2774 if (sc->sc_flags & WM_F_HAS_MII)
2775 mii_tick(&sc->sc_mii);
2776 else if ((sc->sc_type >= WM_T_82575)
2777 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2778 wm_serdes_tick(sc);
2779 else
2780 wm_tbi_tick(sc);
2781
2782 out:
2783 WM_CORE_UNLOCK(sc);
2784 #ifndef WM_MPSAFE
2785 splx(s);
2786 #endif
2787
2788 if (!sc->sc_stopping)
2789 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2790 }
2791
2792 static int
2793 wm_ifflags_cb(struct ethercom *ec)
2794 {
2795 struct ifnet *ifp = &ec->ec_if;
2796 struct wm_softc *sc = ifp->if_softc;
2797 int change = ifp->if_flags ^ sc->sc_if_flags;
2798 int rc = 0;
2799
2800 WM_CORE_LOCK(sc);
2801
2802 if (change != 0)
2803 sc->sc_if_flags = ifp->if_flags;
2804
2805 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2806 rc = ENETRESET;
2807 goto out;
2808 }
2809
2810 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2811 wm_set_filter(sc);
2812
2813 wm_set_vlan(sc);
2814
2815 out:
2816 WM_CORE_UNLOCK(sc);
2817
2818 return rc;
2819 }
2820
2821 /*
2822 * wm_ioctl: [ifnet interface function]
2823 *
2824 * Handle control requests from the operator.
2825 */
2826 static int
2827 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2828 {
2829 struct wm_softc *sc = ifp->if_softc;
2830 struct ifreq *ifr = (struct ifreq *) data;
2831 struct ifaddr *ifa = (struct ifaddr *)data;
2832 struct sockaddr_dl *sdl;
2833 int s, error;
2834
2835 #ifndef WM_MPSAFE
2836 s = splnet();
2837 #endif
2838 switch (cmd) {
2839 case SIOCSIFMEDIA:
2840 case SIOCGIFMEDIA:
2841 WM_CORE_LOCK(sc);
2842 /* Flow control requires full-duplex mode. */
2843 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2844 (ifr->ifr_media & IFM_FDX) == 0)
2845 ifr->ifr_media &= ~IFM_ETH_FMASK;
2846 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2847 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2848 /* We can do both TXPAUSE and RXPAUSE. */
2849 ifr->ifr_media |=
2850 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2851 }
2852 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2853 }
2854 WM_CORE_UNLOCK(sc);
2855 #ifdef WM_MPSAFE
2856 s = splnet();
2857 #endif
2858 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2859 #ifdef WM_MPSAFE
2860 splx(s);
2861 #endif
2862 break;
2863 case SIOCINITIFADDR:
2864 WM_CORE_LOCK(sc);
2865 if (ifa->ifa_addr->sa_family == AF_LINK) {
2866 sdl = satosdl(ifp->if_dl->ifa_addr);
2867 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2868 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2869 /* unicast address is first multicast entry */
2870 wm_set_filter(sc);
2871 error = 0;
2872 WM_CORE_UNLOCK(sc);
2873 break;
2874 }
2875 WM_CORE_UNLOCK(sc);
2876 /*FALLTHROUGH*/
2877 default:
2878 #ifdef WM_MPSAFE
2879 s = splnet();
2880 #endif
2881 /* It may call wm_start, so unlock here */
2882 error = ether_ioctl(ifp, cmd, data);
2883 #ifdef WM_MPSAFE
2884 splx(s);
2885 #endif
2886 if (error != ENETRESET)
2887 break;
2888
2889 error = 0;
2890
2891 if (cmd == SIOCSIFCAP) {
2892 error = (*ifp->if_init)(ifp);
2893 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2894 ;
2895 else if (ifp->if_flags & IFF_RUNNING) {
2896 /*
2897 * Multicast list has changed; set the hardware filter
2898 * accordingly.
2899 */
2900 WM_CORE_LOCK(sc);
2901 wm_set_filter(sc);
2902 WM_CORE_UNLOCK(sc);
2903 }
2904 break;
2905 }
2906
2907 #ifndef WM_MPSAFE
2908 splx(s);
2909 #endif
2910 return error;
2911 }
2912
2913 /* MAC address related */
2914
2915 /*
2916 * Get the offset of the MAC address and return it.
2917 * If an error occurs, use offset 0.
2918 */
2919 static uint16_t
2920 wm_check_alt_mac_addr(struct wm_softc *sc)
2921 {
2922 uint16_t myea[ETHER_ADDR_LEN / 2];
2923 uint16_t offset = NVM_OFF_MACADDR;
2924
2925 /* Try to read alternative MAC address pointer */
2926 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2927 return 0;
2928
2929	/* Check whether the pointer is valid. */
2930 if ((offset == 0x0000) || (offset == 0xffff))
2931 return 0;
2932
2933 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
2934 /*
2935	 * Check whether the alternative MAC address is valid.
2936	 * Some cards have a non-0xffff pointer but don't actually
2937	 * use an alternative MAC address.
2938	 *
2939	 * Do this by checking whether the broadcast bit is set.
2940 */
2941 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2942 if (((myea[0] & 0xff) & 0x01) == 0)
2943 return offset; /* Found */
2944
2945 /* Not found */
2946 return 0;
2947 }
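/*
 * A sketch of the validity test above (illustrative words): the low
 * byte of myea[0] is the first octet of the candidate MAC address,
 * and bit 0 of that octet is the multicast/group bit.  A real station
 * address is unicast, so the bit must be clear:
 *
 *	myea[0] = 0xb3a0 -> first octet 0xa0, unicast, accepted
 *	myea[0] = 0xb3a1 -> first octet 0xa1, group bit set, rejected
 */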
2948
2949 static int
2950 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2951 {
2952 uint16_t myea[ETHER_ADDR_LEN / 2];
2953 uint16_t offset = NVM_OFF_MACADDR;
2954 int do_invert = 0;
2955
2956 switch (sc->sc_type) {
2957 case WM_T_82580:
2958 case WM_T_I350:
2959 case WM_T_I354:
2960 /* EEPROM Top Level Partitioning */
2961 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
2962 break;
2963 case WM_T_82571:
2964 case WM_T_82575:
2965 case WM_T_82576:
2966 case WM_T_80003:
2967 case WM_T_I210:
2968 case WM_T_I211:
2969 offset = wm_check_alt_mac_addr(sc);
2970 if (offset == 0)
2971 if ((sc->sc_funcid & 0x01) == 1)
2972 do_invert = 1;
2973 break;
2974 default:
2975 if ((sc->sc_funcid & 0x01) == 1)
2976 do_invert = 1;
2977 break;
2978 }
2979
2980 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2981 myea) != 0)
2982 goto bad;
2983
2984 enaddr[0] = myea[0] & 0xff;
2985 enaddr[1] = myea[0] >> 8;
2986 enaddr[2] = myea[1] & 0xff;
2987 enaddr[3] = myea[1] >> 8;
2988 enaddr[4] = myea[2] & 0xff;
2989 enaddr[5] = myea[2] >> 8;
2990
2991 /*
2992 * Toggle the LSB of the MAC address on the second port
2993 * of some dual port cards.
2994 */
2995 if (do_invert != 0)
2996 enaddr[5] ^= 1;
2997
2998 return 0;
2999
3000 bad:
3001 return -1;
3002 }
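/*
 * Worked example of the unpacking above (illustrative address): the
 * NVM words myea[] = { 0x1100, 0x3322, 0x5544 } yield the address
 * 00:11:22:33:44:55; each 16-bit word carries two octets with the
 * earlier octet in the low byte.  With do_invert set on the second
 * port of a dual port card, enaddr[5] ^= 1 turns ...:44:55 into
 * ...:44:54.
 */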
3003
3004 /*
3005 * wm_set_ral:
3006 *
3007 *	Set an entry in the receive address list.
3008 */
3009 static void
3010 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3011 {
3012 uint32_t ral_lo, ral_hi;
3013
3014 if (enaddr != NULL) {
3015 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3016 (enaddr[3] << 24);
3017 ral_hi = enaddr[4] | (enaddr[5] << 8);
3018 ral_hi |= RAL_AV;
3019 } else {
3020 ral_lo = 0;
3021 ral_hi = 0;
3022 }
3023
3024 if (sc->sc_type >= WM_T_82544) {
3025 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3026 ral_lo);
3027 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3028 ral_hi);
3029 } else {
3030 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3031 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3032 }
3033 }
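/*
 * Packing sketch for the RAL entry written above (illustrative
 * address): for 00:11:22:33:44:55, ral_lo becomes 0x33221100 and
 * ral_hi becomes 0x00005544 | RAL_AV; the first four octets go into
 * RAL_LO least-significant-byte first, the last two into RAL_HI, and
 * RAL_AV marks the entry valid.
 */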
3034
3035 /*
3036 * wm_mchash:
3037 *
3038 * Compute the hash of the multicast address for the 4096-bit
3039 * multicast filter.
3040 */
3041 static uint32_t
3042 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3043 {
3044 static const int lo_shift[4] = { 4, 3, 2, 0 };
3045 static const int hi_shift[4] = { 4, 5, 6, 8 };
3046 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3047 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3048 uint32_t hash;
3049
3050 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3051 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3052 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3053 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3054 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3055 return (hash & 0x3ff);
3056 }
3057 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3058 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3059
3060 return (hash & 0xfff);
3061 }
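/*
 * Worked example (illustrative address, sc_mchash_type = 0): for a
 * multicast address ending in ...:12:34, the non-ICH path computes
 *
 *	hash = (0x12 >> 4) | (0x34 << 4) = 0x001 | 0x340 = 0x341
 *
 * masked to 12 bits for the 4096-bit filter.  On ICH/PCH parts the
 * shifts differ and the result is masked to 10 bits instead, matching
 * their smaller 1024-bit table.
 */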
3062
3063 /*
3064 * wm_set_filter:
3065 *
3066 * Set up the receive filter.
3067 */
3068 static void
3069 wm_set_filter(struct wm_softc *sc)
3070 {
3071 struct ethercom *ec = &sc->sc_ethercom;
3072 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3073 struct ether_multi *enm;
3074 struct ether_multistep step;
3075 bus_addr_t mta_reg;
3076 uint32_t hash, reg, bit;
3077 int i, size;
3078
3079 if (sc->sc_type >= WM_T_82544)
3080 mta_reg = WMREG_CORDOVA_MTA;
3081 else
3082 mta_reg = WMREG_MTA;
3083
3084 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3085
3086 if (ifp->if_flags & IFF_BROADCAST)
3087 sc->sc_rctl |= RCTL_BAM;
3088 if (ifp->if_flags & IFF_PROMISC) {
3089 sc->sc_rctl |= RCTL_UPE;
3090 goto allmulti;
3091 }
3092
3093 /*
3094 * Set the station address in the first RAL slot, and
3095 * clear the remaining slots.
3096 */
3097 if (sc->sc_type == WM_T_ICH8)
3098		size = WM_RAL_TABSIZE_ICH8 - 1;
3099 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3100 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3101 || (sc->sc_type == WM_T_PCH_LPT))
3102 size = WM_RAL_TABSIZE_ICH8;
3103 else if (sc->sc_type == WM_T_82575)
3104 size = WM_RAL_TABSIZE_82575;
3105 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3106 size = WM_RAL_TABSIZE_82576;
3107 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3108 size = WM_RAL_TABSIZE_I350;
3109 else
3110 size = WM_RAL_TABSIZE;
3111 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3112 for (i = 1; i < size; i++)
3113 wm_set_ral(sc, NULL, i);
3114
3115 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3116 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3117 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
3118 size = WM_ICH8_MC_TABSIZE;
3119 else
3120 size = WM_MC_TABSIZE;
3121 /* Clear out the multicast table. */
3122 for (i = 0; i < size; i++)
3123 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3124
3125 ETHER_FIRST_MULTI(step, ec, enm);
3126 while (enm != NULL) {
3127 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3128 /*
3129 * We must listen to a range of multicast addresses.
3130 * For now, just accept all multicasts, rather than
3131 * trying to set only those filter bits needed to match
3132 * the range. (At this time, the only use of address
3133 * ranges is for IP multicast routing, for which the
3134 * range is big enough to require all bits set.)
3135 */
3136 goto allmulti;
3137 }
3138
3139 hash = wm_mchash(sc, enm->enm_addrlo);
3140
3141 reg = (hash >> 5);
3142 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3143 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3144 || (sc->sc_type == WM_T_PCH2)
3145 || (sc->sc_type == WM_T_PCH_LPT))
3146 reg &= 0x1f;
3147 else
3148 reg &= 0x7f;
3149 bit = hash & 0x1f;
3150
3151 hash = CSR_READ(sc, mta_reg + (reg << 2));
3152 hash |= 1U << bit;
3153
3154 /* XXX Hardware bug?? */
3155 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3156 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3157 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3158 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3159 } else
3160 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3161
3162 ETHER_NEXT_MULTI(step, enm);
3163 }
3164
3165 ifp->if_flags &= ~IFF_ALLMULTI;
3166 goto setit;
3167
3168 allmulti:
3169 ifp->if_flags |= IFF_ALLMULTI;
3170 sc->sc_rctl |= RCTL_MPE;
3171
3172 setit:
3173 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3174 }
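/*
 * Indexing sketch for the multicast table update above: the hash
 * selects a single bit in an array of 32-bit MTA words, so
 * reg = hash >> 5 picks the word and bit = hash & 0x1f picks the bit
 * within it.  Continuing the wm_mchash() example, hash 0x341 lands in
 * word 0x1a, bit 1.
 */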
3175
3176 /* Reset and init related */
3177
3178 static void
3179 wm_set_vlan(struct wm_softc *sc)
3180 {
3181 /* Deal with VLAN enables. */
3182 if (VLAN_ATTACHED(&sc->sc_ethercom))
3183 sc->sc_ctrl |= CTRL_VME;
3184 else
3185 sc->sc_ctrl &= ~CTRL_VME;
3186
3187 /* Write the control registers. */
3188 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3189 }
3190
3191 static void
3192 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3193 {
3194 uint32_t gcr;
3195 pcireg_t ctrl2;
3196
3197 gcr = CSR_READ(sc, WMREG_GCR);
3198
3199 /* Only take action if timeout value is defaulted to 0 */
3200 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3201 goto out;
3202
3203 if ((gcr & GCR_CAP_VER2) == 0) {
3204 gcr |= GCR_CMPL_TMOUT_10MS;
3205 goto out;
3206 }
3207
3208 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3209 sc->sc_pcixe_capoff + PCIE_DCSR2);
3210 ctrl2 |= WM_PCIE_DCSR2_16MS;
3211 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3212 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3213
3214 out:
3215 /* Disable completion timeout resend */
3216 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3217
3218 CSR_WRITE(sc, WMREG_GCR, gcr);
3219 }
3220
3221 void
3222 wm_get_auto_rd_done(struct wm_softc *sc)
3223 {
3224 int i;
3225
3226 /* wait for eeprom to reload */
3227 switch (sc->sc_type) {
3228 case WM_T_82571:
3229 case WM_T_82572:
3230 case WM_T_82573:
3231 case WM_T_82574:
3232 case WM_T_82583:
3233 case WM_T_82575:
3234 case WM_T_82576:
3235 case WM_T_82580:
3236 case WM_T_I350:
3237 case WM_T_I354:
3238 case WM_T_I210:
3239 case WM_T_I211:
3240 case WM_T_80003:
3241 case WM_T_ICH8:
3242 case WM_T_ICH9:
3243 for (i = 0; i < 10; i++) {
3244 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3245 break;
3246 delay(1000);
3247 }
3248 if (i == 10) {
3249 log(LOG_ERR, "%s: auto read from eeprom failed to "
3250 "complete\n", device_xname(sc->sc_dev));
3251 }
3252 break;
3253 default:
3254 break;
3255 }
3256 }
3257
3258 void
3259 wm_lan_init_done(struct wm_softc *sc)
3260 {
3261 uint32_t reg = 0;
3262 int i;
3263
3264 /* wait for eeprom to reload */
3265 switch (sc->sc_type) {
3266 case WM_T_ICH10:
3267 case WM_T_PCH:
3268 case WM_T_PCH2:
3269 case WM_T_PCH_LPT:
3270 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3271 reg = CSR_READ(sc, WMREG_STATUS);
3272 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3273 break;
3274 delay(100);
3275 }
3276 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3277 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3278 "complete\n", device_xname(sc->sc_dev), __func__);
3279 }
3280 break;
3281 default:
3282 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3283 __func__);
3284 break;
3285 }
3286
3287 reg &= ~STATUS_LAN_INIT_DONE;
3288 CSR_WRITE(sc, WMREG_STATUS, reg);
3289 }
3290
3291 void
3292 wm_get_cfg_done(struct wm_softc *sc)
3293 {
3294 int mask;
3295 uint32_t reg;
3296 int i;
3297
3298 /* wait for eeprom to reload */
3299 switch (sc->sc_type) {
3300 case WM_T_82542_2_0:
3301 case WM_T_82542_2_1:
3302 /* null */
3303 break;
3304 case WM_T_82543:
3305 case WM_T_82544:
3306 case WM_T_82540:
3307 case WM_T_82545:
3308 case WM_T_82545_3:
3309 case WM_T_82546:
3310 case WM_T_82546_3:
3311 case WM_T_82541:
3312 case WM_T_82541_2:
3313 case WM_T_82547:
3314 case WM_T_82547_2:
3315 case WM_T_82573:
3316 case WM_T_82574:
3317 case WM_T_82583:
3318 /* generic */
3319 delay(10*1000);
3320 break;
3321 case WM_T_80003:
3322 case WM_T_82571:
3323 case WM_T_82572:
3324 case WM_T_82575:
3325 case WM_T_82576:
3326 case WM_T_82580:
3327 case WM_T_I350:
3328 case WM_T_I354:
3329 case WM_T_I210:
3330 case WM_T_I211:
3331 if (sc->sc_type == WM_T_82571) {
3332 /* Only 82571 shares port 0 */
3333 mask = EEMNGCTL_CFGDONE_0;
3334 } else
3335 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3336 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3337 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3338 break;
3339 delay(1000);
3340 }
3341 if (i >= WM_PHY_CFG_TIMEOUT) {
3342 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3343 device_xname(sc->sc_dev), __func__));
3344 }
3345 break;
3346 case WM_T_ICH8:
3347 case WM_T_ICH9:
3348 case WM_T_ICH10:
3349 case WM_T_PCH:
3350 case WM_T_PCH2:
3351 case WM_T_PCH_LPT:
3352 delay(10*1000);
3353 if (sc->sc_type >= WM_T_ICH10)
3354 wm_lan_init_done(sc);
3355 else
3356 wm_get_auto_rd_done(sc);
3357
3358 reg = CSR_READ(sc, WMREG_STATUS);
3359 if ((reg & STATUS_PHYRA) != 0)
3360 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3361 break;
3362 default:
3363 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3364 __func__);
3365 break;
3366 }
3367 }
3368
3369 /* Init hardware bits */
3370 void
3371 wm_initialize_hardware_bits(struct wm_softc *sc)
3372 {
3373 uint32_t tarc0, tarc1, reg;
3374
3375	/* For the 82571 variants, 80003 and the ICHs */
3376 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3377 || (sc->sc_type >= WM_T_80003)) {
3378
3379 /* Transmit Descriptor Control 0 */
3380 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3381 reg |= TXDCTL_COUNT_DESC;
3382 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3383
3384 /* Transmit Descriptor Control 1 */
3385 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3386 reg |= TXDCTL_COUNT_DESC;
3387 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3388
3389 /* TARC0 */
3390 tarc0 = CSR_READ(sc, WMREG_TARC0);
3391 switch (sc->sc_type) {
3392 case WM_T_82571:
3393 case WM_T_82572:
3394 case WM_T_82573:
3395 case WM_T_82574:
3396 case WM_T_82583:
3397 case WM_T_80003:
3398 /* Clear bits 30..27 */
3399 tarc0 &= ~__BITS(30, 27);
3400 break;
3401 default:
3402 break;
3403 }
3404
3405 switch (sc->sc_type) {
3406 case WM_T_82571:
3407 case WM_T_82572:
3408 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3409
3410 tarc1 = CSR_READ(sc, WMREG_TARC1);
3411 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3412 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3413 /* 8257[12] Errata No.7 */
3414			tarc1 |= __BIT(22); /* TARC1 bit 22 */
3415
3416 /* TARC1 bit 28 */
3417 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3418 tarc1 &= ~__BIT(28);
3419 else
3420 tarc1 |= __BIT(28);
3421 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3422
3423 /*
3424 * 8257[12] Errata No.13
3425			 * Disable Dynamic Clock Gating.
3426 */
3427 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3428 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3429 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3430 break;
3431 case WM_T_82573:
3432 case WM_T_82574:
3433 case WM_T_82583:
3434 if ((sc->sc_type == WM_T_82574)
3435 || (sc->sc_type == WM_T_82583))
3436 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3437
3438 /* Extended Device Control */
3439 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3440 reg &= ~__BIT(23); /* Clear bit 23 */
3441 reg |= __BIT(22); /* Set bit 22 */
3442 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3443
3444 /* Device Control */
3445 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3446 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3447
3448 /* PCIe Control Register */
3449 /*
3450 * 82573 Errata (unknown).
3451 *
3452 * 82574 Errata 25 and 82583 Errata 12
3453 * "Dropped Rx Packets":
3454			 * NVM image version 2.1.4 and newer doesn't have this bug.
3455 */
3456 reg = CSR_READ(sc, WMREG_GCR);
3457 reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
3458 CSR_WRITE(sc, WMREG_GCR, reg);
3459
3460 if ((sc->sc_type == WM_T_82574)
3461 || (sc->sc_type == WM_T_82583)) {
3462 /*
3463 * Document says this bit must be set for
3464 * proper operation.
3465 */
3466 reg = CSR_READ(sc, WMREG_GCR);
3467 reg |= __BIT(22);
3468 CSR_WRITE(sc, WMREG_GCR, reg);
3469
3470 /*
3471				 * Apply a workaround for the hardware erratum
3472				 * documented in the errata docs. It fixes an
3473				 * issue where error-prone or unreliable PCIe
3474				 * completions occur, particularly with ASPM
3475				 * enabled. Without the fix, the issue can
3476				 * cause Tx timeouts.
3477 */
3478 reg = CSR_READ(sc, WMREG_GCR2);
3479 reg |= __BIT(0);
3480 CSR_WRITE(sc, WMREG_GCR2, reg);
3481 }
3482 break;
3483 case WM_T_80003:
3484 /* TARC0 */
3485 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3486 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3487				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3488
3489 /* TARC1 bit 28 */
3490 tarc1 = CSR_READ(sc, WMREG_TARC1);
3491 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3492 tarc1 &= ~__BIT(28);
3493 else
3494 tarc1 |= __BIT(28);
3495 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3496 break;
3497 case WM_T_ICH8:
3498 case WM_T_ICH9:
3499 case WM_T_ICH10:
3500 case WM_T_PCH:
3501 case WM_T_PCH2:
3502 case WM_T_PCH_LPT:
3503 /* TARC 0 */
3504 if (sc->sc_type == WM_T_ICH8) {
3505 /* Set TARC0 bits 29 and 28 */
3506 tarc0 |= __BITS(29, 28);
3507 }
3508 /* Set TARC0 bits 23,24,26,27 */
3509 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3510
3511 /* CTRL_EXT */
3512 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3513 reg |= __BIT(22); /* Set bit 22 */
3514 /*
3515 * Enable PHY low-power state when MAC is at D3
3516 * w/o WoL
3517 */
3518 if (sc->sc_type >= WM_T_PCH)
3519 reg |= CTRL_EXT_PHYPDEN;
3520 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3521
3522 /* TARC1 */
3523 tarc1 = CSR_READ(sc, WMREG_TARC1);
3524 /* bit 28 */
3525 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3526 tarc1 &= ~__BIT(28);
3527 else
3528 tarc1 |= __BIT(28);
3529 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3530 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3531
3532 /* Device Status */
3533 if (sc->sc_type == WM_T_ICH8) {
3534 reg = CSR_READ(sc, WMREG_STATUS);
3535 reg &= ~__BIT(31);
3536 CSR_WRITE(sc, WMREG_STATUS, reg);
3537
3538 }
3539
3540 /*
3541 			 * To work around a descriptor data corruption issue
3542 			 * seen with NFS v2 UDP traffic, simply disable the
3543 			 * NFS filtering capability.
3544 */
3545 reg = CSR_READ(sc, WMREG_RFCTL);
3546 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3547 CSR_WRITE(sc, WMREG_RFCTL, reg);
3548 break;
3549 default:
3550 break;
3551 }
3552 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3553
3554 /*
3555 * 8257[12] Errata No.52 and some others.
3556 		 * Avoid the RSS Hash Value bug.
3557 */
3558 switch (sc->sc_type) {
3559 case WM_T_82571:
3560 case WM_T_82572:
3561 case WM_T_82573:
3562 case WM_T_80003:
3563 case WM_T_ICH8:
3564 reg = CSR_READ(sc, WMREG_RFCTL);
3565 reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
3566 CSR_WRITE(sc, WMREG_RFCTL, reg);
3567 break;
3568 default:
3569 break;
3570 }
3571 }
3572 }
3573
3574 static uint32_t
3575 wm_rxpbs_adjust_82580(uint32_t val)
3576 {
3577 uint32_t rv = 0;
3578
3579 if (val < __arraycount(wm_82580_rxpbs_table))
3580 rv = wm_82580_rxpbs_table[val];
3581
3582 return rv;
3583 }
3584
3585 /*
3586 * wm_reset:
3587 *
3588  *	Reset the chip to a known state.
3589 */
3590 static void
3591 wm_reset(struct wm_softc *sc)
3592 {
3593 int phy_reset = 0;
3594 int i, error = 0;
3595 uint32_t reg, mask;
3596
3597 /*
3598 * Allocate on-chip memory according to the MTU size.
3599 * The Packet Buffer Allocation register must be written
3600 * before the chip is reset.
3601 */
3602 switch (sc->sc_type) {
3603 case WM_T_82547:
3604 case WM_T_82547_2:
3605 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3606 PBA_22K : PBA_30K;
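		/*
		 * The Tx FIFO gets whatever remains of the 40KB packet
		 * buffer after the Rx allocation chosen above, as computed
		 * from (PBA_40K - sc_pba) below.
		 */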
3607 for (i = 0; i < sc->sc_ntxqueues; i++) {
3608 struct wm_txqueue *txq = &sc->sc_txq[i];
3609 txq->txq_fifo_head = 0;
3610 txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3611 txq->txq_fifo_size =
3612 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3613 txq->txq_fifo_stall = 0;
3614 }
3615 break;
3616 case WM_T_82571:
3617 case WM_T_82572:
3618 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
3619 case WM_T_80003:
3620 sc->sc_pba = PBA_32K;
3621 break;
3622 case WM_T_82573:
3623 sc->sc_pba = PBA_12K;
3624 break;
3625 case WM_T_82574:
3626 case WM_T_82583:
3627 sc->sc_pba = PBA_20K;
3628 break;
3629 case WM_T_82576:
3630 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3631 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3632 break;
3633 case WM_T_82580:
3634 case WM_T_I350:
3635 case WM_T_I354:
3636 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3637 break;
3638 case WM_T_I210:
3639 case WM_T_I211:
3640 sc->sc_pba = PBA_34K;
3641 break;
3642 case WM_T_ICH8:
3643 /* Workaround for a bit corruption issue in FIFO memory */
3644 sc->sc_pba = PBA_8K;
3645 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3646 break;
3647 case WM_T_ICH9:
3648 case WM_T_ICH10:
3649 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3650 PBA_14K : PBA_10K;
3651 break;
3652 case WM_T_PCH:
3653 case WM_T_PCH2:
3654 case WM_T_PCH_LPT:
3655 sc->sc_pba = PBA_26K;
3656 break;
3657 default:
3658 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3659 PBA_40K : PBA_48K;
3660 break;
3661 }
3662 /*
3663 	 * Only old or non-multiqueue devices have the PBA register.
3664 * XXX Need special handling for 82575.
3665 */
3666 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3667 || (sc->sc_type == WM_T_82575))
3668 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3669
3670 /* Prevent the PCI-E bus from sticking */
3671 if (sc->sc_flags & WM_F_PCIE) {
3672 int timeout = 800;
3673
3674 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3675 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3676
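		/*
		 * Wait up to 80ms (800 iterations * 100us) for pending
		 * master requests to complete.
		 */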
3677 while (timeout--) {
3678 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3679 == 0)
3680 break;
3681 delay(100);
3682 }
3683 }
3684
3685 /* Set the completion timeout for interface */
3686 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3687 || (sc->sc_type == WM_T_82580)
3688 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3689 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3690 wm_set_pcie_completion_timeout(sc);
3691
3692 /* Clear interrupt */
3693 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3694 if (sc->sc_nintrs > 1) {
3695 if (sc->sc_type != WM_T_82574) {
3696 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3697 CSR_WRITE(sc, WMREG_EIAC, 0);
3698 } else {
3699 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3700 }
3701 }
3702
3703 /* Stop the transmit and receive processes. */
3704 CSR_WRITE(sc, WMREG_RCTL, 0);
3705 sc->sc_rctl &= ~RCTL_EN;
3706 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3707 CSR_WRITE_FLUSH(sc);
3708
3709 /* XXX set_tbi_sbp_82543() */
3710
3711 delay(10*1000);
3712
3713 /* Must acquire the MDIO ownership before MAC reset */
3714 switch (sc->sc_type) {
3715 case WM_T_82573:
3716 case WM_T_82574:
3717 case WM_T_82583:
3718 error = wm_get_hw_semaphore_82573(sc);
3719 break;
3720 default:
3721 break;
3722 }
3723
3724 /*
3725 * 82541 Errata 29? & 82547 Errata 28?
3726 	 * See also the description of the PHY_RST bit in the CTRL
3727 	 * register in 8254x_GBe_SDM.pdf.
3728 */
3729 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3730 CSR_WRITE(sc, WMREG_CTRL,
3731 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3732 CSR_WRITE_FLUSH(sc);
3733 delay(5000);
3734 }
3735
3736 switch (sc->sc_type) {
3737 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3738 case WM_T_82541:
3739 case WM_T_82541_2:
3740 case WM_T_82547:
3741 case WM_T_82547_2:
3742 /*
3743 * On some chipsets, a reset through a memory-mapped write
3744 * cycle can cause the chip to reset before completing the
3745 		 * write cycle. This causes a major headache that can be
3746 * avoided by issuing the reset via indirect register writes
3747 * through I/O space.
3748 *
3749 * So, if we successfully mapped the I/O BAR at attach time,
3750 * use that. Otherwise, try our luck with a memory-mapped
3751 * reset.
3752 */
3753 if (sc->sc_flags & WM_F_IOH_VALID)
3754 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3755 else
3756 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3757 break;
3758 case WM_T_82545_3:
3759 case WM_T_82546_3:
3760 /* Use the shadow control register on these chips. */
3761 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3762 break;
3763 case WM_T_80003:
3764 mask = swfwphysem[sc->sc_funcid];
3765 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3766 wm_get_swfw_semaphore(sc, mask);
3767 CSR_WRITE(sc, WMREG_CTRL, reg);
3768 wm_put_swfw_semaphore(sc, mask);
3769 break;
3770 case WM_T_ICH8:
3771 case WM_T_ICH9:
3772 case WM_T_ICH10:
3773 case WM_T_PCH:
3774 case WM_T_PCH2:
3775 case WM_T_PCH_LPT:
3776 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3777 if (wm_check_reset_block(sc) == 0) {
3778 /*
3779 * Gate automatic PHY configuration by hardware on
3780 * non-managed 82579
3781 */
3782 if ((sc->sc_type == WM_T_PCH2)
3783 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3784 != 0))
3785 wm_gate_hw_phy_config_ich8lan(sc, 1);
3786
3787
3788 reg |= CTRL_PHY_RESET;
3789 phy_reset = 1;
3790 }
3791 wm_get_swfwhw_semaphore(sc);
3792 CSR_WRITE(sc, WMREG_CTRL, reg);
3793 		/* Don't insert a completion barrier during reset */
3794 delay(20*1000);
3795 wm_put_swfwhw_semaphore(sc);
3796 break;
3797 case WM_T_82580:
3798 case WM_T_I350:
3799 case WM_T_I354:
3800 case WM_T_I210:
3801 case WM_T_I211:
3802 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3803 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3804 CSR_WRITE_FLUSH(sc);
3805 delay(5000);
3806 break;
3807 case WM_T_82542_2_0:
3808 case WM_T_82542_2_1:
3809 case WM_T_82543:
3810 case WM_T_82540:
3811 case WM_T_82545:
3812 case WM_T_82546:
3813 case WM_T_82571:
3814 case WM_T_82572:
3815 case WM_T_82573:
3816 case WM_T_82574:
3817 case WM_T_82575:
3818 case WM_T_82576:
3819 case WM_T_82583:
3820 default:
3821 /* Everything else can safely use the documented method. */
3822 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3823 break;
3824 }
3825
3826 /* Must release the MDIO ownership after MAC reset */
3827 switch (sc->sc_type) {
3828 case WM_T_82573:
3829 case WM_T_82574:
3830 case WM_T_82583:
3831 if (error == 0)
3832 wm_put_hw_semaphore_82573(sc);
3833 break;
3834 default:
3835 break;
3836 }
3837
3838 if (phy_reset != 0)
3839 wm_get_cfg_done(sc);
3840
3841 /* reload EEPROM */
3842 switch (sc->sc_type) {
3843 case WM_T_82542_2_0:
3844 case WM_T_82542_2_1:
3845 case WM_T_82543:
3846 case WM_T_82544:
3847 delay(10);
3848 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3849 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3850 CSR_WRITE_FLUSH(sc);
3851 delay(2000);
3852 break;
3853 case WM_T_82540:
3854 case WM_T_82545:
3855 case WM_T_82545_3:
3856 case WM_T_82546:
3857 case WM_T_82546_3:
3858 delay(5*1000);
3859 /* XXX Disable HW ARPs on ASF enabled adapters */
3860 break;
3861 case WM_T_82541:
3862 case WM_T_82541_2:
3863 case WM_T_82547:
3864 case WM_T_82547_2:
3865 delay(20000);
3866 /* XXX Disable HW ARPs on ASF enabled adapters */
3867 break;
3868 case WM_T_82571:
3869 case WM_T_82572:
3870 case WM_T_82573:
3871 case WM_T_82574:
3872 case WM_T_82583:
3873 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3874 delay(10);
3875 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3876 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3877 CSR_WRITE_FLUSH(sc);
3878 }
3879 /* check EECD_EE_AUTORD */
3880 wm_get_auto_rd_done(sc);
3881 /*
3882 		 * PHY configuration from the NVM starts just after
3883 		 * EECD_AUTO_RD is set.
3884 */
3885 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3886 || (sc->sc_type == WM_T_82583))
3887 delay(25*1000);
3888 break;
3889 case WM_T_82575:
3890 case WM_T_82576:
3891 case WM_T_82580:
3892 case WM_T_I350:
3893 case WM_T_I354:
3894 case WM_T_I210:
3895 case WM_T_I211:
3896 case WM_T_80003:
3897 /* check EECD_EE_AUTORD */
3898 wm_get_auto_rd_done(sc);
3899 break;
3900 case WM_T_ICH8:
3901 case WM_T_ICH9:
3902 case WM_T_ICH10:
3903 case WM_T_PCH:
3904 case WM_T_PCH2:
3905 case WM_T_PCH_LPT:
3906 break;
3907 default:
3908 panic("%s: unknown type\n", __func__);
3909 }
3910
3911 /* Check whether EEPROM is present or not */
3912 switch (sc->sc_type) {
3913 case WM_T_82575:
3914 case WM_T_82576:
3915 case WM_T_82580:
3916 case WM_T_I350:
3917 case WM_T_I354:
3918 case WM_T_ICH8:
3919 case WM_T_ICH9:
3920 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3921 /* Not found */
3922 sc->sc_flags |= WM_F_EEPROM_INVALID;
3923 if (sc->sc_type == WM_T_82575)
3924 wm_reset_init_script_82575(sc);
3925 }
3926 break;
3927 default:
3928 break;
3929 }
3930
3931 if ((sc->sc_type == WM_T_82580)
3932 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3933 /* clear global device reset status bit */
3934 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3935 }
3936
3937 /* Clear any pending interrupt events. */
3938 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3939 reg = CSR_READ(sc, WMREG_ICR);
3940 if (sc->sc_nintrs > 1) {
3941 if (sc->sc_type != WM_T_82574) {
3942 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3943 CSR_WRITE(sc, WMREG_EIAC, 0);
3944 } else
3945 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3946 }
3947
3948 /* reload sc_ctrl */
3949 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3950
3951 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
3952 wm_set_eee_i350(sc);
3953
3954 /* dummy read from WUC */
3955 if (sc->sc_type == WM_T_PCH)
3956 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3957 /*
3958 * For PCH, this write will make sure that any noise will be detected
3959 * as a CRC error and be dropped rather than show up as a bad packet
3960 	 * to the DMA engine.
3961 */
3962 if (sc->sc_type == WM_T_PCH)
3963 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3964
3965 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3966 CSR_WRITE(sc, WMREG_WUC, 0);
3967
3968 wm_reset_mdicnfg_82580(sc);
3969
3970 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
3971 wm_pll_workaround_i210(sc);
3972 }
3973
3974 /*
3975 * wm_add_rxbuf:
3976 *
3977  *	Add a receive buffer to the indicated descriptor.
3978 */
3979 static int
3980 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
3981 {
3982 struct wm_softc *sc = rxq->rxq_sc;
3983 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
3984 struct mbuf *m;
3985 int error;
3986
3987 KASSERT(WM_RX_LOCKED(rxq));
3988
3989 MGETHDR(m, M_DONTWAIT, MT_DATA);
3990 if (m == NULL)
3991 return ENOBUFS;
3992
3993 MCLGET(m, M_DONTWAIT);
3994 if ((m->m_flags & M_EXT) == 0) {
3995 m_freem(m);
3996 return ENOBUFS;
3997 }
3998
3999 if (rxs->rxs_mbuf != NULL)
4000 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4001
4002 rxs->rxs_mbuf = m;
4003
4004 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4005 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4006 BUS_DMA_READ|BUS_DMA_NOWAIT);
4007 if (error) {
4008 /* XXX XXX XXX */
4009 aprint_error_dev(sc->sc_dev,
4010 "unable to load rx DMA map %d, error = %d\n",
4011 idx, error);
4012 panic("wm_add_rxbuf");
4013 }
4014
4015 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4016 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4017
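	/*
	 * On newer (multiqueue) devices, RDT must only be written while
	 * the receiver is enabled, so defer the descriptor write until
	 * then; see the matching logic in wm_init_locked().
	 */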
4018 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4019 if ((sc->sc_rctl & RCTL_EN) != 0)
4020 wm_init_rxdesc(rxq, idx);
4021 } else
4022 wm_init_rxdesc(rxq, idx);
4023
4024 return 0;
4025 }
4026
4027 /*
4028 * wm_rxdrain:
4029 *
4030 * Drain the receive queue.
4031 */
4032 static void
4033 wm_rxdrain(struct wm_rxqueue *rxq)
4034 {
4035 struct wm_softc *sc = rxq->rxq_sc;
4036 struct wm_rxsoft *rxs;
4037 int i;
4038
4039 KASSERT(WM_RX_LOCKED(rxq));
4040
4041 for (i = 0; i < WM_NRXDESC; i++) {
4042 rxs = &rxq->rxq_soft[i];
4043 if (rxs->rxs_mbuf != NULL) {
4044 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4045 m_freem(rxs->rxs_mbuf);
4046 rxs->rxs_mbuf = NULL;
4047 }
4048 }
4049 }
4050
4051
4052 /*
4053  * XXX copied from FreeBSD's sys/net/rss_config.c
4054 */
4055 /*
4056 * RSS secret key, intended to prevent attacks on load-balancing. Its
4057 * effectiveness may be limited by algorithm choice and available entropy
4058 * during the boot.
4059 *
4060 * XXXRW: And that we don't randomize it yet!
4061 *
4062 * This is the default Microsoft RSS specification key which is also
4063 * the Chelsio T5 firmware default key.
4064 */
4065 #define RSS_KEYSIZE 40
4066 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
4067 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
4068 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
4069 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
4070 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
4071 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
4072 };
4073
4074 /*
4075  * Caller must pass an array of size sizeof(wm_rss_key).
4076  *
4077  * XXX
4078  * As if_ixgbe may use this function, it should not be an
4079  * if_wm specific function.
4080 */
4081 static void
4082 wm_rss_getkey(uint8_t *key)
4083 {
4084
4085 memcpy(key, wm_rss_key, sizeof(wm_rss_key));
4086 }
4087
4088 /*
4089 * Setup registers for RSS.
4090 *
4091  * XXX no VMDq support yet
4092 */
4093 static void
4094 wm_init_rss(struct wm_softc *sc)
4095 {
4096 uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4097 int i;
4098
4099 CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
4100
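	/*
	 * Spread the redirection table entries over the RX queues in
	 * round-robin fashion: entry i maps to queue (i % sc_nrxqueues).
	 */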
4101 for (i = 0; i < RETA_NUM_ENTRIES; i++) {
4102 int qid, reta_ent;
4103
4104 qid = i % sc->sc_nrxqueues;
4105 switch(sc->sc_type) {
4106 case WM_T_82574:
4107 reta_ent = __SHIFTIN(qid,
4108 RETA_ENT_QINDEX_MASK_82574);
4109 break;
4110 case WM_T_82575:
4111 reta_ent = __SHIFTIN(qid,
4112 RETA_ENT_QINDEX1_MASK_82575);
4113 break;
4114 default:
4115 reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
4116 break;
4117 }
4118
4119 reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
4120 reta_reg &= ~RETA_ENTRY_MASK_Q(i);
4121 reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
4122 CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
4123 }
4124
4125 wm_rss_getkey((uint8_t *)rss_key);
4126 for (i = 0; i < RSSRK_NUM_REGS; i++)
4127 CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
4128
4129 if (sc->sc_type == WM_T_82574)
4130 mrqc = MRQC_ENABLE_RSS_MQ_82574;
4131 else
4132 mrqc = MRQC_ENABLE_RSS_MQ;
4133
4134 /* XXXX
4135 	 * The same as FreeBSD's igb.
4136 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
4137 */
4138 mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
4139 mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
4140 mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
4141 mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
4142
4143 CSR_WRITE(sc, WMREG_MRQC, mrqc);
4144 }
4145
4146 #ifdef WM_MSI_MSIX
4147
4148 /*
4149  * Adjust the TX and RX queue numbers which the system actually uses.
4150  *
4151  * The numbers are affected by the parameters below:
4152  *     - The number of hardware queues
4153 * - The number of MSI-X vectors (= "nvectors" argument)
4154 * - ncpu
4155 */
4156 static void
4157 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
4158 {
4159 int hw_ntxqueues, hw_nrxqueues;
4160
4161 if (nvectors < 3) {
4162 sc->sc_ntxqueues = 1;
4163 sc->sc_nrxqueues = 1;
4164 return;
4165 }
4166
4167 switch(sc->sc_type) {
4168 case WM_T_82572:
4169 hw_ntxqueues = 2;
4170 hw_nrxqueues = 2;
4171 break;
4172 case WM_T_82574:
4173 hw_ntxqueues = 2;
4174 hw_nrxqueues = 2;
4175 break;
4176 case WM_T_82575:
4177 hw_ntxqueues = 4;
4178 hw_nrxqueues = 4;
4179 break;
4180 case WM_T_82576:
4181 hw_ntxqueues = 16;
4182 hw_nrxqueues = 16;
4183 break;
4184 case WM_T_82580:
4185 case WM_T_I350:
4186 case WM_T_I354:
4187 hw_ntxqueues = 8;
4188 hw_nrxqueues = 8;
4189 break;
4190 case WM_T_I210:
4191 hw_ntxqueues = 4;
4192 hw_nrxqueues = 4;
4193 break;
4194 case WM_T_I211:
4195 hw_ntxqueues = 2;
4196 hw_nrxqueues = 2;
4197 break;
4198 /*
4199 	 * As the Ethernet controllers below do not support MSI-X,
4200 	 * this driver does not use multiqueue on them.
4201 * - WM_T_80003
4202 * - WM_T_ICH8
4203 * - WM_T_ICH9
4204 * - WM_T_ICH10
4205 * - WM_T_PCH
4206 * - WM_T_PCH2
4207 * - WM_T_PCH_LPT
4208 */
4209 default:
4210 hw_ntxqueues = 1;
4211 hw_nrxqueues = 1;
4212 break;
4213 }
4214
4215 /*
4216 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
4217 	 * the number of queues actually used.
4218 	 *
4219 	 * XXX
4220 	 * Currently, we separate TX queue interrupts and RX queue interrupts.
4221 	 * However, the number of MSI-X vectors on recent controllers (such as
4222 	 * the I354) assumes that drivers bundle a TX queue interrupt and an RX
4223 	 * queue interrupt into one interrupt; e.g. FreeBSD's igb handles
4224 	 * interrupts in such a way.
4225 */
4226 if (nvectors < hw_ntxqueues + hw_nrxqueues + 1) {
4227 sc->sc_ntxqueues = (nvectors - 1) / 2;
4228 sc->sc_nrxqueues = (nvectors - 1) / 2;
4229 } else {
4230 sc->sc_ntxqueues = hw_ntxqueues;
4231 sc->sc_nrxqueues = hw_nrxqueues;
4232 }
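	/*
	 * Worked example: an I350 (8 TX + 8 RX hardware queues) probed
	 * with 5 MSI-X vectors gets (5 - 1) / 2 = 2 TX and 2 RX queues
	 * here, before the ncpu limit and the TX override below apply.
	 */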
4233
4234 /*
4235 	 * As more queues than CPUs cannot improve scaling, we limit
4236 	 * the number of queues actually used.
4237 */
4238 if (ncpu < sc->sc_ntxqueues)
4239 sc->sc_ntxqueues = ncpu;
4240 if (ncpu < sc->sc_nrxqueues)
4241 sc->sc_nrxqueues = ncpu;
4242
4243 /* XXX Currently, this driver supports RX multiqueue only. */
4244 sc->sc_ntxqueues = 1;
4245 }
4246
4247 /*
4248  * Both single-interrupt MSI and INTx can use this function.
4249 */
4250 static int
4251 wm_setup_legacy(struct wm_softc *sc)
4252 {
4253 pci_chipset_tag_t pc = sc->sc_pc;
4254 const char *intrstr = NULL;
4255 char intrbuf[PCI_INTRSTR_LEN];
4256
4257 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
4258 sizeof(intrbuf));
4259 #ifdef WM_MPSAFE
4260 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
4261 #endif
4262 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
4263 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
4264 if (sc->sc_ihs[0] == NULL) {
4265 aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
4266 (pci_intr_type(sc->sc_intrs[0])
4267 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
4268 return ENOMEM;
4269 }
4270
4271 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4272 sc->sc_nintrs = 1;
4273 return 0;
4274 }
4275
4276 static int
4277 wm_setup_msix(struct wm_softc *sc)
4278 {
4279 void *vih;
4280 kcpuset_t *affinity;
4281 int qidx, error, intr_idx, tx_established, rx_established;
4282 pci_chipset_tag_t pc = sc->sc_pc;
4283 const char *intrstr = NULL;
4284 char intrbuf[PCI_INTRSTR_LEN];
4285 char intr_xname[INTRDEVNAMEBUF];
4286
4287 kcpuset_create(&affinity, false);
4288 intr_idx = 0;
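	/*
	 * Vector layout: one vector per TX queue, then one per RX queue,
	 * and a final vector for link state changes.
	 */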
4289
4290 /*
4291 * TX
4292 */
4293 tx_established = 0;
4294 for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
4295 struct wm_txqueue *txq = &sc->sc_txq[qidx];
4296
4297 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4298 sizeof(intrbuf));
4299 #ifdef WM_MPSAFE
4300 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4301 PCI_INTR_MPSAFE, true);
4302 #endif
4303 memset(intr_xname, 0, sizeof(intr_xname));
4304 snprintf(intr_xname, sizeof(intr_xname), "%sTX%d",
4305 device_xname(sc->sc_dev), qidx);
4306 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4307 IPL_NET, wm_txintr_msix, txq, intr_xname);
4308 if (vih == NULL) {
4309 aprint_error_dev(sc->sc_dev,
4310 "unable to establish MSI-X(for TX)%s%s\n",
4311 intrstr ? " at " : "",
4312 intrstr ? intrstr : "");
4313
4314 goto fail_0;
4315 }
4316 kcpuset_zero(affinity);
4317 /* Round-robin affinity */
4318 kcpuset_set(affinity, intr_idx % ncpu);
4319 error = interrupt_distribute(vih, affinity, NULL);
4320 if (error == 0) {
4321 aprint_normal_dev(sc->sc_dev,
4322 "for TX interrupting at %s affinity to %u\n",
4323 intrstr, intr_idx % ncpu);
4324 } else {
4325 aprint_normal_dev(sc->sc_dev,
4326 "for TX interrupting at %s\n", intrstr);
4327 }
4328 sc->sc_ihs[intr_idx] = vih;
4329 txq->txq_id = qidx;
4330 txq->txq_intr_idx = intr_idx;
4331
4332 tx_established++;
4333 intr_idx++;
4334 }
4335
4336 /*
4337 * RX
4338 */
4339 rx_established = 0;
4340 for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
4341 struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4342
4343 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4344 sizeof(intrbuf));
4345 #ifdef WM_MPSAFE
4346 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4347 PCI_INTR_MPSAFE, true);
4348 #endif
4349 memset(intr_xname, 0, sizeof(intr_xname));
4350 snprintf(intr_xname, sizeof(intr_xname), "%sRX%d",
4351 device_xname(sc->sc_dev), qidx);
4352 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4353 IPL_NET, wm_rxintr_msix, rxq, intr_xname);
4354 if (vih == NULL) {
4355 aprint_error_dev(sc->sc_dev,
4356 "unable to establish MSI-X(for RX)%s%s\n",
4357 intrstr ? " at " : "",
4358 intrstr ? intrstr : "");
4359
4360 goto fail_1;
4361 }
4362 kcpuset_zero(affinity);
4363 /* Round-robin affinity */
4364 kcpuset_set(affinity, intr_idx % ncpu);
4365 error = interrupt_distribute(vih, affinity, NULL);
4366 if (error == 0) {
4367 aprint_normal_dev(sc->sc_dev,
4368 "for RX interrupting at %s affinity to %u\n",
4369 intrstr, intr_idx % ncpu);
4370 } else {
4371 aprint_normal_dev(sc->sc_dev,
4372 "for RX interrupting at %s\n", intrstr);
4373 }
4374 sc->sc_ihs[intr_idx] = vih;
4375 rxq->rxq_id = qidx;
4376 rxq->rxq_intr_idx = intr_idx;
4377
4378 rx_established++;
4379 intr_idx++;
4380 }
4381
4382 /*
4383 * LINK
4384 */
4385 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4386 sizeof(intrbuf));
4387 #ifdef WM_MPSAFE
4388 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4389 PCI_INTR_MPSAFE, true);
4390 #endif
4391 memset(intr_xname, 0, sizeof(intr_xname));
4392 snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
4393 device_xname(sc->sc_dev));
4394 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4395 IPL_NET, wm_linkintr_msix, sc, intr_xname);
4396 if (vih == NULL) {
4397 aprint_error_dev(sc->sc_dev,
4398 "unable to establish MSI-X(for LINK)%s%s\n",
4399 intrstr ? " at " : "",
4400 intrstr ? intrstr : "");
4401
4402 goto fail_1;
4403 }
4404 	/* Keep the default affinity for the LINK interrupt */
4405 aprint_normal_dev(sc->sc_dev,
4406 "for LINK interrupting at %s\n", intrstr);
4407 sc->sc_ihs[intr_idx] = vih;
4408 sc->sc_link_intr_idx = intr_idx;
4409
4410 sc->sc_nintrs = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
4411 kcpuset_destroy(affinity);
4412 return 0;
4413
4414 fail_1:
4415 for (qidx = 0; qidx < rx_established; qidx++) {
4416 struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4417 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[rxq->rxq_intr_idx]);
4418 sc->sc_ihs[rxq->rxq_intr_idx] = NULL;
4419 }
4420 fail_0:
4421 for (qidx = 0; qidx < tx_established; qidx++) {
4422 struct wm_txqueue *txq = &sc->sc_txq[qidx];
4423 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[txq->txq_intr_idx]);
4424 sc->sc_ihs[txq->txq_intr_idx] = NULL;
4425 }
4426
4427 kcpuset_destroy(affinity);
4428 return ENOMEM;
4429 }
4430 #endif
4431
4432 /*
4433 * wm_init: [ifnet interface function]
4434 *
4435 * Initialize the interface.
4436 */
4437 static int
4438 wm_init(struct ifnet *ifp)
4439 {
4440 struct wm_softc *sc = ifp->if_softc;
4441 int ret;
4442
4443 WM_CORE_LOCK(sc);
4444 ret = wm_init_locked(ifp);
4445 WM_CORE_UNLOCK(sc);
4446
4447 return ret;
4448 }
4449
4450 static int
4451 wm_init_locked(struct ifnet *ifp)
4452 {
4453 struct wm_softc *sc = ifp->if_softc;
4454 int i, j, trynum, error = 0;
4455 uint32_t reg;
4456
4457 KASSERT(WM_CORE_LOCKED(sc));
4458 /*
4459 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4460 	 * There is a small but measurable benefit to avoiding the adjustment
4461 * of the descriptor so that the headers are aligned, for normal mtu,
4462 * on such platforms. One possibility is that the DMA itself is
4463 * slightly more efficient if the front of the entire packet (instead
4464 * of the front of the headers) is aligned.
4465 *
4466 * Note we must always set align_tweak to 0 if we are using
4467 * jumbo frames.
4468 */
4469 #ifdef __NO_STRICT_ALIGNMENT
4470 sc->sc_align_tweak = 0;
4471 #else
4472 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4473 sc->sc_align_tweak = 0;
4474 else
4475 sc->sc_align_tweak = 2;
4476 #endif /* __NO_STRICT_ALIGNMENT */
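	/*
	 * Example: with a 2 byte tweak the 14 byte Ethernet header ends
	 * at offset 16, so the IP header that follows it is 4-byte
	 * aligned.
	 */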
4477
4478 /* Cancel any pending I/O. */
4479 wm_stop_locked(ifp, 0);
4480
4481 /* update statistics before reset */
4482 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4483 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4484
4485 /* Reset the chip to a known state. */
4486 wm_reset(sc);
4487
4488 switch (sc->sc_type) {
4489 case WM_T_82571:
4490 case WM_T_82572:
4491 case WM_T_82573:
4492 case WM_T_82574:
4493 case WM_T_82583:
4494 case WM_T_80003:
4495 case WM_T_ICH8:
4496 case WM_T_ICH9:
4497 case WM_T_ICH10:
4498 case WM_T_PCH:
4499 case WM_T_PCH2:
4500 case WM_T_PCH_LPT:
4501 if (wm_check_mng_mode(sc) != 0)
4502 wm_get_hw_control(sc);
4503 break;
4504 default:
4505 break;
4506 }
4507
4508 /* Init hardware bits */
4509 wm_initialize_hardware_bits(sc);
4510
4511 /* Reset the PHY. */
4512 if (sc->sc_flags & WM_F_HAS_MII)
4513 wm_gmii_reset(sc);
4514
4515 /* Calculate (E)ITR value */
4516 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4517 sc->sc_itr = 450; /* For EITR */
4518 } else if (sc->sc_type >= WM_T_82543) {
4519 /*
4520 * Set up the interrupt throttling register (units of 256ns)
4521 * Note that a footnote in Intel's documentation says this
4522 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4523 * or 10Mbit mode. Empirically, it appears to be the case
4524 		 * or 10Mbit mode.  Empirically, this also appears to be
4525 		 * true for the 1024ns units of the other
4526 * to divide this value by 4 when the link speed is low.
4527 *
4528 * XXX implement this division at link speed change!
4529 */
4530
4531 /*
4532 * For N interrupts/sec, set this value to:
4533 * 1000000000 / (N * 256). Note that we set the
4534 * absolute and packet timer values to this value
4535 * divided by 4 to get "simple timer" behavior.
4536 */
4537
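		/*
		 * With N = 2604: 1000000000 / (2604 * 256) =~ 1500,
		 * which is the value programmed below.
		 */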
4538 sc->sc_itr = 1500; /* 2604 ints/sec */
4539 }
4540
4541 error = wm_init_txrx_queues(sc);
4542 if (error)
4543 goto out;
4544
4545 /*
4546 * Clear out the VLAN table -- we don't use it (yet).
4547 */
4548 CSR_WRITE(sc, WMREG_VET, 0);
4549 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4550 trynum = 10; /* Due to hw errata */
4551 else
4552 trynum = 1;
4553 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4554 for (j = 0; j < trynum; j++)
4555 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4556
4557 /*
4558 * Set up flow-control parameters.
4559 *
4560 * XXX Values could probably stand some tuning.
4561 */
4562 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4563 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4564 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4565 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4566 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4567 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4568 }
4569
4570 sc->sc_fcrtl = FCRTL_DFLT;
4571 if (sc->sc_type < WM_T_82543) {
4572 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4573 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4574 } else {
4575 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4576 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4577 }
4578
4579 if (sc->sc_type == WM_T_80003)
4580 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4581 else
4582 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4583
4584 /* Writes the control register. */
4585 wm_set_vlan(sc);
4586
4587 if (sc->sc_flags & WM_F_HAS_MII) {
4588 int val;
4589
4590 switch (sc->sc_type) {
4591 case WM_T_80003:
4592 case WM_T_ICH8:
4593 case WM_T_ICH9:
4594 case WM_T_ICH10:
4595 case WM_T_PCH:
4596 case WM_T_PCH2:
4597 case WM_T_PCH_LPT:
4598 /*
4599 * Set the mac to wait the maximum time between each
4600 * iteration and increase the max iterations when
4601 * polling the phy; this fixes erroneous timeouts at
4602 * 10Mbps.
4603 */
4604 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4605 0xFFFF);
4606 val = wm_kmrn_readreg(sc,
4607 KUMCTRLSTA_OFFSET_INB_PARAM);
4608 val |= 0x3F;
4609 wm_kmrn_writereg(sc,
4610 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4611 break;
4612 default:
4613 break;
4614 }
4615
4616 if (sc->sc_type == WM_T_80003) {
4617 val = CSR_READ(sc, WMREG_CTRL_EXT);
4618 val &= ~CTRL_EXT_LINK_MODE_MASK;
4619 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4620
4621 			/* Bypass the RX and TX FIFOs */
4622 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4623 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4624 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4625 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4626 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4627 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4628 }
4629 }
4630 #if 0
4631 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4632 #endif
4633
4634 /* Set up checksum offload parameters. */
4635 reg = CSR_READ(sc, WMREG_RXCSUM);
4636 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4637 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4638 reg |= RXCSUM_IPOFL;
4639 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4640 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4641 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4642 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4643 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4644
4645 /* Set up MSI-X */
4646 if (sc->sc_nintrs > 1) {
4647 uint32_t ivar;
4648
4649 if (sc->sc_type == WM_T_82575) {
4650 /* Interrupt control */
4651 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4652 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4653 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4654
4655 /* TX */
4656 for (i = 0; i < sc->sc_ntxqueues; i++) {
4657 struct wm_txqueue *txq = &sc->sc_txq[i];
4658 CSR_WRITE(sc, WMREG_MSIXBM(txq->txq_intr_idx),
4659 EITR_TX_QUEUE(txq->txq_id));
4660 }
4661 /* RX */
4662 for (i = 0; i < sc->sc_nrxqueues; i++) {
4663 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4664 CSR_WRITE(sc, WMREG_MSIXBM(rxq->rxq_intr_idx),
4665 EITR_RX_QUEUE(rxq->rxq_id));
4666 }
4667 /* Link status */
4668 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
4669 EITR_OTHER);
4670 } else if (sc->sc_type == WM_T_82574) {
4671 /* Interrupt control */
4672 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4673 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4674 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4675
4676 ivar = 0;
4677 /* TX */
4678 for (i = 0; i < sc->sc_ntxqueues; i++) {
4679 struct wm_txqueue *txq = &sc->sc_txq[i];
4680 ivar |= __SHIFTIN((IVAR_VALID_82574|txq->txq_intr_idx),
4681 IVAR_TX_MASK_Q_82574(txq->txq_id));
4682 }
4683 /* RX */
4684 for (i = 0; i < sc->sc_nrxqueues; i++) {
4685 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4686 ivar |= __SHIFTIN((IVAR_VALID_82574|rxq->rxq_intr_idx),
4687 IVAR_RX_MASK_Q_82574(rxq->rxq_id));
4688 }
4689 /* Link status */
4690 ivar |= __SHIFTIN((IVAR_VALID_82574|sc->sc_link_intr_idx),
4691 IVAR_OTHER_MASK);
4692 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4693 } else {
4694 /* Interrupt control */
4695 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR
4696 | GPIE_MULTI_MSIX | GPIE_EIAME
4697 | GPIE_PBA);
4698
4699 switch (sc->sc_type) {
4700 case WM_T_82580:
4701 case WM_T_I350:
4702 case WM_T_I354:
4703 case WM_T_I210:
4704 case WM_T_I211:
4705 /* TX */
4706 for (i = 0; i < sc->sc_ntxqueues; i++) {
4707 struct wm_txqueue *txq = &sc->sc_txq[i];
4708 int qid = txq->txq_id;
4709 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4710 ivar &= ~IVAR_TX_MASK_Q(qid);
4711 ivar |= __SHIFTIN(
4712 (txq->txq_intr_idx | IVAR_VALID),
4713 IVAR_TX_MASK_Q(qid));
4714 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
4715 }
4716
4717 /* RX */
4718 for (i = 0; i < sc->sc_nrxqueues; i++) {
4719 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4720 int qid = rxq->rxq_id;
4721 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4722 ivar &= ~IVAR_RX_MASK_Q(qid);
4723 ivar |= __SHIFTIN(
4724 (rxq->rxq_intr_idx | IVAR_VALID),
4725 IVAR_RX_MASK_Q(qid));
4726 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
4727 }
4728 break;
4729 case WM_T_82576:
4730 /* TX */
4731 for (i = 0; i < sc->sc_ntxqueues; i++) {
4732 struct wm_txqueue *txq = &sc->sc_txq[i];
4733 int qid = txq->txq_id;
4734 ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(qid));
4735 ivar &= ~IVAR_TX_MASK_Q_82576(qid);
4736 ivar |= __SHIFTIN(
4737 (txq->txq_intr_idx | IVAR_VALID),
4738 IVAR_TX_MASK_Q_82576(qid));
4739 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), ivar);
4740 }
4741
4742 /* RX */
4743 for (i = 0; i < sc->sc_nrxqueues; i++) {
4744 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4745 int qid = rxq->rxq_id;
4746 ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(qid));
4747 ivar &= ~IVAR_RX_MASK_Q_82576(qid);
4748 ivar |= __SHIFTIN(
4749 (rxq->rxq_intr_idx | IVAR_VALID),
4750 IVAR_RX_MASK_Q_82576(qid));
4751 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), ivar);
4752 }
4753 break;
4754 default:
4755 break;
4756 }
4757
4758 /* Link status */
4759 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
4760 IVAR_MISC_OTHER);
4761 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
4762 }
4763
4764 if (sc->sc_nrxqueues > 1) {
4765 wm_init_rss(sc);
4766
4767 /*
4768 ** NOTE: Receive Full-Packet Checksum Offload
4769 			** is mutually exclusive with Multiqueue. However,
4770 			** this is not the same as the TCP/IP checksums,
4771 			** which still work.
4772 */
4773 reg = CSR_READ(sc, WMREG_RXCSUM);
4774 reg |= RXCSUM_PCSD;
4775 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4776 }
4777 }
4778
4779 /* Set up the interrupt registers. */
4780 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4781 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4782 ICR_RXO | ICR_RXT0;
4783 if (sc->sc_nintrs > 1) {
4784 uint32_t mask;
4785 switch (sc->sc_type) {
4786 case WM_T_82574:
4787 CSR_WRITE(sc, WMREG_EIAC_82574,
4788 WMREG_EIAC_82574_MSIX_MASK);
4789 sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
4790 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4791 break;
4792 default:
4793 if (sc->sc_type == WM_T_82575) {
4794 mask = 0;
4795 for (i = 0; i < sc->sc_ntxqueues; i++) {
4796 struct wm_txqueue *txq = &sc->sc_txq[i];
4797 mask |= EITR_TX_QUEUE(txq->txq_id);
4798 }
4799 for (i = 0; i < sc->sc_nrxqueues; i++) {
4800 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4801 mask |= EITR_RX_QUEUE(rxq->rxq_id);
4802 }
4803 mask |= EITR_OTHER;
4804 } else {
4805 mask = 0;
4806 for (i = 0; i < sc->sc_ntxqueues; i++) {
4807 struct wm_txqueue *txq = &sc->sc_txq[i];
4808 mask |= 1 << txq->txq_intr_idx;
4809 }
4810 for (i = 0; i < sc->sc_nrxqueues; i++) {
4811 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4812 mask |= 1 << rxq->rxq_intr_idx;
4813 }
4814 mask |= 1 << sc->sc_link_intr_idx;
4815 }
4816 CSR_WRITE(sc, WMREG_EIAC, mask);
4817 CSR_WRITE(sc, WMREG_EIAM, mask);
4818 CSR_WRITE(sc, WMREG_EIMS, mask);
4819 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
4820 break;
4821 }
4822 } else
4823 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4824
4825 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4826 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4827 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4828 reg = CSR_READ(sc, WMREG_KABGTXD);
4829 reg |= KABGTXD_BGSQLBIAS;
4830 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4831 }
4832
4833 /* Set up the inter-packet gap. */
4834 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4835
4836 if (sc->sc_type >= WM_T_82543) {
4837 /*
4838 		 * XXX 82574 has both ITR and EITR. Set EITR when we use
4839 		 * the multiqueue function with MSI-X.
4840 */
4841 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4842 int qidx;
4843 for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
4844 struct wm_txqueue *txq = &sc->sc_txq[qidx];
4845 CSR_WRITE(sc, WMREG_EITR(txq->txq_intr_idx),
4846 sc->sc_itr);
4847 }
4848 for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
4849 struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4850 CSR_WRITE(sc, WMREG_EITR(rxq->rxq_intr_idx),
4851 sc->sc_itr);
4852 }
4853 /*
4854 			 * Link interrupts occur much less frequently than
4855 			 * TX and RX interrupts, so we don't tune the
4856 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
4857 			 * FreeBSD's if_igb does.
4858 */
4859 } else
4860 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4861 }
4862
4863 /* Set the VLAN ethernetype. */
4864 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4865
4866 /*
4867 * Set up the transmit control register; we start out with
4868 	 * a collision distance suitable for FDX, but update it when
4869 * we resolve the media type.
4870 */
4871 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4872 | TCTL_CT(TX_COLLISION_THRESHOLD)
4873 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4874 if (sc->sc_type >= WM_T_82571)
4875 sc->sc_tctl |= TCTL_MULR;
4876 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4877
4878 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4879 /* Write TDT after TCTL.EN is set. See the document. */
4880 CSR_WRITE(sc, WMREG_TDT(0), 0);
4881 }
4882
4883 if (sc->sc_type == WM_T_80003) {
4884 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4885 reg &= ~TCTL_EXT_GCEX_MASK;
4886 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4887 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4888 }
4889
4890 /* Set the media. */
4891 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4892 goto out;
4893
4894 /* Configure for OS presence */
4895 wm_init_manageability(sc);
4896
4897 /*
4898 * Set up the receive control register; we actually program
4899 * the register when we set the receive filter. Use multicast
4900 * address offset type 0.
4901 *
4902 * Only the i82544 has the ability to strip the incoming
4903 * CRC, so we don't enable that feature.
4904 */
4905 sc->sc_mchash_type = 0;
4906 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4907 | RCTL_MO(sc->sc_mchash_type);
4908
4909 /*
4910 * The I350 has a bug where it always strips the CRC whether
4911 	 * asked to or not. So ask for a stripped CRC here and cope in rxeof.
4912 */
4913 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4914 || (sc->sc_type == WM_T_I210))
4915 sc->sc_rctl |= RCTL_SECRC;
4916
4917 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4918 && (ifp->if_mtu > ETHERMTU)) {
4919 sc->sc_rctl |= RCTL_LPE;
4920 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4921 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4922 }
4923
4924 if (MCLBYTES == 2048) {
4925 sc->sc_rctl |= RCTL_2k;
4926 } else {
4927 if (sc->sc_type >= WM_T_82543) {
4928 switch (MCLBYTES) {
4929 case 4096:
4930 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4931 break;
4932 case 8192:
4933 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4934 break;
4935 case 16384:
4936 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4937 break;
4938 default:
4939 panic("wm_init: MCLBYTES %d unsupported",
4940 MCLBYTES);
4941 break;
4942 }
4943 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4944 }
4945
4946 /* Set the receive filter. */
4947 wm_set_filter(sc);
4948
4949 /* Enable ECC */
4950 switch (sc->sc_type) {
4951 case WM_T_82571:
4952 reg = CSR_READ(sc, WMREG_PBA_ECC);
4953 reg |= PBA_ECC_CORR_EN;
4954 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4955 break;
4956 case WM_T_PCH_LPT:
4957 reg = CSR_READ(sc, WMREG_PBECCSTS);
4958 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4959 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4960
4961 reg = CSR_READ(sc, WMREG_CTRL);
4962 reg |= CTRL_MEHE;
4963 CSR_WRITE(sc, WMREG_CTRL, reg);
4964 break;
4965 default:
4966 break;
4967 }
4968
4969 /* On 575 and later set RDT only if RX enabled */
4970 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4971 int qidx;
4972 for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
4973 struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4974 for (i = 0; i < WM_NRXDESC; i++) {
4975 WM_RX_LOCK(rxq);
4976 wm_init_rxdesc(rxq, i);
4977 WM_RX_UNLOCK(rxq);
4978
4979 }
4980 }
4981 }
4982
4983 sc->sc_stopping = false;
4984
4985 /* Start the one second link check clock. */
4986 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4987
4988 /* ...all done! */
4989 ifp->if_flags |= IFF_RUNNING;
4990 ifp->if_flags &= ~IFF_OACTIVE;
4991
4992 out:
4993 sc->sc_if_flags = ifp->if_flags;
4994 if (error)
4995 log(LOG_ERR, "%s: interface not running\n",
4996 device_xname(sc->sc_dev));
4997 return error;
4998 }
4999
5000 /*
5001 * wm_stop: [ifnet interface function]
5002 *
5003 * Stop transmission on the interface.
5004 */
5005 static void
5006 wm_stop(struct ifnet *ifp, int disable)
5007 {
5008 struct wm_softc *sc = ifp->if_softc;
5009
5010 WM_CORE_LOCK(sc);
5011 wm_stop_locked(ifp, disable);
5012 WM_CORE_UNLOCK(sc);
5013 }
5014
5015 static void
5016 wm_stop_locked(struct ifnet *ifp, int disable)
5017 {
5018 struct wm_softc *sc = ifp->if_softc;
5019 struct wm_txsoft *txs;
5020 int i, qidx;
5021
5022 KASSERT(WM_CORE_LOCKED(sc));
5023
5024 sc->sc_stopping = true;
5025
5026 /* Stop the one second clock. */
5027 callout_stop(&sc->sc_tick_ch);
5028
5029 /* Stop the 82547 Tx FIFO stall check timer. */
5030 if (sc->sc_type == WM_T_82547)
5031 callout_stop(&sc->sc_txfifo_ch);
5032
5033 if (sc->sc_flags & WM_F_HAS_MII) {
5034 /* Down the MII. */
5035 mii_down(&sc->sc_mii);
5036 } else {
5037 #if 0
5038 /* Should we clear PHY's status properly? */
5039 wm_reset(sc);
5040 #endif
5041 }
5042
5043 /* Stop the transmit and receive processes. */
5044 CSR_WRITE(sc, WMREG_TCTL, 0);
5045 CSR_WRITE(sc, WMREG_RCTL, 0);
5046 sc->sc_rctl &= ~RCTL_EN;
5047
5048 /*
5049 * Clear the interrupt mask to ensure the device cannot assert its
5050 * interrupt line.
5051 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
5052 * service any currently pending or shared interrupt.
5053 */
5054 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5055 sc->sc_icr = 0;
5056 if (sc->sc_nintrs > 1) {
5057 if (sc->sc_type != WM_T_82574) {
5058 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5059 CSR_WRITE(sc, WMREG_EIAC, 0);
5060 } else
5061 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5062 }
5063
5064 /* Release any queued transmit buffers. */
5065 for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
5066 struct wm_txqueue *txq = &sc->sc_txq[qidx];
5067 WM_TX_LOCK(txq);
5068 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5069 txs = &txq->txq_soft[i];
5070 if (txs->txs_mbuf != NULL) {
5071 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5072 m_freem(txs->txs_mbuf);
5073 txs->txs_mbuf = NULL;
5074 }
5075 }
5076 WM_TX_UNLOCK(txq);
5077 }
5078
5079 /* Mark the interface as down and cancel the watchdog timer. */
5080 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5081 ifp->if_timer = 0;
5082
5083 if (disable) {
5084 for (i = 0; i < sc->sc_nrxqueues; i++) {
5085 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5086 WM_RX_LOCK(rxq);
5087 wm_rxdrain(rxq);
5088 WM_RX_UNLOCK(rxq);
5089 }
5090 }
5091
5092 #if 0 /* notyet */
5093 if (sc->sc_type >= WM_T_82544)
5094 CSR_WRITE(sc, WMREG_WUC, 0);
5095 #endif
5096 }
5097
5098 static void
5099 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5100 {
5101 struct mbuf *m;
5102 int i;
5103
5104 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5105 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5106 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5107 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5108 m->m_data, m->m_len, m->m_flags);
5109 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5110 i, i == 1 ? "" : "s");
5111 }
5112
5113 /*
5114 * wm_82547_txfifo_stall:
5115 *
5116 * Callout used to wait for the 82547 Tx FIFO to drain,
5117 * reset the FIFO pointers, and restart packet transmission.
5118 */
5119 static void
5120 wm_82547_txfifo_stall(void *arg)
5121 {
5122 struct wm_softc *sc = arg;
5123 struct wm_txqueue *txq = sc->sc_txq;
5124 #ifndef WM_MPSAFE
5125 int s;
5126
5127 s = splnet();
5128 #endif
5129 WM_TX_LOCK(txq);
5130
5131 if (sc->sc_stopping)
5132 goto out;
5133
5134 if (txq->txq_fifo_stall) {
5135 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
5136 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5137 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5138 /*
5139 * Packets have drained. Stop transmitter, reset
5140 * FIFO pointers, restart transmitter, and kick
5141 * the packet queue.
5142 */
5143 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5144 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5145 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
5146 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
5147 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
5148 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
5149 CSR_WRITE(sc, WMREG_TCTL, tctl);
5150 CSR_WRITE_FLUSH(sc);
5151
5152 txq->txq_fifo_head = 0;
5153 txq->txq_fifo_stall = 0;
5154 wm_start_locked(&sc->sc_ethercom.ec_if);
5155 } else {
5156 /*
5157 * Still waiting for packets to drain; try again in
5158 * another tick.
5159 */
5160 callout_schedule(&sc->sc_txfifo_ch, 1);
5161 }
5162 }
5163
5164 out:
5165 WM_TX_UNLOCK(txq);
5166 #ifndef WM_MPSAFE
5167 splx(s);
5168 #endif
5169 }
5170
5171 /*
5172 * wm_82547_txfifo_bugchk:
5173 *
5174 * Check for bug condition in the 82547 Tx FIFO. We need to
5175 * prevent enqueueing a packet that would wrap around the end
5176  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
5177 *
5178 * We do this by checking the amount of space before the end
5179 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
5180 * the Tx FIFO, wait for all remaining packets to drain, reset
5181 * the internal FIFO pointers to the beginning, and restart
5182 * transmission on the interface.
5183 */
5184 #define WM_FIFO_HDR 0x10
5185 #define WM_82547_PAD_LEN 0x3e0
5186 static int
5187 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5188 {
5189 struct wm_txqueue *txq = &sc->sc_txq[0];
5190 int space = txq->txq_fifo_size - txq->txq_fifo_head;
5191 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
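	/*
	 * FIFO space for a packet is its length plus a WM_FIFO_HDR
	 * (16 byte) header, rounded up to a multiple of WM_FIFO_HDR;
	 * that is what the roundup above computes.
	 */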
5192
5193 /* Just return if already stalled. */
5194 if (txq->txq_fifo_stall)
5195 return 1;
5196
5197 if (sc->sc_mii.mii_media_active & IFM_FDX) {
5198 /* Stall only occurs in half-duplex mode. */
5199 goto send_packet;
5200 }
5201
5202 if (len >= WM_82547_PAD_LEN + space) {
5203 txq->txq_fifo_stall = 1;
5204 callout_schedule(&sc->sc_txfifo_ch, 1);
5205 return 1;
5206 }
5207
5208 send_packet:
5209 txq->txq_fifo_head += len;
5210 if (txq->txq_fifo_head >= txq->txq_fifo_size)
5211 txq->txq_fifo_head -= txq->txq_fifo_size;
5212
5213 return 0;
5214 }
5215
5216 static int
5217 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5218 {
5219 int error;
5220
5221 /*
5222 * Allocate the control data structures, and create and load the
5223 * DMA map for it.
5224 *
5225 * NOTE: All Tx descriptors must be in the same 4G segment of
5226 * memory. So must Rx descriptors. We simplify by allocating
5227 * both sets within the same 4G segment.
5228 */
5229 if (sc->sc_type < WM_T_82544) {
5230 WM_NTXDESC(txq) = WM_NTXDESC_82542;
5231 txq->txq_desc_size = sizeof(wiseman_txdesc_t) * WM_NTXDESC(txq);
5232 } else {
5233 WM_NTXDESC(txq) = WM_NTXDESC_82544;
5234 txq->txq_desc_size = sizeof(txdescs_t);
5235 }
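	/*
	 * The 4G constraint noted above is enforced via the boundary
	 * argument (0x100000000ULL) to bus_dmamem_alloc() below.
	 */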
5236
5237 if ((error = bus_dmamem_alloc(sc->sc_dmat, txq->txq_desc_size, PAGE_SIZE,
5238 (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg, 1,
5239 &txq->txq_desc_rseg, 0)) != 0) {
5240 aprint_error_dev(sc->sc_dev,
5241 "unable to allocate TX control data, error = %d\n",
5242 error);
5243 goto fail_0;
5244 }
5245
5246 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
5247 txq->txq_desc_rseg, txq->txq_desc_size,
5248 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
5249 aprint_error_dev(sc->sc_dev,
5250 "unable to map TX control data, error = %d\n", error);
5251 goto fail_1;
5252 }
5253
5254 if ((error = bus_dmamap_create(sc->sc_dmat, txq->txq_desc_size, 1,
5255 txq->txq_desc_size, 0, 0, &txq->txq_desc_dmamap)) != 0) {
5256 aprint_error_dev(sc->sc_dev,
5257 "unable to create TX control data DMA map, error = %d\n",
5258 error);
5259 goto fail_2;
5260 }
5261
5262 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
5263 txq->txq_descs_u, txq->txq_desc_size, NULL, 0)) != 0) {
5264 aprint_error_dev(sc->sc_dev,
5265 "unable to load TX control data DMA map, error = %d\n",
5266 error);
5267 goto fail_3;
5268 }
5269
5270 return 0;
5271
5272 fail_3:
5273 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5274 fail_2:
5275 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5276 txq->txq_desc_size);
5277 fail_1:
5278 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5279 fail_0:
5280 return error;
5281 }
5282
5283 static void
5284 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5285 {
5286
5287 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
5288 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5289 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5290 txq->txq_desc_size);
5291 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5292 }
5293
5294 static int
5295 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5296 {
5297 int error;
5298
5299 /*
5300 * Allocate the control data structures, and create and load the
5301 * DMA map for it.
5302 *
5303 * NOTE: All Tx descriptors must be in the same 4G segment of
5304 * memory. So must Rx descriptors. We simplify by allocating
5305 * both sets within the same 4G segment.
5306 */
5307 rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
5308 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size, PAGE_SIZE,
5309 (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg, 1,
5310 &rxq->rxq_desc_rseg, 0)) != 0) {
5311 aprint_error_dev(sc->sc_dev,
5312 "unable to allocate RX control data, error = %d\n",
5313 error);
5314 goto fail_0;
5315 }
5316
5317 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
5318 rxq->rxq_desc_rseg, rxq->rxq_desc_size,
5319 (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
5320 aprint_error_dev(sc->sc_dev,
5321 "unable to map RX control data, error = %d\n", error);
5322 goto fail_1;
5323 }
5324
5325 if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
5326 rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
5327 aprint_error_dev(sc->sc_dev,
5328 "unable to create RX control data DMA map, error = %d\n",
5329 error);
5330 goto fail_2;
5331 }
5332
5333 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
5334 rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
5335 aprint_error_dev(sc->sc_dev,
5336 "unable to load RX control data DMA map, error = %d\n",
5337 error);
5338 goto fail_3;
5339 }
5340
5341 return 0;
5342
5343 fail_3:
5344 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5345 fail_2:
5346 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5347 rxq->rxq_desc_size);
5348 fail_1:
5349 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5350 fail_0:
5351 return error;
5352 }
5353
5354 static void
5355 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5356 {
5357
5358 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
5359 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5360 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5361 rxq->rxq_desc_size);
5362 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5363 }
5364
5365
5366 static int
5367 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5368 {
5369 int i, error;
5370
5371 /* Create the transmit buffer DMA maps. */
5372 WM_TXQUEUELEN(txq) =
5373 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
5374 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
5375 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5376 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
5377 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
5378 &txq->txq_soft[i].txs_dmamap)) != 0) {
5379 aprint_error_dev(sc->sc_dev,
5380 "unable to create Tx DMA map %d, error = %d\n",
5381 i, error);
5382 goto fail;
5383 }
5384 }
5385
5386 return 0;
5387
5388 fail:
5389 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5390 if (txq->txq_soft[i].txs_dmamap != NULL)
5391 bus_dmamap_destroy(sc->sc_dmat,
5392 txq->txq_soft[i].txs_dmamap);
5393 }
5394 return error;
5395 }
5396
5397 static void
5398 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5399 {
5400 int i;
5401
5402 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5403 if (txq->txq_soft[i].txs_dmamap != NULL)
5404 bus_dmamap_destroy(sc->sc_dmat,
5405 txq->txq_soft[i].txs_dmamap);
5406 }
5407 }
5408
5409 static int
5410 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5411 {
5412 int i, error;
5413
5414 /* Create the receive buffer DMA maps. */
5415 for (i = 0; i < WM_NRXDESC; i++) {
5416 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
5417 MCLBYTES, 0, 0,
5418 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
5419 aprint_error_dev(sc->sc_dev,
5420 "unable to create Rx DMA map %d error = %d\n",
5421 i, error);
5422 goto fail;
5423 }
5424 rxq->rxq_soft[i].rxs_mbuf = NULL;
5425 }
5426
5427 return 0;
5428
5429 fail:
5430 for (i = 0; i < WM_NRXDESC; i++) {
5431 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5432 bus_dmamap_destroy(sc->sc_dmat,
5433 rxq->rxq_soft[i].rxs_dmamap);
5434 }
5435 return error;
5436 }
5437
5438 static void
5439 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5440 {
5441 int i;
5442
5443 for (i = 0; i < WM_NRXDESC; i++) {
5444 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5445 bus_dmamap_destroy(sc->sc_dmat,
5446 rxq->rxq_soft[i].rxs_dmamap);
5447 }
5448 }
5449
5450 /*
5451  * wm_alloc_txrx_queues:
5452  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
5453 */
5454 static int
5455 wm_alloc_txrx_queues(struct wm_softc *sc)
5456 {
5457 int i, error, tx_done, rx_done;
5458
5459 /*
5460 * For transmission
5461 */
5462 sc->sc_txq = kmem_zalloc(sizeof(struct wm_txqueue) * sc->sc_ntxqueues,
5463 KM_SLEEP);
5464 if (sc->sc_txq == NULL) {
5465 aprint_error_dev(sc->sc_dev, "unable to allocate wm_txqueue\n");
5466 error = ENOMEM;
5467 goto fail_0;
5468 }
5469
5470 error = 0;
5471 tx_done = 0;
5472 for (i = 0; i < sc->sc_ntxqueues; i++) {
5473 struct wm_txqueue *txq = &sc->sc_txq[i];
5474 txq->txq_sc = sc;
5475 #ifdef WM_MPSAFE
5476 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5477 #else
5478 txq->txq_lock = NULL;
5479 #endif
5480 error = wm_alloc_tx_descs(sc, txq);
5481 if (error)
5482 break;
5483 error = wm_alloc_tx_buffer(sc, txq);
5484 if (error) {
5485 wm_free_tx_descs(sc, txq);
5486 break;
5487 }
5488 tx_done++;
5489 }
5490 if (error)
5491 goto fail_1;
5492
5493 /*
5494 	 * For receive
5495 */
5496 sc->sc_rxq = kmem_zalloc(sizeof(struct wm_rxqueue) * sc->sc_nrxqueues,
5497 KM_SLEEP);
5498 if (sc->sc_rxq == NULL) {
5499 aprint_error_dev(sc->sc_dev, "unable to allocate wm_rxqueue\n");
5500 error = ENOMEM;
5501 goto fail_1;
5502 }
5503
5504 error = 0;
5505 rx_done = 0;
5506 for (i = 0; i < sc->sc_nrxqueues; i++) {
5507 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5508 rxq->rxq_sc = sc;
5509 #ifdef WM_MPSAFE
5510 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5511 #else
5512 rxq->rxq_lock = NULL;
5513 #endif
		error = wm_alloc_rx_descs(sc, rxq);
		if (error) {
			/* Don't leak this queue's lock on failure. */
			if (rxq->rxq_lock)
				mutex_obj_free(rxq->rxq_lock);
			break;
		}

		error = wm_alloc_rx_buffer(sc, rxq);
		if (error) {
			wm_free_rx_descs(sc, rxq);
			if (rxq->rxq_lock)
				mutex_obj_free(rxq->rxq_lock);
			break;
		}
5523
5524 rx_done++;
5525 }
5526 if (error)
5527 goto fail_2;
5528
5529 return 0;
5530
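	/*
	 * Unwind in reverse order of allocation.  Only the queues counted
	 * in rx_done/tx_done were fully set up; a queue that failed part
	 * way through already released its own resources above.
	 */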
5531 fail_2:
5532 for (i = 0; i < rx_done; i++) {
5533 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5534 wm_free_rx_buffer(sc, rxq);
5535 wm_free_rx_descs(sc, rxq);
5536 if (rxq->rxq_lock)
5537 mutex_obj_free(rxq->rxq_lock);
5538 }
5539 kmem_free(sc->sc_rxq,
5540 sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
5541 fail_1:
5542 for (i = 0; i < tx_done; i++) {
5543 struct wm_txqueue *txq = &sc->sc_txq[i];
5544 wm_free_tx_buffer(sc, txq);
5545 wm_free_tx_descs(sc, txq);
5546 if (txq->txq_lock)
5547 mutex_obj_free(txq->txq_lock);
5548 }
5549 kmem_free(sc->sc_txq,
5550 sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
5551 fail_0:
5552 return error;
5553 }
5554
/*
 * wm_free_txrx_queues:
 *	Free {tx,rx} descriptors and {tx,rx} buffers
 */
5559 static void
5560 wm_free_txrx_queues(struct wm_softc *sc)
5561 {
5562 int i;
5563
5564 for (i = 0; i < sc->sc_nrxqueues; i++) {
5565 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5566 wm_free_rx_buffer(sc, rxq);
5567 wm_free_rx_descs(sc, rxq);
5568 if (rxq->rxq_lock)
5569 mutex_obj_free(rxq->rxq_lock);
5570 }
5571 kmem_free(sc->sc_rxq, sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
5572
5573 for (i = 0; i < sc->sc_ntxqueues; i++) {
5574 struct wm_txqueue *txq = &sc->sc_txq[i];
5575 wm_free_tx_buffer(sc, txq);
5576 wm_free_tx_descs(sc, txq);
5577 if (txq->txq_lock)
5578 mutex_obj_free(txq->txq_lock);
5579 }
5580 kmem_free(sc->sc_txq, sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
5581 }
5582
5583 static void
5584 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5585 {
5586
5587 KASSERT(WM_TX_LOCKED(txq));
5588
5589 /* Initialize the transmit descriptor ring. */
5590 memset(txq->txq_descs, 0, WM_TXDESCSIZE(txq));
5591 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5592 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5593 txq->txq_free = WM_NTXDESC(txq);
5594 txq->txq_next = 0;
5595 }
5596
5597 static void
5598 wm_init_tx_regs(struct wm_softc *sc, struct wm_txqueue *txq)
5599 {
5600
5601 KASSERT(WM_TX_LOCKED(txq));
5602
5603 if (sc->sc_type < WM_T_82543) {
5604 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
5605 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
5606 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(txq));
5607 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
5608 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
5609 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
5610 } else {
5611 int qid = txq->txq_id;
5612
5613 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
5614 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
5615 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCSIZE(txq));
5616 CSR_WRITE(sc, WMREG_TDH(qid), 0);
5617
5618 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
			/*
			 * Don't write TDT before TCTL.EN is set.
			 * See the datasheet for details.
			 */
5623 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
5624 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
5625 | TXDCTL_WTHRESH(0));
5626 else {
			/*
			 * ITR is in 256 ns units while TIDV counts
			 * 1.024 us ticks, so ITR / 4 gives roughly
			 * the same delay.
			 */
			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
			if (sc->sc_type >= WM_T_82540) {
				/* TADV should hold the same value */
				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
			}
5632 }
5633
5634 CSR_WRITE(sc, WMREG_TDT(qid), 0);
5635 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
5636 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
5637 }
5638 }
5639 }
5640
5641 static void
5642 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5643 {
5644 int i;
5645
5646 KASSERT(WM_TX_LOCKED(txq));
5647
5648 /* Initialize the transmit job descriptors. */
5649 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
5650 txq->txq_soft[i].txs_mbuf = NULL;
5651 txq->txq_sfree = WM_TXQUEUELEN(txq);
5652 txq->txq_snext = 0;
5653 txq->txq_sdirty = 0;
5654 }
5655
5656 static void
5657 wm_init_tx_queue(struct wm_softc *sc, struct wm_txqueue *txq)
5658 {
5659
5660 KASSERT(WM_TX_LOCKED(txq));
5661
5662 /*
5663 * Set up some register offsets that are different between
5664 * the i82542 and the i82543 and later chips.
5665 */
5666 if (sc->sc_type < WM_T_82543) {
5667 txq->txq_tdt_reg = WMREG_OLD_TDT;
5668 } else {
5669 txq->txq_tdt_reg = WMREG_TDT(0);
5670 }
5671
5672 wm_init_tx_descs(sc, txq);
5673 wm_init_tx_regs(sc, txq);
5674 wm_init_tx_buffer(sc, txq);
5675 }
5676
5677 static void
5678 wm_init_rx_regs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5679 {
5680
5681 KASSERT(WM_RX_LOCKED(rxq));
5682
5683 /*
5684 * Initialize the receive descriptor and receive job
5685 * descriptor rings.
5686 */
5687 if (sc->sc_type < WM_T_82543) {
5688 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
5689 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
5690 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
5691 sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
5692 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
5693 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
5694 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
5695
5696 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
5697 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
5698 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
5699 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
5700 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
5701 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
5702 } else {
5703 int qid = rxq->rxq_id;
5704
5705 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
5706 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
5707 CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
5708
5709 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5710 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
				panic("%s: MCLBYTES %d unsupported for i82575 or higher\n", __func__, MCLBYTES);
5712 CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
5713 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
5714 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
5715 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
5716 | RXDCTL_WTHRESH(1));
5717 CSR_WRITE(sc, WMREG_RDH(qid), 0);
5718 CSR_WRITE(sc, WMREG_RDT(qid), 0);
5719 } else {
5720 CSR_WRITE(sc, WMREG_RDH(qid), 0);
5721 CSR_WRITE(sc, WMREG_RDT(qid), 0);
			/*
			 * ITR is in 256 ns units while RDTR counts
			 * 1.024 us ticks, so ITR / 4 gives roughly
			 * the same delay.
			 */
			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
			/* RADV MUST hold the same value as RDTR */
			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
5726 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
5727 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
5728 }
5729 }
5730 }
5731
5732 static int
5733 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5734 {
5735 struct wm_rxsoft *rxs;
5736 int error, i;
5737
5738 KASSERT(WM_RX_LOCKED(rxq));
5739
5740 for (i = 0; i < WM_NRXDESC; i++) {
5741 rxs = &rxq->rxq_soft[i];
5742 if (rxs->rxs_mbuf == NULL) {
5743 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
5744 log(LOG_ERR, "%s: unable to allocate or map "
5745 "rx buffer %d, error = %d\n",
5746 device_xname(sc->sc_dev), i, error);
5747 /*
5748 * XXX Should attempt to run with fewer receive
5749 * XXX buffers instead of just failing.
5750 */
5751 wm_rxdrain(rxq);
5752 return ENOMEM;
5753 }
		} else {
			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
				wm_init_rxdesc(rxq, i);
			/*
			 * For 82575 and newer devices, the Rx
			 * descriptors must be initialized after
			 * RCTL.EN is set in wm_set_filter(), so
			 * don't touch them here.
			 */
		}
5763 }
5764 rxq->rxq_ptr = 0;
5765 rxq->rxq_discard = 0;
5766 WM_RXCHAIN_RESET(rxq);
5767
5768 return 0;
5769 }
5770
5771 static int
5772 wm_init_rx_queue(struct wm_softc *sc, struct wm_rxqueue *rxq)
5773 {
5774
5775 KASSERT(WM_RX_LOCKED(rxq));
5776
5777 /*
5778 * Set up some register offsets that are different between
5779 * the i82542 and the i82543 and later chips.
5780 */
5781 if (sc->sc_type < WM_T_82543) {
5782 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
5783 } else {
5784 rxq->rxq_rdt_reg = WMREG_RDT(rxq->rxq_id);
5785 }
5786
5787 wm_init_rx_regs(sc, rxq);
5788 return wm_init_rx_buffer(sc, rxq);
5789 }
5790
/*
 * wm_init_txrx_queues:
 *	Initialize {tx,rx} descriptors and {tx,rx} buffers
 */
5795 static int
5796 wm_init_txrx_queues(struct wm_softc *sc)
5797 {
5798 int i, error;
5799
5800 for (i = 0; i < sc->sc_ntxqueues; i++) {
5801 struct wm_txqueue *txq = &sc->sc_txq[i];
5802 WM_TX_LOCK(txq);
5803 wm_init_tx_queue(sc, txq);
5804 WM_TX_UNLOCK(txq);
5805 }
5806
5807 error = 0;
5808 for (i = 0; i < sc->sc_nrxqueues; i++) {
5809 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5810 WM_RX_LOCK(rxq);
5811 error = wm_init_rx_queue(sc, rxq);
5812 WM_RX_UNLOCK(rxq);
5813 if (error)
5814 break;
5815 }
5816
5817 return error;
5818 }
5819
5820 /*
5821 * wm_tx_offload:
5822 *
5823 * Set up TCP/IP checksumming parameters for the
5824 * specified packet.
5825 */
5826 static int
5827 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
5828 uint8_t *fieldsp)
5829 {
5830 struct wm_txqueue *txq = &sc->sc_txq[0];
5831 struct mbuf *m0 = txs->txs_mbuf;
5832 struct livengood_tcpip_ctxdesc *t;
5833 uint32_t ipcs, tucs, cmd, cmdlen, seg;
5834 uint32_t ipcse;
5835 struct ether_header *eh;
5836 int offset, iphl;
5837 uint8_t fields;
5838
5839 /*
5840 * XXX It would be nice if the mbuf pkthdr had offset
5841 * fields for the protocol headers.
5842 */
5843
5844 eh = mtod(m0, struct ether_header *);
5845 switch (htons(eh->ether_type)) {
5846 case ETHERTYPE_IP:
5847 case ETHERTYPE_IPV6:
5848 offset = ETHER_HDR_LEN;
5849 break;
5850
5851 case ETHERTYPE_VLAN:
5852 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5853 break;
5854
5855 default:
5856 /*
5857 * Don't support this protocol or encapsulation.
5858 */
5859 *fieldsp = 0;
5860 *cmdp = 0;
5861 return 0;
5862 }
5863
5864 if ((m0->m_pkthdr.csum_flags &
5865 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
5866 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5867 } else {
5868 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5869 }
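	/* IPCSE: offset of the last byte of the IP header, inclusive. */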
5870 ipcse = offset + iphl - 1;
5871
5872 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
5873 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
5874 seg = 0;
5875 fields = 0;
5876
5877 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5878 int hlen = offset + iphl;
5879 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5880
5881 if (__predict_false(m0->m_len <
5882 (hlen + sizeof(struct tcphdr)))) {
5883 /*
5884 * TCP/IP headers are not in the first mbuf; we need
5885 * to do this the slow and painful way. Let's just
5886 * hope this doesn't happen very often.
5887 */
5888 struct tcphdr th;
5889
5890 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5891
5892 m_copydata(m0, hlen, sizeof(th), &th);
5893 if (v4) {
5894 struct ip ip;
5895
5896 m_copydata(m0, offset, sizeof(ip), &ip);
5897 ip.ip_len = 0;
5898 m_copyback(m0,
5899 offset + offsetof(struct ip, ip_len),
5900 sizeof(ip.ip_len), &ip.ip_len);
5901 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5902 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5903 } else {
5904 struct ip6_hdr ip6;
5905
5906 m_copydata(m0, offset, sizeof(ip6), &ip6);
5907 ip6.ip6_plen = 0;
5908 m_copyback(m0,
5909 offset + offsetof(struct ip6_hdr, ip6_plen),
5910 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5911 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5912 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5913 }
5914 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5915 sizeof(th.th_sum), &th.th_sum);
5916
5917 hlen += th.th_off << 2;
5918 } else {
5919 /*
5920 * TCP/IP headers are in the first mbuf; we can do
5921 * this the easy way.
5922 */
5923 struct tcphdr *th;
5924
5925 if (v4) {
5926 struct ip *ip =
5927 (void *)(mtod(m0, char *) + offset);
5928 th = (void *)(mtod(m0, char *) + hlen);
5929
5930 ip->ip_len = 0;
5931 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5932 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5933 } else {
5934 struct ip6_hdr *ip6 =
5935 (void *)(mtod(m0, char *) + offset);
5936 th = (void *)(mtod(m0, char *) + hlen);
5937
5938 ip6->ip6_plen = 0;
5939 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5940 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5941 }
5942 hlen += th->th_off << 2;
5943 }
5944
5945 if (v4) {
5946 WM_EVCNT_INCR(&sc->sc_ev_txtso);
5947 cmdlen |= WTX_TCPIP_CMD_IP;
5948 } else {
5949 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5950 ipcse = 0;
5951 }
5952 cmd |= WTX_TCPIP_CMD_TSE;
5953 cmdlen |= WTX_TCPIP_CMD_TSE |
5954 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
5955 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
5956 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
5957 }
5958
5959 /*
5960 * NOTE: Even if we're not using the IP or TCP/UDP checksum
5961 * offload feature, if we load the context descriptor, we
5962 * MUST provide valid values for IPCSS and TUCSS fields.
5963 */
5964
5965 ipcs = WTX_TCPIP_IPCSS(offset) |
5966 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
5967 WTX_TCPIP_IPCSE(ipcse);
5968 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
5969 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
5970 fields |= WTX_IXSM;
5971 }
5972
5973 offset += iphl;
5974
5975 if (m0->m_pkthdr.csum_flags &
5976 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
5977 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5978 fields |= WTX_TXSM;
5979 tucs = WTX_TCPIP_TUCSS(offset) |
5980 WTX_TCPIP_TUCSO(offset +
5981 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
5982 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5983 } else if ((m0->m_pkthdr.csum_flags &
5984 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
5985 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5986 fields |= WTX_TXSM;
5987 tucs = WTX_TCPIP_TUCSS(offset) |
5988 WTX_TCPIP_TUCSO(offset +
5989 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
5990 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5991 } else {
5992 /* Just initialize it to a valid TCP context. */
5993 tucs = WTX_TCPIP_TUCSS(offset) |
5994 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
5995 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5996 }
5997
5998 /* Fill in the context descriptor. */
5999 t = (struct livengood_tcpip_ctxdesc *)
6000 &txq->txq_descs[txq->txq_next];
6001 t->tcpip_ipcs = htole32(ipcs);
6002 t->tcpip_tucs = htole32(tucs);
6003 t->tcpip_cmdlen = htole32(cmdlen);
6004 t->tcpip_seg = htole32(seg);
6005 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6006
6007 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6008 txs->txs_ndesc++;
6009
6010 *cmdp = cmd;
6011 *fieldsp = fields;
6012
6013 return 0;
6014 }
6015
6016 /*
6017 * wm_start: [ifnet interface function]
6018 *
6019 * Start packet transmission on the interface.
6020 */
6021 static void
6022 wm_start(struct ifnet *ifp)
6023 {
6024 struct wm_softc *sc = ifp->if_softc;
6025 struct wm_txqueue *txq = &sc->sc_txq[0];
6026
6027 WM_TX_LOCK(txq);
6028 if (!sc->sc_stopping)
6029 wm_start_locked(ifp);
6030 WM_TX_UNLOCK(txq);
6031 }
6032
6033 static void
6034 wm_start_locked(struct ifnet *ifp)
6035 {
6036 struct wm_softc *sc = ifp->if_softc;
6037 struct wm_txqueue *txq = &sc->sc_txq[0];
6038 struct mbuf *m0;
6039 struct m_tag *mtag;
6040 struct wm_txsoft *txs;
6041 bus_dmamap_t dmamap;
6042 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
6043 bus_addr_t curaddr;
6044 bus_size_t seglen, curlen;
6045 uint32_t cksumcmd;
6046 uint8_t cksumfields;
6047
6048 KASSERT(WM_TX_LOCKED(txq));
6049
6050 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
6051 return;
6052
6053 /* Remember the previous number of free descriptors. */
6054 ofree = txq->txq_free;
6055
6056 /*
6057 * Loop through the send queue, setting up transmit descriptors
6058 * until we drain the queue, or use up all available transmit
6059 * descriptors.
6060 */
6061 for (;;) {
6062 m0 = NULL;
6063
6064 /* Get a work queue entry. */
6065 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6066 wm_txeof(sc);
6067 if (txq->txq_sfree == 0) {
6068 DPRINTF(WM_DEBUG_TX,
6069 ("%s: TX: no free job descriptors\n",
6070 device_xname(sc->sc_dev)));
6071 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
6072 break;
6073 }
6074 }
6075
6076 /* Grab a packet off the queue. */
6077 IFQ_DEQUEUE(&ifp->if_snd, m0);
6078 if (m0 == NULL)
6079 break;
6080
6081 DPRINTF(WM_DEBUG_TX,
6082 ("%s: TX: have packet to transmit: %p\n",
6083 device_xname(sc->sc_dev), m0));
6084
6085 txs = &txq->txq_soft[txq->txq_snext];
6086 dmamap = txs->txs_dmamap;
6087
6088 use_tso = (m0->m_pkthdr.csum_flags &
6089 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
6090
6091 /*
6092 * So says the Linux driver:
6093 * The controller does a simple calculation to make sure
6094 * there is enough room in the FIFO before initiating the
6095 * DMA for each buffer. The calc is:
6096 * 4 = ceil(buffer len / MSS)
6097 * To make sure we don't overrun the FIFO, adjust the max
6098 * buffer len if the MSS drops.
6099 */
6100 dmamap->dm_maxsegsz =
6101 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
6102 ? m0->m_pkthdr.segsz << 2
6103 : WTX_MAX_LEN;
6104
6105 /*
6106 * Load the DMA map. If this fails, the packet either
6107 * didn't fit in the allotted number of segments, or we
6108 * were short on resources. For the too-many-segments
6109 * case, we simply report an error and drop the packet,
6110 * since we can't sanely copy a jumbo packet to a single
6111 * buffer.
6112 */
6113 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6114 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
6115 if (error) {
6116 if (error == EFBIG) {
6117 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6118 log(LOG_ERR, "%s: Tx packet consumes too many "
6119 "DMA segments, dropping...\n",
6120 device_xname(sc->sc_dev));
6121 wm_dump_mbuf_chain(sc, m0);
6122 m_freem(m0);
6123 continue;
6124 }
6125 /* Short on resources, just stop for now. */
6126 DPRINTF(WM_DEBUG_TX,
6127 ("%s: TX: dmamap load failed: %d\n",
6128 device_xname(sc->sc_dev), error));
6129 break;
6130 }
6131
6132 segs_needed = dmamap->dm_nsegs;
6133 if (use_tso) {
6134 /* For sentinel descriptor; see below. */
6135 segs_needed++;
6136 }
6137
6138 /*
6139 * Ensure we have enough descriptors free to describe
6140 * the packet. Note, we always reserve one descriptor
6141 * at the end of the ring due to the semantics of the
6142 * TDT register, plus one more in the event we need
6143 * to load offload context.
6144 */
6145 if (segs_needed > txq->txq_free - 2) {
6146 /*
6147 * Not enough free descriptors to transmit this
6148 * packet. We haven't committed anything yet,
6149 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
6151 * layer that there are no more slots left.
6152 */
6153 DPRINTF(WM_DEBUG_TX,
6154 ("%s: TX: need %d (%d) descriptors, have %d\n",
6155 device_xname(sc->sc_dev), dmamap->dm_nsegs,
6156 segs_needed, txq->txq_free - 1));
6157 ifp->if_flags |= IFF_OACTIVE;
6158 bus_dmamap_unload(sc->sc_dmat, dmamap);
6159 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
6160 break;
6161 }
6162
6163 /*
6164 * Check for 82547 Tx FIFO bug. We need to do this
6165 * once we know we can transmit the packet, since we
6166 * do some internal FIFO space accounting here.
6167 */
6168 if (sc->sc_type == WM_T_82547 &&
6169 wm_82547_txfifo_bugchk(sc, m0)) {
6170 DPRINTF(WM_DEBUG_TX,
6171 ("%s: TX: 82547 Tx FIFO bug detected\n",
6172 device_xname(sc->sc_dev)));
6173 ifp->if_flags |= IFF_OACTIVE;
6174 bus_dmamap_unload(sc->sc_dmat, dmamap);
6175 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
6176 break;
6177 }
6178
6179 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6180
6181 DPRINTF(WM_DEBUG_TX,
6182 ("%s: TX: packet has %d (%d) DMA segments\n",
6183 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6184
6185 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
6186
6187 /*
6188 * Store a pointer to the packet so that we can free it
6189 * later.
6190 *
6191 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments. This may be
6193 * incremented by 1 if we do checksum offload (a descriptor
6194 * is used to set the checksum context).
6195 */
6196 txs->txs_mbuf = m0;
6197 txs->txs_firstdesc = txq->txq_next;
6198 txs->txs_ndesc = segs_needed;
6199
6200 /* Set up offload parameters for this packet. */
6201 if (m0->m_pkthdr.csum_flags &
6202 (M_CSUM_TSOv4|M_CSUM_TSOv6|
6203 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
6204 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
6205 if (wm_tx_offload(sc, txs, &cksumcmd,
6206 &cksumfields) != 0) {
6207 /* Error message already displayed. */
6208 bus_dmamap_unload(sc->sc_dmat, dmamap);
6209 continue;
6210 }
6211 } else {
6212 cksumcmd = 0;
6213 cksumfields = 0;
6214 }
6215
6216 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
6217
6218 /* Sync the DMA map. */
6219 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6220 BUS_DMASYNC_PREWRITE);
6221
6222 /* Initialize the transmit descriptor. */
6223 for (nexttx = txq->txq_next, seg = 0;
6224 seg < dmamap->dm_nsegs; seg++) {
6225 for (seglen = dmamap->dm_segs[seg].ds_len,
6226 curaddr = dmamap->dm_segs[seg].ds_addr;
6227 seglen != 0;
6228 curaddr += curlen, seglen -= curlen,
6229 nexttx = WM_NEXTTX(txq, nexttx)) {
6230 curlen = seglen;
6231
6232 /*
6233 * So says the Linux driver:
6234 * Work around for premature descriptor
6235 * write-backs in TSO mode. Append a
6236 * 4-byte sentinel descriptor.
6237 */
6238 if (use_tso &&
6239 seg == dmamap->dm_nsegs - 1 &&
6240 curlen > 8)
6241 curlen -= 4;
6242
6243 wm_set_dma_addr(
6244 &txq->txq_descs[nexttx].wtx_addr,
6245 curaddr);
6246 txq->txq_descs[nexttx].wtx_cmdlen =
6247 htole32(cksumcmd | curlen);
6248 txq->txq_descs[nexttx].wtx_fields.wtxu_status =
6249 0;
6250 txq->txq_descs[nexttx].wtx_fields.wtxu_options =
6251 cksumfields;
6252 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6253 lasttx = nexttx;
6254
6255 DPRINTF(WM_DEBUG_TX,
6256 ("%s: TX: desc %d: low %#" PRIx64 ", "
6257 "len %#04zx\n",
6258 device_xname(sc->sc_dev), nexttx,
6259 (uint64_t)curaddr, curlen));
6260 }
6261 }
6262
6263 KASSERT(lasttx != -1);
6264
6265 /*
6266 * Set up the command byte on the last descriptor of
6267 * the packet. If we're in the interrupt delay window,
6268 * delay the interrupt.
6269 */
6270 txq->txq_descs[lasttx].wtx_cmdlen |=
6271 htole32(WTX_CMD_EOP | WTX_CMD_RS);
6272
6273 /*
6274 * If VLANs are enabled and the packet has a VLAN tag, set
6275 * up the descriptor to encapsulate the packet for us.
6276 *
6277 * This is only valid on the last descriptor of the packet.
6278 */
6279 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6280 txq->txq_descs[lasttx].wtx_cmdlen |=
6281 htole32(WTX_CMD_VLE);
6282 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
6283 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6284 }
6285
6286 txs->txs_lastdesc = lasttx;
6287
6288 DPRINTF(WM_DEBUG_TX,
6289 ("%s: TX: desc %d: cmdlen 0x%08x\n",
6290 device_xname(sc->sc_dev),
6291 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6292
6293 /* Sync the descriptors we're using. */
6294 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6295 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
6296
6297 /* Give the packet to the chip. */
6298 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6299
6300 DPRINTF(WM_DEBUG_TX,
6301 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6302
6303 DPRINTF(WM_DEBUG_TX,
6304 ("%s: TX: finished transmitting packet, job %d\n",
6305 device_xname(sc->sc_dev), txq->txq_snext));
6306
6307 /* Advance the tx pointer. */
6308 txq->txq_free -= txs->txs_ndesc;
6309 txq->txq_next = nexttx;
6310
6311 txq->txq_sfree--;
6312 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6313
6314 /* Pass the packet to any BPF listeners. */
6315 bpf_mtap(ifp, m0);
6316 }
6317
6318 if (m0 != NULL) {
6319 ifp->if_flags |= IFF_OACTIVE;
6320 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
6322 m_freem(m0);
6323 }
6324
6325 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6326 /* No more slots; notify upper layer. */
6327 ifp->if_flags |= IFF_OACTIVE;
6328 }
6329
6330 if (txq->txq_free != ofree) {
6331 /* Set a watchdog timer in case the chip flakes out. */
6332 ifp->if_timer = 5;
6333 }
6334 }
6335
6336 /*
6337 * wm_nq_tx_offload:
6338 *
6339 * Set up TCP/IP checksumming parameters for the
6340 * specified packet, for NEWQUEUE devices
6341 */
6342 static int
6343 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
6344 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
6345 {
6346 struct wm_txqueue *txq = &sc->sc_txq[0];
6347 struct mbuf *m0 = txs->txs_mbuf;
6348 struct m_tag *mtag;
6349 uint32_t vl_len, mssidx, cmdc;
6350 struct ether_header *eh;
6351 int offset, iphl;
6352
6353 /*
6354 * XXX It would be nice if the mbuf pkthdr had offset
6355 * fields for the protocol headers.
6356 */
6357 *cmdlenp = 0;
6358 *fieldsp = 0;
6359
6360 eh = mtod(m0, struct ether_header *);
6361 switch (htons(eh->ether_type)) {
6362 case ETHERTYPE_IP:
6363 case ETHERTYPE_IPV6:
6364 offset = ETHER_HDR_LEN;
6365 break;
6366
6367 case ETHERTYPE_VLAN:
6368 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6369 break;
6370
6371 default:
6372 /* Don't support this protocol or encapsulation. */
6373 *do_csum = false;
6374 return 0;
6375 }
6376 *do_csum = true;
6377 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
6378 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
6379
6380 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
6381 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
6382
6383 if ((m0->m_pkthdr.csum_flags &
6384 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
6385 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6386 } else {
6387 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6388 }
6389 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
6390 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
6391
6392 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6393 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
6394 << NQTXC_VLLEN_VLAN_SHIFT);
6395 *cmdlenp |= NQTX_CMD_VLE;
6396 }
6397
6398 mssidx = 0;
6399
6400 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6401 int hlen = offset + iphl;
6402 int tcp_hlen;
6403 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6404
6405 if (__predict_false(m0->m_len <
6406 (hlen + sizeof(struct tcphdr)))) {
6407 /*
6408 * TCP/IP headers are not in the first mbuf; we need
6409 * to do this the slow and painful way. Let's just
6410 * hope this doesn't happen very often.
6411 */
6412 struct tcphdr th;
6413
6414 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
6415
6416 m_copydata(m0, hlen, sizeof(th), &th);
6417 if (v4) {
6418 struct ip ip;
6419
6420 m_copydata(m0, offset, sizeof(ip), &ip);
6421 ip.ip_len = 0;
6422 m_copyback(m0,
6423 offset + offsetof(struct ip, ip_len),
6424 sizeof(ip.ip_len), &ip.ip_len);
6425 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6426 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6427 } else {
6428 struct ip6_hdr ip6;
6429
6430 m_copydata(m0, offset, sizeof(ip6), &ip6);
6431 ip6.ip6_plen = 0;
6432 m_copyback(m0,
6433 offset + offsetof(struct ip6_hdr, ip6_plen),
6434 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6435 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6436 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6437 }
6438 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6439 sizeof(th.th_sum), &th.th_sum);
6440
6441 tcp_hlen = th.th_off << 2;
6442 } else {
6443 /*
6444 * TCP/IP headers are in the first mbuf; we can do
6445 * this the easy way.
6446 */
6447 struct tcphdr *th;
6448
6449 if (v4) {
6450 struct ip *ip =
6451 (void *)(mtod(m0, char *) + offset);
6452 th = (void *)(mtod(m0, char *) + hlen);
6453
6454 ip->ip_len = 0;
6455 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6456 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6457 } else {
6458 struct ip6_hdr *ip6 =
6459 (void *)(mtod(m0, char *) + offset);
6460 th = (void *)(mtod(m0, char *) + hlen);
6461
6462 ip6->ip6_plen = 0;
6463 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6464 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6465 }
6466 tcp_hlen = th->th_off << 2;
6467 }
6468 hlen += tcp_hlen;
6469 *cmdlenp |= NQTX_CMD_TSE;
6470
6471 if (v4) {
6472 WM_EVCNT_INCR(&sc->sc_ev_txtso);
6473 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
6474 } else {
6475 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
6476 *fieldsp |= NQTXD_FIELDS_TUXSM;
6477 }
6478 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
6479 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6480 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
6481 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
6482 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
6483 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
6484 } else {
6485 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
6486 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6487 }
6488
6489 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
6490 *fieldsp |= NQTXD_FIELDS_IXSM;
6491 cmdc |= NQTXC_CMD_IP4;
6492 }
6493
6494 if (m0->m_pkthdr.csum_flags &
6495 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6496 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
6497 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6498 cmdc |= NQTXC_CMD_TCP;
6499 } else {
6500 cmdc |= NQTXC_CMD_UDP;
6501 }
6502 cmdc |= NQTXC_CMD_IP4;
6503 *fieldsp |= NQTXD_FIELDS_TUXSM;
6504 }
6505 if (m0->m_pkthdr.csum_flags &
6506 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6507 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
6508 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6509 cmdc |= NQTXC_CMD_TCP;
6510 } else {
6511 cmdc |= NQTXC_CMD_UDP;
6512 }
6513 cmdc |= NQTXC_CMD_IP6;
6514 *fieldsp |= NQTXD_FIELDS_TUXSM;
6515 }
6516
6517 /* Fill in the context descriptor. */
6518 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
6519 htole32(vl_len);
6520 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
6521 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
6522 htole32(cmdc);
6523 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
6524 htole32(mssidx);
6525 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6526 DPRINTF(WM_DEBUG_TX,
6527 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
6528 txq->txq_next, 0, vl_len));
6529 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
6530 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6531 txs->txs_ndesc++;
6532 return 0;
6533 }
6534
6535 /*
6536 * wm_nq_start: [ifnet interface function]
6537 *
6538 * Start packet transmission on the interface for NEWQUEUE devices
6539 */
6540 static void
6541 wm_nq_start(struct ifnet *ifp)
6542 {
6543 struct wm_softc *sc = ifp->if_softc;
6544 struct wm_txqueue *txq = &sc->sc_txq[0];
6545
6546 WM_TX_LOCK(txq);
6547 if (!sc->sc_stopping)
6548 wm_nq_start_locked(ifp);
6549 WM_TX_UNLOCK(txq);
6550 }
6551
6552 static void
6553 wm_nq_start_locked(struct ifnet *ifp)
6554 {
6555 struct wm_softc *sc = ifp->if_softc;
6556 struct wm_txqueue *txq = &sc->sc_txq[0];
6557 struct mbuf *m0;
6558 struct m_tag *mtag;
6559 struct wm_txsoft *txs;
6560 bus_dmamap_t dmamap;
6561 int error, nexttx, lasttx = -1, seg, segs_needed;
6562 bool do_csum, sent;
6563
6564 KASSERT(WM_TX_LOCKED(txq));
6565
6566 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
6567 return;
6568
6569 sent = false;
6570
6571 /*
6572 * Loop through the send queue, setting up transmit descriptors
6573 * until we drain the queue, or use up all available transmit
6574 * descriptors.
6575 */
6576 for (;;) {
6577 m0 = NULL;
6578
6579 /* Get a work queue entry. */
6580 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6581 wm_txeof(sc);
6582 if (txq->txq_sfree == 0) {
6583 DPRINTF(WM_DEBUG_TX,
6584 ("%s: TX: no free job descriptors\n",
6585 device_xname(sc->sc_dev)));
6586 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
6587 break;
6588 }
6589 }
6590
6591 /* Grab a packet off the queue. */
6592 IFQ_DEQUEUE(&ifp->if_snd, m0);
6593 if (m0 == NULL)
6594 break;
6595
6596 DPRINTF(WM_DEBUG_TX,
6597 ("%s: TX: have packet to transmit: %p\n",
6598 device_xname(sc->sc_dev), m0));
6599
6600 txs = &txq->txq_soft[txq->txq_snext];
6601 dmamap = txs->txs_dmamap;
6602
6603 /*
6604 * Load the DMA map. If this fails, the packet either
6605 * didn't fit in the allotted number of segments, or we
6606 * were short on resources. For the too-many-segments
6607 * case, we simply report an error and drop the packet,
6608 * since we can't sanely copy a jumbo packet to a single
6609 * buffer.
6610 */
6611 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6612 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
6613 if (error) {
6614 if (error == EFBIG) {
6615 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6616 log(LOG_ERR, "%s: Tx packet consumes too many "
6617 "DMA segments, dropping...\n",
6618 device_xname(sc->sc_dev));
6619 wm_dump_mbuf_chain(sc, m0);
6620 m_freem(m0);
6621 continue;
6622 }
6623 /* Short on resources, just stop for now. */
6624 DPRINTF(WM_DEBUG_TX,
6625 ("%s: TX: dmamap load failed: %d\n",
6626 device_xname(sc->sc_dev), error));
6627 break;
6628 }
6629
6630 segs_needed = dmamap->dm_nsegs;
6631
6632 /*
6633 * Ensure we have enough descriptors free to describe
6634 * the packet. Note, we always reserve one descriptor
6635 * at the end of the ring due to the semantics of the
6636 * TDT register, plus one more in the event we need
6637 * to load offload context.
6638 */
6639 if (segs_needed > txq->txq_free - 2) {
6640 /*
6641 * Not enough free descriptors to transmit this
6642 * packet. We haven't committed anything yet,
6643 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
6645 * layer that there are no more slots left.
6646 */
6647 DPRINTF(WM_DEBUG_TX,
6648 ("%s: TX: need %d (%d) descriptors, have %d\n",
6649 device_xname(sc->sc_dev), dmamap->dm_nsegs,
6650 segs_needed, txq->txq_free - 1));
6651 ifp->if_flags |= IFF_OACTIVE;
6652 bus_dmamap_unload(sc->sc_dmat, dmamap);
6653 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
6654 break;
6655 }
6656
6657 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6658
6659 DPRINTF(WM_DEBUG_TX,
6660 ("%s: TX: packet has %d (%d) DMA segments\n",
6661 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6662
6663 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
6664
6665 /*
6666 * Store a pointer to the packet so that we can free it
6667 * later.
6668 *
6669 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments. This may be
6671 * incremented by 1 if we do checksum offload (a descriptor
6672 * is used to set the checksum context).
6673 */
6674 txs->txs_mbuf = m0;
6675 txs->txs_firstdesc = txq->txq_next;
6676 txs->txs_ndesc = segs_needed;
6677
6678 /* Set up offload parameters for this packet. */
6679 uint32_t cmdlen, fields, dcmdlen;
6680 if (m0->m_pkthdr.csum_flags &
6681 (M_CSUM_TSOv4|M_CSUM_TSOv6|
6682 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
6683 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
6684 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
6685 &do_csum) != 0) {
6686 /* Error message already displayed. */
6687 bus_dmamap_unload(sc->sc_dmat, dmamap);
6688 continue;
6689 }
6690 } else {
6691 do_csum = false;
6692 cmdlen = 0;
6693 fields = 0;
6694 }
6695
6696 /* Sync the DMA map. */
6697 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6698 BUS_DMASYNC_PREWRITE);
6699
6700 /* Initialize the first transmit descriptor. */
6701 nexttx = txq->txq_next;
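		/*
		 * Without offload a plain legacy descriptor will do; with
		 * offload we must use the advanced data descriptor format
		 * that pairs with the context descriptor written by
		 * wm_nq_tx_offload() above.
		 */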
6702 if (!do_csum) {
6703 /* setup a legacy descriptor */
6704 wm_set_dma_addr(
6705 &txq->txq_descs[nexttx].wtx_addr,
6706 dmamap->dm_segs[0].ds_addr);
6707 txq->txq_descs[nexttx].wtx_cmdlen =
6708 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
6709 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
6710 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
6711 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
6712 NULL) {
6713 txq->txq_descs[nexttx].wtx_cmdlen |=
6714 htole32(WTX_CMD_VLE);
6715 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
6716 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6717 } else {
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6719 }
6720 dcmdlen = 0;
6721 } else {
6722 /* setup an advanced data descriptor */
6723 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6724 htole64(dmamap->dm_segs[0].ds_addr);
6725 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
6726 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
6728 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
6729 htole32(fields);
6730 DPRINTF(WM_DEBUG_TX,
6731 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
6732 device_xname(sc->sc_dev), nexttx,
6733 (uint64_t)dmamap->dm_segs[0].ds_addr));
6734 DPRINTF(WM_DEBUG_TX,
6735 ("\t 0x%08x%08x\n", fields,
6736 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
6737 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
6738 }
6739
6740 lasttx = nexttx;
6741 nexttx = WM_NEXTTX(txq, nexttx);
		/*
		 * Fill in the remaining descriptors; the legacy and
		 * advanced formats lay out these fields the same way.
		 */
6746 for (seg = 1; seg < dmamap->dm_nsegs;
6747 seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
6748 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6749 htole64(dmamap->dm_segs[seg].ds_addr);
6750 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
6751 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
6752 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
6753 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
6754 lasttx = nexttx;
6755
6756 DPRINTF(WM_DEBUG_TX,
6757 ("%s: TX: desc %d: %#" PRIx64 ", "
6758 "len %#04zx\n",
6759 device_xname(sc->sc_dev), nexttx,
6760 (uint64_t)dmamap->dm_segs[seg].ds_addr,
6761 dmamap->dm_segs[seg].ds_len));
6762 }
6763
6764 KASSERT(lasttx != -1);
6765
6766 /*
6767 * Set up the command byte on the last descriptor of
6768 * the packet. If we're in the interrupt delay window,
6769 * delay the interrupt.
6770 */
6771 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
6772 (NQTX_CMD_EOP | NQTX_CMD_RS));
6773 txq->txq_descs[lasttx].wtx_cmdlen |=
6774 htole32(WTX_CMD_EOP | WTX_CMD_RS);
6775
6776 txs->txs_lastdesc = lasttx;
6777
6778 DPRINTF(WM_DEBUG_TX,
6779 ("%s: TX: desc %d: cmdlen 0x%08x\n",
6780 device_xname(sc->sc_dev),
6781 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6782
6783 /* Sync the descriptors we're using. */
6784 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6785 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
6786
6787 /* Give the packet to the chip. */
6788 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6789 sent = true;
6790
6791 DPRINTF(WM_DEBUG_TX,
6792 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6793
6794 DPRINTF(WM_DEBUG_TX,
6795 ("%s: TX: finished transmitting packet, job %d\n",
6796 device_xname(sc->sc_dev), txq->txq_snext));
6797
6798 /* Advance the tx pointer. */
6799 txq->txq_free -= txs->txs_ndesc;
6800 txq->txq_next = nexttx;
6801
6802 txq->txq_sfree--;
6803 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6804
6805 /* Pass the packet to any BPF listeners. */
6806 bpf_mtap(ifp, m0);
6807 }
6808
6809 if (m0 != NULL) {
6810 ifp->if_flags |= IFF_OACTIVE;
6811 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
6813 m_freem(m0);
6814 }
6815
6816 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6817 /* No more slots; notify upper layer. */
6818 ifp->if_flags |= IFF_OACTIVE;
6819 }
6820
6821 if (sent) {
6822 /* Set a watchdog timer in case the chip flakes out. */
6823 ifp->if_timer = 5;
6824 }
6825 }
6826
6827 /* Interrupt */
6828
6829 /*
6830 * wm_txeof:
6831 *
6832 * Helper; handle transmit interrupts.
6833 */
6834 static int
6835 wm_txeof(struct wm_softc *sc)
6836 {
6837 struct wm_txqueue *txq = &sc->sc_txq[0];
6838 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6839 struct wm_txsoft *txs;
6840 bool processed = false;
6841 int count = 0;
6842 int i;
6843 uint8_t status;
6844
6845 if (sc->sc_stopping)
6846 return 0;
6847
6848 ifp->if_flags &= ~IFF_OACTIVE;
6849
6850 /*
6851 * Go through the Tx list and free mbufs for those
6852 * frames which have been transmitted.
6853 */
6854 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
6855 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
6856 txs = &txq->txq_soft[i];
6857
6858 DPRINTF(WM_DEBUG_TX,
6859 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
6860
6861 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
6862 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6863
6864 status =
6865 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
6866 if ((status & WTX_ST_DD) == 0) {
6867 wm_cdtxsync(txq, txs->txs_lastdesc, 1,
6868 BUS_DMASYNC_PREREAD);
6869 break;
6870 }
6871
6872 processed = true;
6873 count++;
6874 DPRINTF(WM_DEBUG_TX,
6875 ("%s: TX: job %d done: descs %d..%d\n",
6876 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
6877 txs->txs_lastdesc));
6878
6879 /*
6880 * XXX We should probably be using the statistics
6881 * XXX registers, but I don't know if they exist
6882 * XXX on chips before the i82544.
6883 */
6884
6885 #ifdef WM_EVENT_COUNTERS
6886 if (status & WTX_ST_TU)
6887 WM_EVCNT_INCR(&sc->sc_ev_tu);
6888 #endif /* WM_EVENT_COUNTERS */
6889
6890 if (status & (WTX_ST_EC|WTX_ST_LC)) {
6891 ifp->if_oerrors++;
6892 if (status & WTX_ST_LC)
6893 log(LOG_WARNING, "%s: late collision\n",
6894 device_xname(sc->sc_dev));
6895 else if (status & WTX_ST_EC) {
6896 ifp->if_collisions += 16;
6897 log(LOG_WARNING, "%s: excessive collisions\n",
6898 device_xname(sc->sc_dev));
6899 }
6900 } else
6901 ifp->if_opackets++;
6902
6903 txq->txq_free += txs->txs_ndesc;
6904 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
6905 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6906 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
6907 m_freem(txs->txs_mbuf);
6908 txs->txs_mbuf = NULL;
6909 }
6910
6911 /* Update the dirty transmit buffer pointer. */
6912 txq->txq_sdirty = i;
6913 DPRINTF(WM_DEBUG_TX,
6914 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
6915
6916 if (count != 0)
6917 rnd_add_uint32(&sc->rnd_source, count);
6918
6919 /*
6920 * If there are no more pending transmissions, cancel the watchdog
6921 * timer.
6922 */
6923 if (txq->txq_sfree == WM_TXQUEUELEN(txq))
6924 ifp->if_timer = 0;
6925
6926 return processed;
6927 }
6928
6929 /*
6930 * wm_rxeof:
6931 *
6932 * Helper; handle receive interrupts.
6933 */
6934 static void
6935 wm_rxeof(struct wm_rxqueue *rxq)
6936 {
6937 struct wm_softc *sc = rxq->rxq_sc;
6938 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6939 struct wm_rxsoft *rxs;
6940 struct mbuf *m;
6941 int i, len;
6942 int count = 0;
6943 uint8_t status, errors;
6944 uint16_t vlantag;
6945
6946 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
6947 rxs = &rxq->rxq_soft[i];
6948
6949 DPRINTF(WM_DEBUG_RX,
6950 ("%s: RX: checking descriptor %d\n",
6951 device_xname(sc->sc_dev), i));
6952
6953 wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6954
6955 status = rxq->rxq_descs[i].wrx_status;
6956 errors = rxq->rxq_descs[i].wrx_errors;
6957 len = le16toh(rxq->rxq_descs[i].wrx_len);
6958 vlantag = rxq->rxq_descs[i].wrx_special;
6959
6960 if ((status & WRX_ST_DD) == 0) {
6961 /* We have processed all of the receive descriptors. */
6962 wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
6963 break;
6964 }
6965
6966 count++;
6967 if (__predict_false(rxq->rxq_discard)) {
6968 DPRINTF(WM_DEBUG_RX,
6969 ("%s: RX: discarding contents of descriptor %d\n",
6970 device_xname(sc->sc_dev), i));
6971 wm_init_rxdesc(rxq, i);
6972 if (status & WRX_ST_EOP) {
6973 /* Reset our state. */
6974 DPRINTF(WM_DEBUG_RX,
6975 ("%s: RX: resetting rxdiscard -> 0\n",
6976 device_xname(sc->sc_dev)));
6977 rxq->rxq_discard = 0;
6978 }
6979 continue;
6980 }
6981
6982 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6983 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
6984
6985 m = rxs->rxs_mbuf;
6986
6987 /*
6988 * Add a new receive buffer to the ring, unless of
6989 * course the length is zero. Treat the latter as a
6990 * failed mapping.
6991 */
6992 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
6993 /*
6994 * Failed, throw away what we've done so
6995 * far, and discard the rest of the packet.
6996 */
6997 ifp->if_ierrors++;
6998 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6999 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
7000 wm_init_rxdesc(rxq, i);
7001 if ((status & WRX_ST_EOP) == 0)
7002 rxq->rxq_discard = 1;
7003 if (rxq->rxq_head != NULL)
7004 m_freem(rxq->rxq_head);
7005 WM_RXCHAIN_RESET(rxq);
7006 DPRINTF(WM_DEBUG_RX,
7007 ("%s: RX: Rx buffer allocation failed, "
7008 "dropping packet%s\n", device_xname(sc->sc_dev),
7009 rxq->rxq_discard ? " (discard)" : ""));
7010 continue;
7011 }
7012
7013 m->m_len = len;
7014 rxq->rxq_len += len;
7015 DPRINTF(WM_DEBUG_RX,
7016 ("%s: RX: buffer at %p len %d\n",
7017 device_xname(sc->sc_dev), m->m_data, len));
7018
7019 /* If this is not the end of the packet, keep looking. */
7020 if ((status & WRX_ST_EOP) == 0) {
7021 WM_RXCHAIN_LINK(rxq, m);
7022 DPRINTF(WM_DEBUG_RX,
7023 ("%s: RX: not yet EOP, rxlen -> %d\n",
7024 device_xname(sc->sc_dev), rxq->rxq_len));
7025 continue;
7026 }
7027
		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS (not all chips can be
		 * configured to strip it), so we need to trim it off
		 * here.  We may need to shorten the previous mbuf in
		 * the chain if the current mbuf is too short.
		 *
		 * The exceptions are the I350, I354, I210 and I211,
		 * where the FCS is already stripped; on the I350 in
		 * particular, the RCTL_SECRC bit is always set due to
		 * an erratum, so we must not trim it again.
		 */
7038 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
7039 && (sc->sc_type != WM_T_I210)
7040 && (sc->sc_type != WM_T_I211)) {
7041 if (m->m_len < ETHER_CRC_LEN) {
7042 rxq->rxq_tail->m_len
7043 -= (ETHER_CRC_LEN - m->m_len);
7044 m->m_len = 0;
7045 } else
7046 m->m_len -= ETHER_CRC_LEN;
7047 len = rxq->rxq_len - ETHER_CRC_LEN;
7048 } else
7049 len = rxq->rxq_len;
7050
7051 WM_RXCHAIN_LINK(rxq, m);
7052
7053 *rxq->rxq_tailp = NULL;
7054 m = rxq->rxq_head;
7055
7056 WM_RXCHAIN_RESET(rxq);
7057
7058 DPRINTF(WM_DEBUG_RX,
7059 ("%s: RX: have entire packet, len -> %d\n",
7060 device_xname(sc->sc_dev), len));
7061
7062 /* If an error occurred, update stats and drop the packet. */
7063 if (errors &
7064 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
7065 if (errors & WRX_ER_SE)
7066 log(LOG_WARNING, "%s: symbol error\n",
7067 device_xname(sc->sc_dev));
7068 else if (errors & WRX_ER_SEQ)
7069 log(LOG_WARNING, "%s: receive sequence error\n",
7070 device_xname(sc->sc_dev));
7071 else if (errors & WRX_ER_CE)
7072 log(LOG_WARNING, "%s: CRC error\n",
7073 device_xname(sc->sc_dev));
7074 m_freem(m);
7075 continue;
7076 }
7077
7078 /* No errors. Receive the packet. */
7079 m->m_pkthdr.rcvif = ifp;
7080 m->m_pkthdr.len = len;
7081
7082 /*
7083 * If VLANs are enabled, VLAN packets have been unwrapped
7084 * for us. Associate the tag with the packet.
7085 */
7086 /* XXXX should check for i350 and i354 */
7087 if ((status & WRX_ST_VP) != 0) {
7088 VLAN_INPUT_TAG(ifp, m,
7089 le16toh(vlantag),
7090 continue);
7091 }
7092
7093 /* Set up checksum info for this packet. */
7094 if ((status & WRX_ST_IXSM) == 0) {
7095 if (status & WRX_ST_IPCS) {
7096 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
7097 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
7098 if (errors & WRX_ER_IPE)
7099 m->m_pkthdr.csum_flags |=
7100 M_CSUM_IPv4_BAD;
7101 }
7102 if (status & WRX_ST_TCPCS) {
7103 /*
7104 * Note: we don't know if this was TCP or UDP,
7105 * so we just set both bits, and expect the
7106 * upper layers to deal.
7107 */
7108 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
7109 m->m_pkthdr.csum_flags |=
7110 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7111 M_CSUM_TCPv6 | M_CSUM_UDPv6;
7112 if (errors & WRX_ER_TCPE)
7113 m->m_pkthdr.csum_flags |=
7114 M_CSUM_TCP_UDP_BAD;
7115 }
7116 }
7117
7118 ifp->if_ipackets++;
7119
7120 WM_RX_UNLOCK(rxq);
7121
7122 /* Pass this up to any BPF listeners. */
7123 bpf_mtap(ifp, m);
7124
7125 /* Pass it on. */
7126 (*ifp->if_input)(ifp, m);
7127
7128 WM_RX_LOCK(rxq);
7129
7130 if (sc->sc_stopping)
7131 break;
7132 }
7133
7134 /* Update the receive pointer. */
7135 rxq->rxq_ptr = i;
7136 if (count != 0)
7137 rnd_add_uint32(&sc->rnd_source, count);
7138
7139 DPRINTF(WM_DEBUG_RX,
7140 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
7141 }
7142
7143 /*
7144 * wm_linkintr_gmii:
7145 *
7146 * Helper; handle link interrupts for GMII.
7147 */
7148 static void
7149 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
7150 {
7151
7152 KASSERT(WM_CORE_LOCKED(sc));
7153
7154 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7155 __func__));
7156
7157 if (icr & ICR_LSC) {
7158 DPRINTF(WM_DEBUG_LINK,
7159 ("%s: LINK: LSC -> mii_pollstat\n",
7160 device_xname(sc->sc_dev)));
7161 mii_pollstat(&sc->sc_mii);
7162 if (sc->sc_type == WM_T_82543) {
7163 int miistatus, active;
7164
7165 /*
7166 * With 82543, we need to force speed and
7167 * duplex on the MAC equal to what the PHY
7168 * speed and duplex configuration is.
7169 */
7170 miistatus = sc->sc_mii.mii_media_status;
7171
7172 if (miistatus & IFM_ACTIVE) {
7173 active = sc->sc_mii.mii_media_active;
7174 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7175 switch (IFM_SUBTYPE(active)) {
7176 case IFM_10_T:
7177 sc->sc_ctrl |= CTRL_SPEED_10;
7178 break;
7179 case IFM_100_TX:
7180 sc->sc_ctrl |= CTRL_SPEED_100;
7181 break;
7182 case IFM_1000_T:
7183 sc->sc_ctrl |= CTRL_SPEED_1000;
7184 break;
7185 default:
7186 /*
					 * Fiber?  Should not get here.
7189 */
7190 printf("unknown media (%x)\n",
7191 active);
7192 break;
7193 }
7194 if (active & IFM_FDX)
7195 sc->sc_ctrl |= CTRL_FD;
7196 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7197 }
7198 } else if ((sc->sc_type == WM_T_ICH8)
7199 && (sc->sc_phytype == WMPHY_IGP_3)) {
7200 wm_kmrn_lock_loss_workaround_ich8lan(sc);
7201 } else if (sc->sc_type == WM_T_PCH) {
7202 wm_k1_gig_workaround_hv(sc,
7203 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
7204 }
7205
7206 if ((sc->sc_phytype == WMPHY_82578)
7207 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
7208 == IFM_1000_T)) {
7209
7210 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
7211 delay(200*1000); /* XXX too big */
7212
7213 /* Link stall fix for link up */
7214 wm_gmii_hv_writereg(sc->sc_dev, 1,
7215 HV_MUX_DATA_CTRL,
7216 HV_MUX_DATA_CTRL_GEN_TO_MAC
7217 | HV_MUX_DATA_CTRL_FORCE_SPEED);
7218 wm_gmii_hv_writereg(sc->sc_dev, 1,
7219 HV_MUX_DATA_CTRL,
7220 HV_MUX_DATA_CTRL_GEN_TO_MAC);
7221 }
7222 }
7223 } else if (icr & ICR_RXSEQ) {
7224 DPRINTF(WM_DEBUG_LINK,
7225 ("%s: LINK Receive sequence error\n",
7226 device_xname(sc->sc_dev)));
7227 }
7228 }
7229
7230 /*
7231 * wm_linkintr_tbi:
7232 *
7233 * Helper; handle link interrupts for TBI mode.
7234 */
7235 static void
7236 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
7237 {
7238 uint32_t status;
7239
7240 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7241 __func__));
7242
7243 status = CSR_READ(sc, WMREG_STATUS);
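	/* STATUS_LU gives the current link state; ICR_LSC says it changed. */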
7244 if (icr & ICR_LSC) {
7245 if (status & STATUS_LU) {
7246 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
7247 device_xname(sc->sc_dev),
7248 (status & STATUS_FD) ? "FDX" : "HDX"));
7249 /*
7250 * NOTE: CTRL will update TFCE and RFCE automatically,
7251 * so we should update sc->sc_ctrl
7252 */
7253
7254 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7255 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7256 sc->sc_fcrtl &= ~FCRTL_XONE;
7257 if (status & STATUS_FD)
7258 sc->sc_tctl |=
7259 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7260 else
7261 sc->sc_tctl |=
7262 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7263 if (sc->sc_ctrl & CTRL_TFCE)
7264 sc->sc_fcrtl |= FCRTL_XONE;
7265 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7266 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7267 WMREG_OLD_FCRTL : WMREG_FCRTL,
7268 sc->sc_fcrtl);
7269 sc->sc_tbi_linkup = 1;
7270 } else {
7271 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
7272 device_xname(sc->sc_dev)));
7273 sc->sc_tbi_linkup = 0;
7274 }
7275 /* Update LED */
7276 wm_tbi_serdes_set_linkled(sc);
7277 } else if (icr & ICR_RXSEQ) {
7278 DPRINTF(WM_DEBUG_LINK,
7279 ("%s: LINK: Receive sequence error\n",
7280 device_xname(sc->sc_dev)));
7281 }
7282 }
7283
7284 /*
7285 * wm_linkintr_serdes:
7286 *
 *	Helper; handle link interrupts for SERDES mode.
7288 */
7289 static void
7290 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
7291 {
7292 struct mii_data *mii = &sc->sc_mii;
7293 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7294 uint32_t pcs_adv, pcs_lpab, reg;
7295
7296 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7297 __func__));
7298
7299 if (icr & ICR_LSC) {
7300 /* Check PCS */
7301 reg = CSR_READ(sc, WMREG_PCS_LSTS);
7302 if ((reg & PCS_LSTS_LINKOK) != 0) {
7303 mii->mii_media_status |= IFM_ACTIVE;
7304 sc->sc_tbi_linkup = 1;
7305 } else {
			mii->mii_media_active |= IFM_NONE;
7307 sc->sc_tbi_linkup = 0;
7308 wm_tbi_serdes_set_linkled(sc);
7309 return;
7310 }
7311 mii->mii_media_active |= IFM_1000_SX;
7312 if ((reg & PCS_LSTS_FDX) != 0)
7313 mii->mii_media_active |= IFM_FDX;
7314 else
7315 mii->mii_media_active |= IFM_HDX;
7316 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7317 /* Check flow */
7318 reg = CSR_READ(sc, WMREG_PCS_LSTS);
7319 if ((reg & PCS_LSTS_AN_COMP) == 0) {
7320 DPRINTF(WM_DEBUG_LINK,
7321 ("XXX LINKOK but not ACOMP\n"));
7322 return;
7323 }
7324 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
7325 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
7326 DPRINTF(WM_DEBUG_LINK,
7327 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
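			/*
			 * Resolve pause ability as in IEEE 802.3 Annex 28B:
			 * symmetric pause on both sides enables flow control
			 * in both directions; the asymmetric combinations
			 * below give Tx-only or Rx-only pause.
			 */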
7328 if ((pcs_adv & TXCW_SYM_PAUSE)
7329 && (pcs_lpab & TXCW_SYM_PAUSE)) {
7330 mii->mii_media_active |= IFM_FLOW
7331 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
7332 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
7333 && (pcs_adv & TXCW_ASYM_PAUSE)
7334 && (pcs_lpab & TXCW_SYM_PAUSE)
7335 && (pcs_lpab & TXCW_ASYM_PAUSE))
7336 mii->mii_media_active |= IFM_FLOW
7337 | IFM_ETH_TXPAUSE;
7338 else if ((pcs_adv & TXCW_SYM_PAUSE)
7339 && (pcs_adv & TXCW_ASYM_PAUSE)
7340 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
7341 && (pcs_lpab & TXCW_ASYM_PAUSE))
7342 mii->mii_media_active |= IFM_FLOW
7343 | IFM_ETH_RXPAUSE;
7344 }
7345 /* Update LED */
7346 wm_tbi_serdes_set_linkled(sc);
7347 } else {
7348 DPRINTF(WM_DEBUG_LINK,
7349 ("%s: LINK: Receive sequence error\n",
7350 device_xname(sc->sc_dev)));
7351 }
7352 }
7353
7354 /*
7355 * wm_linkintr:
7356 *
7357 * Helper; handle link interrupts.
7358 */
7359 static void
7360 wm_linkintr(struct wm_softc *sc, uint32_t icr)
7361 {
7362
7363 KASSERT(WM_CORE_LOCKED(sc));
7364
7365 if (sc->sc_flags & WM_F_HAS_MII)
7366 wm_linkintr_gmii(sc, icr);
7367 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7368 && (sc->sc_type >= WM_T_82575))
7369 wm_linkintr_serdes(sc, icr);
7370 else
7371 wm_linkintr_tbi(sc, icr);
7372 }
7373
7374 /*
7375 * wm_intr_legacy:
7376 *
7377 * Interrupt service routine for INTx and MSI.
7378 */
7379 static int
7380 wm_intr_legacy(void *arg)
7381 {
7382 struct wm_softc *sc = arg;
7383 struct wm_txqueue *txq = &sc->sc_txq[0];
7384 struct wm_rxqueue *rxq = &sc->sc_rxq[0];
7385 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7386 uint32_t icr, rndval = 0;
7387 int handled = 0;
7388
7389 DPRINTF(WM_DEBUG_TX,
7390 ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
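	/*
	 * Reading ICR acknowledges (clears) the asserted causes, so keep
	 * looping until no cause we care about remains set.
	 */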
7391 while (1 /* CONSTCOND */) {
7392 icr = CSR_READ(sc, WMREG_ICR);
7393 if ((icr & sc->sc_icr) == 0)
7394 break;
7395 if (rndval == 0)
7396 rndval = icr;
7397
7398 WM_RX_LOCK(rxq);
7399
7400 if (sc->sc_stopping) {
7401 WM_RX_UNLOCK(rxq);
7402 break;
7403 }
7404
7405 handled = 1;
7406
7407 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7408 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
7409 DPRINTF(WM_DEBUG_RX,
7410 ("%s: RX: got Rx intr 0x%08x\n",
7411 device_xname(sc->sc_dev),
7412 icr & (ICR_RXDMT0|ICR_RXT0)));
7413 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
7414 }
7415 #endif
7416 wm_rxeof(rxq);
7417
7418 WM_RX_UNLOCK(rxq);
7419 WM_TX_LOCK(txq);
7420
7421 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7422 if (icr & ICR_TXDW) {
7423 DPRINTF(WM_DEBUG_TX,
7424 ("%s: TX: got TXDW interrupt\n",
7425 device_xname(sc->sc_dev)));
7426 WM_EVCNT_INCR(&sc->sc_ev_txdw);
7427 }
7428 #endif
7429 wm_txeof(sc);
7430
7431 WM_TX_UNLOCK(txq);
7432 WM_CORE_LOCK(sc);
7433
7434 if (icr & (ICR_LSC|ICR_RXSEQ)) {
7435 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7436 wm_linkintr(sc, icr);
7437 }
7438
7439 WM_CORE_UNLOCK(sc);
7440
7441 if (icr & ICR_RXO) {
7442 #if defined(WM_DEBUG)
7443 log(LOG_WARNING, "%s: Receive overrun\n",
7444 device_xname(sc->sc_dev));
7445 #endif /* defined(WM_DEBUG) */
7446 }
7447 }
7448
7449 rnd_add_uint32(&sc->rnd_source, rndval);
7450
7451 if (handled) {
7452 /* Try to get more packets going. */
7453 ifp->if_start(ifp);
7454 }
7455
7456 return handled;
7457 }
7458
7459 #ifdef WM_MSI_MSIX
7460 /*
7461 * wm_txintr_msix:
7462 *
7463 * Interrupt service routine for TX complete interrupt for MSI-X.
7464 */
7465 static int
7466 wm_txintr_msix(void *arg)
7467 {
7468 struct wm_txqueue *txq = arg;
7469 struct wm_softc *sc = txq->txq_sc;
7470 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7471 int handled = 0;
7472
7473 DPRINTF(WM_DEBUG_TX,
7474 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
7475
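	/* Mask this queue's interrupt while servicing; re-enabled below. */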
7476 if (sc->sc_type == WM_T_82574)
7477 CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(txq->txq_id)); /* 82574 only */
7478 else if (sc->sc_type == WM_T_82575)
7479 CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(txq->txq_id));
7480 else
7481 CSR_WRITE(sc, WMREG_EIMC, 1 << txq->txq_intr_idx);
7482
7483 WM_TX_LOCK(txq);
7484
7485 if (sc->sc_stopping)
7486 goto out;
7487
7488 WM_EVCNT_INCR(&sc->sc_ev_txdw);
7489 handled = wm_txeof(sc);
7490
7491 out:
7492 WM_TX_UNLOCK(txq);
7493
7494 if (sc->sc_type == WM_T_82574)
7495 CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(txq->txq_id)); /* 82574 only */
7496 else if (sc->sc_type == WM_T_82575)
7497 CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(txq->txq_id));
7498 else
7499 CSR_WRITE(sc, WMREG_EIMS, 1 << txq->txq_intr_idx);
7500
7501 if (handled) {
7502 /* Try to get more packets going. */
7503 ifp->if_start(ifp);
7504 }
7505
7506 return handled;
7507 }
7508
7509 /*
7510 * wm_rxintr_msix:
7511 *
7512 * Interrupt service routine for RX interrupt for MSI-X.
7513 */
7514 static int
7515 wm_rxintr_msix(void *arg)
7516 {
7517 struct wm_rxqueue *rxq = arg;
7518 struct wm_softc *sc = rxq->rxq_sc;
7519
7520 DPRINTF(WM_DEBUG_RX,
7521 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
7522
7523 if (sc->sc_type == WM_T_82574)
7524 CSR_WRITE(sc, WMREG_IMC, ICR_RXQ(rxq->rxq_id)); /* 82574 only */
7525 else if (sc->sc_type == WM_T_82575)
7526 CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE(rxq->rxq_id));
7527 else
7528 CSR_WRITE(sc, WMREG_EIMC, 1 << rxq->rxq_intr_idx);
7529
7530 WM_RX_LOCK(rxq);
7531
7532 if (sc->sc_stopping)
7533 goto out;
7534
7535 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
7536 wm_rxeof(rxq);
7537
7538 out:
7539 WM_RX_UNLOCK(rxq);
7540
7541 if (sc->sc_type == WM_T_82574)
7542 CSR_WRITE(sc, WMREG_IMS, ICR_RXQ(rxq->rxq_id));
7543 else if (sc->sc_type == WM_T_82575)
7544 CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE(rxq->rxq_id));
7545 else
7546 CSR_WRITE(sc, WMREG_EIMS, 1 << rxq->rxq_intr_idx);
7547
7548 return 1;
7549 }
7550
7551 /*
7552 * wm_linkintr_msix:
7553 *
7554 * Interrupt service routine for link status change for MSI-X.
7555 */
7556 static int
7557 wm_linkintr_msix(void *arg)
7558 {
7559 struct wm_softc *sc = arg;
7560 uint32_t reg;
7561
7562 DPRINTF(WM_DEBUG_LINK,
7563 ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
7564
7565 reg = CSR_READ(sc, WMREG_ICR);
7566 WM_CORE_LOCK(sc);
7567 if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
7568 goto out;
7569
7570 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7571 wm_linkintr(sc, ICR_LSC);
7572
7573 out:
7574 WM_CORE_UNLOCK(sc);
7575
7576 if (sc->sc_type == WM_T_82574)
7577 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); /* 82574 only */
7578 else if (sc->sc_type == WM_T_82575)
7579 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
7580 else
7581 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
7582
7583 return 1;
7584 }
7585 #endif /* WM_MSI_MSIX */
7586
7587 /*
7588 * Media related.
7589 * GMII, SGMII, TBI (and SERDES)
7590 */
7591
7592 /* Common */
7593
7594 /*
7595 * wm_tbi_serdes_set_linkled:
7596 *
7597 * Update the link LED on TBI and SERDES devices.
7598 */
7599 static void
7600 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
7601 {
7602
7603 if (sc->sc_tbi_linkup)
7604 sc->sc_ctrl |= CTRL_SWDPIN(0);
7605 else
7606 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7607
7608 /* 82540 or newer devices are active low */
7609 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7610
7611 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7612 }
7613
7614 /* GMII related */
7615
7616 /*
7617 * wm_gmii_reset:
7618 *
7619 * Reset the PHY.
7620 */
7621 static void
7622 wm_gmii_reset(struct wm_softc *sc)
7623 {
7624 uint32_t reg;
7625 int rv;
7626
7627 /* get phy semaphore */
7628 switch (sc->sc_type) {
7629 case WM_T_82571:
7630 case WM_T_82572:
7631 case WM_T_82573:
7632 case WM_T_82574:
7633 case WM_T_82583:
7634 /* XXX should get sw semaphore, too */
7635 rv = wm_get_swsm_semaphore(sc);
7636 break;
7637 case WM_T_82575:
7638 case WM_T_82576:
7639 case WM_T_82580:
7640 case WM_T_I350:
7641 case WM_T_I354:
7642 case WM_T_I210:
7643 case WM_T_I211:
7644 case WM_T_80003:
7645 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7646 break;
7647 case WM_T_ICH8:
7648 case WM_T_ICH9:
7649 case WM_T_ICH10:
7650 case WM_T_PCH:
7651 case WM_T_PCH2:
7652 case WM_T_PCH_LPT:
7653 rv = wm_get_swfwhw_semaphore(sc);
7654 break;
7655 default:
		/* Nothing to do */
7657 rv = 0;
7658 break;
7659 }
7660 if (rv != 0) {
7661 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7662 __func__);
7663 return;
7664 }
7665
7666 switch (sc->sc_type) {
7667 case WM_T_82542_2_0:
7668 case WM_T_82542_2_1:
7669 /* null */
7670 break;
7671 case WM_T_82543:
7672 /*
7673 * With 82543, we need to force speed and duplex on the MAC
7674 * equal to what the PHY speed and duplex configuration is.
7675 * In addition, we need to perform a hardware reset on the PHY
7676 * to take it out of reset.
7677 */
7678 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
7679 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7680
7681 /* The PHY reset pin is active-low. */
7682 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7683 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
7684 CTRL_EXT_SWDPIN(4));
7685 reg |= CTRL_EXT_SWDPIO(4);
7686
7687 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7688 CSR_WRITE_FLUSH(sc);
7689 delay(10*1000);
7690
7691 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
7692 CSR_WRITE_FLUSH(sc);
7693 delay(150);
7694 #if 0
7695 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
7696 #endif
7697 delay(20*1000); /* XXX extra delay to get PHY ID? */
7698 break;
7699 case WM_T_82544: /* reset 10000us */
7700 case WM_T_82540:
7701 case WM_T_82545:
7702 case WM_T_82545_3:
7703 case WM_T_82546:
7704 case WM_T_82546_3:
7705 case WM_T_82541:
7706 case WM_T_82541_2:
7707 case WM_T_82547:
7708 case WM_T_82547_2:
7709 case WM_T_82571: /* reset 100us */
7710 case WM_T_82572:
7711 case WM_T_82573:
7712 case WM_T_82574:
7713 case WM_T_82575:
7714 case WM_T_82576:
7715 case WM_T_82580:
7716 case WM_T_I350:
7717 case WM_T_I354:
7718 case WM_T_I210:
7719 case WM_T_I211:
7720 case WM_T_82583:
7721 case WM_T_80003:
7722 /* generic reset */
7723 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7724 CSR_WRITE_FLUSH(sc);
7725 delay(20000);
7726 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7727 CSR_WRITE_FLUSH(sc);
7728 delay(20000);
7729
7730 if ((sc->sc_type == WM_T_82541)
7731 || (sc->sc_type == WM_T_82541_2)
7732 || (sc->sc_type == WM_T_82547)
7733 || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for igp are done in igp_reset() */
7735 /* XXX add code to set LED after phy reset */
7736 }
7737 break;
7738 case WM_T_ICH8:
7739 case WM_T_ICH9:
7740 case WM_T_ICH10:
7741 case WM_T_PCH:
7742 case WM_T_PCH2:
7743 case WM_T_PCH_LPT:
7744 /* generic reset */
7745 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7746 CSR_WRITE_FLUSH(sc);
7747 delay(100);
7748 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7749 CSR_WRITE_FLUSH(sc);
7750 delay(150);
7751 break;
7752 default:
7753 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
7754 __func__);
7755 break;
7756 }
7757
7758 /* release PHY semaphore */
7759 switch (sc->sc_type) {
7760 case WM_T_82571:
7761 case WM_T_82572:
7762 case WM_T_82573:
7763 case WM_T_82574:
7764 case WM_T_82583:
7765 /* XXX should put sw semaphore, too */
7766 wm_put_swsm_semaphore(sc);
7767 break;
7768 case WM_T_82575:
7769 case WM_T_82576:
7770 case WM_T_82580:
7771 case WM_T_I350:
7772 case WM_T_I354:
7773 case WM_T_I210:
7774 case WM_T_I211:
7775 case WM_T_80003:
7776 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7777 break;
7778 case WM_T_ICH8:
7779 case WM_T_ICH9:
7780 case WM_T_ICH10:
7781 case WM_T_PCH:
7782 case WM_T_PCH2:
7783 case WM_T_PCH_LPT:
7784 wm_put_swfwhw_semaphore(sc);
7785 break;
7786 default:
		/* Nothing to do */
7789 break;
7790 }
7791
7792 /* get_cfg_done */
7793 wm_get_cfg_done(sc);
7794
7795 /* extra setup */
7796 switch (sc->sc_type) {
7797 case WM_T_82542_2_0:
7798 case WM_T_82542_2_1:
7799 case WM_T_82543:
7800 case WM_T_82544:
7801 case WM_T_82540:
7802 case WM_T_82545:
7803 case WM_T_82545_3:
7804 case WM_T_82546:
7805 case WM_T_82546_3:
7806 case WM_T_82541_2:
7807 case WM_T_82547_2:
7808 case WM_T_82571:
7809 case WM_T_82572:
7810 case WM_T_82573:
7811 case WM_T_82574:
7812 case WM_T_82575:
7813 case WM_T_82576:
7814 case WM_T_82580:
7815 case WM_T_I350:
7816 case WM_T_I354:
7817 case WM_T_I210:
7818 case WM_T_I211:
7819 case WM_T_82583:
7820 case WM_T_80003:
7821 /* null */
7822 break;
7823 case WM_T_82541:
7824 case WM_T_82547:
7825 /* XXX Configure actively LED after PHY reset */
7826 break;
7827 case WM_T_ICH8:
7828 case WM_T_ICH9:
7829 case WM_T_ICH10:
7830 case WM_T_PCH:
7831 case WM_T_PCH2:
7832 case WM_T_PCH_LPT:
		/* Allow time for h/w to get to a quiescent state after reset */
7834 delay(10*1000);
7835
7836 if (sc->sc_type == WM_T_PCH)
7837 wm_hv_phy_workaround_ich8lan(sc);
7838
7839 if (sc->sc_type == WM_T_PCH2)
7840 wm_lv_phy_workaround_ich8lan(sc);
7841
7842 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
7843 /*
7844 * dummy read to clear the phy wakeup bit after lcd
7845 * reset
7846 */
7847 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
7848 }
7849
7850 /*
		 * XXX Configure the LCD with the extended configuration region
7852 * in NVM
7853 */
7854
7855 /* Configure the LCD with the OEM bits in NVM */
7856 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
7857 || (sc->sc_type == WM_T_PCH_LPT)) {
7858 /*
7859 * Disable LPLU.
7860 * XXX It seems that 82567 has LPLU, too.
7861 */
7862 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
7864 reg |= HV_OEM_BITS_ANEGNOW;
7865 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
7866 }
7867 break;
7868 default:
7869 panic("%s: unknown type\n", __func__);
7870 break;
7871 }
7872 }
7873
7874 /*
7875 * wm_get_phy_id_82575:
7876 *
 *	Return the PHY address of the SGMII PHY, or -1 on failure.
7878 */
7879 static int
7880 wm_get_phy_id_82575(struct wm_softc *sc)
7881 {
7882 uint32_t reg;
7883 int phyid = -1;
7884
7885 /* XXX */
7886 if ((sc->sc_flags & WM_F_SGMII) == 0)
7887 return -1;
7888
7889 if (wm_sgmii_uses_mdio(sc)) {
7890 switch (sc->sc_type) {
7891 case WM_T_82575:
7892 case WM_T_82576:
7893 reg = CSR_READ(sc, WMREG_MDIC);
7894 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
7895 break;
7896 case WM_T_82580:
7897 case WM_T_I350:
7898 case WM_T_I354:
7899 case WM_T_I210:
7900 case WM_T_I211:
7901 reg = CSR_READ(sc, WMREG_MDICNFG);
7902 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
7903 break;
7904 default:
7905 return -1;
7906 }
7907 }
7908
7909 return phyid;
7910 }
7911
7913 /*
7914 * wm_gmii_mediainit:
7915 *
7916 * Initialize media for use on 1000BASE-T devices.
7917 */
7918 static void
7919 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
7920 {
7921 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7922 struct mii_data *mii = &sc->sc_mii;
7923 uint32_t reg;
7924
7925 /* We have GMII. */
7926 sc->sc_flags |= WM_F_HAS_MII;
7927
7928 if (sc->sc_type == WM_T_80003)
7929 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7930 else
7931 sc->sc_tipg = TIPG_1000T_DFLT;
7932
7933 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
7934 if ((sc->sc_type == WM_T_82580)
7935 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
7936 || (sc->sc_type == WM_T_I211)) {
7937 reg = CSR_READ(sc, WMREG_PHPM);
7938 reg &= ~PHPM_GO_LINK_D;
7939 CSR_WRITE(sc, WMREG_PHPM, reg);
7940 }
7941
7942 /*
7943 * Let the chip set speed/duplex on its own based on
7944 * signals from the PHY.
7945 * XXXbouyer - I'm not sure this is right for the 80003,
7946 * the em driver only sets CTRL_SLU here - but it seems to work.
7947 */
7948 sc->sc_ctrl |= CTRL_SLU;
7949 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7950
7951 /* Initialize our media structures and probe the GMII. */
7952 mii->mii_ifp = ifp;
7953
7954 /*
7955 * Determine the PHY access method.
7956 *
7957 * For SGMII, use SGMII specific method.
7958 *
7959 * For some devices, we can determine the PHY access method
7960 * from sc_type.
7961 *
7962 * For ICH and PCH variants, it's difficult to determine the PHY
7963 * access method by sc_type, so use the PCI product ID for some
7964 * devices.
7965 * For other ICH8 variants, try to use igp's method. If the PHY
7966 * can't detect, then use bm's method.
7967 */
7968 switch (prodid) {
7969 case PCI_PRODUCT_INTEL_PCH_M_LM:
7970 case PCI_PRODUCT_INTEL_PCH_M_LC:
7971 /* 82577 */
7972 sc->sc_phytype = WMPHY_82577;
7973 break;
7974 case PCI_PRODUCT_INTEL_PCH_D_DM:
7975 case PCI_PRODUCT_INTEL_PCH_D_DC:
7976 /* 82578 */
7977 sc->sc_phytype = WMPHY_82578;
7978 break;
7979 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
7980 case PCI_PRODUCT_INTEL_PCH2_LV_V:
7981 /* 82579 */
7982 sc->sc_phytype = WMPHY_82579;
7983 break;
7984 case PCI_PRODUCT_INTEL_82801I_BM:
7985 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
7986 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
7987 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
7988 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
7989 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
7990 /* 82567 */
7991 sc->sc_phytype = WMPHY_BM;
7992 mii->mii_readreg = wm_gmii_bm_readreg;
7993 mii->mii_writereg = wm_gmii_bm_writereg;
7994 break;
7995 default:
7996 if (((sc->sc_flags & WM_F_SGMII) != 0)
7997 && !wm_sgmii_uses_mdio(sc)){
7998 /* SGMII */
7999 mii->mii_readreg = wm_sgmii_readreg;
8000 mii->mii_writereg = wm_sgmii_writereg;
8001 } else if (sc->sc_type >= WM_T_80003) {
8002 /* 80003 */
8003 mii->mii_readreg = wm_gmii_i80003_readreg;
8004 mii->mii_writereg = wm_gmii_i80003_writereg;
8005 } else if (sc->sc_type >= WM_T_I210) {
8006 /* I210 and I211 */
8007 mii->mii_readreg = wm_gmii_gs40g_readreg;
8008 mii->mii_writereg = wm_gmii_gs40g_writereg;
8009 } else if (sc->sc_type >= WM_T_82580) {
8010 /* 82580, I350 and I354 */
8011 sc->sc_phytype = WMPHY_82580;
8012 mii->mii_readreg = wm_gmii_82580_readreg;
8013 mii->mii_writereg = wm_gmii_82580_writereg;
8014 } else if (sc->sc_type >= WM_T_82544) {
			/* 82544, 8254[01567], 8257[1234] and 82583 */
8016 mii->mii_readreg = wm_gmii_i82544_readreg;
8017 mii->mii_writereg = wm_gmii_i82544_writereg;
8018 } else {
8019 mii->mii_readreg = wm_gmii_i82543_readreg;
8020 mii->mii_writereg = wm_gmii_i82543_writereg;
8021 }
8022 break;
8023 }
8024 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
8025 /* All PCH* use _hv_ */
8026 mii->mii_readreg = wm_gmii_hv_readreg;
8027 mii->mii_writereg = wm_gmii_hv_writereg;
8028 }
8029 mii->mii_statchg = wm_gmii_statchg;
8030
8031 wm_gmii_reset(sc);
8032
8033 sc->sc_ethercom.ec_mii = &sc->sc_mii;
8034 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
8035 wm_gmii_mediastatus);
8036
8037 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
8038 || (sc->sc_type == WM_T_82580)
8039 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
8040 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
8041 if ((sc->sc_flags & WM_F_SGMII) == 0) {
8042 /* Attach only one port */
8043 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
8044 MII_OFFSET_ANY, MIIF_DOPAUSE);
8045 } else {
8046 int i, id;
8047 uint32_t ctrl_ext;
8048
8049 id = wm_get_phy_id_82575(sc);
8050 if (id != -1) {
8051 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
8052 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
8053 }
8054 if ((id == -1)
8055 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
8056 /* Power on sgmii phy if it is disabled */
8057 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8058 CSR_WRITE(sc, WMREG_CTRL_EXT,
8059 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
8060 CSR_WRITE_FLUSH(sc);
8061 delay(300*1000); /* XXX too long */
8062
				/* Probe PHY addresses 1 through 7 */
8064 for (i = 1; i < 8; i++)
8065 mii_attach(sc->sc_dev, &sc->sc_mii,
8066 0xffffffff, i, MII_OFFSET_ANY,
8067 MIIF_DOPAUSE);
8068
8069 /* restore previous sfp cage power state */
8070 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8071 }
8072 }
8073 } else {
8074 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8075 MII_OFFSET_ANY, MIIF_DOPAUSE);
8076 }
8077
8078 /*
	 * If the MAC is PCH2 or PCH_LPT and fails to detect an MII PHY,
	 * call wm_set_mdio_slow_mode_hv() as a workaround and retry.
8081 */
8082 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
8083 (LIST_FIRST(&mii->mii_phys) == NULL)) {
8084 wm_set_mdio_slow_mode_hv(sc);
8085 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8086 MII_OFFSET_ANY, MIIF_DOPAUSE);
8087 }
8088
8089 /*
8090 * (For ICH8 variants)
	 * If PHY detection failed, retry with the BM PHY's read/write functions.
8092 */
8093 if (LIST_FIRST(&mii->mii_phys) == NULL) {
8094 /* if failed, retry with *_bm_* */
8095 mii->mii_readreg = wm_gmii_bm_readreg;
8096 mii->mii_writereg = wm_gmii_bm_writereg;
8097
8098 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8099 MII_OFFSET_ANY, MIIF_DOPAUSE);
8100 }
8101
8102 if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
8104 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
8105 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
8106 sc->sc_phytype = WMPHY_NONE;
8107 } else {
8108 /*
8109 * PHY Found!
8110 * Check PHY type.
8111 */
8112 uint32_t model;
8113 struct mii_softc *child;
8114
8115 child = LIST_FIRST(&mii->mii_phys);
8116 if (device_is_a(child->mii_dev, "igphy")) {
8117 struct igphy_softc *isc = (struct igphy_softc *)child;
8118
8119 model = isc->sc_mii.mii_mpd_model;
8120 if (model == MII_MODEL_yyINTEL_I82566)
8121 sc->sc_phytype = WMPHY_IGP_3;
8122 }
8123
8124 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
8125 }
8126 }
8127
8128 /*
8129 * wm_gmii_mediachange: [ifmedia interface function]
8130 *
8131 * Set hardware to newly-selected media on a 1000BASE-T device.
8132 */
8133 static int
8134 wm_gmii_mediachange(struct ifnet *ifp)
8135 {
8136 struct wm_softc *sc = ifp->if_softc;
8137 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8138 int rc;
8139
8140 if ((ifp->if_flags & IFF_UP) == 0)
8141 return 0;
8142
8143 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
8144 sc->sc_ctrl |= CTRL_SLU;
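	/*
	 * On autoselect, or on chips newer than the 82543, let the MAC
	 * pick up speed/duplex from the PHY; otherwise force the MAC to
	 * match the manually selected media.
	 */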
8145 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8146 || (sc->sc_type > WM_T_82543)) {
8147 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
8148 } else {
8149 sc->sc_ctrl &= ~CTRL_ASDE;
8150 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
8151 if (ife->ifm_media & IFM_FDX)
8152 sc->sc_ctrl |= CTRL_FD;
8153 switch (IFM_SUBTYPE(ife->ifm_media)) {
8154 case IFM_10_T:
8155 sc->sc_ctrl |= CTRL_SPEED_10;
8156 break;
8157 case IFM_100_TX:
8158 sc->sc_ctrl |= CTRL_SPEED_100;
8159 break;
8160 case IFM_1000_T:
8161 sc->sc_ctrl |= CTRL_SPEED_1000;
8162 break;
8163 default:
8164 panic("wm_gmii_mediachange: bad media 0x%x",
8165 ife->ifm_media);
8166 }
8167 }
8168 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8169 if (sc->sc_type <= WM_T_82543)
8170 wm_gmii_reset(sc);
8171
8172 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
8173 return 0;
8174 return rc;
8175 }
8176
8177 /*
8178 * wm_gmii_mediastatus: [ifmedia interface function]
8179 *
8180 * Get the current interface media status on a 1000BASE-T device.
8181 */
8182 static void
8183 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8184 {
8185 struct wm_softc *sc = ifp->if_softc;
8186
8187 ether_mediastatus(ifp, ifmr);
8188 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8189 | sc->sc_flowflags;
8190 }
8191
8192 #define MDI_IO CTRL_SWDPIN(2)
8193 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
8194 #define MDI_CLK CTRL_SWDPIN(3)
8195
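/*
 * wm_i82543_mii_sendbits:
 *
 *	Bit-bang `nbits' bits of `data' out to the PHY, MSB first, using
 *	SWDPIN 2 as MDIO data and SWDPIN 3 as the clock, one bit per
 *	clock pulse.
 */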
8196 static void
8197 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
8198 {
8199 uint32_t i, v;
8200
8201 v = CSR_READ(sc, WMREG_CTRL);
8202 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8203 v |= MDI_DIR | CTRL_SWDPIO(3);
8204
8205 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
8206 if (data & i)
8207 v |= MDI_IO;
8208 else
8209 v &= ~MDI_IO;
8210 CSR_WRITE(sc, WMREG_CTRL, v);
8211 CSR_WRITE_FLUSH(sc);
8212 delay(10);
8213 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8214 CSR_WRITE_FLUSH(sc);
8215 delay(10);
8216 CSR_WRITE(sc, WMREG_CTRL, v);
8217 CSR_WRITE_FLUSH(sc);
8218 delay(10);
8219 }
8220 }
8221
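/*
 * wm_i82543_mii_recvbits:
 *
 *	Turn the bit-banged MDIO line around and clock in a 16-bit value
 *	from the PHY, MSB first.
 */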
8222 static uint32_t
8223 wm_i82543_mii_recvbits(struct wm_softc *sc)
8224 {
8225 uint32_t v, i, data = 0;
8226
8227 v = CSR_READ(sc, WMREG_CTRL);
8228 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8229 v |= CTRL_SWDPIO(3);
8230
8231 CSR_WRITE(sc, WMREG_CTRL, v);
8232 CSR_WRITE_FLUSH(sc);
8233 delay(10);
8234 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8235 CSR_WRITE_FLUSH(sc);
8236 delay(10);
8237 CSR_WRITE(sc, WMREG_CTRL, v);
8238 CSR_WRITE_FLUSH(sc);
8239 delay(10);
8240
8241 for (i = 0; i < 16; i++) {
8242 data <<= 1;
8243 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8244 CSR_WRITE_FLUSH(sc);
8245 delay(10);
8246 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
8247 data |= 1;
8248 CSR_WRITE(sc, WMREG_CTRL, v);
8249 CSR_WRITE_FLUSH(sc);
8250 delay(10);
8251 }
8252
8253 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8254 CSR_WRITE_FLUSH(sc);
8255 delay(10);
8256 CSR_WRITE(sc, WMREG_CTRL, v);
8257 CSR_WRITE_FLUSH(sc);
8258 delay(10);
8259
8260 return data;
8261 }
8262
8263 #undef MDI_IO
8264 #undef MDI_DIR
8265 #undef MDI_CLK
8266
8267 /*
8268 * wm_gmii_i82543_readreg: [mii interface function]
8269 *
8270 * Read a PHY register on the GMII (i82543 version).
8271 */
8272 static int
8273 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
8274 {
8275 struct wm_softc *sc = device_private(self);
8276 int rv;
8277
8278 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8279 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
8280 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
8281 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
8282
8283 DPRINTF(WM_DEBUG_GMII,
8284 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
8285 device_xname(sc->sc_dev), phy, reg, rv));
8286
8287 return rv;
8288 }
8289
8290 /*
8291 * wm_gmii_i82543_writereg: [mii interface function]
8292 *
8293 * Write a PHY register on the GMII (i82543 version).
8294 */
8295 static void
8296 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
8297 {
8298 struct wm_softc *sc = device_private(self);
8299
8300 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8301 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
8302 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
8303 (MII_COMMAND_START << 30), 32);
8304 }
8305
8306 /*
8307 * wm_gmii_i82544_readreg: [mii interface function]
8308 *
8309 * Read a PHY register on the GMII.
8310 */
8311 static int
8312 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
8313 {
8314 struct wm_softc *sc = device_private(self);
8315 uint32_t mdic = 0;
8316 int i, rv;
8317
8318 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
8319 MDIC_REGADD(reg));
8320
8321 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8322 mdic = CSR_READ(sc, WMREG_MDIC);
8323 if (mdic & MDIC_READY)
8324 break;
8325 delay(50);
8326 }
8327
8328 if ((mdic & MDIC_READY) == 0) {
8329 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
8330 device_xname(sc->sc_dev), phy, reg);
8331 rv = 0;
8332 } else if (mdic & MDIC_E) {
8333 #if 0 /* This is normal if no PHY is present. */
8334 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
8335 device_xname(sc->sc_dev), phy, reg);
8336 #endif
8337 rv = 0;
8338 } else {
8339 rv = MDIC_DATA(mdic);
8340 if (rv == 0xffff)
8341 rv = 0;
8342 }
8343
8344 return rv;
8345 }
8346
8347 /*
8348 * wm_gmii_i82544_writereg: [mii interface function]
8349 *
8350 * Write a PHY register on the GMII.
8351 */
8352 static void
8353 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
8354 {
8355 struct wm_softc *sc = device_private(self);
8356 uint32_t mdic = 0;
8357 int i;
8358
8359 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
8360 MDIC_REGADD(reg) | MDIC_DATA(val));
8361
8362 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8363 mdic = CSR_READ(sc, WMREG_MDIC);
8364 if (mdic & MDIC_READY)
8365 break;
8366 delay(50);
8367 }
8368
8369 if ((mdic & MDIC_READY) == 0)
8370 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
8371 device_xname(sc->sc_dev), phy, reg);
8372 else if (mdic & MDIC_E)
8373 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
8374 device_xname(sc->sc_dev), phy, reg);
8375 }
8376
8377 /*
8378 * wm_gmii_i80003_readreg: [mii interface function]
8379 *
 *	Read a PHY register on the Kumeran interface (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8383 */
8384 static int
8385 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
8386 {
8387 struct wm_softc *sc = device_private(self);
8388 int sem;
8389 int rv;
8390
8391 if (phy != 1) /* only one PHY on kumeran bus */
8392 return 0;
8393
8394 sem = swfwphysem[sc->sc_funcid];
8395 if (wm_get_swfw_semaphore(sc, sem)) {
8396 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8397 __func__);
8398 return 0;
8399 }
8400
8401 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8402 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8403 reg >> GG82563_PAGE_SHIFT);
8404 } else {
8405 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8406 reg >> GG82563_PAGE_SHIFT);
8407 }
	/* Wait another 200us to work around a ready-bit bug in the MDIC register */
8409 delay(200);
8410 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8411 delay(200);
8412
8413 wm_put_swfw_semaphore(sc, sem);
8414 return rv;
8415 }
8416
8417 /*
8418 * wm_gmii_i80003_writereg: [mii interface function]
8419 *
 *	Write a PHY register on the Kumeran interface (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8423 */
8424 static void
8425 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
8426 {
8427 struct wm_softc *sc = device_private(self);
8428 int sem;
8429
8430 if (phy != 1) /* only one PHY on kumeran bus */
8431 return;
8432
8433 sem = swfwphysem[sc->sc_funcid];
8434 if (wm_get_swfw_semaphore(sc, sem)) {
8435 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8436 __func__);
8437 return;
8438 }
8439
8440 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8441 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8442 reg >> GG82563_PAGE_SHIFT);
8443 } else {
8444 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8445 reg >> GG82563_PAGE_SHIFT);
8446 }
	/* Wait another 200us to work around a ready-bit bug in the MDIC register */
8448 delay(200);
8449 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8450 delay(200);
8451
8452 wm_put_swfw_semaphore(sc, sem);
8453 }
8454
8455 /*
8456 * wm_gmii_bm_readreg: [mii interface function]
8457 *
 *	Read a PHY register on the BM PHY (82567).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8461 */
8462 static int
8463 wm_gmii_bm_readreg(device_t self, int phy, int reg)
8464 {
8465 struct wm_softc *sc = device_private(self);
8466 int sem;
8467 int rv;
8468
8469 sem = swfwphysem[sc->sc_funcid];
8470 if (wm_get_swfw_semaphore(sc, sem)) {
8471 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8472 __func__);
8473 return 0;
8474 }
8475
8476 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8477 if (phy == 1)
8478 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
8479 reg);
8480 else
8481 wm_gmii_i82544_writereg(self, phy,
8482 GG82563_PHY_PAGE_SELECT,
8483 reg >> GG82563_PAGE_SHIFT);
8484 }
8485
8486 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8487 wm_put_swfw_semaphore(sc, sem);
8488 return rv;
8489 }
8490
8491 /*
8492 * wm_gmii_bm_writereg: [mii interface function]
8493 *
 *	Write a PHY register on the BM PHY (82567).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8497 */
8498 static void
8499 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
8500 {
8501 struct wm_softc *sc = device_private(self);
8502 int sem;
8503
8504 sem = swfwphysem[sc->sc_funcid];
8505 if (wm_get_swfw_semaphore(sc, sem)) {
8506 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8507 __func__);
8508 return;
8509 }
8510
8511 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8512 if (phy == 1)
8513 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
8514 reg);
8515 else
8516 wm_gmii_i82544_writereg(self, phy,
8517 GG82563_PHY_PAGE_SELECT,
8518 reg >> GG82563_PAGE_SHIFT);
8519 }
8520
8521 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8522 wm_put_swfw_semaphore(sc, sem);
8523 }
8524
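/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write (rd == 0) a BM PHY wakeup register on
 *	page 800, transferring the data through *val.  Accesses to page
 *	800 must be bracketed by enabling the wakeup-register window on
 *	page 769, so this helper does the whole page dance around a
 *	single access.
 */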
8525 static void
8526 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
8527 {
8528 struct wm_softc *sc = device_private(self);
8529 uint16_t regnum = BM_PHY_REG_NUM(offset);
8530 uint16_t wuce;
8531
8532 /* XXX Gig must be disabled for MDIO accesses to page 800 */
8533 if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
8535 }
8536
8537 /* Set page 769 */
8538 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8539 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8540
8541 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
8542
8543 wuce &= ~BM_WUC_HOST_WU_BIT;
8544 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
8545 wuce | BM_WUC_ENABLE_BIT);
8546
8547 /* Select page 800 */
8548 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8549 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
8550
8551 /* Write page 800 */
8552 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
8553
8554 if (rd)
8555 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
8556 else
8557 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
8558
8559 /* Set page 769 */
8560 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8561 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8562
8563 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
8564 }
8565
8566 /*
8567 * wm_gmii_hv_readreg: [mii interface function]
8568 *
 *	Read a PHY register on the HV PHY (PCH and newer).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8572 */
8573 static int
8574 wm_gmii_hv_readreg(device_t self, int phy, int reg)
8575 {
8576 struct wm_softc *sc = device_private(self);
8577 uint16_t page = BM_PHY_REG_PAGE(reg);
8578 uint16_t regnum = BM_PHY_REG_NUM(reg);
8579 uint16_t val;
8580 int rv;
8581
8582 if (wm_get_swfwhw_semaphore(sc)) {
8583 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8584 __func__);
8585 return 0;
8586 }
8587
8588 /* XXX Workaround failure in MDIO access while cable is disconnected */
8589 if (sc->sc_phytype == WMPHY_82577) {
8590 /* XXX must write */
8591 }
8592
	/* Page 800 works differently from the rest; it has its own function */
8594 if (page == BM_WUC_PAGE) {
		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);	/* Don't leak the semaphore */
		return val;
8597 }
8598
8599 /*
	 * Pages lower than 768 work differently from the rest; accesses
	 * to them are not implemented here.
8602 */
8603 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);	/* Don't leak the semaphore */
		return 0;
8606 }
8607
8608 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8609 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8610 page << BME1000_PAGE_SHIFT);
8611 }
8612
8613 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
8614 wm_put_swfwhw_semaphore(sc);
8615 return rv;
8616 }
8617
8618 /*
8619 * wm_gmii_hv_writereg: [mii interface function]
8620 *
 *	Write a PHY register on the HV PHY (PCH and newer).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8624 */
8625 static void
8626 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
8627 {
8628 struct wm_softc *sc = device_private(self);
8629 uint16_t page = BM_PHY_REG_PAGE(reg);
8630 uint16_t regnum = BM_PHY_REG_NUM(reg);
8631
8632 if (wm_get_swfwhw_semaphore(sc)) {
8633 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8634 __func__);
8635 return;
8636 }
8637
8638 /* XXX Workaround failure in MDIO access while cable is disconnected */
8639
	/* Page 800 works differently from the rest; it has its own function */
8641 if (page == BM_WUC_PAGE) {
8642 uint16_t tmp;
8643
8644 tmp = val;
		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);	/* Don't leak the semaphore */
		return;
8647 }
8648
8649 /*
	 * Pages lower than 768 work differently from the rest; accesses
	 * to them are not implemented here.
8652 */
8653 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);	/* Don't leak the semaphore */
		return;
8656 }
8657
8658 /*
8659 * XXX Workaround MDIO accesses being disabled after entering IEEE
8660 * Power Down (whenever bit 11 of the PHY control register is set)
8661 */
8662
8663 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8664 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8665 page << BME1000_PAGE_SHIFT);
8666 }
8667
8668 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
8669 wm_put_swfwhw_semaphore(sc);
8670 }
8671
8672 /*
8673 * wm_gmii_82580_readreg: [mii interface function]
8674 *
 *	Read a PHY register on the 82580, I350 and I354.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8678 */
8679 static int
8680 wm_gmii_82580_readreg(device_t self, int phy, int reg)
8681 {
8682 struct wm_softc *sc = device_private(self);
8683 int sem;
8684 int rv;
8685
8686 sem = swfwphysem[sc->sc_funcid];
8687 if (wm_get_swfw_semaphore(sc, sem)) {
8688 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8689 __func__);
8690 return 0;
8691 }
8692
8693 rv = wm_gmii_i82544_readreg(self, phy, reg);
8694
8695 wm_put_swfw_semaphore(sc, sem);
8696 return rv;
8697 }
8698
8699 /*
8700 * wm_gmii_82580_writereg: [mii interface function]
8701 *
 *	Write a PHY register on the 82580, I350 and I354.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8705 */
8706 static void
8707 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
8708 {
8709 struct wm_softc *sc = device_private(self);
8710 int sem;
8711
8712 sem = swfwphysem[sc->sc_funcid];
8713 if (wm_get_swfw_semaphore(sc, sem)) {
8714 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8715 __func__);
8716 return;
8717 }
8718
8719 wm_gmii_i82544_writereg(self, phy, reg, val);
8720
8721 wm_put_swfw_semaphore(sc, sem);
8722 }
8723
8724 /*
8725 * wm_gmii_gs40g_readreg: [mii interface function]
8726 *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8730 */
8731 static int
8732 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
8733 {
8734 struct wm_softc *sc = device_private(self);
8735 int sem;
8736 int page, offset;
8737 int rv;
8738
8739 /* Acquire semaphore */
8740 sem = swfwphysem[sc->sc_funcid];
8741 if (wm_get_swfw_semaphore(sc, sem)) {
8742 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8743 __func__);
8744 return 0;
8745 }
8746
8747 /* Page select */
8748 page = reg >> GS40G_PAGE_SHIFT;
8749 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8750
8751 /* Read reg */
8752 offset = reg & GS40G_OFFSET_MASK;
8753 rv = wm_gmii_i82544_readreg(self, phy, offset);
8754
8755 wm_put_swfw_semaphore(sc, sem);
8756 return rv;
8757 }
8758
8759 /*
8760 * wm_gmii_gs40g_writereg: [mii interface function]
8761 *
8762 * Write a PHY register on the I210 and I211.
8763 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8765 */
8766 static void
8767 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
8768 {
8769 struct wm_softc *sc = device_private(self);
8770 int sem;
8771 int page, offset;
8772
8773 /* Acquire semaphore */
8774 sem = swfwphysem[sc->sc_funcid];
8775 if (wm_get_swfw_semaphore(sc, sem)) {
8776 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8777 __func__);
8778 return;
8779 }
8780
8781 /* Page select */
8782 page = reg >> GS40G_PAGE_SHIFT;
8783 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8784
8785 /* Write reg */
8786 offset = reg & GS40G_OFFSET_MASK;
8787 wm_gmii_i82544_writereg(self, phy, offset, val);
8788
8789 /* Release semaphore */
8790 wm_put_swfw_semaphore(sc, sem);
8791 }
8792
8793 /*
8794 * wm_gmii_statchg: [mii interface function]
8795 *
8796 * Callback from MII layer when media changes.
8797 */
8798 static void
8799 wm_gmii_statchg(struct ifnet *ifp)
8800 {
8801 struct wm_softc *sc = ifp->if_softc;
8802 struct mii_data *mii = &sc->sc_mii;
8803
8804 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
8805 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8806 sc->sc_fcrtl &= ~FCRTL_XONE;
8807
8808 /*
8809 * Get flow control negotiation result.
8810 */
8811 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
8812 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
8813 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
8814 mii->mii_media_active &= ~IFM_ETH_FMASK;
8815 }
8816
8817 if (sc->sc_flowflags & IFM_FLOW) {
8818 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
8819 sc->sc_ctrl |= CTRL_TFCE;
8820 sc->sc_fcrtl |= FCRTL_XONE;
8821 }
8822 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
8823 sc->sc_ctrl |= CTRL_RFCE;
8824 }
8825
8826 if (sc->sc_mii.mii_media_active & IFM_FDX) {
8827 DPRINTF(WM_DEBUG_LINK,
8828 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
8829 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8830 } else {
8831 DPRINTF(WM_DEBUG_LINK,
8832 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
8833 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8834 }
8835
8836 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8837 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8838 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
8839 : WMREG_FCRTL, sc->sc_fcrtl);
8840 if (sc->sc_type == WM_T_80003) {
8841 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
8842 case IFM_1000_T:
8843 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8844 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
8845 sc->sc_tipg = TIPG_1000T_80003_DFLT;
8846 break;
8847 default:
8848 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8849 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
8850 sc->sc_tipg = TIPG_10_100_80003_DFLT;
8851 break;
8852 }
8853 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
8854 }
8855 }
8856
8857 /*
8858 * wm_kmrn_readreg:
8859 *
 *	Read a Kumeran register
8861 */
8862 static int
8863 wm_kmrn_readreg(struct wm_softc *sc, int reg)
8864 {
8865 int rv;
8866
8867 if (sc->sc_flags & WM_F_LOCK_SWFW) {
8868 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8869 aprint_error_dev(sc->sc_dev,
8870 "%s: failed to get semaphore\n", __func__);
8871 return 0;
8872 }
8873 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8874 if (wm_get_swfwhw_semaphore(sc)) {
8875 aprint_error_dev(sc->sc_dev,
8876 "%s: failed to get semaphore\n", __func__);
8877 return 0;
8878 }
8879 }
8880
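	/*
	 * Start the read by writing the register offset with the
	 * read-enable bit set, then give the MAC a moment to fetch
	 * the data before reading it back.
	 */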
8881 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8882 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8883 KUMCTRLSTA_REN);
8884 CSR_WRITE_FLUSH(sc);
8885 delay(2);
8886
8887 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
8888
8889 if (sc->sc_flags & WM_F_LOCK_SWFW)
8890 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8891 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8892 wm_put_swfwhw_semaphore(sc);
8893
8894 return rv;
8895 }
8896
8897 /*
8898 * wm_kmrn_writereg:
8899 *
 *	Write a Kumeran register
8901 */
8902 static void
8903 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
8904 {
8905
8906 if (sc->sc_flags & WM_F_LOCK_SWFW) {
8907 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8908 aprint_error_dev(sc->sc_dev,
8909 "%s: failed to get semaphore\n", __func__);
8910 return;
8911 }
8912 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8913 if (wm_get_swfwhw_semaphore(sc)) {
8914 aprint_error_dev(sc->sc_dev,
8915 "%s: failed to get semaphore\n", __func__);
8916 return;
8917 }
8918 }
8919
8920 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8921 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8922 (val & KUMCTRLSTA_MASK));
8923
8924 if (sc->sc_flags & WM_F_LOCK_SWFW)
8925 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8926 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8927 wm_put_swfwhw_semaphore(sc);
8928 }
8929
8930 /* SGMII related */
8931
8932 /*
8933 * wm_sgmii_uses_mdio
8934 *
8935 * Check whether the transaction is to the internal PHY or the external
8936 * MDIO interface. Return true if it's MDIO.
8937 */
8938 static bool
8939 wm_sgmii_uses_mdio(struct wm_softc *sc)
8940 {
8941 uint32_t reg;
8942 bool ismdio = false;
8943
8944 switch (sc->sc_type) {
8945 case WM_T_82575:
8946 case WM_T_82576:
8947 reg = CSR_READ(sc, WMREG_MDIC);
8948 ismdio = ((reg & MDIC_DEST) != 0);
8949 break;
8950 case WM_T_82580:
8951 case WM_T_I350:
8952 case WM_T_I354:
8953 case WM_T_I210:
8954 case WM_T_I211:
8955 reg = CSR_READ(sc, WMREG_MDICNFG);
8956 ismdio = ((reg & MDICNFG_DEST) != 0);
8957 break;
8958 default:
8959 break;
8960 }
8961
8962 return ismdio;
8963 }
8964
8965 /*
8966 * wm_sgmii_readreg: [mii interface function]
8967 *
8968 * Read a PHY register on the SGMII
8969 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8971 */
8972 static int
8973 wm_sgmii_readreg(device_t self, int phy, int reg)
8974 {
8975 struct wm_softc *sc = device_private(self);
8976 uint32_t i2ccmd;
8977 int i, rv;
8978
8979 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
8980 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8981 __func__);
8982 return 0;
8983 }
8984
8985 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
8986 | (phy << I2CCMD_PHY_ADDR_SHIFT)
8987 | I2CCMD_OPCODE_READ;
8988 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8989
8990 /* Poll the ready bit */
8991 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8992 delay(50);
8993 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8994 if (i2ccmd & I2CCMD_READY)
8995 break;
8996 }
8997 if ((i2ccmd & I2CCMD_READY) == 0)
8998 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
8999 if ((i2ccmd & I2CCMD_ERROR) != 0)
9000 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9001
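	/* The data arrives byte-swapped on the I2C interface; swap it back. */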
9002 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
9003
9004 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
9005 return rv;
9006 }
9007
9008 /*
9009 * wm_sgmii_writereg: [mii interface function]
9010 *
9011 * Write a PHY register on the SGMII.
9012 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
9014 */
9015 static void
9016 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
9017 {
9018 struct wm_softc *sc = device_private(self);
9019 uint32_t i2ccmd;
9020 int i;
9021 int val_swapped;
9022
9023 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
9024 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9025 __func__);
9026 return;
9027 }
9028 /* Swap the data bytes for the I2C interface */
9029 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
9030 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9031 | (phy << I2CCMD_PHY_ADDR_SHIFT)
9032 | I2CCMD_OPCODE_WRITE | val_swapped;
9033 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9034
9035 /* Poll the ready bit */
9036 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9037 delay(50);
9038 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9039 if (i2ccmd & I2CCMD_READY)
9040 break;
9041 }
9042 if ((i2ccmd & I2CCMD_READY) == 0)
9043 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
9044 if ((i2ccmd & I2CCMD_ERROR) != 0)
9045 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9046
	/* Release the same semaphore we acquired above */
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
9048 }
9049
9050 /* TBI related */
9051
9052 /*
9053 * wm_tbi_mediainit:
9054 *
9055 * Initialize media for use on 1000BASE-X devices.
9056 */
9057 static void
9058 wm_tbi_mediainit(struct wm_softc *sc)
9059 {
9060 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9061 const char *sep = "";
9062
9063 if (sc->sc_type < WM_T_82543)
9064 sc->sc_tipg = TIPG_WM_DFLT;
9065 else
9066 sc->sc_tipg = TIPG_LG_DFLT;
9067
9068 sc->sc_tbi_serdes_anegticks = 5;
9069
9070 /* Initialize our media structures */
9071 sc->sc_mii.mii_ifp = ifp;
9072 sc->sc_ethercom.ec_mii = &sc->sc_mii;
9073
9074 if ((sc->sc_type >= WM_T_82575)
9075 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
9076 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9077 wm_serdes_mediachange, wm_serdes_mediastatus);
9078 else
9079 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9080 wm_tbi_mediachange, wm_tbi_mediastatus);
9081
9082 /*
9083 * SWD Pins:
9084 *
9085 * 0 = Link LED (output)
9086 * 1 = Loss Of Signal (input)
9087 */
9088 sc->sc_ctrl |= CTRL_SWDPIO(0);
9089
9090 /* XXX Perhaps this is only for TBI */
9091 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9092 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
9093
9094 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9095 sc->sc_ctrl &= ~CTRL_LRST;
9096
9097 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9098
9099 #define ADD(ss, mm, dd) \
9100 do { \
9101 aprint_normal("%s%s", sep, ss); \
9102 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
9103 sep = ", "; \
9104 } while (/*CONSTCOND*/0)
9105
9106 aprint_normal_dev(sc->sc_dev, "");
9107
9108 /* Only 82545 is LX */
9109 if (sc->sc_type == WM_T_82545) {
9110 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
9111 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
9112 } else {
9113 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
9114 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
9115 }
9116 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
9117 aprint_normal("\n");
9118
9119 #undef ADD
9120
9121 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
9122 }
9123
9124 /*
9125 * wm_tbi_mediachange: [ifmedia interface function]
9126 *
9127 * Set hardware to newly-selected media on a 1000BASE-X device.
9128 */
9129 static int
9130 wm_tbi_mediachange(struct ifnet *ifp)
9131 {
9132 struct wm_softc *sc = ifp->if_softc;
9133 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9134 uint32_t status;
9135 int i;
9136
9137 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9138 /* XXX need some work for >= 82571 and < 82575 */
9139 if (sc->sc_type < WM_T_82575)
9140 return 0;
9141 }
9142
9143 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9144 || (sc->sc_type >= WM_T_82575))
9145 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9146
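	/*
	 * Build the TXCW: always autonegotiate, advertise the duplex
	 * mode(s) implied by the selected media, and add symmetric and
	 * asymmetric pause if flow control is requested.
	 */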
9147 sc->sc_ctrl &= ~CTRL_LRST;
9148 sc->sc_txcw = TXCW_ANE;
9149 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9150 sc->sc_txcw |= TXCW_FD | TXCW_HD;
9151 else if (ife->ifm_media & IFM_FDX)
9152 sc->sc_txcw |= TXCW_FD;
9153 else
9154 sc->sc_txcw |= TXCW_HD;
9155
9156 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
9157 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
9158
9159 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
9160 device_xname(sc->sc_dev), sc->sc_txcw));
9161 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9162 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9163 CSR_WRITE_FLUSH(sc);
9164 delay(1000);
9165
9166 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
9167 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
9168
9169 /*
	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
	 * if the optics detect a signal, and clear if they don't.
9172 */
9173 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
9174 /* Have signal; wait for the link to come up. */
9175 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
9176 delay(10000);
9177 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
9178 break;
9179 }
9180
9181 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
9182 device_xname(sc->sc_dev),i));
9183
9184 status = CSR_READ(sc, WMREG_STATUS);
9185 DPRINTF(WM_DEBUG_LINK,
9186 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
9187 device_xname(sc->sc_dev),status, STATUS_LU));
9188 if (status & STATUS_LU) {
9189 /* Link is up. */
9190 DPRINTF(WM_DEBUG_LINK,
9191 ("%s: LINK: set media -> link up %s\n",
9192 device_xname(sc->sc_dev),
9193 (status & STATUS_FD) ? "FDX" : "HDX"));
9194
9195 /*
			 * NOTE: the hardware updates TFCE and RFCE in CTRL
			 * automatically, so we should re-read it into
			 * sc->sc_ctrl.
9198 */
9199 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9200 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9201 sc->sc_fcrtl &= ~FCRTL_XONE;
9202 if (status & STATUS_FD)
9203 sc->sc_tctl |=
9204 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9205 else
9206 sc->sc_tctl |=
9207 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9208 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
9209 sc->sc_fcrtl |= FCRTL_XONE;
9210 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9211 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9212 WMREG_OLD_FCRTL : WMREG_FCRTL,
9213 sc->sc_fcrtl);
9214 sc->sc_tbi_linkup = 1;
9215 } else {
9216 if (i == WM_LINKUP_TIMEOUT)
9217 wm_check_for_link(sc);
9218 /* Link is down. */
9219 DPRINTF(WM_DEBUG_LINK,
9220 ("%s: LINK: set media -> link down\n",
9221 device_xname(sc->sc_dev)));
9222 sc->sc_tbi_linkup = 0;
9223 }
9224 } else {
9225 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
9226 device_xname(sc->sc_dev)));
9227 sc->sc_tbi_linkup = 0;
9228 }
9229
9230 wm_tbi_serdes_set_linkled(sc);
9231
9232 return 0;
9233 }
9234
9235 /*
9236 * wm_tbi_mediastatus: [ifmedia interface function]
9237 *
9238 * Get the current interface media status on a 1000BASE-X device.
9239 */
9240 static void
9241 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9242 {
9243 struct wm_softc *sc = ifp->if_softc;
9244 uint32_t ctrl, status;
9245
9246 ifmr->ifm_status = IFM_AVALID;
9247 ifmr->ifm_active = IFM_ETHER;
9248
9249 status = CSR_READ(sc, WMREG_STATUS);
9250 if ((status & STATUS_LU) == 0) {
9251 ifmr->ifm_active |= IFM_NONE;
9252 return;
9253 }
9254
9255 ifmr->ifm_status |= IFM_ACTIVE;
9256 /* Only 82545 is LX */
9257 if (sc->sc_type == WM_T_82545)
9258 ifmr->ifm_active |= IFM_1000_LX;
9259 else
9260 ifmr->ifm_active |= IFM_1000_SX;
9261 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
9262 ifmr->ifm_active |= IFM_FDX;
9263 else
9264 ifmr->ifm_active |= IFM_HDX;
9265 ctrl = CSR_READ(sc, WMREG_CTRL);
9266 if (ctrl & CTRL_RFCE)
9267 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
9268 if (ctrl & CTRL_TFCE)
9269 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
9270 }
9271
9272 /* XXX TBI only */
9273 static int
9274 wm_check_for_link(struct wm_softc *sc)
9275 {
9276 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9277 uint32_t rxcw;
9278 uint32_t ctrl;
9279 uint32_t status;
9280 uint32_t sig;
9281
9282 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9283 /* XXX need some work for >= 82571 */
9284 if (sc->sc_type >= WM_T_82571) {
9285 sc->sc_tbi_linkup = 1;
9286 return 0;
9287 }
9288 }
9289
9290 rxcw = CSR_READ(sc, WMREG_RXCW);
9291 ctrl = CSR_READ(sc, WMREG_CTRL);
9292 status = CSR_READ(sc, WMREG_STATUS);
9293
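	/* Chips newer than the 82544 report signal detect on SWDPIN(1). */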
9294 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
9295
9296 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
9297 device_xname(sc->sc_dev), __func__,
9298 ((ctrl & CTRL_SWDPIN(1)) == sig),
9299 ((status & STATUS_LU) != 0),
9300 ((rxcw & RXCW_C) != 0)
9301 ));
9302
9303 /*
9304 * SWDPIN LU RXCW
9305 * 0 0 0
9306 * 0 0 1 (should not happen)
9307 * 0 1 0 (should not happen)
9308 * 0 1 1 (should not happen)
9309 * 1 0 0 Disable autonego and force linkup
9310 * 1 0 1 got /C/ but not linkup yet
9311 * 1 1 0 (linkup)
9312 * 1 1 1 If IFM_AUTO, back to autonego
9313 *
9314 */
9315 if (((ctrl & CTRL_SWDPIN(1)) == sig)
9316 && ((status & STATUS_LU) == 0)
9317 && ((rxcw & RXCW_C) == 0)) {
9318 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
9319 __func__));
9320 sc->sc_tbi_linkup = 0;
9321 /* Disable auto-negotiation in the TXCW register */
9322 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
9323
9324 /*
9325 * Force link-up and also force full-duplex.
9326 *
		 * NOTE: the hardware updates TFCE and RFCE in CTRL
		 * automatically, so we should re-read it into sc->sc_ctrl.
9329 */
9330 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
9331 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9332 } else if (((status & STATUS_LU) != 0)
9333 && ((rxcw & RXCW_C) != 0)
9334 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
9335 sc->sc_tbi_linkup = 1;
9336 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
9337 __func__));
9338 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9339 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
9340 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
9341 && ((rxcw & RXCW_C) != 0)) {
9342 DPRINTF(WM_DEBUG_LINK, ("/C/"));
9343 } else {
9344 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
9345 status));
9346 }
9347
9348 return 0;
9349 }
9350
9351 /*
9352 * wm_tbi_tick:
9353 *
9354 * Check the link on TBI devices.
9355 * This function acts as mii_tick().
9356 */
9357 static void
9358 wm_tbi_tick(struct wm_softc *sc)
9359 {
9360 struct wm_txqueue *txq __diagused = &sc->sc_txq[0];
9361 struct mii_data *mii = &sc->sc_mii;
9362 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9363 uint32_t status;
9364
9365 KASSERT(WM_TX_LOCKED(txq));
9366
9367 status = CSR_READ(sc, WMREG_STATUS);
9368
9369 /* XXX is this needed? */
9370 (void)CSR_READ(sc, WMREG_RXCW);
9371 (void)CSR_READ(sc, WMREG_CTRL);
9372
9373 /* set link status */
9374 if ((status & STATUS_LU) == 0) {
9375 DPRINTF(WM_DEBUG_LINK,
9376 ("%s: LINK: checklink -> down\n",
9377 device_xname(sc->sc_dev)));
9378 sc->sc_tbi_linkup = 0;
9379 } else if (sc->sc_tbi_linkup == 0) {
9380 DPRINTF(WM_DEBUG_LINK,
9381 ("%s: LINK: checklink -> up %s\n",
9382 device_xname(sc->sc_dev),
9383 (status & STATUS_FD) ? "FDX" : "HDX"));
9384 sc->sc_tbi_linkup = 1;
9385 sc->sc_tbi_serdes_ticks = 0;
9386 }
9387
9388 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
9389 goto setled;
9390
9391 if ((status & STATUS_LU) == 0) {
9392 sc->sc_tbi_linkup = 0;
9393 /* If the timer expired, retry autonegotiation */
9394 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9395 && (++sc->sc_tbi_serdes_ticks
9396 >= sc->sc_tbi_serdes_anegticks)) {
9397 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9398 sc->sc_tbi_serdes_ticks = 0;
9399 /*
9400 * Reset the link, and let autonegotiation do
9401 * its thing
9402 */
9403 sc->sc_ctrl |= CTRL_LRST;
9404 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9405 CSR_WRITE_FLUSH(sc);
9406 delay(1000);
9407 sc->sc_ctrl &= ~CTRL_LRST;
9408 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9409 CSR_WRITE_FLUSH(sc);
9410 delay(1000);
9411 CSR_WRITE(sc, WMREG_TXCW,
9412 sc->sc_txcw & ~TXCW_ANE);
9413 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9414 }
9415 }
9416
9417 setled:
9418 wm_tbi_serdes_set_linkled(sc);
9419 }
9420
9421 /* SERDES related */
9422 static void
9423 wm_serdes_power_up_link_82575(struct wm_softc *sc)
9424 {
9425 uint32_t reg;
9426
9427 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9428 && ((sc->sc_flags & WM_F_SGMII) == 0))
9429 return;
9430
9431 reg = CSR_READ(sc, WMREG_PCS_CFG);
9432 reg |= PCS_CFG_PCS_EN;
9433 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
9434
9435 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9436 reg &= ~CTRL_EXT_SWDPIN(3);
9437 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9438 CSR_WRITE_FLUSH(sc);
9439 }
9440
9441 static int
9442 wm_serdes_mediachange(struct ifnet *ifp)
9443 {
9444 struct wm_softc *sc = ifp->if_softc;
9445 bool pcs_autoneg = true; /* XXX */
9446 uint32_t ctrl_ext, pcs_lctl, reg;
9447
9448 /* XXX Currently, this function is not called on 8257[12] */
9449 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9450 || (sc->sc_type >= WM_T_82575))
9451 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9452
9453 wm_serdes_power_up_link_82575(sc);
9454
9455 sc->sc_ctrl |= CTRL_SLU;
9456
9457 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
9458 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
9459
9460 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9461 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
9462 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
9463 case CTRL_EXT_LINK_MODE_SGMII:
9464 pcs_autoneg = true;
9465 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
9466 break;
9467 case CTRL_EXT_LINK_MODE_1000KX:
9468 pcs_autoneg = false;
9469 /* FALLTHROUGH */
9470 default:
9471 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)){
9472 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
9473 pcs_autoneg = false;
9474 }
9475 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
9476 | CTRL_FRCFDX;
9477 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
9478 }
9479 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9480
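	/*
	 * Either restart PCS autonegotiation, advertising both symmetric
	 * and asymmetric pause, or force the link with the flow control
	 * settings chosen above.
	 */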
9481 if (pcs_autoneg) {
9482 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
9483 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
9484
9485 reg = CSR_READ(sc, WMREG_PCS_ANADV);
9486 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
9487 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
9488 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
9489 } else
9490 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
9491
9492 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
9493
9495 return 0;
9496 }
9497
9498 static void
9499 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9500 {
9501 struct wm_softc *sc = ifp->if_softc;
9502 struct mii_data *mii = &sc->sc_mii;
9503 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9504 uint32_t pcs_adv, pcs_lpab, reg;
9505
9506 ifmr->ifm_status = IFM_AVALID;
9507 ifmr->ifm_active = IFM_ETHER;
9508
9509 /* Check PCS */
9510 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9511 if ((reg & PCS_LSTS_LINKOK) == 0) {
9512 ifmr->ifm_active |= IFM_NONE;
9513 sc->sc_tbi_linkup = 0;
9514 goto setled;
9515 }
9516
9517 sc->sc_tbi_linkup = 1;
9518 ifmr->ifm_status |= IFM_ACTIVE;
9519 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
9520 if ((reg & PCS_LSTS_FDX) != 0)
9521 ifmr->ifm_active |= IFM_FDX;
9522 else
9523 ifmr->ifm_active |= IFM_HDX;
9524 mii->mii_media_active &= ~IFM_ETH_FMASK;
9525 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
9526 /* Check flow */
9527 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9528 if ((reg & PCS_LSTS_AN_COMP) == 0) {
9529 printf("XXX LINKOK but not ACOMP\n");
9530 goto setled;
9531 }
9532 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
9533 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
9534 printf("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab);
9535 if ((pcs_adv & TXCW_SYM_PAUSE)
9536 && (pcs_lpab & TXCW_SYM_PAUSE)) {
9537 mii->mii_media_active |= IFM_FLOW
9538 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
9539 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
9540 && (pcs_adv & TXCW_ASYM_PAUSE)
9541 && (pcs_lpab & TXCW_SYM_PAUSE)
9542 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9543 mii->mii_media_active |= IFM_FLOW
9544 | IFM_ETH_TXPAUSE;
9545 } else if ((pcs_adv & TXCW_SYM_PAUSE)
9546 && (pcs_adv & TXCW_ASYM_PAUSE)
9547 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
9548 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9549 mii->mii_media_active |= IFM_FLOW
9550 | IFM_ETH_RXPAUSE;
9551 } else {
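			/* Neither side advertised usable pause bits;
			 * no flow control. */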
9552 }
9553 }
9554 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
9555 | (mii->mii_media_active & IFM_ETH_FMASK);
9556 setled:
9557 wm_tbi_serdes_set_linkled(sc);
9558 }
9559
9560 /*
9561 * wm_serdes_tick:
9562 *
9563 * Check the link on serdes devices.
9564 */
9565 static void
9566 wm_serdes_tick(struct wm_softc *sc)
9567 {
9568 struct wm_txqueue *txq __diagused = &sc->sc_txq[0];
9569 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9570 struct mii_data *mii = &sc->sc_mii;
9571 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9572 uint32_t reg;
9573
9574 KASSERT(WM_TX_LOCKED(txq));
9575
9576 mii->mii_media_status = IFM_AVALID;
9577 mii->mii_media_active = IFM_ETHER;
9578
9579 /* Check PCS */
9580 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9581 if ((reg & PCS_LSTS_LINKOK) != 0) {
9582 mii->mii_media_status |= IFM_ACTIVE;
9583 sc->sc_tbi_linkup = 1;
9584 sc->sc_tbi_serdes_ticks = 0;
9585 mii->mii_media_active |= IFM_1000_SX; /* XXX */
9586 if ((reg & PCS_LSTS_FDX) != 0)
9587 mii->mii_media_active |= IFM_FDX;
9588 else
9589 mii->mii_media_active |= IFM_HDX;
9590 } else {
9591 mii->mii_media_status |= IFM_NONE;
9592 sc->sc_tbi_linkup = 0;
9593 /* If the timer expired, retry autonegotiation */
9594 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9595 && (++sc->sc_tbi_serdes_ticks
9596 >= sc->sc_tbi_serdes_anegticks)) {
9597 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9598 sc->sc_tbi_serdes_ticks = 0;
9599 /* XXX */
9600 wm_serdes_mediachange(ifp);
9601 }
9602 }
9603
9604 wm_tbi_serdes_set_linkled(sc);
9605 }
9606
9607 /* SFP related */
9608
9609 static int
9610 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
9611 {
9612 uint32_t i2ccmd;
9613 int i;
9614
9615 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
9616 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9617
9618 /* Poll the ready bit */
9619 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9620 delay(50);
9621 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9622 if (i2ccmd & I2CCMD_READY)
9623 break;
9624 }
9625 if ((i2ccmd & I2CCMD_READY) == 0)
9626 return -1;
9627 if ((i2ccmd & I2CCMD_ERROR) != 0)
9628 return -1;
9629
9630 *data = i2ccmd & 0x00ff;
9631
9632 return 0;
9633 }
9634
9635 static uint32_t
9636 wm_sfp_get_media_type(struct wm_softc *sc)
9637 {
9638 uint32_t ctrl_ext;
9639 uint8_t val = 0;
9640 int timeout = 3;
9641 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
9642 int rv = -1;
9643
9644 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9645 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
9646 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
9647 CSR_WRITE_FLUSH(sc);
9648
9649 /* Read SFP module data */
9650 while (timeout) {
9651 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
9652 if (rv == 0)
9653 break;
9654 delay(100*1000); /* XXX too big */
9655 timeout--;
9656 }
9657 if (rv != 0)
9658 goto out;
9659 switch (val) {
9660 case SFF_SFP_ID_SFF:
9661 aprint_normal_dev(sc->sc_dev,
9662 "Module/Connector soldered to board\n");
9663 break;
9664 case SFF_SFP_ID_SFP:
9665 aprint_normal_dev(sc->sc_dev, "SFP\n");
9666 break;
9667 case SFF_SFP_ID_UNKNOWN:
9668 goto out;
9669 default:
9670 break;
9671 }
9672
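	/* Read the Ethernet compliance flags to classify the module's media */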
9673 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
9674 if (rv != 0) {
9675 goto out;
9676 }
9677
9678 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
9679 mediatype = WM_MEDIATYPE_SERDES;
9680 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
9681 sc->sc_flags |= WM_F_SGMII;
9682 mediatype = WM_MEDIATYPE_COPPER;
9683 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
9684 sc->sc_flags |= WM_F_SGMII;
9685 mediatype = WM_MEDIATYPE_SERDES;
9686 }
9687
9688 out:
9689 /* Restore I2C interface setting */
9690 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9691
9692 return mediatype;
9693 }
9694 /*
9695 * NVM related.
9696 * Microwire, SPI (w/wo EERD) and Flash.
9697 */
9698
9699 /* Both spi and uwire */
9700
9701 /*
9702 * wm_eeprom_sendbits:
9703 *
9704 * Send a series of bits to the EEPROM.
9705 */
9706 static void
9707 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
9708 {
9709 uint32_t reg;
9710 int x;
9711
9712 reg = CSR_READ(sc, WMREG_EECD);
9713
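	/*
	 * Clock each bit out MSB first: place the bit on DI, then pulse
	 * SK high and low, waiting ~2us around every edge.
	 */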
9714 for (x = nbits; x > 0; x--) {
9715 if (bits & (1U << (x - 1)))
9716 reg |= EECD_DI;
9717 else
9718 reg &= ~EECD_DI;
9719 CSR_WRITE(sc, WMREG_EECD, reg);
9720 CSR_WRITE_FLUSH(sc);
9721 delay(2);
9722 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9723 CSR_WRITE_FLUSH(sc);
9724 delay(2);
9725 CSR_WRITE(sc, WMREG_EECD, reg);
9726 CSR_WRITE_FLUSH(sc);
9727 delay(2);
9728 }
9729 }
9730
9731 /*
9732 * wm_eeprom_recvbits:
9733 *
9734 * Receive a series of bits from the EEPROM.
9735 */
9736 static void
9737 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
9738 {
9739 uint32_t reg, val;
9740 int x;
9741
9742 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
9743
9744 val = 0;
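	/* Clock each bit in MSB first: raise SK, sample DO, lower SK. */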
9745 for (x = nbits; x > 0; x--) {
9746 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9747 CSR_WRITE_FLUSH(sc);
9748 delay(2);
9749 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
9750 val |= (1U << (x - 1));
9751 CSR_WRITE(sc, WMREG_EECD, reg);
9752 CSR_WRITE_FLUSH(sc);
9753 delay(2);
9754 }
9755 *valp = val;
9756 }
9757
9758 /* Microwire */
9759
9760 /*
9761 * wm_nvm_read_uwire:
9762 *
9763 * Read a word from the EEPROM using the MicroWire protocol.
9764 */
9765 static int
9766 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9767 {
9768 uint32_t reg, val;
9769 int i;
9770
9771 for (i = 0; i < wordcnt; i++) {
9772 /* Clear SK and DI. */
9773 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
9774 CSR_WRITE(sc, WMREG_EECD, reg);
9775
9776 /*
9777 * XXX: workaround for a bug in qemu-0.12.x and prior
9778 * and Xen.
9779 *
9780 		 * We use this workaround only for the 82540 because qemu's
9781 		 * e1000 acts as an 82540.
9782 */
9783 if (sc->sc_type == WM_T_82540) {
9784 reg |= EECD_SK;
9785 CSR_WRITE(sc, WMREG_EECD, reg);
9786 reg &= ~EECD_SK;
9787 CSR_WRITE(sc, WMREG_EECD, reg);
9788 CSR_WRITE_FLUSH(sc);
9789 delay(2);
9790 }
9791 /* XXX: end of workaround */
9792
9793 /* Set CHIP SELECT. */
9794 reg |= EECD_CS;
9795 CSR_WRITE(sc, WMREG_EECD, reg);
9796 CSR_WRITE_FLUSH(sc);
9797 delay(2);
9798
9799 /* Shift in the READ command. */
9800 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
9801
9802 /* Shift in address. */
9803 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
9804
9805 /* Shift out the data. */
9806 wm_eeprom_recvbits(sc, &val, 16);
9807 data[i] = val & 0xffff;
9808
9809 /* Clear CHIP SELECT. */
9810 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
9811 CSR_WRITE(sc, WMREG_EECD, reg);
9812 CSR_WRITE_FLUSH(sc);
9813 delay(2);
9814 }
9815
9816 return 0;
9817 }
9818
9819 /* SPI */
9820
9821 /*
9822 * Set SPI and FLASH related information from the EECD register.
9823 * For 82541 and 82547, the word size is taken from EEPROM.
9824 */
9825 static int
9826 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
9827 {
9828 int size;
9829 uint32_t reg;
9830 uint16_t data;
9831
9832 reg = CSR_READ(sc, WMREG_EECD);
9833 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
9834
9835 /* Read the size of NVM from EECD by default */
9836 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9837 switch (sc->sc_type) {
9838 case WM_T_82541:
9839 case WM_T_82541_2:
9840 case WM_T_82547:
9841 case WM_T_82547_2:
9842 /* Set dummy value to access EEPROM */
9843 sc->sc_nvm_wordsize = 64;
9844 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
9845 reg = data;
9846 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9847 if (size == 0)
9848 size = 6; /* 64 word size */
9849 else
9850 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
9851 break;
9852 case WM_T_80003:
9853 case WM_T_82571:
9854 case WM_T_82572:
9855 case WM_T_82573: /* SPI case */
9856 case WM_T_82574: /* SPI case */
9857 case WM_T_82583: /* SPI case */
9858 size += NVM_WORD_SIZE_BASE_SHIFT;
9859 if (size > 14)
9860 size = 14;
9861 break;
9862 case WM_T_82575:
9863 case WM_T_82576:
9864 case WM_T_82580:
9865 case WM_T_I350:
9866 case WM_T_I354:
9867 case WM_T_I210:
9868 case WM_T_I211:
9869 size += NVM_WORD_SIZE_BASE_SHIFT;
9870 if (size > 15)
9871 size = 15;
9872 break;
9873 default:
9874 aprint_error_dev(sc->sc_dev,
9875 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
9876 return -1;
9877 break;
9878 }
9879
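	/* EECD reports the word count as a power of two */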
9880 sc->sc_nvm_wordsize = 1 << size;
9881
9882 return 0;
9883 }
9884
9885 /*
9886 * wm_nvm_ready_spi:
9887 *
9888 * Wait for a SPI EEPROM to be ready for commands.
9889 */
9890 static int
9891 wm_nvm_ready_spi(struct wm_softc *sc)
9892 {
9893 uint32_t val;
9894 int usec;
9895
9896 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
9897 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
9898 wm_eeprom_recvbits(sc, &val, 8);
9899 if ((val & SPI_SR_RDY) == 0)
9900 break;
9901 }
9902 if (usec >= SPI_MAX_RETRIES) {
9903 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
9904 return 1;
9905 }
9906 return 0;
9907 }
9908
9909 /*
9910 * wm_nvm_read_spi:
9911 *
9912  *	Read a word from the EEPROM using the SPI protocol.
9913 */
9914 static int
9915 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9916 {
9917 uint32_t reg, val;
9918 int i;
9919 uint8_t opc;
9920
9921 /* Clear SK and CS. */
9922 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
9923 CSR_WRITE(sc, WMREG_EECD, reg);
9924 CSR_WRITE_FLUSH(sc);
9925 delay(2);
9926
9927 if (wm_nvm_ready_spi(sc))
9928 return 1;
9929
9930 /* Toggle CS to flush commands. */
9931 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
9932 CSR_WRITE_FLUSH(sc);
9933 delay(2);
9934 CSR_WRITE(sc, WMREG_EECD, reg);
9935 CSR_WRITE_FLUSH(sc);
9936 delay(2);
9937
9938 opc = SPI_OPC_READ;
9939 if (sc->sc_nvm_addrbits == 8 && word >= 128)
9940 opc |= SPI_OPC_A8;
9941
9942 wm_eeprom_sendbits(sc, opc, 8);
9943 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
9944
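	/*
	 * The EEPROM shifts each word out low byte first, so swap the
	 * two bytes of every 16-bit value received.
	 */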
9945 for (i = 0; i < wordcnt; i++) {
9946 wm_eeprom_recvbits(sc, &val, 16);
9947 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
9948 }
9949
9950 /* Raise CS and clear SK. */
9951 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
9952 CSR_WRITE(sc, WMREG_EECD, reg);
9953 CSR_WRITE_FLUSH(sc);
9954 delay(2);
9955
9956 return 0;
9957 }
9958
9959 /* Reading via the EERD register */
9960
9961 static int
9962 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
9963 {
9964 uint32_t attempts = 100000;
9965 uint32_t i, reg = 0;
9966 int32_t done = -1;
9967
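	/* Poll the DONE bit for up to attempts * 5us (~500ms) */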
9968 for (i = 0; i < attempts; i++) {
9969 reg = CSR_READ(sc, rw);
9970
9971 if (reg & EERD_DONE) {
9972 done = 0;
9973 break;
9974 }
9975 delay(5);
9976 }
9977
9978 return done;
9979 }
9980
9981 static int
9982 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
9983 uint16_t *data)
9984 {
9985 int i, eerd = 0;
9986 int error = 0;
9987
9988 for (i = 0; i < wordcnt; i++) {
9989 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
9990
9991 CSR_WRITE(sc, WMREG_EERD, eerd);
9992 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
9993 if (error != 0)
9994 break;
9995
9996 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
9997 }
9998
9999 return error;
10000 }
10001
10002 /* Flash */
10003
10004 static int
10005 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
10006 {
10007 uint32_t eecd;
10008 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
10009 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
10010 uint8_t sig_byte = 0;
10011
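	/*
	 * ICH8/ICH9 may report the valid bank in EECD's SEC1VAL bits;
	 * otherwise probe the signature byte of each bank.
	 */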
10012 switch (sc->sc_type) {
10013 case WM_T_ICH8:
10014 case WM_T_ICH9:
10015 eecd = CSR_READ(sc, WMREG_EECD);
10016 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
10017 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
10018 return 0;
10019 }
10020 /* FALLTHROUGH */
10021 default:
10022 /* Default to 0 */
10023 *bank = 0;
10024
10025 /* Check bank 0 */
10026 wm_read_ich8_byte(sc, act_offset, &sig_byte);
10027 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10028 *bank = 0;
10029 return 0;
10030 }
10031
10032 /* Check bank 1 */
10033 wm_read_ich8_byte(sc, act_offset + bank1_offset,
10034 &sig_byte);
10035 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10036 *bank = 1;
10037 return 0;
10038 }
10039 }
10040
10041 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
10042 device_xname(sc->sc_dev)));
10043 return -1;
10044 }
10045
10046 /******************************************************************************
10047 * This function does initial flash setup so that a new read/write/erase cycle
10048 * can be started.
10049 *
10050 * sc - The pointer to the hw structure
10051 ****************************************************************************/
10052 static int32_t
10053 wm_ich8_cycle_init(struct wm_softc *sc)
10054 {
10055 uint16_t hsfsts;
10056 int32_t error = 1;
10057 int32_t i = 0;
10058
10059 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10060
10061 	/* Check that the Flash Descriptor Valid bit is set in HW status */
10062 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
10063 return error;
10064 }
10065
10066 	/* Clear FCERR in HW status by writing a 1 */
10067 	/* Clear DAEL in HW status by writing a 1 */
10068 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
10069
10070 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10071
10072 	/*
10073 	 * Either we should have a hardware SPI cycle-in-progress bit to
10074 	 * check against before starting a new cycle, or the FDONE bit
10075 	 * should be changed in the hardware so that it reads 1 after a
10076 	 * hardware reset, which could then indicate whether a cycle is in
10077 	 * progress or has completed. We should also have some software
10078 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
10079 	 * so that two threads' accesses to those bits are serialized, or
10080 	 * a way to keep two threads from starting a cycle at the same time.
10081 	 */
10082
10083 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10084 /*
10085 * There is no cycle running at present, so we can start a
10086 * cycle
10087 */
10088
10089 /* Begin by setting Flash Cycle Done. */
10090 hsfsts |= HSFSTS_DONE;
10091 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10092 error = 0;
10093 } else {
10094 /*
10095 		 * Otherwise poll for some time so the current cycle has a
10096 * chance to end before giving up.
10097 */
10098 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
10099 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10100 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10101 error = 0;
10102 break;
10103 }
10104 delay(1);
10105 }
10106 if (error == 0) {
10107 /*
10108 			 * The previous cycle ended within the timeout; now
10109 			 * set the Flash Cycle Done bit.
10110 */
10111 hsfsts |= HSFSTS_DONE;
10112 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10113 }
10114 }
10115 return error;
10116 }
10117
10118 /******************************************************************************
10119 * This function starts a flash cycle and waits for its completion
10120 *
10121 * sc - The pointer to the hw structure
10122 ****************************************************************************/
10123 static int32_t
10124 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
10125 {
10126 uint16_t hsflctl;
10127 uint16_t hsfsts;
10128 int32_t error = 1;
10129 uint32_t i = 0;
10130
10131 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
10132 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10133 hsflctl |= HSFCTL_GO;
10134 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10135
10136 /* Wait till FDONE bit is set to 1 */
10137 do {
10138 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10139 if (hsfsts & HSFSTS_DONE)
10140 break;
10141 delay(1);
10142 i++;
10143 } while (i < timeout);
10144 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
10145 error = 0;
10146
10147 return error;
10148 }
10149
10150 /******************************************************************************
10151 * Reads a byte or word from the NVM using the ICH8 flash access registers.
10152 *
10153 * sc - The pointer to the hw structure
10154 * index - The index of the byte or word to read.
10155 * size - Size of data to read, 1=byte 2=word
10156 * data - Pointer to the word to store the value read.
10157 *****************************************************************************/
10158 static int32_t
10159 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
10160 uint32_t size, uint16_t *data)
10161 {
10162 uint16_t hsfsts;
10163 uint16_t hsflctl;
10164 uint32_t flash_linear_address;
10165 uint32_t flash_data = 0;
10166 int32_t error = 1;
10167 int32_t count = 0;
10168
10169 	if (size < 1 || size > 2 || data == NULL ||
10170 index > ICH_FLASH_LINEAR_ADDR_MASK)
10171 return error;
10172
10173 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
10174 sc->sc_ich8_flash_base;
10175
10176 do {
10177 delay(1);
10178 /* Steps */
10179 error = wm_ich8_cycle_init(sc);
10180 if (error)
10181 break;
10182
10183 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10184 		/* 0b/1b corresponds to a 1- or 2-byte transfer size, respectively. */
10185 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
10186 & HSFCTL_BCOUNT_MASK;
10187 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
10188 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10189
10190 /*
10191 * Write the last 24 bits of index into Flash Linear address
10192 * field in Flash Address
10193 */
10194 		/* TODO: maybe check the index against the size of the flash */
10195
10196 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
10197
10198 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
10199
10200 		/*
10201 		 * Check if FCERR is set to 1. If so, clear it and retry
10202 		 * the whole sequence a few more times; otherwise read in
10203 		 * (shift in) the Flash Data0 register, least significant
10204 		 * byte first.
10205 		 */
10206 if (error == 0) {
10207 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
10208 if (size == 1)
10209 *data = (uint8_t)(flash_data & 0x000000FF);
10210 else if (size == 2)
10211 *data = (uint16_t)(flash_data & 0x0000FFFF);
10212 break;
10213 } else {
10214 /*
10215 * If we've gotten here, then things are probably
10216 * completely hosed, but if the error condition is
10217 * detected, it won't hurt to give it another try...
10218 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
10219 */
10220 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10221 if (hsfsts & HSFSTS_ERR) {
10222 /* Repeat for some time before giving up. */
10223 continue;
10224 } else if ((hsfsts & HSFSTS_DONE) == 0)
10225 break;
10226 }
10227 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
10228
10229 return error;
10230 }
10231
10232 /******************************************************************************
10233 * Reads a single byte from the NVM using the ICH8 flash access registers.
10234 *
10235 * sc - pointer to wm_hw structure
10236 * index - The index of the byte to read.
10237 * data - Pointer to a byte to store the value read.
10238 *****************************************************************************/
10239 static int32_t
10240 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
10241 {
10242 int32_t status;
10243 uint16_t word = 0;
10244
10245 status = wm_read_ich8_data(sc, index, 1, &word);
10246 if (status == 0)
10247 *data = (uint8_t)word;
10248 else
10249 *data = 0;
10250
10251 return status;
10252 }
10253
10254 /******************************************************************************
10255 * Reads a word from the NVM using the ICH8 flash access registers.
10256 *
10257 * sc - pointer to wm_hw structure
10258 * index - The starting byte index of the word to read.
10259 * data - Pointer to a word to store the value read.
10260 *****************************************************************************/
10261 static int32_t
10262 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
10263 {
10264 int32_t status;
10265
10266 status = wm_read_ich8_data(sc, index, 2, data);
10267 return status;
10268 }
10269
10270 /******************************************************************************
10271 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
10272 * register.
10273 *
10274 * sc - Struct containing variables accessed by shared code
10275 * offset - offset of word in the EEPROM to read
10276 * data - word read from the EEPROM
10277 * words - number of words to read
10278 *****************************************************************************/
10279 static int
10280 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
10281 {
10282 int32_t error = 0;
10283 uint32_t flash_bank = 0;
10284 uint32_t act_offset = 0;
10285 uint32_t bank_offset = 0;
10286 uint16_t word = 0;
10287 uint16_t i = 0;
10288
10289 /*
10290 * We need to know which is the valid flash bank. In the event
10291 * that we didn't allocate eeprom_shadow_ram, we may not be
10292 * managing flash_bank. So it cannot be trusted and needs
10293 * to be updated with each read.
10294 */
10295 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10296 if (error) {
10297 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10298 device_xname(sc->sc_dev)));
10299 flash_bank = 0;
10300 }
10301
10302 /*
10303 * Adjust offset appropriately if we're on bank 1 - adjust for word
10304 * size
10305 */
10306 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10307
10308 error = wm_get_swfwhw_semaphore(sc);
10309 if (error) {
10310 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10311 __func__);
10312 return error;
10313 }
10314
10315 for (i = 0; i < words; i++) {
10316 /* The NVM part needs a byte offset, hence * 2 */
10317 act_offset = bank_offset + ((offset + i) * 2);
10318 error = wm_read_ich8_word(sc, act_offset, &word);
10319 if (error) {
10320 aprint_error_dev(sc->sc_dev,
10321 "%s: failed to read NVM\n", __func__);
10322 break;
10323 }
10324 data[i] = word;
10325 }
10326
10327 wm_put_swfwhw_semaphore(sc);
10328 return error;
10329 }
10330
10331 /* iNVM */
10332
10333 static int
10334 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
10335 {
10336 	int32_t rv = -1;	/* -1 = word not found */
10337 uint32_t invm_dword;
10338 uint16_t i;
10339 uint8_t record_type, word_address;
10340
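	/*
	 * Walk the iNVM records, skipping over CSR-autoload and RSA-key
	 * data blobs, until a word-autoload record for this address (or
	 * an uninitialized record) is found.
	 */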
10341 for (i = 0; i < INVM_SIZE; i++) {
10342 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
10343 /* Get record type */
10344 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
10345 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
10346 break;
10347 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
10348 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
10349 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
10350 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
10351 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
10352 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
10353 if (word_address == address) {
10354 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
10355 rv = 0;
10356 break;
10357 }
10358 }
10359 }
10360
10361 return rv;
10362 }
10363
10364 static int
10365 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
10366 {
10367 int rv = 0;
10368 int i;
10369
10370 for (i = 0; i < words; i++) {
10371 switch (offset + i) {
10372 case NVM_OFF_MACADDR:
10373 case NVM_OFF_MACADDR1:
10374 case NVM_OFF_MACADDR2:
10375 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
10376 if (rv != 0) {
10377 data[i] = 0xffff;
10378 rv = -1;
10379 }
10380 break;
10381 case NVM_OFF_CFG2:
10382 rv = wm_nvm_read_word_invm(sc, offset, data);
10383 if (rv != 0) {
10384 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
10385 rv = 0;
10386 }
10387 break;
10388 case NVM_OFF_CFG4:
10389 rv = wm_nvm_read_word_invm(sc, offset, data);
10390 if (rv != 0) {
10391 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
10392 rv = 0;
10393 }
10394 break;
10395 case NVM_OFF_LED_1_CFG:
10396 rv = wm_nvm_read_word_invm(sc, offset, data);
10397 if (rv != 0) {
10398 *data = NVM_LED_1_CFG_DEFAULT_I211;
10399 rv = 0;
10400 }
10401 break;
10402 case NVM_OFF_LED_0_2_CFG:
10403 rv = wm_nvm_read_word_invm(sc, offset, data);
10404 if (rv != 0) {
10405 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
10406 rv = 0;
10407 }
10408 break;
10409 case NVM_OFF_ID_LED_SETTINGS:
10410 rv = wm_nvm_read_word_invm(sc, offset, data);
10411 if (rv != 0) {
10412 *data = ID_LED_RESERVED_FFFF;
10413 rv = 0;
10414 }
10415 break;
10416 default:
10417 DPRINTF(WM_DEBUG_NVM,
10418 ("NVM word 0x%02x is not mapped.\n", offset));
10419 *data = NVM_RESERVED_WORD;
10420 break;
10421 }
10422 }
10423
10424 return rv;
10425 }
10426
10427 /* Lock, detecting NVM type, validate checksum, version and read */
10428
10429 /*
10430 * wm_nvm_acquire:
10431 *
10432 * Perform the EEPROM handshake required on some chips.
10433 */
10434 static int
10435 wm_nvm_acquire(struct wm_softc *sc)
10436 {
10437 uint32_t reg;
10438 int x;
10439 int ret = 0;
10440
10441 	/* Flash-type NVM needs no handshake; always succeeds */
10442 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
10443 return 0;
10444
10445 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
10446 ret = wm_get_swfwhw_semaphore(sc);
10447 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
10448 /* This will also do wm_get_swsm_semaphore() if needed */
10449 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
10450 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
10451 ret = wm_get_swsm_semaphore(sc);
10452 }
10453
10454 if (ret) {
10455 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10456 __func__);
10457 return 1;
10458 }
10459
10460 if (sc->sc_flags & WM_F_LOCK_EECD) {
10461 reg = CSR_READ(sc, WMREG_EECD);
10462
10463 /* Request EEPROM access. */
10464 reg |= EECD_EE_REQ;
10465 CSR_WRITE(sc, WMREG_EECD, reg);
10466
10467 /* ..and wait for it to be granted. */
10468 for (x = 0; x < 1000; x++) {
10469 reg = CSR_READ(sc, WMREG_EECD);
10470 if (reg & EECD_EE_GNT)
10471 break;
10472 delay(5);
10473 }
10474 if ((reg & EECD_EE_GNT) == 0) {
10475 aprint_error_dev(sc->sc_dev,
10476 "could not acquire EEPROM GNT\n");
10477 reg &= ~EECD_EE_REQ;
10478 CSR_WRITE(sc, WMREG_EECD, reg);
10479 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10480 wm_put_swfwhw_semaphore(sc);
10481 if (sc->sc_flags & WM_F_LOCK_SWFW)
10482 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10483 else if (sc->sc_flags & WM_F_LOCK_SWSM)
10484 wm_put_swsm_semaphore(sc);
10485 return 1;
10486 }
10487 }
10488
10489 return 0;
10490 }
10491
10492 /*
10493 * wm_nvm_release:
10494 *
10495 * Release the EEPROM mutex.
10496 */
10497 static void
10498 wm_nvm_release(struct wm_softc *sc)
10499 {
10500 uint32_t reg;
10501
10502 	/* Flash-type NVM needs no handshake; nothing to release */
10503 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
10504 return;
10505
10506 if (sc->sc_flags & WM_F_LOCK_EECD) {
10507 reg = CSR_READ(sc, WMREG_EECD);
10508 reg &= ~EECD_EE_REQ;
10509 CSR_WRITE(sc, WMREG_EECD, reg);
10510 }
10511
10512 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10513 wm_put_swfwhw_semaphore(sc);
10514 if (sc->sc_flags & WM_F_LOCK_SWFW)
10515 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10516 else if (sc->sc_flags & WM_F_LOCK_SWSM)
10517 wm_put_swsm_semaphore(sc);
10518 }
10519
10520 static int
10521 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
10522 {
10523 uint32_t eecd = 0;
10524
10525 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
10526 || sc->sc_type == WM_T_82583) {
10527 eecd = CSR_READ(sc, WMREG_EECD);
10528
10529 /* Isolate bits 15 & 16 */
10530 eecd = ((eecd >> 15) & 0x03);
10531
10532 /* If both bits are set, device is Flash type */
10533 if (eecd == 0x03)
10534 return 0;
10535 }
10536 return 1;
10537 }
10538
10539 static int
10540 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
10541 {
10542 uint32_t eec;
10543
10544 eec = CSR_READ(sc, WMREG_EEC);
10545 if ((eec & EEC_FLASH_DETECTED) != 0)
10546 return 1;
10547
10548 return 0;
10549 }
10550
10551 /*
10552 * wm_nvm_validate_checksum
10553 *
10554 * The checksum is defined as the sum of the first 64 (16 bit) words.
10555 */
10556 static int
10557 wm_nvm_validate_checksum(struct wm_softc *sc)
10558 {
10559 uint16_t checksum;
10560 uint16_t eeprom_data;
10561 #ifdef WM_DEBUG
10562 uint16_t csum_wordaddr, valid_checksum;
10563 #endif
10564 int i;
10565
10566 checksum = 0;
10567
10568 /* Don't check for I211 */
10569 if (sc->sc_type == WM_T_I211)
10570 return 0;
10571
10572 #ifdef WM_DEBUG
10573 if (sc->sc_type == WM_T_PCH_LPT) {
10574 csum_wordaddr = NVM_OFF_COMPAT;
10575 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
10576 } else {
10577 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
10578 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
10579 }
10580
10581 /* Dump EEPROM image for debug */
10582 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10583 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10584 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
10585 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
10586 if ((eeprom_data & valid_checksum) == 0) {
10587 DPRINTF(WM_DEBUG_NVM,
10588 ("%s: NVM need to be updated (%04x != %04x)\n",
10589 device_xname(sc->sc_dev), eeprom_data,
10590 valid_checksum));
10591 }
10592 }
10593
10594 if ((wm_debug & WM_DEBUG_NVM) != 0) {
10595 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
10596 for (i = 0; i < NVM_SIZE; i++) {
10597 if (wm_nvm_read(sc, i, 1, &eeprom_data))
10598 printf("XXXX ");
10599 else
10600 printf("%04hx ", eeprom_data);
10601 if (i % 8 == 7)
10602 printf("\n");
10603 }
10604 }
10605
10606 #endif /* WM_DEBUG */
10607
10608 for (i = 0; i < NVM_SIZE; i++) {
10609 if (wm_nvm_read(sc, i, 1, &eeprom_data))
10610 return 1;
10611 checksum += eeprom_data;
10612 }
10613
10614 if (checksum != (uint16_t) NVM_CHECKSUM) {
10615 #ifdef WM_DEBUG
10616 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
10617 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
10618 #endif
10619 }
10620
10621 return 0;
10622 }
10623
10624 static void
10625 wm_nvm_version_invm(struct wm_softc *sc)
10626 {
10627 uint32_t dword;
10628
10629 /*
10630 	 * Linux's code to decode the version is very strange, so we don't
10631 	 * follow that algorithm and just use word 61 as the document
10632 	 * describes. Perhaps it's not perfect though...
10633 *
10634 * Example:
10635 *
10636 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
10637 */
10638 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
10639 dword = __SHIFTOUT(dword, INVM_VER_1);
10640 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
10641 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
10642 }
10643
10644 static void
10645 wm_nvm_version(struct wm_softc *sc)
10646 {
10647 uint16_t major, minor, build, patch;
10648 uint16_t uid0, uid1;
10649 uint16_t nvm_data;
10650 uint16_t off;
10651 bool check_version = false;
10652 bool check_optionrom = false;
10653 bool have_build = false;
10654
10655 /*
10656 * Version format:
10657 *
10658 * XYYZ
10659 * X0YZ
10660 * X0YY
10661 *
10662 * Example:
10663 *
10664 * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10)
10665 * 82571 0x50a6 5.10.6?
10666 * 82572 0x506a 5.6.10?
10667 * 82572EI 0x5069 5.6.9?
10668 * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4)
10669 * 0x2013 2.1.3?
10670 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
10671 */
10672 wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
10673 switch (sc->sc_type) {
10674 case WM_T_82571:
10675 case WM_T_82572:
10676 case WM_T_82574:
10677 case WM_T_82583:
10678 check_version = true;
10679 check_optionrom = true;
10680 have_build = true;
10681 break;
10682 case WM_T_82575:
10683 case WM_T_82576:
10684 case WM_T_82580:
10685 if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
10686 check_version = true;
10687 break;
10688 case WM_T_I211:
10689 wm_nvm_version_invm(sc);
10690 goto printver;
10691 case WM_T_I210:
10692 if (!wm_nvm_get_flash_presence_i210(sc)) {
10693 wm_nvm_version_invm(sc);
10694 goto printver;
10695 }
10696 /* FALLTHROUGH */
10697 case WM_T_I350:
10698 case WM_T_I354:
10699 check_version = true;
10700 check_optionrom = true;
10701 break;
10702 default:
10703 return;
10704 }
10705 if (check_version) {
10706 wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
10707 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
10708 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
10709 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
10710 build = nvm_data & NVM_BUILD_MASK;
10711 have_build = true;
10712 } else
10713 minor = nvm_data & 0x00ff;
10714
10715 		/* Each nibble holds a decimal digit; convert (e.g. 0x16 -> 16) */
10716 minor = (minor / 16) * 10 + (minor % 16);
10717 sc->sc_nvm_ver_major = major;
10718 sc->sc_nvm_ver_minor = minor;
10719
10720 printver:
10721 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
10722 sc->sc_nvm_ver_minor);
10723 if (have_build) {
10724 sc->sc_nvm_ver_build = build;
10725 aprint_verbose(".%d", build);
10726 }
10727 }
10728 if (check_optionrom) {
10729 wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
10730 /* Option ROM Version */
10731 if ((off != 0x0000) && (off != 0xffff)) {
10732 off += NVM_COMBO_VER_OFF;
10733 wm_nvm_read(sc, off + 1, 1, &uid1);
10734 wm_nvm_read(sc, off, 1, &uid0);
10735 if ((uid0 != 0) && (uid0 != 0xffff)
10736 && (uid1 != 0) && (uid1 != 0xffff)) {
10737 /* 16bits */
10738 major = uid0 >> 8;
10739 build = (uid0 << 8) | (uid1 >> 8);
10740 patch = uid1 & 0x00ff;
10741 aprint_verbose(", option ROM Version %d.%d.%d",
10742 major, build, patch);
10743 }
10744 }
10745 }
10746
10747 wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
10748 aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
10749 }
10750
10751 /*
10752 * wm_nvm_read:
10753 *
10754 * Read data from the serial EEPROM.
10755 */
10756 static int
10757 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10758 {
10759 int rv;
10760
10761 if (sc->sc_flags & WM_F_EEPROM_INVALID)
10762 return 1;
10763
10764 if (wm_nvm_acquire(sc))
10765 return 1;
10766
10767 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10768 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10769 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
10770 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
10771 else if (sc->sc_flags & WM_F_EEPROM_INVM)
10772 rv = wm_nvm_read_invm(sc, word, wordcnt, data);
10773 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
10774 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
10775 else if (sc->sc_flags & WM_F_EEPROM_SPI)
10776 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
10777 else
10778 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
10779
10780 wm_nvm_release(sc);
10781 return rv;
10782 }
10783
10784 /*
10785 * Hardware semaphores.
10786  * Very complex...
10787 */
10788
10789 static int
10790 wm_get_swsm_semaphore(struct wm_softc *sc)
10791 {
10792 int32_t timeout;
10793 uint32_t swsm;
10794
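	/*
	 * Two-stage handshake: take the software semaphore (SMBI) first,
	 * then the software/firmware semaphore (SWESMBI).
	 */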
10795 if (sc->sc_flags & WM_F_LOCK_SWSM) {
10796 /* Get the SW semaphore. */
10797 timeout = sc->sc_nvm_wordsize + 1;
10798 while (timeout) {
10799 swsm = CSR_READ(sc, WMREG_SWSM);
10800
10801 if ((swsm & SWSM_SMBI) == 0)
10802 break;
10803
10804 delay(50);
10805 timeout--;
10806 }
10807
10808 if (timeout == 0) {
10809 aprint_error_dev(sc->sc_dev,
10810 "could not acquire SWSM SMBI\n");
10811 return 1;
10812 }
10813 }
10814
10815 /* Get the FW semaphore. */
10816 timeout = sc->sc_nvm_wordsize + 1;
10817 while (timeout) {
10818 swsm = CSR_READ(sc, WMREG_SWSM);
10819 swsm |= SWSM_SWESMBI;
10820 CSR_WRITE(sc, WMREG_SWSM, swsm);
10821 /* If we managed to set the bit we got the semaphore. */
10822 swsm = CSR_READ(sc, WMREG_SWSM);
10823 if (swsm & SWSM_SWESMBI)
10824 break;
10825
10826 delay(50);
10827 timeout--;
10828 }
10829
10830 if (timeout == 0) {
10831 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
10832 /* Release semaphores */
10833 wm_put_swsm_semaphore(sc);
10834 return 1;
10835 }
10836 return 0;
10837 }
10838
10839 static void
10840 wm_put_swsm_semaphore(struct wm_softc *sc)
10841 {
10842 uint32_t swsm;
10843
10844 swsm = CSR_READ(sc, WMREG_SWSM);
10845 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
10846 CSR_WRITE(sc, WMREG_SWSM, swsm);
10847 }
10848
10849 static int
10850 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
10851 {
10852 uint32_t swfw_sync;
10853 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
10854 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
10855 int timeout = 200;
10856
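	/* Retry for up to 200 * 5ms = 1s */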
10857 for (timeout = 0; timeout < 200; timeout++) {
10858 if (sc->sc_flags & WM_F_LOCK_SWSM) {
10859 if (wm_get_swsm_semaphore(sc)) {
10860 aprint_error_dev(sc->sc_dev,
10861 "%s: failed to get semaphore\n",
10862 __func__);
10863 return 1;
10864 }
10865 }
10866 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
10867 if ((swfw_sync & (swmask | fwmask)) == 0) {
10868 swfw_sync |= swmask;
10869 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
10870 if (sc->sc_flags & WM_F_LOCK_SWSM)
10871 wm_put_swsm_semaphore(sc);
10872 return 0;
10873 }
10874 if (sc->sc_flags & WM_F_LOCK_SWSM)
10875 wm_put_swsm_semaphore(sc);
10876 delay(5000);
10877 }
10878 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
10879 device_xname(sc->sc_dev), mask, swfw_sync);
10880 return 1;
10881 }
10882
10883 static void
10884 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
10885 {
10886 uint32_t swfw_sync;
10887
10888 if (sc->sc_flags & WM_F_LOCK_SWSM) {
10889 while (wm_get_swsm_semaphore(sc) != 0)
10890 continue;
10891 }
10892 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
10893 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
10894 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
10895 if (sc->sc_flags & WM_F_LOCK_SWSM)
10896 wm_put_swsm_semaphore(sc);
10897 }
10898
10899 static int
10900 wm_get_swfwhw_semaphore(struct wm_softc *sc)
10901 {
10902 uint32_t ext_ctrl;
10903 int timeout = 200;
10904
10905 for (timeout = 0; timeout < 200; timeout++) {
10906 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10907 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
10908 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10909
10910 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10911 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
10912 return 0;
10913 delay(5000);
10914 }
10915 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
10916 device_xname(sc->sc_dev), ext_ctrl);
10917 return 1;
10918 }
10919
10920 static void
10921 wm_put_swfwhw_semaphore(struct wm_softc *sc)
10922 {
10923 uint32_t ext_ctrl;
10924 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10925 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10926 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10927 }
10928
10929 static int
10930 wm_get_hw_semaphore_82573(struct wm_softc *sc)
10931 {
10932 int i = 0;
10933 uint32_t reg;
10934
10935 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10936 do {
10937 CSR_WRITE(sc, WMREG_EXTCNFCTR,
10938 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
10939 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10940 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
10941 break;
10942 delay(2*1000);
10943 i++;
10944 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
10945
10946 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
10947 wm_put_hw_semaphore_82573(sc);
10948 log(LOG_ERR, "%s: Driver can't access the PHY\n",
10949 device_xname(sc->sc_dev));
10950 return -1;
10951 }
10952
10953 return 0;
10954 }
10955
10956 static void
10957 wm_put_hw_semaphore_82573(struct wm_softc *sc)
10958 {
10959 uint32_t reg;
10960
10961 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10962 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10963 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
10964 }
10965
10966 /*
10967 * Management mode and power management related subroutines.
10968 * BMC, AMT, suspend/resume and EEE.
10969 */
10970
10971 static int
10972 wm_check_mng_mode(struct wm_softc *sc)
10973 {
10974 int rv;
10975
10976 switch (sc->sc_type) {
10977 case WM_T_ICH8:
10978 case WM_T_ICH9:
10979 case WM_T_ICH10:
10980 case WM_T_PCH:
10981 case WM_T_PCH2:
10982 case WM_T_PCH_LPT:
10983 rv = wm_check_mng_mode_ich8lan(sc);
10984 break;
10985 case WM_T_82574:
10986 case WM_T_82583:
10987 rv = wm_check_mng_mode_82574(sc);
10988 break;
10989 case WM_T_82571:
10990 case WM_T_82572:
10991 case WM_T_82573:
10992 case WM_T_80003:
10993 rv = wm_check_mng_mode_generic(sc);
10994 break;
10995 default:
10996 		/* Nothing to do */
10997 rv = 0;
10998 break;
10999 }
11000
11001 return rv;
11002 }
11003
11004 static int
11005 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
11006 {
11007 uint32_t fwsm;
11008
11009 fwsm = CSR_READ(sc, WMREG_FWSM);
11010
11011 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
11012 return 1;
11013
11014 return 0;
11015 }
11016
11017 static int
11018 wm_check_mng_mode_82574(struct wm_softc *sc)
11019 {
11020 uint16_t data;
11021
11022 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11023
11024 if ((data & NVM_CFG2_MNGM_MASK) != 0)
11025 return 1;
11026
11027 return 0;
11028 }
11029
11030 static int
11031 wm_check_mng_mode_generic(struct wm_softc *sc)
11032 {
11033 uint32_t fwsm;
11034
11035 fwsm = CSR_READ(sc, WMREG_FWSM);
11036
11037 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
11038 return 1;
11039
11040 return 0;
11041 }
11042
11043 static int
11044 wm_enable_mng_pass_thru(struct wm_softc *sc)
11045 {
11046 uint32_t manc, fwsm, factps;
11047
11048 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
11049 return 0;
11050
11051 manc = CSR_READ(sc, WMREG_MANC);
11052
11053 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
11054 device_xname(sc->sc_dev), manc));
11055 if ((manc & MANC_RECV_TCO_EN) == 0)
11056 return 0;
11057
11058 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
11059 fwsm = CSR_READ(sc, WMREG_FWSM);
11060 factps = CSR_READ(sc, WMREG_FACTPS);
11061 if (((factps & FACTPS_MNGCG) == 0)
11062 && ((fwsm & FWSM_MODE_MASK)
11063 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
11064 return 1;
11065 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
11066 uint16_t data;
11067
11068 factps = CSR_READ(sc, WMREG_FACTPS);
11069 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11070 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
11071 device_xname(sc->sc_dev), factps, data));
11072 if (((factps & FACTPS_MNGCG) == 0)
11073 && ((data & NVM_CFG2_MNGM_MASK)
11074 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
11075 return 1;
11076 } else if (((manc & MANC_SMBUS_EN) != 0)
11077 && ((manc & MANC_ASF_EN) == 0))
11078 return 1;
11079
11080 return 0;
11081 }
11082
11083 static int
11084 wm_check_reset_block(struct wm_softc *sc)
11085 {
11086 uint32_t reg;
11087
11088 switch (sc->sc_type) {
11089 case WM_T_ICH8:
11090 case WM_T_ICH9:
11091 case WM_T_ICH10:
11092 case WM_T_PCH:
11093 case WM_T_PCH2:
11094 case WM_T_PCH_LPT:
11095 reg = CSR_READ(sc, WMREG_FWSM);
11096 if ((reg & FWSM_RSPCIPHY) != 0)
11097 return 0;
11098 else
11099 return -1;
11100 break;
11101 case WM_T_82571:
11102 case WM_T_82572:
11103 case WM_T_82573:
11104 case WM_T_82574:
11105 case WM_T_82583:
11106 case WM_T_80003:
11107 reg = CSR_READ(sc, WMREG_MANC);
11108 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
11109 return -1;
11110 else
11111 return 0;
11112 break;
11113 default:
11114 /* no problem */
11115 break;
11116 }
11117
11118 return 0;
11119 }
11120
11121 static void
11122 wm_get_hw_control(struct wm_softc *sc)
11123 {
11124 uint32_t reg;
11125
11126 switch (sc->sc_type) {
11127 case WM_T_82573:
11128 reg = CSR_READ(sc, WMREG_SWSM);
11129 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
11130 break;
11131 case WM_T_82571:
11132 case WM_T_82572:
11133 case WM_T_82574:
11134 case WM_T_82583:
11135 case WM_T_80003:
11136 case WM_T_ICH8:
11137 case WM_T_ICH9:
11138 case WM_T_ICH10:
11139 case WM_T_PCH:
11140 case WM_T_PCH2:
11141 case WM_T_PCH_LPT:
11142 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11143 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
11144 break;
11145 default:
11146 break;
11147 }
11148 }
11149
11150 static void
11151 wm_release_hw_control(struct wm_softc *sc)
11152 {
11153 uint32_t reg;
11154
11155 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
11156 return;
11157
11158 if (sc->sc_type == WM_T_82573) {
11159 reg = CSR_READ(sc, WMREG_SWSM);
11160 reg &= ~SWSM_DRV_LOAD;
11161 		CSR_WRITE(sc, WMREG_SWSM, reg);
11162 } else {
11163 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11164 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
11165 }
11166 }
11167
11168 static void
11169 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
11170 {
11171 uint32_t reg;
11172
11173 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11174
11175 if (on != 0)
11176 reg |= EXTCNFCTR_GATE_PHY_CFG;
11177 else
11178 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
11179
11180 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11181 }
11182
11183 static void
11184 wm_smbustopci(struct wm_softc *sc)
11185 {
11186 uint32_t fwsm;
11187
11188 fwsm = CSR_READ(sc, WMREG_FWSM);
11189 if (((fwsm & FWSM_FW_VALID) == 0)
11190 && ((wm_check_reset_block(sc) == 0))) {
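		/*
		 * Drive the LANPHYPC pin to switch the PHY from SMBus to
		 * PCIe mode.
		 */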
11191 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
11192 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
11193 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11194 CSR_WRITE_FLUSH(sc);
11195 delay(10);
11196 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
11197 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11198 CSR_WRITE_FLUSH(sc);
11199 delay(50*1000);
11200
11201 /*
11202 * Gate automatic PHY configuration by hardware on non-managed
11203 * 82579
11204 */
11205 if (sc->sc_type == WM_T_PCH2)
11206 wm_gate_hw_phy_config_ich8lan(sc, 1);
11207 }
11208 }
11209
11210 static void
11211 wm_init_manageability(struct wm_softc *sc)
11212 {
11213
11214 if (sc->sc_flags & WM_F_HAS_MANAGE) {
11215 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
11216 uint32_t manc = CSR_READ(sc, WMREG_MANC);
11217
11218 /* Disable hardware interception of ARP */
11219 manc &= ~MANC_ARP_EN;
11220
11221 /* Enable receiving management packets to the host */
11222 if (sc->sc_type >= WM_T_82571) {
11223 manc |= MANC_EN_MNG2HOST;
11224 manc2h |= MANC2H_PORT_623| MANC2H_PORT_624;
11225 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
11226 }
11227
11228 CSR_WRITE(sc, WMREG_MANC, manc);
11229 }
11230 }
11231
11232 static void
11233 wm_release_manageability(struct wm_softc *sc)
11234 {
11235
11236 if (sc->sc_flags & WM_F_HAS_MANAGE) {
11237 uint32_t manc = CSR_READ(sc, WMREG_MANC);
11238
11239 manc |= MANC_ARP_EN;
11240 if (sc->sc_type >= WM_T_82571)
11241 manc &= ~MANC_EN_MNG2HOST;
11242
11243 CSR_WRITE(sc, WMREG_MANC, manc);
11244 }
11245 }
11246
11247 static void
11248 wm_get_wakeup(struct wm_softc *sc)
11249 {
11250
11251 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
11252 switch (sc->sc_type) {
11253 case WM_T_82573:
11254 case WM_T_82583:
11255 sc->sc_flags |= WM_F_HAS_AMT;
11256 /* FALLTHROUGH */
11257 case WM_T_80003:
11258 case WM_T_82541:
11259 case WM_T_82547:
11260 case WM_T_82571:
11261 case WM_T_82572:
11262 case WM_T_82574:
11263 case WM_T_82575:
11264 case WM_T_82576:
11265 case WM_T_82580:
11266 case WM_T_I350:
11267 case WM_T_I354:
11268 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
11269 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
11270 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11271 break;
11272 case WM_T_ICH8:
11273 case WM_T_ICH9:
11274 case WM_T_ICH10:
11275 case WM_T_PCH:
11276 case WM_T_PCH2:
11277 case WM_T_PCH_LPT:
11278 sc->sc_flags |= WM_F_HAS_AMT;
11279 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11280 break;
11281 default:
11282 break;
11283 }
11284
11285 /* 1: HAS_MANAGE */
11286 if (wm_enable_mng_pass_thru(sc) != 0)
11287 sc->sc_flags |= WM_F_HAS_MANAGE;
11288
11289 #ifdef WM_DEBUG
11290 printf("\n");
11291 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
11292 printf("HAS_AMT,");
11293 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
11294 printf("ARC_SUBSYS_VALID,");
11295 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
11296 printf("ASF_FIRMWARE_PRES,");
11297 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
11298 printf("HAS_MANAGE,");
11299 printf("\n");
11300 #endif
11301 /*
11302 	 * Note that the WOL flags are set after the EEPROM-related
11303 	 * resetting is done.
11304 */
11305 }
11306
11307 #ifdef WM_WOL
11308 /* WOL in the newer chipset interfaces (pchlan) */
11309 static void
11310 wm_enable_phy_wakeup(struct wm_softc *sc)
11311 {
11312 #if 0
11313 uint16_t preg;
11314
11315 /* Copy MAC RARs to PHY RARs */
11316
11317 /* Copy MAC MTA to PHY MTA */
11318
11319 /* Configure PHY Rx Control register */
11320
11321 /* Enable PHY wakeup in MAC register */
11322
11323 /* Configure and enable PHY wakeup in PHY registers */
11324
11325 /* Activate PHY wakeup */
11326
11327 /* XXX */
11328 #endif
11329 }
11330
11331 /* Power down workaround on D3 */
11332 static void
11333 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
11334 {
11335 uint32_t reg;
11336 int i;
11337
11338 for (i = 0; i < 2; i++) {
11339 /* Disable link */
11340 reg = CSR_READ(sc, WMREG_PHY_CTRL);
11341 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
11342 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11343
11344 /*
11345 * Call gig speed drop workaround on Gig disable before
11346 * accessing any PHY registers
11347 */
11348 if (sc->sc_type == WM_T_ICH8)
11349 wm_gig_downshift_workaround_ich8lan(sc);
11350
11351 /* Write VR power-down enable */
11352 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
11353 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
11354 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
11355 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
11356
11357 /* Read it back and test */
11358 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
11359 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
11360 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
11361 break;
11362
11363 /* Issue PHY reset and repeat at most one more time */
11364 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
11365 }
11366 }
11367
11368 static void
11369 wm_enable_wakeup(struct wm_softc *sc)
11370 {
11371 uint32_t reg, pmreg;
11372 pcireg_t pmode;
11373
11374 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
11375 &pmreg, NULL) == 0)
11376 return;
11377
11378 /* Advertise the wakeup capability */
11379 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
11380 | CTRL_SWDPIN(3));
11381 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
11382
11383 /* ICH workaround */
11384 switch (sc->sc_type) {
11385 case WM_T_ICH8:
11386 case WM_T_ICH9:
11387 case WM_T_ICH10:
11388 case WM_T_PCH:
11389 case WM_T_PCH2:
11390 case WM_T_PCH_LPT:
11391 /* Disable gig during WOL */
11392 reg = CSR_READ(sc, WMREG_PHY_CTRL);
11393 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
11394 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11395 if (sc->sc_type == WM_T_PCH)
11396 wm_gmii_reset(sc);
11397
11398 /* Power down workaround */
11399 if (sc->sc_phytype == WMPHY_82577) {
11400 struct mii_softc *child;
11401
11402 /* Assume that the PHY is copper */
11403 child = LIST_FIRST(&sc->sc_mii.mii_phys);
11404 if (child->mii_mpd_rev <= 2)
11405 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
11406 (768 << 5) | 25, 0x0444); /* magic num */
11407 }
11408 break;
11409 default:
11410 break;
11411 }
11412
11413 /* Keep the laser running on fiber adapters */
11414 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
11415 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
11416 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11417 reg |= CTRL_EXT_SWDPIN(3);
11418 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11419 }
11420
11421 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
11422 #if 0 /* for the multicast packet */
11423 reg |= WUFC_MC;
11424 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
11425 #endif
11426
11427 if (sc->sc_type == WM_T_PCH) {
11428 wm_enable_phy_wakeup(sc);
11429 } else {
11430 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
11431 CSR_WRITE(sc, WMREG_WUFC, reg);
11432 }
11433
11434 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11435 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11436 || (sc->sc_type == WM_T_PCH2))
11437 && (sc->sc_phytype == WMPHY_IGP_3))
11438 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
11439
11440 /* Request PME */
11441 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
11442 #if 0
11443 /* Disable WOL */
11444 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
11445 #else
11446 /* For WOL */
11447 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
11448 #endif
11449 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
11450 }
11451 #endif /* WM_WOL */
11452
11453 /* EEE */
11454
11455 static void
11456 wm_set_eee_i350(struct wm_softc *sc)
11457 {
11458 uint32_t ipcnfg, eeer;
11459
11460 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
11461 eeer = CSR_READ(sc, WMREG_EEER);
11462
11463 if ((sc->sc_flags & WM_F_EEE) != 0) {
11464 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
11465 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
11466 | EEER_LPI_FC);
11467 } else {
11468 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
11469 ipcnfg &= ~IPCNFG_10BASE_TE;
11470 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
11471 | EEER_LPI_FC);
11472 }
11473
11474 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
11475 CSR_WRITE(sc, WMREG_EEER, eeer);
11476 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
11477 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
11478 }
11479
11480 /*
11481 * Workarounds (mainly PHY related).
11482 * Basically, PHY's workarounds are in the PHY drivers.
11483 */
11484
11485 /* Work-around for 82566 Kumeran PCS lock loss */
11486 static void
11487 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
11488 {
11489 int miistatus, active, i;
11490 int reg;
11491
11492 miistatus = sc->sc_mii.mii_media_status;
11493
11494 /* If the link is not up, do nothing */
11495 	if ((miistatus & IFM_ACTIVE) == 0)
11496 return;
11497
11498 active = sc->sc_mii.mii_media_active;
11499
11500 /* Nothing to do if the link is other than 1Gbps */
11501 if (IFM_SUBTYPE(active) != IFM_1000_T)
11502 return;
11503
11504 for (i = 0; i < 10; i++) {
11505 /* read twice */
11506 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
11507 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
11508 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
11509 goto out; /* GOOD! */
11510
11511 /* Reset the PHY */
11512 wm_gmii_reset(sc);
11513 delay(5*1000);
11514 }
11515
11516 /* Disable GigE link negotiation */
11517 reg = CSR_READ(sc, WMREG_PHY_CTRL);
11518 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
11519 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11520
11521 /*
11522 * Call gig speed drop workaround on Gig disable before accessing
11523 * any PHY registers.
11524 */
11525 wm_gig_downshift_workaround_ich8lan(sc);
11526
11527 out:
11528 return;
11529 }
11530
11531 /* WOL from S5 stops working */
11532 static void
11533 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
11534 {
11535 uint16_t kmrn_reg;
11536
11537 /* Only for igp3 */
11538 if (sc->sc_phytype == WMPHY_IGP_3) {
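		/* Pulse the Kumeran near-end loopback (NELPBK) bit on and off */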
11539 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
11540 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
11541 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
11542 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
11543 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
11544 }
11545 }
11546
11547 /*
11548 * Workaround for pch's PHYs
11549 * XXX should be moved to new PHY driver?
11550 */
11551 static void
11552 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
11553 {
11554 if (sc->sc_phytype == WMPHY_82577)
11555 wm_set_mdio_slow_mode_hv(sc);
11556
11557 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
11558
11559 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
11560
11561 /* 82578 */
11562 if (sc->sc_phytype == WMPHY_82578) {
11563 /* PCH rev. < 3 */
11564 if (sc->sc_rev < 3) {
11565 /* XXX 6 bit shift? Why? Is it page2? */
11566 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
11567 0x66c0);
11568 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
11569 0xffff);
11570 }
11571
11572 /* XXX phy rev. < 2 */
11573 }
11574
11575 /* Select page 0 */
11576
11577 /* XXX acquire semaphore */
11578 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
11579 /* XXX release semaphore */
11580
11581 /*
11582 	 * Configure the K1 Si workaround during PHY reset, assuming there
11583 	 * is link, so that K1 is disabled when the link is at 1Gbps.
11584 */
11585 wm_k1_gig_workaround_hv(sc, 1);
11586 }

static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	wm_set_mdio_slow_mode_hv(sc);
}

static void
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	/* XXX acquire semaphore */

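	/*
	 * K1 is a Kumeran power-save state that apparently cannot be
	 * used while the link runs at 1Gbps, so it is forced off when
	 * the link is up.  XXX the IGP3_KMRN_DIAG values below are the
	 * link-stall fix magic from Intel's reference driver.
	 */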
	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);

	/* XXX release semaphore */
}

static void
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	uint32_t reg;

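	/*
	 * Set the slow-MDIO bit in the KMRN mode control register.
	 * XXX presumably PCH (82577/82578) PHY accesses need the slower
	 * MDIO timing to be reliable.
	 */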
	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

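	/*
	 * Latch the new K1 setting: briefly force the MAC speed with the
	 * PHY's speed indication bypassed, then restore the original
	 * CTRL/CTRL_EXT values, flushing and pausing after each step.
	 */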
	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);
}

/* Special case - the 82575 needs a manual init script after reset */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

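	/*
	 * Restore the MDIO destination (internal vs. external PHY) and
	 * shared-MDIO bits for this port from the NVM CFG3 word; XXX
	 * presumably a device reset cleared them in MDICNFG.
	 */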
	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
static void
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	int phyval;
	bool wa_done = false;
	int i;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return;
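
	/*
	 * While the PHY PLL reports itself unconfigured, reset the PHY,
	 * load a PLL workaround value into the iNVM autoload word and
	 * bounce the device through D3hot so the PLL reconfigures; give
	 * up after WM_MAX_PLL_TRIES attempts.
	 */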
	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			break; /* OK */
		}

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

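		/* Clear WUC, load the workaround autoload word into
		 * EEARBC and cycle the device D0 -> D3hot -> D0. */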
		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
}
