/*	$NetBSD: if_wm.c,v 1.376 2015/10/30 07:35:30 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- LPLU other than PCH*
 *	- TX Multi queue
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.376 2015/10/30 07:35:30 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define	WM_MPSAFE	1
#endif

/*
 * The maximum number of interrupts this driver can use.
 */
#define	WM_MAX_NTXINTR		16
#define	WM_MAX_NRXINTR		16
#define	WM_MAX_NINTR		(WM_MAX_NTXINTR + WM_MAX_NRXINTR + 1)

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCSIZE(txq)	(WM_NTXDESC(txq) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */
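
/*
 * The ring-index macros above rely on the descriptor and job counts
 * being powers of two, so advancing an index wraps with a single AND
 * of the mask instead of a modulo.  For example, with
 * WM_NTXDESC(txq) == 4096:
 *
 *	WM_NEXTTX(txq, 4095) == ((4095 + 1) & 4095) == 0
 *
 * This is also why txq_num and txq_ndesc below must be powers of two.
 */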

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
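
/*
 * As on the Tx side, WM_NRXDESC is a power of two so the index macros
 * can wrap with a mask: WM_PREVRX(0) == ((0 - 1) & 255) == 255, the
 * last descriptor in the ring.
 */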

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

#define	WM_CDTXOFF(x)	(sizeof(wiseman_txdesc_t) * (x))
#define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;

	int txq_id;			/* index of this transmit queue */
	int txq_intr_idx;		/* index into the MSI-X table */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* actual number of control segments */
	size_t txq_desc_size;		/* control data size */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/* XXX which event counter is required? */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;

	int rxq_id;			/* index of this receive queue */
	int rxq_intr_idx;		/* index into the MSI-X table */

	/* Software state for the receive descriptors. */
	wiseman_rxdesc_t *rxq_descs;

	/* RX control data structures. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* actual number of control segments */
	size_t rxq_desc_size;		/* control data size */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx descriptor/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	/* XXX which event counter is required? */
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index into the MSI-X table */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_ntxqueues;
	struct wm_txqueue *sc_txq;

	int sc_nrxqueues;
	struct wm_rxqueue *sc_rxq;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
};

#define	WM_TX_LOCK(_txq)	if ((_txq)->txq_lock) mutex_enter((_txq)->txq_lock)
#define	WM_TX_UNLOCK(_txq)	if ((_txq)->txq_lock) mutex_exit((_txq)->txq_lock)
#define	WM_TX_LOCKED(_txq)	(!(_txq)->txq_lock || mutex_owned((_txq)->txq_lock))
#define	WM_RX_LOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_enter((_rxq)->rxq_lock)
#define	WM_RX_UNLOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_exit((_rxq)->rxq_lock)
#define	WM_RX_LOCKED(_rxq)	(!(_rxq)->rxq_lock || mutex_owned((_rxq)->rxq_lock))
#define	WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define	WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define	WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
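
/*
 * All of these macros tolerate a NULL lock pointer: if a queue's
 * mutex was never created, locking degenerates to a no-op and the
 * corresponding *_LOCKED() check always succeeds.  Typical use:
 *
 *	WM_TX_LOCK(txq);
 *	... modify txq state ...
 *	WM_TX_UNLOCK(txq);
 */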

#ifdef WM_MPSAFE
#define	CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define	CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
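
/*
 * rxq_tailp always points at the m_next field of the last mbuf in the
 * chain (or at rxq_head while the chain is empty), so
 * WM_RXCHAIN_LINK() appends in O(1) without walking the chain.  A
 * multi-buffer packet is assembled roughly like this:
 *
 *	WM_RXCHAIN_RESET(rxq);
 *	WM_RXCHAIN_LINK(rxq, m1);	(rxq_head == m1)
 *	WM_RXCHAIN_LINK(rxq, m2);	(m1->m_next == m2)
 */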

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
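
/*
 * CSR_WRITE_FLUSH() forces any posted PCI writes out to the chip by
 * doing a harmless read of the STATUS register; PCI reads cannot be
 * posted, so all preceding writes must complete first.  A typical
 * sequence is:
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 *	CSR_WRITE_FLUSH(sc);
 */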

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
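
/*
 * The _LO/_HI pairs split a ring's bus address into the two 32-bit
 * halves that the chip's base-address register pairs (e.g.
 * TDBAL/TDBAH) expect.  When bus_addr_t is 32 bits wide the high half
 * is a compile-time zero, and the (uint64_t) cast avoids an undefined
 * 32-bit shift by 32.
 */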

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
	uint32_t *, uint8_t *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
	uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static int	wm_txintr_msix(void *);
static int	wm_rxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
	unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define	WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
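
/*
 * The I/O BAR is an indirection window: the target register offset is
 * written at BAR offset 0 and the data is then transferred through
 * BAR offset 4.  wm_io_write(sc, reg, val) therefore has the same
 * effect as CSR_WRITE(sc, reg, val), but via I/O space; it is used
 * only for chip-bug workarounds, never for normal operation.
 */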

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
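
/*
 * wm_set_dma_addr() stores a bus address into the two little-endian
 * 32-bit halves of a descriptor address field, zeroing the high half
 * when bus_addr_t is only 32 bits wide.  Both arms of the sizeof()
 * test are compile-time constant, so the compiler can discard the
 * dead branch.
 */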

/*
 * Descriptor sync/init functions.
 */
static inline void
wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
{
	struct wm_softc *sc = txq->txq_sc;

	/* If it will wrap around, sync to the end of the ring. */
	if ((start + num) > WM_NTXDESC(txq)) {
		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
		    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) *
		    (WM_NTXDESC(txq) - start), ops);
		num -= (WM_NTXDESC(txq) - start);
		start = 0;
	}

	/* Now sync whatever is left. */
	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
	    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) * num, ops);
}
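
/*
 * Because the descriptor ring is circular, a sync that spans the end
 * of the ring must be split in two.  For example, on a 4096-entry
 * ring, wm_cdtxsync(txq, 4094, 4, ops) syncs descriptors 4094-4095
 * first and then descriptors 0-1.
 */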

static inline void
wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
{
	struct wm_softc *sc = rxq->rxq_sc;

	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
}

static inline void
wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
{
	struct wm_softc *sc = rxq->rxq_sc;
	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
	struct mbuf *m = rxs->rxs_mbuf;

	/*
	 * Note: We scoot the packet forward 2 bytes in the buffer
	 * so that the payload after the Ethernet header is aligned
	 * to a 4-byte boundary.
	 *
	 * XXX BRAINDAMAGE ALERT!
	 * The stupid chip uses the same size for every buffer, which
	 * is set in the Receive Control register.  We are using the 2K
	 * size option, but what we REALLY want is (2K - 2)!  For this
	 * reason, we can't "scoot" packets longer than the standard
	 * Ethernet MTU.  On strict-alignment platforms, if the total
	 * size exceeds (2K - 2) we set align_tweak to 0 and let
	 * the upper layer copy the headers.
	 */
	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;

	wm_set_dma_addr(&rxd->wrx_addr,
	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
	rxd->wrx_len = 0;
	rxd->wrx_cksum = 0;
	rxd->wrx_status = 0;
	rxd->wrx_errors = 0;
	rxd->wrx_special = 0;
	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
}

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, nvmword;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Disable MSI for Errata:
	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
	 *
	 *  82544: Errata 25
	 *  82540: Errata  6 (easy to reproduce device timeout)
	 *  82545: Errata  4 (easy to reproduce device timeout)
	 *  82546: Errata 26 (easy to reproduce device timeout)
	 *  82541: Errata  7 (easy to reproduce device timeout)
	 *
	 * "Byte Enables 2 and 3 are not set on MSI writes"
	 *
	 *  82571 & 82572: Errata 63
	 */
	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
	    || (sc->sc_type == WM_T_82572))
		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

1534 /*
1535 * Map the device. All devices support memory-mapped access,
1536 * and it is really required for normal operation.
1537 */
1538 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1539 switch (memtype) {
1540 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1541 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1542 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1543 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1544 break;
1545 default:
1546 memh_valid = 0;
1547 break;
1548 }
1549
1550 if (memh_valid) {
1551 sc->sc_st = memt;
1552 sc->sc_sh = memh;
1553 sc->sc_ss = memsize;
1554 } else {
1555 aprint_error_dev(sc->sc_dev,
1556 "unable to map device registers\n");
1557 return;
1558 }
1559
1560 /*
1561 * In addition, i82544 and later support I/O mapped indirect
1562 * register access. It is not desirable (nor supported in
1563 * this driver) to use it for normal operation, though it is
1564 * required to work around bugs in some chip versions.
1565 */
1566 if (sc->sc_type >= WM_T_82544) {
1567 /* First we have to find the I/O BAR. */
1568 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1569 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1570 if (memtype == PCI_MAPREG_TYPE_IO)
1571 break;
1572 if (PCI_MAPREG_MEM_TYPE(memtype) ==
1573 PCI_MAPREG_MEM_TYPE_64BIT)
1574 i += 4; /* skip high bits, too */
1575 }
1576 if (i < PCI_MAPREG_END) {
1577 /*
1578 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1579 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1580 * That's no problem, because the newer chips don't
1581 * have this bug.
1582 *
1583 * The i8254x apparently doesn't respond when the
1584 * I/O BAR is 0, which looks as if it hasn't been
1585 * configured.
1586 */
1587 preg = pci_conf_read(pc, pa->pa_tag, i);
1588 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1589 aprint_error_dev(sc->sc_dev,
1590 "WARNING: I/O BAR at zero.\n");
1591 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1592 0, &sc->sc_iot, &sc->sc_ioh,
1593 NULL, &sc->sc_ios) == 0) {
1594 sc->sc_flags |= WM_F_IOH_VALID;
1595 } else {
1596 aprint_error_dev(sc->sc_dev,
1597 "WARNING: unable to map I/O space\n");
1598 }
1599 }
1600
1601 }
1602
1603 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1604 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1605 preg |= PCI_COMMAND_MASTER_ENABLE;
1606 if (sc->sc_type < WM_T_82542_2_1)
1607 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1608 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1609
1610 /* power up chip */
1611 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1612 NULL)) && error != EOPNOTSUPP) {
1613 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1614 return;
1615 }
1616
1617 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1618
1619 /* Allocation settings */
1620 max_type = PCI_INTR_TYPE_MSIX;
1621 counts[PCI_INTR_TYPE_MSIX] = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
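/* One vector per Tx queue and per Rx queue, plus one for the link interrupt. */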
1622 counts[PCI_INTR_TYPE_MSI] = 1;
1623 counts[PCI_INTR_TYPE_INTX] = 1;
1624
1625 alloc_retry:
1626 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1627 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1628 return;
1629 }
1630
1631 if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1632 error = wm_setup_msix(sc);
1633 if (error) {
1634 pci_intr_release(pc, sc->sc_intrs,
1635 counts[PCI_INTR_TYPE_MSIX]);
1636
1637 /* Setup for MSI: Disable MSI-X */
1638 max_type = PCI_INTR_TYPE_MSI;
1639 counts[PCI_INTR_TYPE_MSI] = 1;
1640 counts[PCI_INTR_TYPE_INTX] = 1;
1641 goto alloc_retry;
1642 }
1643 } else if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1644 wm_adjust_qnum(sc, 0); /* must not use multiqueue */
1645 error = wm_setup_legacy(sc);
1646 if (error) {
1647 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1648 counts[PCI_INTR_TYPE_MSI]);
1649
1650 /* The next try is for INTx: Disable MSI */
1651 max_type = PCI_INTR_TYPE_INTX;
1652 counts[PCI_INTR_TYPE_INTX] = 1;
1653 goto alloc_retry;
1654 }
1655 } else {
1656 wm_adjust_qnum(sc, 0); /* must not use multiqueue */
1657 error = wm_setup_legacy(sc);
1658 if (error) {
1659 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1660 counts[PCI_INTR_TYPE_INTX]);
1661 return;
1662 }
1663 }
1664
1665 /*
1666 * Check the function ID (unit number of the chip).
1667 */
1668 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1669 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1670 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1671 || (sc->sc_type == WM_T_82580)
1672 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1673 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1674 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1675 else
1676 sc->sc_funcid = 0;
1677
1678 /*
1679 * Determine a few things about the bus we're connected to.
1680 */
1681 if (sc->sc_type < WM_T_82543) {
1682 /* We don't really know the bus characteristics here. */
1683 sc->sc_bus_speed = 33;
1684 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1685 /*
1686 * CSA (Communication Streaming Architecture) is about as fast
1687 * as a 32-bit 66MHz PCI bus.
1688 */
1689 sc->sc_flags |= WM_F_CSA;
1690 sc->sc_bus_speed = 66;
1691 aprint_verbose_dev(sc->sc_dev,
1692 "Communication Streaming Architecture\n");
1693 if (sc->sc_type == WM_T_82547) {
1694 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1695 callout_setfunc(&sc->sc_txfifo_ch,
1696 wm_82547_txfifo_stall, sc);
1697 aprint_verbose_dev(sc->sc_dev,
1698 "using 82547 Tx FIFO stall work-around\n");
1699 }
1700 } else if (sc->sc_type >= WM_T_82571) {
1701 sc->sc_flags |= WM_F_PCIE;
1702 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1703 && (sc->sc_type != WM_T_ICH10)
1704 && (sc->sc_type != WM_T_PCH)
1705 && (sc->sc_type != WM_T_PCH2)
1706 && (sc->sc_type != WM_T_PCH_LPT)) {
1707 /* ICH* and PCH* have no PCIe capability registers */
1708 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1709 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1710 NULL) == 0)
1711 aprint_error_dev(sc->sc_dev,
1712 "unable to find PCIe capability\n");
1713 }
1714 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1715 } else {
1716 reg = CSR_READ(sc, WMREG_STATUS);
1717 if (reg & STATUS_BUS64)
1718 sc->sc_flags |= WM_F_BUS64;
1719 if ((reg & STATUS_PCIX_MODE) != 0) {
1720 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1721
1722 sc->sc_flags |= WM_F_PCIX;
1723 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1724 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1725 aprint_error_dev(sc->sc_dev,
1726 "unable to find PCIX capability\n");
1727 else if (sc->sc_type != WM_T_82545_3 &&
1728 sc->sc_type != WM_T_82546_3) {
1729 /*
1730 * Work around a problem caused by the BIOS
1731 * setting the max memory read byte count
1732 * incorrectly.
1733 */
1734 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1735 sc->sc_pcixe_capoff + PCIX_CMD);
1736 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1737 sc->sc_pcixe_capoff + PCIX_STATUS);
1738
1739 bytecnt =
1740 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1741 PCIX_CMD_BYTECNT_SHIFT;
1742 maxb =
1743 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1744 PCIX_STATUS_MAXB_SHIFT;
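/*
 * Both fields encode the byte count as a power of two:
 * 0 -> 512, 1 -> 1024, 2 -> 2048, 3 -> 4096 bytes (hence
 * the "512 << n" in the message below).
 */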
1745 if (bytecnt > maxb) {
1746 aprint_verbose_dev(sc->sc_dev,
1747 "resetting PCI-X MMRBC: %d -> %d\n",
1748 512 << bytecnt, 512 << maxb);
1749 pcix_cmd = (pcix_cmd &
1750 ~PCIX_CMD_BYTECNT_MASK) |
1751 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1752 pci_conf_write(pa->pa_pc, pa->pa_tag,
1753 sc->sc_pcixe_capoff + PCIX_CMD,
1754 pcix_cmd);
1755 }
1756 }
1757 }
1758 /*
1759 * The quad port adapter is special; it has a PCIX-PCIX
1760 * bridge on the board, and can run the secondary bus at
1761 * a higher speed.
1762 */
1763 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1764 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1765 : 66;
1766 } else if (sc->sc_flags & WM_F_PCIX) {
1767 switch (reg & STATUS_PCIXSPD_MASK) {
1768 case STATUS_PCIXSPD_50_66:
1769 sc->sc_bus_speed = 66;
1770 break;
1771 case STATUS_PCIXSPD_66_100:
1772 sc->sc_bus_speed = 100;
1773 break;
1774 case STATUS_PCIXSPD_100_133:
1775 sc->sc_bus_speed = 133;
1776 break;
1777 default:
1778 aprint_error_dev(sc->sc_dev,
1779 "unknown PCIXSPD %d; assuming 66MHz\n",
1780 reg & STATUS_PCIXSPD_MASK);
1781 sc->sc_bus_speed = 66;
1782 break;
1783 }
1784 } else
1785 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1786 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1787 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1788 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1789 }
1790
1791 /* clear interesting stat counters */
1792 CSR_READ(sc, WMREG_COLC);
1793 CSR_READ(sc, WMREG_RXERRC);
1794
1795 /* get PHY control from SMBus to PCIe */
1796 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1797 || (sc->sc_type == WM_T_PCH_LPT))
1798 wm_smbustopci(sc);
1799
1800 /* Reset the chip to a known state. */
1801 wm_reset(sc);
1802
1803 /* Get some information about the EEPROM. */
1804 switch (sc->sc_type) {
1805 case WM_T_82542_2_0:
1806 case WM_T_82542_2_1:
1807 case WM_T_82543:
1808 case WM_T_82544:
1809 /* Microwire */
1810 sc->sc_nvm_wordsize = 64;
1811 sc->sc_nvm_addrbits = 6;
1812 break;
1813 case WM_T_82540:
1814 case WM_T_82545:
1815 case WM_T_82545_3:
1816 case WM_T_82546:
1817 case WM_T_82546_3:
1818 /* Microwire */
1819 reg = CSR_READ(sc, WMREG_EECD);
1820 if (reg & EECD_EE_SIZE) {
1821 sc->sc_nvm_wordsize = 256;
1822 sc->sc_nvm_addrbits = 8;
1823 } else {
1824 sc->sc_nvm_wordsize = 64;
1825 sc->sc_nvm_addrbits = 6;
1826 }
1827 sc->sc_flags |= WM_F_LOCK_EECD;
1828 break;
1829 case WM_T_82541:
1830 case WM_T_82541_2:
1831 case WM_T_82547:
1832 case WM_T_82547_2:
1833 sc->sc_flags |= WM_F_LOCK_EECD;
1834 reg = CSR_READ(sc, WMREG_EECD);
1835 if (reg & EECD_EE_TYPE) {
1836 /* SPI */
1837 sc->sc_flags |= WM_F_EEPROM_SPI;
1838 wm_nvm_set_addrbits_size_eecd(sc);
1839 } else {
1840 /* Microwire */
1841 if ((reg & EECD_EE_ABITS) != 0) {
1842 sc->sc_nvm_wordsize = 256;
1843 sc->sc_nvm_addrbits = 8;
1844 } else {
1845 sc->sc_nvm_wordsize = 64;
1846 sc->sc_nvm_addrbits = 6;
1847 }
1848 }
1849 break;
1850 case WM_T_82571:
1851 case WM_T_82572:
1852 /* SPI */
1853 sc->sc_flags |= WM_F_EEPROM_SPI;
1854 wm_nvm_set_addrbits_size_eecd(sc);
1855 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1856 break;
1857 case WM_T_82573:
1858 sc->sc_flags |= WM_F_LOCK_SWSM;
1859 /* FALLTHROUGH */
1860 case WM_T_82574:
1861 case WM_T_82583:
1862 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1863 sc->sc_flags |= WM_F_EEPROM_FLASH;
1864 sc->sc_nvm_wordsize = 2048;
1865 } else {
1866 /* SPI */
1867 sc->sc_flags |= WM_F_EEPROM_SPI;
1868 wm_nvm_set_addrbits_size_eecd(sc);
1869 }
1870 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1871 break;
1872 case WM_T_82575:
1873 case WM_T_82576:
1874 case WM_T_82580:
1875 case WM_T_I350:
1876 case WM_T_I354:
1877 case WM_T_80003:
1878 /* SPI */
1879 sc->sc_flags |= WM_F_EEPROM_SPI;
1880 wm_nvm_set_addrbits_size_eecd(sc);
1881 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1882 | WM_F_LOCK_SWSM;
1883 break;
1884 case WM_T_ICH8:
1885 case WM_T_ICH9:
1886 case WM_T_ICH10:
1887 case WM_T_PCH:
1888 case WM_T_PCH2:
1889 case WM_T_PCH_LPT:
1890 /* FLASH */
1891 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1892 sc->sc_nvm_wordsize = 2048;
1893 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1894 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1895 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
1896 aprint_error_dev(sc->sc_dev,
1897 "can't map FLASH registers\n");
1898 goto out;
1899 }
1900 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1901 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1902 ICH_FLASH_SECTOR_SIZE;
1903 sc->sc_ich8_flash_bank_size =
1904 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1905 sc->sc_ich8_flash_bank_size -=
1906 (reg & ICH_GFPREG_BASE_MASK);
1907 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1908 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
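/*
 * Worked example (hypothetical GFPREG value, assuming the usual
 * 4KB flash sector): with a base field of 1 and a limit field of 4,
 * the region spans 4 sectors (16KB). That space backs two NVM
 * banks, so the arithmetic above yields 16KB / 2 banks /
 * sizeof(uint16_t) = 4096 words per bank.
 */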
1909 break;
1910 case WM_T_I210:
1911 case WM_T_I211:
1912 if (wm_nvm_get_flash_presence_i210(sc)) {
1913 wm_nvm_set_addrbits_size_eecd(sc);
1914 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1915 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1916 } else {
1917 sc->sc_nvm_wordsize = INVM_SIZE;
1918 sc->sc_flags |= WM_F_EEPROM_INVM;
1919 sc->sc_flags |= WM_F_LOCK_SWFW;
1920 }
1921 break;
1922 default:
1923 break;
1924 }
1925
1926 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1927 switch (sc->sc_type) {
1928 case WM_T_82571:
1929 case WM_T_82572:
1930 reg = CSR_READ(sc, WMREG_SWSM2);
1931 if ((reg & SWSM2_LOCK) == 0) {
1932 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1933 force_clear_smbi = true;
1934 } else
1935 force_clear_smbi = false;
1936 break;
1937 case WM_T_82573:
1938 case WM_T_82574:
1939 case WM_T_82583:
1940 force_clear_smbi = true;
1941 break;
1942 default:
1943 force_clear_smbi = false;
1944 break;
1945 }
1946 if (force_clear_smbi) {
1947 reg = CSR_READ(sc, WMREG_SWSM);
1948 if ((reg & SWSM_SMBI) != 0)
1949 aprint_error_dev(sc->sc_dev,
1950 "Please update the Bootagent\n");
1951 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1952 }
1953
1954 /*
1955 * Defer printing the EEPROM type until after verifying the checksum.
1956 * This allows the EEPROM type to be printed correctly in the case
1957 * that no EEPROM is attached.
1958 */
1959 /*
1960 * Validate the EEPROM checksum. If the checksum fails, flag
1961 * this for later, so we can fail future reads from the EEPROM.
1962 */
1963 if (wm_nvm_validate_checksum(sc)) {
1964 /*
1965 * Validate again, because some PCI-e parts fail the
1966 * first check due to the link being in a sleep state.
1967 */
1968 if (wm_nvm_validate_checksum(sc))
1969 sc->sc_flags |= WM_F_EEPROM_INVALID;
1970 }
1971
1972 /* Set device properties (macflags) */
1973 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1974
1975 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1976 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
1977 else {
1978 aprint_verbose_dev(sc->sc_dev, "%u words ",
1979 sc->sc_nvm_wordsize);
1980 if (sc->sc_flags & WM_F_EEPROM_INVM)
1981 aprint_verbose("iNVM");
1982 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
1983 aprint_verbose("FLASH(HW)");
1984 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
1985 aprint_verbose("FLASH");
1986 else {
1987 if (sc->sc_flags & WM_F_EEPROM_SPI)
1988 eetype = "SPI";
1989 else
1990 eetype = "MicroWire";
1991 aprint_verbose("(%d address bits) %s EEPROM",
1992 sc->sc_nvm_addrbits, eetype);
1993 }
1994 }
1995 wm_nvm_version(sc);
1996 aprint_verbose("\n");
1997
1998 /* Check for I21[01] PLL workaround */
1999 if (sc->sc_type == WM_T_I210)
2000 sc->sc_flags |= WM_F_PLL_WA_I210;
2001 if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2002 /* NVM image release 3.25 has a workaround */
2003 if ((sc->sc_nvm_ver_major < 3)
2004 || ((sc->sc_nvm_ver_major == 3)
2005 && (sc->sc_nvm_ver_minor < 25))) {
2006 aprint_verbose_dev(sc->sc_dev,
2007 "ROM image version %d.%d is older than 3.25\n",
2008 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2009 sc->sc_flags |= WM_F_PLL_WA_I210;
2010 }
2011 }
2012 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2013 wm_pll_workaround_i210(sc);
2014
2015 switch (sc->sc_type) {
2016 case WM_T_82571:
2017 case WM_T_82572:
2018 case WM_T_82573:
2019 case WM_T_82574:
2020 case WM_T_82583:
2021 case WM_T_80003:
2022 case WM_T_ICH8:
2023 case WM_T_ICH9:
2024 case WM_T_ICH10:
2025 case WM_T_PCH:
2026 case WM_T_PCH2:
2027 case WM_T_PCH_LPT:
2028 if (wm_check_mng_mode(sc) != 0)
2029 wm_get_hw_control(sc);
2030 break;
2031 default:
2032 break;
2033 }
2034 wm_get_wakeup(sc);
2035 /*
2036 * Read the Ethernet address from the device properties if
2037 * present; otherwise read it from the EEPROM.
2038 */
2039 ea = prop_dictionary_get(dict, "mac-address");
2040 if (ea != NULL) {
2041 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2042 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2043 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2044 } else {
2045 if (wm_read_mac_addr(sc, enaddr) != 0) {
2046 aprint_error_dev(sc->sc_dev,
2047 "unable to read Ethernet address\n");
2048 goto out;
2049 }
2050 }
2051
2052 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2053 ether_sprintf(enaddr));
2054
2055 /*
2056 * Read the config info from the EEPROM, and set up various
2057 * bits in the control registers based on their contents.
2058 */
2059 pn = prop_dictionary_get(dict, "i82543-cfg1");
2060 if (pn != NULL) {
2061 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2062 cfg1 = (uint16_t) prop_number_integer_value(pn);
2063 } else {
2064 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2065 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2066 goto out;
2067 }
2068 }
2069
2070 pn = prop_dictionary_get(dict, "i82543-cfg2");
2071 if (pn != NULL) {
2072 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2073 cfg2 = (uint16_t) prop_number_integer_value(pn);
2074 } else {
2075 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2076 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2077 goto out;
2078 }
2079 }
2080
2081 /* check for WM_F_WOL */
2082 switch (sc->sc_type) {
2083 case WM_T_82542_2_0:
2084 case WM_T_82542_2_1:
2085 case WM_T_82543:
2086 /* dummy? */
2087 eeprom_data = 0;
2088 apme_mask = NVM_CFG3_APME;
2089 break;
2090 case WM_T_82544:
2091 apme_mask = NVM_CFG2_82544_APM_EN;
2092 eeprom_data = cfg2;
2093 break;
2094 case WM_T_82546:
2095 case WM_T_82546_3:
2096 case WM_T_82571:
2097 case WM_T_82572:
2098 case WM_T_82573:
2099 case WM_T_82574:
2100 case WM_T_82583:
2101 case WM_T_80003:
2102 default:
2103 apme_mask = NVM_CFG3_APME;
2104 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2105 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2106 break;
2107 case WM_T_82575:
2108 case WM_T_82576:
2109 case WM_T_82580:
2110 case WM_T_I350:
2111 case WM_T_I354: /* XXX ok? */
2112 case WM_T_ICH8:
2113 case WM_T_ICH9:
2114 case WM_T_ICH10:
2115 case WM_T_PCH:
2116 case WM_T_PCH2:
2117 case WM_T_PCH_LPT:
2118 /* XXX The funcid should be checked on some devices */
2119 apme_mask = WUC_APME;
2120 eeprom_data = CSR_READ(sc, WMREG_WUC);
2121 break;
2122 }
2123
2124 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2125 if ((eeprom_data & apme_mask) != 0)
2126 sc->sc_flags |= WM_F_WOL;
2127 #ifdef WM_DEBUG
2128 if ((sc->sc_flags & WM_F_WOL) != 0)
2129 printf("WOL\n");
2130 #endif
2131
2132 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2133 /* Check NVM for autonegotiation */
2134 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2135 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2136 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2137 }
2138 }
2139
2140 /*
2141 * XXX need special handling for some multiple port cards
2142 * to disable a particular port.
2143 */
2144
2145 if (sc->sc_type >= WM_T_82544) {
2146 pn = prop_dictionary_get(dict, "i82543-swdpin");
2147 if (pn != NULL) {
2148 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2149 swdpin = (uint16_t) prop_number_integer_value(pn);
2150 } else {
2151 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2152 aprint_error_dev(sc->sc_dev,
2153 "unable to read SWDPIN\n");
2154 goto out;
2155 }
2156 }
2157 }
2158
2159 if (cfg1 & NVM_CFG1_ILOS)
2160 sc->sc_ctrl |= CTRL_ILOS;
2161
2162 /*
2163 * XXX
2164 * This code isn't correct because pins 2 and 3 are located
2165 * at different positions on newer chips. Check all the datasheets.
2166 *
2167 * Until this problem is resolved, only do this for chips up to the 82580.
2168 */
2169 if (sc->sc_type <= WM_T_82580) {
2170 if (sc->sc_type >= WM_T_82544) {
2171 sc->sc_ctrl |=
2172 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2173 CTRL_SWDPIO_SHIFT;
2174 sc->sc_ctrl |=
2175 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2176 CTRL_SWDPINS_SHIFT;
2177 } else {
2178 sc->sc_ctrl |=
2179 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2180 CTRL_SWDPIO_SHIFT;
2181 }
2182 }
2183
2184 /* XXX For other than 82580? */
2185 if (sc->sc_type == WM_T_82580) {
2186 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2187 printf("CFG3 = %08x\n", (uint32_t)nvmword);
2188 if (nvmword & __BIT(13)) {
2189 printf("SET ILOS\n");
2190 sc->sc_ctrl |= CTRL_ILOS;
2191 }
2192 }
2193
2194 #if 0
2195 if (sc->sc_type >= WM_T_82544) {
2196 if (cfg1 & NVM_CFG1_IPS0)
2197 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2198 if (cfg1 & NVM_CFG1_IPS1)
2199 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2200 sc->sc_ctrl_ext |=
2201 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2202 CTRL_EXT_SWDPIO_SHIFT;
2203 sc->sc_ctrl_ext |=
2204 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2205 CTRL_EXT_SWDPINS_SHIFT;
2206 } else {
2207 sc->sc_ctrl_ext |=
2208 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2209 CTRL_EXT_SWDPIO_SHIFT;
2210 }
2211 #endif
2212
2213 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2214 #if 0
2215 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2216 #endif
2217
2218 if (sc->sc_type == WM_T_PCH) {
2219 uint16_t val;
2220
2221 /* Save the NVM K1 bit setting */
2222 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2223
2224 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2225 sc->sc_nvm_k1_enabled = 1;
2226 else
2227 sc->sc_nvm_k1_enabled = 0;
2228 }
2229
2230 /*
2231 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
2232 * media structures accordingly.
2233 */
2234 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2235 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2236 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2237 || sc->sc_type == WM_T_82573
2238 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2239 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2240 wm_gmii_mediainit(sc, wmp->wmp_product);
2241 } else if (sc->sc_type < WM_T_82543 ||
2242 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2243 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2244 aprint_error_dev(sc->sc_dev,
2245 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2246 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2247 }
2248 wm_tbi_mediainit(sc);
2249 } else {
2250 switch (sc->sc_type) {
2251 case WM_T_82575:
2252 case WM_T_82576:
2253 case WM_T_82580:
2254 case WM_T_I350:
2255 case WM_T_I354:
2256 case WM_T_I210:
2257 case WM_T_I211:
2258 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2259 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2260 switch (link_mode) {
2261 case CTRL_EXT_LINK_MODE_1000KX:
2262 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2263 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2264 break;
2265 case CTRL_EXT_LINK_MODE_SGMII:
2266 if (wm_sgmii_uses_mdio(sc)) {
2267 aprint_verbose_dev(sc->sc_dev,
2268 "SGMII(MDIO)\n");
2269 sc->sc_flags |= WM_F_SGMII;
2270 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2271 break;
2272 }
2273 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2274 /*FALLTHROUGH*/
2275 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2276 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2277 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2278 if (link_mode
2279 == CTRL_EXT_LINK_MODE_SGMII) {
2280 sc->sc_mediatype
2281 = WM_MEDIATYPE_COPPER;
2282 sc->sc_flags |= WM_F_SGMII;
2283 } else {
2284 sc->sc_mediatype
2285 = WM_MEDIATYPE_SERDES;
2286 aprint_verbose_dev(sc->sc_dev,
2287 "SERDES\n");
2288 }
2289 break;
2290 }
2291 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2292 aprint_verbose_dev(sc->sc_dev,
2293 "SERDES\n");
2294
2295 /* Change current link mode setting */
2296 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2297 switch (sc->sc_mediatype) {
2298 case WM_MEDIATYPE_COPPER:
2299 reg |= CTRL_EXT_LINK_MODE_SGMII;
2300 break;
2301 case WM_MEDIATYPE_SERDES:
2302 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2303 break;
2304 default:
2305 break;
2306 }
2307 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2308 break;
2309 case CTRL_EXT_LINK_MODE_GMII:
2310 default:
2311 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2312 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2313 break;
2314 }
2315
2316 reg &= ~CTRL_EXT_I2C_ENA;
2317 if ((sc->sc_flags & WM_F_SGMII) != 0)
2318 reg |= CTRL_EXT_I2C_ENA;
2319 else
2320 reg &= ~CTRL_EXT_I2C_ENA;
2321 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2322
2323 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2324 wm_gmii_mediainit(sc, wmp->wmp_product);
2325 else
2326 wm_tbi_mediainit(sc);
2327 break;
2328 default:
2329 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2330 aprint_error_dev(sc->sc_dev,
2331 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2332 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2333 wm_gmii_mediainit(sc, wmp->wmp_product);
2334 }
2335 }
2336
2337 ifp = &sc->sc_ethercom.ec_if;
2338 xname = device_xname(sc->sc_dev);
2339 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2340 ifp->if_softc = sc;
2341 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2342 ifp->if_ioctl = wm_ioctl;
2343 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2344 ifp->if_start = wm_nq_start;
2345 else
2346 ifp->if_start = wm_start;
2347 ifp->if_watchdog = wm_watchdog;
2348 ifp->if_init = wm_init;
2349 ifp->if_stop = wm_stop;
2350 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2351 IFQ_SET_READY(&ifp->if_snd);
2352
2353 /* Check for jumbo frame */
2354 switch (sc->sc_type) {
2355 case WM_T_82573:
2356 /* XXX limited to 9234 if ASPM is disabled */
2357 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2358 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2359 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2360 break;
2361 case WM_T_82571:
2362 case WM_T_82572:
2363 case WM_T_82574:
2364 case WM_T_82575:
2365 case WM_T_82576:
2366 case WM_T_82580:
2367 case WM_T_I350:
2368 case WM_T_I354: /* XXXX ok? */
2369 case WM_T_I210:
2370 case WM_T_I211:
2371 case WM_T_80003:
2372 case WM_T_ICH9:
2373 case WM_T_ICH10:
2374 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2375 case WM_T_PCH_LPT:
2376 /* XXX limited to 9234 */
2377 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2378 break;
2379 case WM_T_PCH:
2380 /* XXX limited to 4096 */
2381 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2382 break;
2383 case WM_T_82542_2_0:
2384 case WM_T_82542_2_1:
2385 case WM_T_82583:
2386 case WM_T_ICH8:
2387 /* No support for jumbo frame */
2388 break;
2389 default:
2390 /* ETHER_MAX_LEN_JUMBO */
2391 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2392 break;
2393 }
2394
2395 /* If we're a i82543 or greater, we can support VLANs. */
2396 if (sc->sc_type >= WM_T_82543)
2397 sc->sc_ethercom.ec_capabilities |=
2398 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2399
2400 /*
2401 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
2402 * on i82543 and later.
2403 */
2404 if (sc->sc_type >= WM_T_82543) {
2405 ifp->if_capabilities |=
2406 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2407 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2408 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2409 IFCAP_CSUM_TCPv6_Tx |
2410 IFCAP_CSUM_UDPv6_Tx;
2411 }
2412
2413 /*
2414 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2415 *
2416 * 82541GI (8086:1076) ... no
2417 * 82572EI (8086:10b9) ... yes
2418 */
2419 if (sc->sc_type >= WM_T_82571) {
2420 ifp->if_capabilities |=
2421 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2422 }
2423
2424 /*
2425 * If we're a i82544 or greater (except i82547), we can do
2426 * TCP segmentation offload.
2427 */
2428 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2429 ifp->if_capabilities |= IFCAP_TSOv4;
2430 }
2431
2432 if (sc->sc_type >= WM_T_82571) {
2433 ifp->if_capabilities |= IFCAP_TSOv6;
2434 }
2435
2436 #ifdef WM_MPSAFE
2437 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2438 #else
2439 sc->sc_core_lock = NULL;
2440 #endif
2441
2442 /* Attach the interface. */
2443 if_attach(ifp);
2444 ether_ifattach(ifp, enaddr);
2445 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2446 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2447 RND_FLAG_DEFAULT);
2448
2449 #ifdef WM_EVENT_COUNTERS
2450 /* Attach event counters. */
2451 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2452 NULL, xname, "txsstall");
2453 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2454 NULL, xname, "txdstall");
2455 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2456 NULL, xname, "txfifo_stall");
2457 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2458 NULL, xname, "txdw");
2459 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2460 NULL, xname, "txqe");
2461 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2462 NULL, xname, "rxintr");
2463 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2464 NULL, xname, "linkintr");
2465
2466 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2467 NULL, xname, "rxipsum");
2468 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2469 NULL, xname, "rxtusum");
2470 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2471 NULL, xname, "txipsum");
2472 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2473 NULL, xname, "txtusum");
2474 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2475 NULL, xname, "txtusum6");
2476
2477 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2478 NULL, xname, "txtso");
2479 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2480 NULL, xname, "txtso6");
2481 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2482 NULL, xname, "txtsopain");
2483
2484 for (i = 0; i < WM_NTXSEGS; i++) {
2485 snprintf(wm_txseg_evcnt_names[i],
2486 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2487 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2488 NULL, xname, wm_txseg_evcnt_names[i]);
2489 }
2490
2491 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2492 NULL, xname, "txdrop");
2493
2494 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2495 NULL, xname, "tu");
2496
2497 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2498 NULL, xname, "tx_xoff");
2499 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2500 NULL, xname, "tx_xon");
2501 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2502 NULL, xname, "rx_xoff");
2503 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2504 NULL, xname, "rx_xon");
2505 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2506 NULL, xname, "rx_macctl");
2507 #endif /* WM_EVENT_COUNTERS */
2508
2509 if (pmf_device_register(self, wm_suspend, wm_resume))
2510 pmf_class_network_register(self, ifp);
2511 else
2512 aprint_error_dev(self, "couldn't establish power handler\n");
2513
2514 sc->sc_flags |= WM_F_ATTACHED;
2515 out:
2516 return;
2517 }
2518
2519 /* The detach function (ca_detach) */
2520 static int
2521 wm_detach(device_t self, int flags __unused)
2522 {
2523 struct wm_softc *sc = device_private(self);
2524 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2525 int i;
2526 #ifndef WM_MPSAFE
2527 int s;
2528 #endif
2529
2530 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2531 return 0;
2532
2533 #ifndef WM_MPSAFE
2534 s = splnet();
2535 #endif
2536 /* Stop the interface. Callouts are stopped in it. */
2537 wm_stop(ifp, 1);
2538
2539 #ifndef WM_MPSAFE
2540 splx(s);
2541 #endif
2542
2543 pmf_device_deregister(self);
2544
2545 /* Tell the firmware about the release */
2546 WM_CORE_LOCK(sc);
2547 wm_release_manageability(sc);
2548 wm_release_hw_control(sc);
2549 WM_CORE_UNLOCK(sc);
2550
2551 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2552
2553 /* Delete all remaining media. */
2554 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2555
2556 ether_ifdetach(ifp);
2557 if_detach(ifp);
2558
2560 /* Unload RX dmamaps and free mbufs */
2561 for (i = 0; i < sc->sc_nrxqueues; i++) {
2562 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
2563 WM_RX_LOCK(rxq);
2564 wm_rxdrain(rxq);
2565 WM_RX_UNLOCK(rxq);
2566 }
2567 /* Must unlock here */
2568
2569 wm_free_txrx_queues(sc);
2570
2571 /* Disestablish the interrupt handler */
2572 for (i = 0; i < sc->sc_nintrs; i++) {
2573 if (sc->sc_ihs[i] != NULL) {
2574 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2575 sc->sc_ihs[i] = NULL;
2576 }
2577 }
2578 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2579
2580 /* Unmap the registers */
2581 if (sc->sc_ss) {
2582 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2583 sc->sc_ss = 0;
2584 }
2585 if (sc->sc_ios) {
2586 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2587 sc->sc_ios = 0;
2588 }
2589 if (sc->sc_flashs) {
2590 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2591 sc->sc_flashs = 0;
2592 }
2593
2594 if (sc->sc_core_lock)
2595 mutex_obj_free(sc->sc_core_lock);
2596
2597 return 0;
2598 }
2599
2600 static bool
2601 wm_suspend(device_t self, const pmf_qual_t *qual)
2602 {
2603 struct wm_softc *sc = device_private(self);
2604
2605 wm_release_manageability(sc);
2606 wm_release_hw_control(sc);
2607 #ifdef WM_WOL
2608 wm_enable_wakeup(sc);
2609 #endif
2610
2611 return true;
2612 }
2613
2614 static bool
2615 wm_resume(device_t self, const pmf_qual_t *qual)
2616 {
2617 struct wm_softc *sc = device_private(self);
2618
2619 wm_init_manageability(sc);
2620
2621 return true;
2622 }
2623
2624 /*
2625 * wm_watchdog: [ifnet interface function]
2626 *
2627 * Watchdog timer handler.
2628 */
2629 static void
2630 wm_watchdog(struct ifnet *ifp)
2631 {
2632 struct wm_softc *sc = ifp->if_softc;
2633 struct wm_txqueue *txq = &sc->sc_txq[0];
2634
2635 /*
2636 * Since we're using delayed interrupts, sweep up
2637 * before we report an error.
2638 */
2639 WM_TX_LOCK(txq);
2640 wm_txeof(sc);
2641 WM_TX_UNLOCK(txq);
2642
2643 if (txq->txq_free != WM_NTXDESC(txq)) {
2644 #ifdef WM_DEBUG
2645 int i, j;
2646 struct wm_txsoft *txs;
2647 #endif
2648 log(LOG_ERR,
2649 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2650 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
2651 txq->txq_next);
2652 ifp->if_oerrors++;
2653 #ifdef WM_DEBUG
2654 for (i = txq->txq_sdirty; i != txq->txq_snext ;
2655 i = WM_NEXTTXS(txq, i)) {
2656 txs = &txq->txq_soft[i];
2657 printf("txs %d tx %d -> %d\n",
2658 i, txs->txs_firstdesc, txs->txs_lastdesc);
2659 for (j = txs->txs_firstdesc; ;
2660 j = WM_NEXTTX(txq, j)) {
2661 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2662 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
2663 printf("\t %#08x%08x\n",
2664 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
2665 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
2666 if (j == txs->txs_lastdesc)
2667 break;
2668 }
2669 }
2670 #endif
2671 /* Reset the interface. */
2672 (void) wm_init(ifp);
2673 }
2674
2675 /* Try to get more packets going. */
2676 ifp->if_start(ifp);
2677 }
2678
2679 /*
2680 * wm_tick:
2681 *
2682 * One second timer, used to check link status, sweep up
2683 * completed transmit jobs, etc.
2684 */
2685 static void
2686 wm_tick(void *arg)
2687 {
2688 struct wm_softc *sc = arg;
2689 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2690 #ifndef WM_MPSAFE
2691 int s;
2692
2693 s = splnet();
2694 #endif
2695
2696 WM_CORE_LOCK(sc);
2697
2698 if (sc->sc_stopping)
2699 goto out;
2700
2701 if (sc->sc_type >= WM_T_82542_2_1) {
2702 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2703 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2704 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2705 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2706 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2707 }
2708
2709 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2710 ifp->if_ierrors += 0ULL + /* ensure quad_t */
2711 + CSR_READ(sc, WMREG_CRCERRS)
2712 + CSR_READ(sc, WMREG_ALGNERRC)
2713 + CSR_READ(sc, WMREG_SYMERRC)
2714 + CSR_READ(sc, WMREG_RXERRC)
2715 + CSR_READ(sc, WMREG_SEC)
2716 + CSR_READ(sc, WMREG_CEXTERR)
2717 + CSR_READ(sc, WMREG_RLEC);
2718 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2719
2720 if (sc->sc_flags & WM_F_HAS_MII)
2721 mii_tick(&sc->sc_mii);
2722 else if ((sc->sc_type >= WM_T_82575)
2723 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2724 wm_serdes_tick(sc);
2725 else
2726 wm_tbi_tick(sc);
2727
2728 out:
2729 WM_CORE_UNLOCK(sc);
2730 #ifndef WM_MPSAFE
2731 splx(s);
2732 #endif
2733
2734 if (!sc->sc_stopping)
2735 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2736 }
2737
2738 static int
2739 wm_ifflags_cb(struct ethercom *ec)
2740 {
2741 struct ifnet *ifp = &ec->ec_if;
2742 struct wm_softc *sc = ifp->if_softc;
2743 int change = ifp->if_flags ^ sc->sc_if_flags;
2744 int rc = 0;
2745
2746 WM_CORE_LOCK(sc);
2747
2748 if (change != 0)
2749 sc->sc_if_flags = ifp->if_flags;
2750
2751 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2752 rc = ENETRESET;
2753 goto out;
2754 }
2755
2756 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2757 wm_set_filter(sc);
2758
2759 wm_set_vlan(sc);
2760
2761 out:
2762 WM_CORE_UNLOCK(sc);
2763
2764 return rc;
2765 }
2766
2767 /*
2768 * wm_ioctl: [ifnet interface function]
2769 *
2770 * Handle control requests from the operator.
2771 */
2772 static int
2773 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2774 {
2775 struct wm_softc *sc = ifp->if_softc;
2776 struct ifreq *ifr = (struct ifreq *) data;
2777 struct ifaddr *ifa = (struct ifaddr *)data;
2778 struct sockaddr_dl *sdl;
2779 int s, error;
2780
2781 #ifndef WM_MPSAFE
2782 s = splnet();
2783 #endif
2784 switch (cmd) {
2785 case SIOCSIFMEDIA:
2786 case SIOCGIFMEDIA:
2787 WM_CORE_LOCK(sc);
2788 /* Flow control requires full-duplex mode. */
2789 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2790 (ifr->ifr_media & IFM_FDX) == 0)
2791 ifr->ifr_media &= ~IFM_ETH_FMASK;
2792 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2793 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2794 /* We can do both TXPAUSE and RXPAUSE. */
2795 ifr->ifr_media |=
2796 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2797 }
2798 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2799 }
2800 WM_CORE_UNLOCK(sc);
2801 #ifdef WM_MPSAFE
2802 s = splnet();
2803 #endif
2804 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2805 #ifdef WM_MPSAFE
2806 splx(s);
2807 #endif
2808 break;
2809 case SIOCINITIFADDR:
2810 WM_CORE_LOCK(sc);
2811 if (ifa->ifa_addr->sa_family == AF_LINK) {
2812 sdl = satosdl(ifp->if_dl->ifa_addr);
2813 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2814 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2815 /* unicast address is first multicast entry */
2816 wm_set_filter(sc);
2817 error = 0;
2818 WM_CORE_UNLOCK(sc);
2819 break;
2820 }
2821 WM_CORE_UNLOCK(sc);
2822 /*FALLTHROUGH*/
2823 default:
2824 #ifdef WM_MPSAFE
2825 s = splnet();
2826 #endif
2827 /* It may call wm_start, so unlock here */
2828 error = ether_ioctl(ifp, cmd, data);
2829 #ifdef WM_MPSAFE
2830 splx(s);
2831 #endif
2832 if (error != ENETRESET)
2833 break;
2834
2835 error = 0;
2836
2837 if (cmd == SIOCSIFCAP) {
2838 error = (*ifp->if_init)(ifp);
2839 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2840 ;
2841 else if (ifp->if_flags & IFF_RUNNING) {
2842 /*
2843 * Multicast list has changed; set the hardware filter
2844 * accordingly.
2845 */
2846 WM_CORE_LOCK(sc);
2847 wm_set_filter(sc);
2848 WM_CORE_UNLOCK(sc);
2849 }
2850 break;
2851 }
2852
2853 #ifndef WM_MPSAFE
2854 splx(s);
2855 #endif
2856 return error;
2857 }
2858
2859 /* MAC address related */
2860
2861 /*
2862 * Get the offset of the MAC address and return it.
2863 * If an error occurred, use offset 0.
2864 */
2865 static uint16_t
2866 wm_check_alt_mac_addr(struct wm_softc *sc)
2867 {
2868 uint16_t myea[ETHER_ADDR_LEN / 2];
2869 uint16_t offset = NVM_OFF_MACADDR;
2870
2871 /* Try to read alternative MAC address pointer */
2872 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2873 return 0;
2874
2875 /* Check whether the pointer is valid. */
2876 if ((offset == 0x0000) || (offset == 0xffff))
2877 return 0;
2878
2879 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
2880 /*
2881 * Check whether the alternative MAC address is valid.
2882 * Some cards have a non-0xffff pointer but don't actually use
2883 * an alternative MAC address.
2884 *
2885 * To detect those, check whether the multicast (group) bit is set.
2886 */
2887 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2888 if (((myea[0] & 0xff) & 0x01) == 0)
2889 return offset; /* Found */
2890
2891 /* Not found */
2892 return 0;
2893 }
2894
2895 static int
2896 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2897 {
2898 uint16_t myea[ETHER_ADDR_LEN / 2];
2899 uint16_t offset = NVM_OFF_MACADDR;
2900 int do_invert = 0;
2901
2902 switch (sc->sc_type) {
2903 case WM_T_82580:
2904 case WM_T_I350:
2905 case WM_T_I354:
2906 /* EEPROM Top Level Partitioning */
2907 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
2908 break;
2909 case WM_T_82571:
2910 case WM_T_82575:
2911 case WM_T_82576:
2912 case WM_T_80003:
2913 case WM_T_I210:
2914 case WM_T_I211:
2915 offset = wm_check_alt_mac_addr(sc);
2916 if (offset == 0)
2917 if ((sc->sc_funcid & 0x01) == 1)
2918 do_invert = 1;
2919 break;
2920 default:
2921 if ((sc->sc_funcid & 0x01) == 1)
2922 do_invert = 1;
2923 break;
2924 }
2925
2926 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2927 myea) != 0)
2928 goto bad;
2929
2930 enaddr[0] = myea[0] & 0xff;
2931 enaddr[1] = myea[0] >> 8;
2932 enaddr[2] = myea[1] & 0xff;
2933 enaddr[3] = myea[1] >> 8;
2934 enaddr[4] = myea[2] & 0xff;
2935 enaddr[5] = myea[2] >> 8;
2936
2937 /*
2938 * Toggle the LSB of the MAC address on the second port
2939 * of some dual port cards.
2940 */
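/*
 * For example (hypothetical address): if port A reads
 * 00:1b:21:xx:xx:40 from the NVM, port B uses 00:1b:21:xx:xx:41.
 */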
2941 if (do_invert != 0)
2942 enaddr[5] ^= 1;
2943
2944 return 0;
2945
2946 bad:
2947 return -1;
2948 }
2949
2950 /*
2951 * wm_set_ral:
2952 *
2953 * Set an entry in the receive address list.
2954 */
2955 static void
2956 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2957 {
2958 uint32_t ral_lo, ral_hi;
2959
2960 if (enaddr != NULL) {
2961 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2962 (enaddr[3] << 24);
2963 ral_hi = enaddr[4] | (enaddr[5] << 8);
2964 ral_hi |= RAL_AV;
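/*
 * e.g. (hypothetical) 00:11:22:33:44:55 packs as
 * ral_lo = 0x33221100, ral_hi = RAL_AV | 0x5544.
 */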
2965 } else {
2966 ral_lo = 0;
2967 ral_hi = 0;
2968 }
2969
2970 if (sc->sc_type >= WM_T_82544) {
2971 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2972 ral_lo);
2973 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2974 ral_hi);
2975 } else {
2976 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2977 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2978 }
2979 }
2980
2981 /*
2982 * wm_mchash:
2983 *
2984 * Compute the hash of the multicast address for the 4096-bit
2985 * multicast filter.
2986 */
2987 static uint32_t
2988 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2989 {
2990 static const int lo_shift[4] = { 4, 3, 2, 0 };
2991 static const int hi_shift[4] = { 4, 5, 6, 8 };
2992 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2993 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
2994 uint32_t hash;
2995
2996 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2997 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2998 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
2999 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3000 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3001 return (hash & 0x3ff);
3002 }
3003 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3004 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
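/*
 * Illustration (derived from the shift tables above, not from a
 * datasheet): with the default sc_mchash_type of 0 on a non-ICH/PCH
 * chip, the computation reduces to
 *
 *	hash = (enaddr[4] >> 4) | ((uint16_t)enaddr[5] << 4);
 *
 * i.e. the upper nibble of byte 4 plus all of byte 5 form a 12-bit
 * index into the 4096-bit table (128 32-bit MTA registers).
 */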
3005
3006 return (hash & 0xfff);
3007 }
3008
3009 /*
3010 * wm_set_filter:
3011 *
3012 * Set up the receive filter.
3013 */
3014 static void
3015 wm_set_filter(struct wm_softc *sc)
3016 {
3017 struct ethercom *ec = &sc->sc_ethercom;
3018 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3019 struct ether_multi *enm;
3020 struct ether_multistep step;
3021 bus_addr_t mta_reg;
3022 uint32_t hash, reg, bit;
3023 int i, size;
3024
3025 if (sc->sc_type >= WM_T_82544)
3026 mta_reg = WMREG_CORDOVA_MTA;
3027 else
3028 mta_reg = WMREG_MTA;
3029
3030 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3031
3032 if (ifp->if_flags & IFF_BROADCAST)
3033 sc->sc_rctl |= RCTL_BAM;
3034 if (ifp->if_flags & IFF_PROMISC) {
3035 sc->sc_rctl |= RCTL_UPE;
3036 goto allmulti;
3037 }
3038
3039 /*
3040 * Set the station address in the first RAL slot, and
3041 * clear the remaining slots.
3042 */
3043 if (sc->sc_type == WM_T_ICH8)
3044 size = WM_RAL_TABSIZE_ICH8 - 1;
3045 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3046 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3047 || (sc->sc_type == WM_T_PCH_LPT))
3048 size = WM_RAL_TABSIZE_ICH8;
3049 else if (sc->sc_type == WM_T_82575)
3050 size = WM_RAL_TABSIZE_82575;
3051 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3052 size = WM_RAL_TABSIZE_82576;
3053 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3054 size = WM_RAL_TABSIZE_I350;
3055 else
3056 size = WM_RAL_TABSIZE;
3057 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3058 for (i = 1; i < size; i++)
3059 wm_set_ral(sc, NULL, i);
3060
3061 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3062 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3063 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
3064 size = WM_ICH8_MC_TABSIZE;
3065 else
3066 size = WM_MC_TABSIZE;
3067 /* Clear out the multicast table. */
3068 for (i = 0; i < size; i++)
3069 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3070
3071 ETHER_FIRST_MULTI(step, ec, enm);
3072 while (enm != NULL) {
3073 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3074 /*
3075 * We must listen to a range of multicast addresses.
3076 * For now, just accept all multicasts, rather than
3077 * trying to set only those filter bits needed to match
3078 * the range. (At this time, the only use of address
3079 * ranges is for IP multicast routing, for which the
3080 * range is big enough to require all bits set.)
3081 */
3082 goto allmulti;
3083 }
3084
3085 hash = wm_mchash(sc, enm->enm_addrlo);
3086
3087 reg = (hash >> 5);
3088 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3089 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3090 || (sc->sc_type == WM_T_PCH2)
3091 || (sc->sc_type == WM_T_PCH_LPT))
3092 reg &= 0x1f;
3093 else
3094 reg &= 0x7f;
3095 bit = hash & 0x1f;
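/*
 * The low 5 bits of the hash select a bit within one 32-bit
 * MTA register; the remaining bits select the register itself
 * (32 registers on ICH/PCH, 128 elsewhere).
 */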
3096
3097 hash = CSR_READ(sc, mta_reg + (reg << 2));
3098 hash |= 1U << bit;
3099
3100 /* XXX Hardware bug?? */
3101 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3102 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3103 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3104 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3105 } else
3106 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3107
3108 ETHER_NEXT_MULTI(step, enm);
3109 }
3110
3111 ifp->if_flags &= ~IFF_ALLMULTI;
3112 goto setit;
3113
3114 allmulti:
3115 ifp->if_flags |= IFF_ALLMULTI;
3116 sc->sc_rctl |= RCTL_MPE;
3117
3118 setit:
3119 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3120 }
3121
3122 /* Reset and init related */
3123
3124 static void
3125 wm_set_vlan(struct wm_softc *sc)
3126 {
3127 /* Deal with VLAN enables. */
3128 if (VLAN_ATTACHED(&sc->sc_ethercom))
3129 sc->sc_ctrl |= CTRL_VME;
3130 else
3131 sc->sc_ctrl &= ~CTRL_VME;
3132
3133 /* Write the control registers. */
3134 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3135 }
3136
3137 static void
3138 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3139 {
3140 uint32_t gcr;
3141 pcireg_t ctrl2;
3142
3143 gcr = CSR_READ(sc, WMREG_GCR);
3144
3145 /* Only take action if timeout value is defaulted to 0 */
3146 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3147 goto out;
3148
3149 if ((gcr & GCR_CAP_VER2) == 0) {
3150 gcr |= GCR_CMPL_TMOUT_10MS;
3151 goto out;
3152 }
3153
3154 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3155 sc->sc_pcixe_capoff + PCIE_DCSR2);
3156 ctrl2 |= WM_PCIE_DCSR2_16MS;
3157 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3158 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3159
3160 out:
3161 /* Disable completion timeout resend */
3162 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3163
3164 CSR_WRITE(sc, WMREG_GCR, gcr);
3165 }
3166
3167 void
3168 wm_get_auto_rd_done(struct wm_softc *sc)
3169 {
3170 int i;
3171
3172 /* wait for eeprom to reload */
3173 switch (sc->sc_type) {
3174 case WM_T_82571:
3175 case WM_T_82572:
3176 case WM_T_82573:
3177 case WM_T_82574:
3178 case WM_T_82583:
3179 case WM_T_82575:
3180 case WM_T_82576:
3181 case WM_T_82580:
3182 case WM_T_I350:
3183 case WM_T_I354:
3184 case WM_T_I210:
3185 case WM_T_I211:
3186 case WM_T_80003:
3187 case WM_T_ICH8:
3188 case WM_T_ICH9:
3189 for (i = 0; i < 10; i++) {
3190 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3191 break;
3192 delay(1000);
3193 }
3194 if (i == 10) {
3195 log(LOG_ERR, "%s: auto read from eeprom failed to "
3196 "complete\n", device_xname(sc->sc_dev));
3197 }
3198 break;
3199 default:
3200 break;
3201 }
3202 }
3203
3204 void
3205 wm_lan_init_done(struct wm_softc *sc)
3206 {
3207 uint32_t reg = 0;
3208 int i;
3209
3210 /* wait for eeprom to reload */
3211 switch (sc->sc_type) {
3212 case WM_T_ICH10:
3213 case WM_T_PCH:
3214 case WM_T_PCH2:
3215 case WM_T_PCH_LPT:
3216 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3217 reg = CSR_READ(sc, WMREG_STATUS);
3218 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3219 break;
3220 delay(100);
3221 }
3222 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3223 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3224 "complete\n", device_xname(sc->sc_dev), __func__);
3225 }
3226 break;
3227 default:
3228 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3229 __func__);
3230 break;
3231 }
3232
3233 reg &= ~STATUS_LAN_INIT_DONE;
3234 CSR_WRITE(sc, WMREG_STATUS, reg);
3235 }
3236
3237 void
3238 wm_get_cfg_done(struct wm_softc *sc)
3239 {
3240 int mask;
3241 uint32_t reg;
3242 int i;
3243
3244 /* wait for eeprom to reload */
3245 switch (sc->sc_type) {
3246 case WM_T_82542_2_0:
3247 case WM_T_82542_2_1:
3248 /* null */
3249 break;
3250 case WM_T_82543:
3251 case WM_T_82544:
3252 case WM_T_82540:
3253 case WM_T_82545:
3254 case WM_T_82545_3:
3255 case WM_T_82546:
3256 case WM_T_82546_3:
3257 case WM_T_82541:
3258 case WM_T_82541_2:
3259 case WM_T_82547:
3260 case WM_T_82547_2:
3261 case WM_T_82573:
3262 case WM_T_82574:
3263 case WM_T_82583:
3264 /* generic */
3265 delay(10*1000);
3266 break;
3267 case WM_T_80003:
3268 case WM_T_82571:
3269 case WM_T_82572:
3270 case WM_T_82575:
3271 case WM_T_82576:
3272 case WM_T_82580:
3273 case WM_T_I350:
3274 case WM_T_I354:
3275 case WM_T_I210:
3276 case WM_T_I211:
3277 if (sc->sc_type == WM_T_82571) {
3278 /* On the 82571, all functions share port 0's bit */
3279 mask = EEMNGCTL_CFGDONE_0;
3280 } else
3281 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3282 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3283 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3284 break;
3285 delay(1000);
3286 }
3287 if (i >= WM_PHY_CFG_TIMEOUT) {
3288 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3289 device_xname(sc->sc_dev), __func__));
3290 }
3291 break;
3292 case WM_T_ICH8:
3293 case WM_T_ICH9:
3294 case WM_T_ICH10:
3295 case WM_T_PCH:
3296 case WM_T_PCH2:
3297 case WM_T_PCH_LPT:
3298 delay(10*1000);
3299 if (sc->sc_type >= WM_T_ICH10)
3300 wm_lan_init_done(sc);
3301 else
3302 wm_get_auto_rd_done(sc);
3303
3304 reg = CSR_READ(sc, WMREG_STATUS);
3305 if ((reg & STATUS_PHYRA) != 0)
3306 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3307 break;
3308 default:
3309 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3310 __func__);
3311 break;
3312 }
3313 }
3314
3315 /* Init hardware bits */
3316 void
3317 wm_initialize_hardware_bits(struct wm_softc *sc)
3318 {
3319 uint32_t tarc0, tarc1, reg;
3320
3321 /* For 82571 variant, 80003 and ICHs */
3322 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3323 || (sc->sc_type >= WM_T_80003)) {
3324
3325 /* Transmit Descriptor Control 0 */
3326 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3327 reg |= TXDCTL_COUNT_DESC;
3328 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3329
3330 /* Transmit Descriptor Control 1 */
3331 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3332 reg |= TXDCTL_COUNT_DESC;
3333 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3334
3335 /* TARC0 */
3336 tarc0 = CSR_READ(sc, WMREG_TARC0);
3337 switch (sc->sc_type) {
3338 case WM_T_82571:
3339 case WM_T_82572:
3340 case WM_T_82573:
3341 case WM_T_82574:
3342 case WM_T_82583:
3343 case WM_T_80003:
3344 /* Clear bits 30..27 */
3345 tarc0 &= ~__BITS(30, 27);
3346 break;
3347 default:
3348 break;
3349 }
3350
3351 switch (sc->sc_type) {
3352 case WM_T_82571:
3353 case WM_T_82572:
3354 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3355
3356 tarc1 = CSR_READ(sc, WMREG_TARC1);
3357 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3358 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3359 /* 8257[12] Errata No.7 */
3360 tarc1 |= __BIT(22); /* TARC1 bit 22 */
3361
3362 /* TARC1 bit 28 */
3363 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3364 tarc1 &= ~__BIT(28);
3365 else
3366 tarc1 |= __BIT(28);
3367 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3368
3369 /*
3370 * 8257[12] Errata No.13
3371 * Disable Dynamic Clock Gating.
3372 */
3373 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3374 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3375 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3376 break;
3377 case WM_T_82573:
3378 case WM_T_82574:
3379 case WM_T_82583:
3380 if ((sc->sc_type == WM_T_82574)
3381 || (sc->sc_type == WM_T_82583))
3382 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3383
3384 /* Extended Device Control */
3385 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3386 reg &= ~__BIT(23); /* Clear bit 23 */
3387 reg |= __BIT(22); /* Set bit 22 */
3388 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3389
3390 /* Device Control */
3391 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3392 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3393
3394 /* PCIe Control Register */
3395 /*
3396 * 82573 Errata (unknown).
3397 *
3398 * 82574 Errata 25 and 82583 Errata 12
3399 * "Dropped Rx Packets":
3400 * NVM image version 2.1.4 and newer do not have this bug.
3401 */
3402 reg = CSR_READ(sc, WMREG_GCR);
3403 reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
3404 CSR_WRITE(sc, WMREG_GCR, reg);
3405
3406 if ((sc->sc_type == WM_T_82574)
3407 || (sc->sc_type == WM_T_82583)) {
3408 /*
3409 * Document says this bit must be set for
3410 * proper operation.
3411 */
3412 reg = CSR_READ(sc, WMREG_GCR);
3413 reg |= __BIT(22);
3414 CSR_WRITE(sc, WMREG_GCR, reg);
3415
3416 /*
3417 * Apply a workaround for the hardware errata
3418 * documented in the errata docs. It fixes an
3419 * issue where some error-prone or unreliable
3420 * PCIe completions occur, particularly with
3421 * ASPM enabled. Without the fix, the issue can
3422 * cause Tx timeouts.
3423 */
3424 reg = CSR_READ(sc, WMREG_GCR2);
3425 reg |= __BIT(0);
3426 CSR_WRITE(sc, WMREG_GCR2, reg);
3427 }
3428 break;
3429 case WM_T_80003:
3430 /* TARC0 */
3431 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3432 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3433 tarc0 &= ~__BIT(20); /* Clear bit 20 */
3434
3435 /* TARC1 bit 28 */
3436 tarc1 = CSR_READ(sc, WMREG_TARC1);
3437 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3438 tarc1 &= ~__BIT(28);
3439 else
3440 tarc1 |= __BIT(28);
3441 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3442 break;
3443 case WM_T_ICH8:
3444 case WM_T_ICH9:
3445 case WM_T_ICH10:
3446 case WM_T_PCH:
3447 case WM_T_PCH2:
3448 case WM_T_PCH_LPT:
3449 /* TARC 0 */
3450 if (sc->sc_type == WM_T_ICH8) {
3451 /* Set TARC0 bits 29 and 28 */
3452 tarc0 |= __BITS(29, 28);
3453 }
3454 /* Set TARC0 bits 23,24,26,27 */
3455 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3456
3457 /* CTRL_EXT */
3458 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3459 reg |= __BIT(22); /* Set bit 22 */
3460 /*
3461 * Enable PHY low-power state when MAC is at D3
3462 * w/o WoL
3463 */
3464 if (sc->sc_type >= WM_T_PCH)
3465 reg |= CTRL_EXT_PHYPDEN;
3466 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3467
3468 /* TARC1 */
3469 tarc1 = CSR_READ(sc, WMREG_TARC1);
3470 /* bit 28 */
3471 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3472 tarc1 &= ~__BIT(28);
3473 else
3474 tarc1 |= __BIT(28);
3475 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3476 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3477
3478 /* Device Status */
3479 if (sc->sc_type == WM_T_ICH8) {
3480 reg = CSR_READ(sc, WMREG_STATUS);
3481 reg &= ~__BIT(31);
3482 CSR_WRITE(sc, WMREG_STATUS, reg);
3483
3484 }
3485
3486 /*
3487 * To work around a descriptor data corruption issue seen
3488 * during NFS v2 UDP traffic, just disable the NFS filtering
3489 * capability.
3490 */
3491 reg = CSR_READ(sc, WMREG_RFCTL);
3492 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3493 CSR_WRITE(sc, WMREG_RFCTL, reg);
3494 break;
3495 default:
3496 break;
3497 }
3498 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3499
3500 /*
3501 * 8257[12] Errata No.52 and some others.
3502 * Avoid RSS Hash Value bug.
3503 */
3504 switch (sc->sc_type) {
3505 case WM_T_82571:
3506 case WM_T_82572:
3507 case WM_T_82573:
3508 case WM_T_80003:
3509 case WM_T_ICH8:
3510 reg = CSR_READ(sc, WMREG_RFCTL);
3511 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
3512 CSR_WRITE(sc, WMREG_RFCTL, reg);
3513 break;
3514 default:
3515 break;
3516 }
3517 }
3518 }
3519
3520 static uint32_t
3521 wm_rxpbs_adjust_82580(uint32_t val)
3522 {
3523 uint32_t rv = 0;
3524
3525 if (val < __arraycount(wm_82580_rxpbs_table))
3526 rv = wm_82580_rxpbs_table[val];
3527
3528 return rv;
3529 }
3530
3531 /*
3532 * wm_reset:
3533 *
3534  *	Reset the chip.
3535 */
3536 static void
3537 wm_reset(struct wm_softc *sc)
3538 {
3539 int phy_reset = 0;
3540 int i, error = 0;
3541 uint32_t reg, mask;
3542
3543 /*
3544 * Allocate on-chip memory according to the MTU size.
3545 * The Packet Buffer Allocation register must be written
3546 * before the chip is reset.
3547 */
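	/*
	 * Worked example for the 82547 case below (a sketch drawn from the
	 * computation in that case): the chip has a 40K packet buffer.
	 * With a normal MTU, sc_pba = PBA_30K, so RX keeps 30K and the TX
	 * FIFO gets the remaining (PBA_40K - PBA_30K) << PBA_BYTE_SHIFT
	 * bytes, starting at sc_pba << PBA_ADDR_SHIFT.
	 */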
3548 switch (sc->sc_type) {
3549 case WM_T_82547:
3550 case WM_T_82547_2:
3551 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3552 PBA_22K : PBA_30K;
3553 for (i = 0; i < sc->sc_ntxqueues; i++) {
3554 struct wm_txqueue *txq = &sc->sc_txq[i];
3555 txq->txq_fifo_head = 0;
3556 txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3557 txq->txq_fifo_size =
3558 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3559 txq->txq_fifo_stall = 0;
3560 }
3561 break;
3562 case WM_T_82571:
3563 case WM_T_82572:
3564 	case WM_T_82575: /* XXX need special handling for jumbo frames */
3565 case WM_T_80003:
3566 sc->sc_pba = PBA_32K;
3567 break;
3568 case WM_T_82573:
3569 sc->sc_pba = PBA_12K;
3570 break;
3571 case WM_T_82574:
3572 case WM_T_82583:
3573 sc->sc_pba = PBA_20K;
3574 break;
3575 case WM_T_82576:
3576 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3577 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3578 break;
3579 case WM_T_82580:
3580 case WM_T_I350:
3581 case WM_T_I354:
3582 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3583 break;
3584 case WM_T_I210:
3585 case WM_T_I211:
3586 sc->sc_pba = PBA_34K;
3587 break;
3588 case WM_T_ICH8:
3589 /* Workaround for a bit corruption issue in FIFO memory */
3590 sc->sc_pba = PBA_8K;
3591 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3592 break;
3593 case WM_T_ICH9:
3594 case WM_T_ICH10:
3595 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3596 PBA_14K : PBA_10K;
3597 break;
3598 case WM_T_PCH:
3599 case WM_T_PCH2:
3600 case WM_T_PCH_LPT:
3601 sc->sc_pba = PBA_26K;
3602 break;
3603 default:
3604 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3605 PBA_40K : PBA_48K;
3606 break;
3607 }
3608 /*
3609 	 * Only old or non-multiqueue devices have the PBA register.
3610 * XXX Need special handling for 82575.
3611 */
3612 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3613 || (sc->sc_type == WM_T_82575))
3614 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3615
3616 /* Prevent the PCI-E bus from sticking */
3617 if (sc->sc_flags & WM_F_PCIE) {
3618 int timeout = 800;
3619
3620 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3621 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3622
3623 while (timeout--) {
3624 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3625 == 0)
3626 break;
3627 delay(100);
3628 }
3629 }
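	/*
	 * The loop above waits for up to 800 * 100us = 80ms for pending
	 * PCIe master requests to complete (STATUS_GIO_M_ENA to clear)
	 * before the reset is issued.
	 */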
3630
3631 /* Set the completion timeout for interface */
3632 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3633 || (sc->sc_type == WM_T_82580)
3634 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3635 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3636 wm_set_pcie_completion_timeout(sc);
3637
3638 /* Clear interrupt */
3639 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3640 if (sc->sc_nintrs > 1) {
3641 if (sc->sc_type != WM_T_82574) {
3642 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3643 CSR_WRITE(sc, WMREG_EIAC, 0);
3644 } else {
3645 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3646 }
3647 }
3648
3649 /* Stop the transmit and receive processes. */
3650 CSR_WRITE(sc, WMREG_RCTL, 0);
3651 sc->sc_rctl &= ~RCTL_EN;
3652 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3653 CSR_WRITE_FLUSH(sc);
3654
3655 /* XXX set_tbi_sbp_82543() */
3656
3657 delay(10*1000);
3658
3659 /* Must acquire the MDIO ownership before MAC reset */
3660 switch (sc->sc_type) {
3661 case WM_T_82573:
3662 case WM_T_82574:
3663 case WM_T_82583:
3664 error = wm_get_hw_semaphore_82573(sc);
3665 break;
3666 default:
3667 break;
3668 }
3669
3670 /*
3671 * 82541 Errata 29? & 82547 Errata 28?
3672 * See also the description about PHY_RST bit in CTRL register
3673 * in 8254x_GBe_SDM.pdf.
3674 */
3675 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3676 CSR_WRITE(sc, WMREG_CTRL,
3677 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3678 CSR_WRITE_FLUSH(sc);
3679 delay(5000);
3680 }
3681
3682 switch (sc->sc_type) {
3683 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3684 case WM_T_82541:
3685 case WM_T_82541_2:
3686 case WM_T_82547:
3687 case WM_T_82547_2:
3688 /*
3689 * On some chipsets, a reset through a memory-mapped write
3690 * cycle can cause the chip to reset before completing the
3691 	 * write cycle. This causes a major headache that can be
3692 * avoided by issuing the reset via indirect register writes
3693 * through I/O space.
3694 *
3695 * So, if we successfully mapped the I/O BAR at attach time,
3696 * use that. Otherwise, try our luck with a memory-mapped
3697 * reset.
3698 */
3699 if (sc->sc_flags & WM_F_IOH_VALID)
3700 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3701 else
3702 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3703 break;
3704 case WM_T_82545_3:
3705 case WM_T_82546_3:
3706 /* Use the shadow control register on these chips. */
3707 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3708 break;
3709 case WM_T_80003:
3710 mask = swfwphysem[sc->sc_funcid];
3711 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3712 wm_get_swfw_semaphore(sc, mask);
3713 CSR_WRITE(sc, WMREG_CTRL, reg);
3714 wm_put_swfw_semaphore(sc, mask);
3715 break;
3716 case WM_T_ICH8:
3717 case WM_T_ICH9:
3718 case WM_T_ICH10:
3719 case WM_T_PCH:
3720 case WM_T_PCH2:
3721 case WM_T_PCH_LPT:
3722 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3723 if (wm_check_reset_block(sc) == 0) {
3724 /*
3725 * Gate automatic PHY configuration by hardware on
3726 * non-managed 82579
3727 */
3728 if ((sc->sc_type == WM_T_PCH2)
3729 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3730 != 0))
3731 wm_gate_hw_phy_config_ich8lan(sc, 1);
3732
3734 reg |= CTRL_PHY_RESET;
3735 phy_reset = 1;
3736 }
3737 wm_get_swfwhw_semaphore(sc);
3738 CSR_WRITE(sc, WMREG_CTRL, reg);
3739 		/* Don't insert a completion barrier while resetting */
3740 delay(20*1000);
3741 wm_put_swfwhw_semaphore(sc);
3742 break;
3743 case WM_T_82580:
3744 case WM_T_I350:
3745 case WM_T_I354:
3746 case WM_T_I210:
3747 case WM_T_I211:
3748 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3749 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3750 CSR_WRITE_FLUSH(sc);
3751 delay(5000);
3752 break;
3753 case WM_T_82542_2_0:
3754 case WM_T_82542_2_1:
3755 case WM_T_82543:
3756 case WM_T_82540:
3757 case WM_T_82545:
3758 case WM_T_82546:
3759 case WM_T_82571:
3760 case WM_T_82572:
3761 case WM_T_82573:
3762 case WM_T_82574:
3763 case WM_T_82575:
3764 case WM_T_82576:
3765 case WM_T_82583:
3766 default:
3767 /* Everything else can safely use the documented method. */
3768 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3769 break;
3770 }
3771
3772 /* Must release the MDIO ownership after MAC reset */
3773 switch (sc->sc_type) {
3774 case WM_T_82573:
3775 case WM_T_82574:
3776 case WM_T_82583:
3777 if (error == 0)
3778 wm_put_hw_semaphore_82573(sc);
3779 break;
3780 default:
3781 break;
3782 }
3783
3784 if (phy_reset != 0)
3785 wm_get_cfg_done(sc);
3786
3787 /* reload EEPROM */
3788 switch (sc->sc_type) {
3789 case WM_T_82542_2_0:
3790 case WM_T_82542_2_1:
3791 case WM_T_82543:
3792 case WM_T_82544:
3793 delay(10);
3794 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3795 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3796 CSR_WRITE_FLUSH(sc);
3797 delay(2000);
3798 break;
3799 case WM_T_82540:
3800 case WM_T_82545:
3801 case WM_T_82545_3:
3802 case WM_T_82546:
3803 case WM_T_82546_3:
3804 delay(5*1000);
3805 /* XXX Disable HW ARPs on ASF enabled adapters */
3806 break;
3807 case WM_T_82541:
3808 case WM_T_82541_2:
3809 case WM_T_82547:
3810 case WM_T_82547_2:
3811 delay(20000);
3812 /* XXX Disable HW ARPs on ASF enabled adapters */
3813 break;
3814 case WM_T_82571:
3815 case WM_T_82572:
3816 case WM_T_82573:
3817 case WM_T_82574:
3818 case WM_T_82583:
3819 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3820 delay(10);
3821 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3822 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3823 CSR_WRITE_FLUSH(sc);
3824 }
3825 /* check EECD_EE_AUTORD */
3826 wm_get_auto_rd_done(sc);
3827 /*
3828 		 * PHY configuration from NVM starts just after EECD_AUTO_RD
3829 * is set.
3830 */
3831 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3832 || (sc->sc_type == WM_T_82583))
3833 delay(25*1000);
3834 break;
3835 case WM_T_82575:
3836 case WM_T_82576:
3837 case WM_T_82580:
3838 case WM_T_I350:
3839 case WM_T_I354:
3840 case WM_T_I210:
3841 case WM_T_I211:
3842 case WM_T_80003:
3843 /* check EECD_EE_AUTORD */
3844 wm_get_auto_rd_done(sc);
3845 break;
3846 case WM_T_ICH8:
3847 case WM_T_ICH9:
3848 case WM_T_ICH10:
3849 case WM_T_PCH:
3850 case WM_T_PCH2:
3851 case WM_T_PCH_LPT:
3852 break;
3853 default:
3854 panic("%s: unknown type\n", __func__);
3855 }
3856
3857 /* Check whether EEPROM is present or not */
3858 switch (sc->sc_type) {
3859 case WM_T_82575:
3860 case WM_T_82576:
3861 case WM_T_82580:
3862 case WM_T_I350:
3863 case WM_T_I354:
3864 case WM_T_ICH8:
3865 case WM_T_ICH9:
3866 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3867 /* Not found */
3868 sc->sc_flags |= WM_F_EEPROM_INVALID;
3869 if (sc->sc_type == WM_T_82575)
3870 wm_reset_init_script_82575(sc);
3871 }
3872 break;
3873 default:
3874 break;
3875 }
3876
3877 if ((sc->sc_type == WM_T_82580)
3878 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3879 /* clear global device reset status bit */
3880 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3881 }
3882
3883 /* Clear any pending interrupt events. */
3884 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3885 reg = CSR_READ(sc, WMREG_ICR);
3886 if (sc->sc_nintrs > 1) {
3887 if (sc->sc_type != WM_T_82574) {
3888 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3889 CSR_WRITE(sc, WMREG_EIAC, 0);
3890 } else
3891 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3892 }
3893
3894 /* reload sc_ctrl */
3895 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3896
3897 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
3898 wm_set_eee_i350(sc);
3899
3900 /* dummy read from WUC */
3901 if (sc->sc_type == WM_T_PCH)
3902 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3903 /*
3904 * For PCH, this write will make sure that any noise will be detected
3905 * as a CRC error and be dropped rather than show up as a bad packet
3906 	 * to the DMA engine.
3907 */
3908 if (sc->sc_type == WM_T_PCH)
3909 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3910
3911 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3912 CSR_WRITE(sc, WMREG_WUC, 0);
3913
3914 wm_reset_mdicnfg_82580(sc);
3915
3916 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
3917 wm_pll_workaround_i210(sc);
3918 }
3919
3920 /*
3921 * wm_add_rxbuf:
3922 *
3923  *	Add a receive buffer to the indicated descriptor.
3924 */
3925 static int
3926 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
3927 {
3928 struct wm_softc *sc = rxq->rxq_sc;
3929 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
3930 struct mbuf *m;
3931 int error;
3932
3933 KASSERT(WM_RX_LOCKED(rxq));
3934
3935 MGETHDR(m, M_DONTWAIT, MT_DATA);
3936 if (m == NULL)
3937 return ENOBUFS;
3938
3939 MCLGET(m, M_DONTWAIT);
3940 if ((m->m_flags & M_EXT) == 0) {
3941 m_freem(m);
3942 return ENOBUFS;
3943 }
3944
3945 if (rxs->rxs_mbuf != NULL)
3946 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3947
3948 rxs->rxs_mbuf = m;
3949
3950 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3951 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3952 BUS_DMA_READ|BUS_DMA_NOWAIT);
3953 if (error) {
3954 /* XXX XXX XXX */
3955 aprint_error_dev(sc->sc_dev,
3956 "unable to load rx DMA map %d, error = %d\n",
3957 idx, error);
3958 panic("wm_add_rxbuf");
3959 }
3960
3961 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3962 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3963
3964 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3965 if ((sc->sc_rctl & RCTL_EN) != 0)
3966 wm_init_rxdesc(rxq, idx);
3967 } else
3968 wm_init_rxdesc(rxq, idx);
3969
3970 return 0;
3971 }
3972
3973 /*
3974 * wm_rxdrain:
3975 *
3976 * Drain the receive queue.
3977 */
3978 static void
3979 wm_rxdrain(struct wm_rxqueue *rxq)
3980 {
3981 struct wm_softc *sc = rxq->rxq_sc;
3982 struct wm_rxsoft *rxs;
3983 int i;
3984
3985 KASSERT(WM_RX_LOCKED(rxq));
3986
3987 for (i = 0; i < WM_NRXDESC; i++) {
3988 rxs = &rxq->rxq_soft[i];
3989 if (rxs->rxs_mbuf != NULL) {
3990 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3991 m_freem(rxs->rxs_mbuf);
3992 rxs->rxs_mbuf = NULL;
3993 }
3994 }
3995 }
3996
3997
3998 /*
3999 * XXX copy from FreeBSD's sys/net/rss_config.c
4000 */
4001 /*
4002 * RSS secret key, intended to prevent attacks on load-balancing. Its
4003 * effectiveness may be limited by algorithm choice and available entropy
4004 * during the boot.
4005 *
4006 * XXXRW: And that we don't randomize it yet!
4007 *
4008 * This is the default Microsoft RSS specification key which is also
4009 * the Chelsio T5 firmware default key.
4010 */
4011 #define RSS_KEYSIZE 40
4012 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
4013 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
4014 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
4015 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
4016 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
4017 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
4018 };
4019
4020 /*
4021  * Caller must pass an array of size sizeof(wm_rss_key).
4022 *
4023 * XXX
4024  * As if_ixgbe may also use this function, it should not be
4025  * an if_wm specific function.
4026 */
4027 static void
4028 wm_rss_getkey(uint8_t *key)
4029 {
4030
4031 memcpy(key, wm_rss_key, sizeof(wm_rss_key));
4032 }
4033
4034 /*
4035 * Setup registers for RSS.
4036 *
4037  * XXX VMDq is not yet supported.
4038 */
4039 static void
4040 wm_init_rss(struct wm_softc *sc)
4041 {
4042 uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4043 int i;
4044
4045 CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
4046
4047 for (i = 0; i < RETA_NUM_ENTRIES; i++) {
4048 int qid, reta_ent;
4049
4050 qid = i % sc->sc_nrxqueues;
4051 		switch (sc->sc_type) {
4052 case WM_T_82574:
4053 reta_ent = __SHIFTIN(qid,
4054 RETA_ENT_QINDEX_MASK_82574);
4055 break;
4056 case WM_T_82575:
4057 reta_ent = __SHIFTIN(qid,
4058 RETA_ENT_QINDEX1_MASK_82575);
4059 break;
4060 default:
4061 reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
4062 break;
4063 }
4064
4065 reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
4066 reta_reg &= ~RETA_ENTRY_MASK_Q(i);
4067 reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
4068 CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
4069 }
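	/*
	 * Example of the fill pattern above: with sc_nrxqueues == 4 the
	 * redirection table entries cycle 0,1,2,3,0,1,... over all
	 * RETA_NUM_ENTRIES slots, so hash values are spread evenly
	 * across the RX queues.
	 */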
4070
4071 wm_rss_getkey((uint8_t *)rss_key);
4072 for (i = 0; i < RSSRK_NUM_REGS; i++)
4073 CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
4074
4075 if (sc->sc_type == WM_T_82574)
4076 mrqc = MRQC_ENABLE_RSS_MQ_82574;
4077 else
4078 mrqc = MRQC_ENABLE_RSS_MQ;
4079
4080 	/* XXX
4081 	 * The same as FreeBSD igb.
4082 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
4083 */
4084 mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
4085 mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
4086 mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
4087 mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
4088
4089 CSR_WRITE(sc, WMREG_MRQC, mrqc);
4090 }
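
/*
 * A minimal sketch (not actual driver code) of how the hardware is
 * expected to consume the state programmed above: the Toeplitz hash of
 * a flow, computed with the RSS key written to the RSSRK registers, is
 * masked down to index the redirection table, which names the RX queue.
 * "wm_rss_pick_queue" and "reta" are hypothetical; reta stands in for a
 * flat copy of the table programmed in wm_init_rss().
 */
#if 0
static int
wm_rss_pick_queue(uint32_t rss_hash, const uint8_t reta[RETA_NUM_ENTRIES])
{

	/* RETA_NUM_ENTRIES is a power of two, so masking selects an entry. */
	return reta[rss_hash & (RETA_NUM_ENTRIES - 1)];
}
#endif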
4091
4092 /*
4093  * Adjust the TX and RX queue numbers which the system actually uses.
4094  *
4095  * The numbers are affected by the parameters below.
4096  * - The number of hardware queues
4097 * - The number of MSI-X vectors (= "nvectors" argument)
4098 * - ncpu
4099 */
4100 static void
4101 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
4102 {
4103 int hw_ntxqueues, hw_nrxqueues;
4104
4105 if (nvectors < 3) {
4106 sc->sc_ntxqueues = 1;
4107 sc->sc_nrxqueues = 1;
4108 return;
4109 }
4110
4111 	switch (sc->sc_type) {
4112 case WM_T_82572:
4113 hw_ntxqueues = 2;
4114 hw_nrxqueues = 2;
4115 break;
4116 case WM_T_82574:
4117 hw_ntxqueues = 2;
4118 hw_nrxqueues = 2;
4119 break;
4120 case WM_T_82575:
4121 hw_ntxqueues = 4;
4122 hw_nrxqueues = 4;
4123 break;
4124 case WM_T_82576:
4125 hw_ntxqueues = 16;
4126 hw_nrxqueues = 16;
4127 break;
4128 case WM_T_82580:
4129 case WM_T_I350:
4130 case WM_T_I354:
4131 hw_ntxqueues = 8;
4132 hw_nrxqueues = 8;
4133 break;
4134 case WM_T_I210:
4135 hw_ntxqueues = 4;
4136 hw_nrxqueues = 4;
4137 break;
4138 case WM_T_I211:
4139 hw_ntxqueues = 2;
4140 hw_nrxqueues = 2;
4141 break;
4142 /*
4143 	 * As the Ethernet controllers below do not support MSI-X,
4144 	 * this driver does not use multiqueue on them.
4145 * - WM_T_80003
4146 * - WM_T_ICH8
4147 * - WM_T_ICH9
4148 * - WM_T_ICH10
4149 * - WM_T_PCH
4150 * - WM_T_PCH2
4151 * - WM_T_PCH_LPT
4152 */
4153 default:
4154 hw_ntxqueues = 1;
4155 hw_nrxqueues = 1;
4156 break;
4157 }
4158
4159 /*
4160 	 * As using more queues than MSI-X vectors cannot improve scaling, we
4161 	 * limit the number of queues actually used.
4162 	 *
4163 	 * XXX
4164 	 * Currently, we separate TX queue interrupts and RX queue interrupts.
4165 	 * However, the number of MSI-X vectors of recent controllers (such as
4166 	 * the I354) expects that drivers bundle a TX queue interrupt and an
4167 	 * RX queue interrupt into one interrupt, e.g. FreeBSD's igb handles
4168 	 * interrupts in such a way.
4169 */
4170 if (nvectors < hw_ntxqueues + hw_nrxqueues + 1) {
4171 sc->sc_ntxqueues = (nvectors - 1) / 2;
4172 sc->sc_nrxqueues = (nvectors - 1) / 2;
4173 } else {
4174 sc->sc_ntxqueues = hw_ntxqueues;
4175 sc->sc_nrxqueues = hw_nrxqueues;
4176 }
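	/*
	 * Example: with nvectors == 5, the split above yields
	 * (5 - 1) / 2 = 2 TX and 2 RX queues, leaving one vector for the
	 * link interrupt.
	 */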
4177
4178 /*
4179 	 * As using more queues than CPUs cannot improve scaling, we limit
4180 	 * the number of queues actually used.
4181 */
4182 if (ncpu < sc->sc_ntxqueues)
4183 sc->sc_ntxqueues = ncpu;
4184 if (ncpu < sc->sc_nrxqueues)
4185 sc->sc_nrxqueues = ncpu;
4186
4187 /* XXX Currently, this driver supports RX multiqueue only. */
4188 sc->sc_ntxqueues = 1;
4189 }
4190
4191 /*
4192 * Both single interrupt MSI and INTx can use this function.
4193 */
4194 static int
4195 wm_setup_legacy(struct wm_softc *sc)
4196 {
4197 pci_chipset_tag_t pc = sc->sc_pc;
4198 const char *intrstr = NULL;
4199 char intrbuf[PCI_INTRSTR_LEN];
4200 int error;
4201
4202 error = wm_alloc_txrx_queues(sc);
4203 if (error) {
4204 		aprint_error_dev(sc->sc_dev, "cannot allocate queues, error = %d\n",
4205 error);
4206 return ENOMEM;
4207 }
4208 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
4209 sizeof(intrbuf));
4210 #ifdef WM_MPSAFE
4211 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
4212 #endif
4213 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
4214 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
4215 if (sc->sc_ihs[0] == NULL) {
4216 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
4217 (pci_intr_type(sc->sc_intrs[0])
4218 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
4219 return ENOMEM;
4220 }
4221
4222 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4223 sc->sc_nintrs = 1;
4224 return 0;
4225 }
4226
4227 static int
4228 wm_setup_msix(struct wm_softc *sc)
4229 {
4230 void *vih;
4231 kcpuset_t *affinity;
4232 int qidx, error, intr_idx, tx_established, rx_established;
4233 pci_chipset_tag_t pc = sc->sc_pc;
4234 const char *intrstr = NULL;
4235 char intrbuf[PCI_INTRSTR_LEN];
4236 char intr_xname[INTRDEVNAMEBUF];
4237
4238 error = wm_alloc_txrx_queues(sc);
4239 if (error) {
4240 		aprint_error_dev(sc->sc_dev, "cannot allocate queues, error = %d\n",
4241 error);
4242 return ENOMEM;
4243 }
4244
4245 kcpuset_create(&affinity, false);
4246 intr_idx = 0;
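	/*
	 * Vector layout example: with 2 TX queues and 2 RX queues, the
	 * loops below assign vectors 0 and 1 to the TX queues, vectors 2
	 * and 3 to the RX queues, and the final vector (4) to the link
	 * interrupt.
	 */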
4247
4248 /*
4249 * TX
4250 */
4251 tx_established = 0;
4252 for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
4253 struct wm_txqueue *txq = &sc->sc_txq[qidx];
4254
4255 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4256 sizeof(intrbuf));
4257 #ifdef WM_MPSAFE
4258 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4259 PCI_INTR_MPSAFE, true);
4260 #endif
4261 memset(intr_xname, 0, sizeof(intr_xname));
4262 snprintf(intr_xname, sizeof(intr_xname), "%sTX%d",
4263 device_xname(sc->sc_dev), qidx);
4264 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4265 IPL_NET, wm_txintr_msix, txq, intr_xname);
4266 if (vih == NULL) {
4267 aprint_error_dev(sc->sc_dev,
4268 "unable to establish MSI-X(for TX)%s%s\n",
4269 intrstr ? " at " : "",
4270 intrstr ? intrstr : "");
4271
4272 goto fail_0;
4273 }
4274 kcpuset_zero(affinity);
4275 /* Round-robin affinity */
4276 kcpuset_set(affinity, intr_idx % ncpu);
4277 error = interrupt_distribute(vih, affinity, NULL);
4278 if (error == 0) {
4279 aprint_normal_dev(sc->sc_dev,
4280 "for TX interrupting at %s affinity to %u\n",
4281 intrstr, intr_idx % ncpu);
4282 } else {
4283 aprint_normal_dev(sc->sc_dev,
4284 "for TX interrupting at %s\n", intrstr);
4285 }
4286 sc->sc_ihs[intr_idx] = vih;
4287 txq->txq_id = qidx;
4288 txq->txq_intr_idx = intr_idx;
4289
4290 tx_established++;
4291 intr_idx++;
4292 }
4293
4294 /*
4295 * RX
4296 */
4297 rx_established = 0;
4298 for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
4299 struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4300
4301 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4302 sizeof(intrbuf));
4303 #ifdef WM_MPSAFE
4304 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4305 PCI_INTR_MPSAFE, true);
4306 #endif
4307 memset(intr_xname, 0, sizeof(intr_xname));
4308 snprintf(intr_xname, sizeof(intr_xname), "%sRX%d",
4309 device_xname(sc->sc_dev), qidx);
4310 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4311 IPL_NET, wm_rxintr_msix, rxq, intr_xname);
4312 if (vih == NULL) {
4313 aprint_error_dev(sc->sc_dev,
4314 "unable to establish MSI-X(for RX)%s%s\n",
4315 intrstr ? " at " : "",
4316 intrstr ? intrstr : "");
4317
4318 goto fail_1;
4319 }
4320 kcpuset_zero(affinity);
4321 /* Round-robin affinity */
4322 kcpuset_set(affinity, intr_idx % ncpu);
4323 error = interrupt_distribute(vih, affinity, NULL);
4324 if (error == 0) {
4325 aprint_normal_dev(sc->sc_dev,
4326 "for RX interrupting at %s affinity to %u\n",
4327 intrstr, intr_idx % ncpu);
4328 } else {
4329 aprint_normal_dev(sc->sc_dev,
4330 "for RX interrupting at %s\n", intrstr);
4331 }
4332 sc->sc_ihs[intr_idx] = vih;
4333 rxq->rxq_id = qidx;
4334 rxq->rxq_intr_idx = intr_idx;
4335
4336 rx_established++;
4337 intr_idx++;
4338 }
4339
4340 /*
4341 * LINK
4342 */
4343 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4344 sizeof(intrbuf));
4345 #ifdef WM_MPSAFE
4346 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4347 PCI_INTR_MPSAFE, true);
4348 #endif
4349 memset(intr_xname, 0, sizeof(intr_xname));
4350 snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
4351 device_xname(sc->sc_dev));
4352 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4353 IPL_NET, wm_linkintr_msix, sc, intr_xname);
4354 if (vih == NULL) {
4355 aprint_error_dev(sc->sc_dev,
4356 "unable to establish MSI-X(for LINK)%s%s\n",
4357 intrstr ? " at " : "",
4358 intrstr ? intrstr : "");
4359
4360 goto fail_1;
4361 }
4362 /* keep default affinity to LINK interrupt */
4363 aprint_normal_dev(sc->sc_dev,
4364 "for LINK interrupting at %s\n", intrstr);
4365 sc->sc_ihs[intr_idx] = vih;
4366 sc->sc_link_intr_idx = intr_idx;
4367
4368 sc->sc_nintrs = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
4369 kcpuset_destroy(affinity);
4370 return 0;
4371
4372 fail_1:
4373 for (qidx = 0; qidx < rx_established; qidx++) {
4374 struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4375 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[rxq->rxq_intr_idx]);
4376 sc->sc_ihs[rxq->rxq_intr_idx] = NULL;
4377 }
4378 fail_0:
4379 for (qidx = 0; qidx < tx_established; qidx++) {
4380 struct wm_txqueue *txq = &sc->sc_txq[qidx];
4381 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[txq->txq_intr_idx]);
4382 sc->sc_ihs[txq->txq_intr_idx] = NULL;
4383 }
4384
4385 kcpuset_destroy(affinity);
4386 return ENOMEM;
4387 }
4388
4389 /*
4390 * wm_init: [ifnet interface function]
4391 *
4392 * Initialize the interface.
4393 */
4394 static int
4395 wm_init(struct ifnet *ifp)
4396 {
4397 struct wm_softc *sc = ifp->if_softc;
4398 int ret;
4399
4400 WM_CORE_LOCK(sc);
4401 ret = wm_init_locked(ifp);
4402 WM_CORE_UNLOCK(sc);
4403
4404 return ret;
4405 }
4406
4407 static int
4408 wm_init_locked(struct ifnet *ifp)
4409 {
4410 struct wm_softc *sc = ifp->if_softc;
4411 int i, j, trynum, error = 0;
4412 uint32_t reg;
4413
4414 KASSERT(WM_CORE_LOCKED(sc));
4415 /*
4416 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4417 	 * There is a small but measurable benefit to avoiding the adjustment
4418 	 * of the descriptor so that the headers are aligned, for normal MTU,
4419 * on such platforms. One possibility is that the DMA itself is
4420 * slightly more efficient if the front of the entire packet (instead
4421 * of the front of the headers) is aligned.
4422 *
4423 * Note we must always set align_tweak to 0 if we are using
4424 * jumbo frames.
4425 */
4426 #ifdef __NO_STRICT_ALIGNMENT
4427 sc->sc_align_tweak = 0;
4428 #else
4429 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4430 sc->sc_align_tweak = 0;
4431 else
4432 sc->sc_align_tweak = 2;
4433 #endif /* __NO_STRICT_ALIGNMENT */
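	/*
	 * Why the tweak is 2 bytes: the Ethernet header is 14 bytes, so
	 * offsetting the receive buffer by 2 makes the IP header that
	 * follows it land on a 4-byte boundary (2 + 14 = 16) on
	 * strict-alignment platforms.
	 */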
4434
4435 /* Cancel any pending I/O. */
4436 wm_stop_locked(ifp, 0);
4437
4438 /* update statistics before reset */
4439 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4440 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4441
4442 /* Reset the chip to a known state. */
4443 wm_reset(sc);
4444
4445 switch (sc->sc_type) {
4446 case WM_T_82571:
4447 case WM_T_82572:
4448 case WM_T_82573:
4449 case WM_T_82574:
4450 case WM_T_82583:
4451 case WM_T_80003:
4452 case WM_T_ICH8:
4453 case WM_T_ICH9:
4454 case WM_T_ICH10:
4455 case WM_T_PCH:
4456 case WM_T_PCH2:
4457 case WM_T_PCH_LPT:
4458 if (wm_check_mng_mode(sc) != 0)
4459 wm_get_hw_control(sc);
4460 break;
4461 default:
4462 break;
4463 }
4464
4465 /* Init hardware bits */
4466 wm_initialize_hardware_bits(sc);
4467
4468 /* Reset the PHY. */
4469 if (sc->sc_flags & WM_F_HAS_MII)
4470 wm_gmii_reset(sc);
4471
4472 /* Calculate (E)ITR value */
4473 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4474 sc->sc_itr = 450; /* For EITR */
4475 } else if (sc->sc_type >= WM_T_82543) {
4476 /*
4477 * Set up the interrupt throttling register (units of 256ns)
4478 * Note that a footnote in Intel's documentation says this
4479 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4480 * or 10Mbit mode. Empirically, it appears to be the case
4481 	 * that this is also true for the 1024ns units of the other
4482 * interrupt-related timer registers -- so, really, we ought
4483 * to divide this value by 4 when the link speed is low.
4484 *
4485 * XXX implement this division at link speed change!
4486 */
4487
4488 /*
4489 * For N interrupts/sec, set this value to:
4490 * 1000000000 / (N * 256). Note that we set the
4491 * absolute and packet timer values to this value
4492 * divided by 4 to get "simple timer" behavior.
4493 */
4494
4495 sc->sc_itr = 1500; /* 2604 ints/sec */
4496 }
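	/*
	 * Worked example of the formula above: sc_itr = 1500 gives
	 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec, and the
	 * "simple timer" registers (TIDV/TADV, in 1024ns units) are
	 * later written with sc_itr / 4 = 375.
	 */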
4497
4498 error = wm_init_txrx_queues(sc);
4499 if (error)
4500 goto out;
4501
4502 /*
4503 * Clear out the VLAN table -- we don't use it (yet).
4504 */
4505 CSR_WRITE(sc, WMREG_VET, 0);
4506 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4507 trynum = 10; /* Due to hw errata */
4508 else
4509 trynum = 1;
4510 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4511 for (j = 0; j < trynum; j++)
4512 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4513
4514 /*
4515 * Set up flow-control parameters.
4516 *
4517 * XXX Values could probably stand some tuning.
4518 */
4519 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4520 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4521 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4522 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4523 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4524 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4525 }
4526
4527 sc->sc_fcrtl = FCRTL_DFLT;
4528 if (sc->sc_type < WM_T_82543) {
4529 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4530 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4531 } else {
4532 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4533 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4534 }
4535
4536 if (sc->sc_type == WM_T_80003)
4537 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4538 else
4539 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4540
4541 /* Writes the control register. */
4542 wm_set_vlan(sc);
4543
4544 if (sc->sc_flags & WM_F_HAS_MII) {
4545 int val;
4546
4547 switch (sc->sc_type) {
4548 case WM_T_80003:
4549 case WM_T_ICH8:
4550 case WM_T_ICH9:
4551 case WM_T_ICH10:
4552 case WM_T_PCH:
4553 case WM_T_PCH2:
4554 case WM_T_PCH_LPT:
4555 /*
4556 * Set the mac to wait the maximum time between each
4557 * iteration and increase the max iterations when
4558 * polling the phy; this fixes erroneous timeouts at
4559 * 10Mbps.
4560 */
4561 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4562 0xFFFF);
4563 val = wm_kmrn_readreg(sc,
4564 KUMCTRLSTA_OFFSET_INB_PARAM);
4565 val |= 0x3F;
4566 wm_kmrn_writereg(sc,
4567 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4568 break;
4569 default:
4570 break;
4571 }
4572
4573 if (sc->sc_type == WM_T_80003) {
4574 val = CSR_READ(sc, WMREG_CTRL_EXT);
4575 val &= ~CTRL_EXT_LINK_MODE_MASK;
4576 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4577
4578 /* Bypass RX and TX FIFO's */
4579 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4580 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4581 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4582 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4583 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4584 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4585 }
4586 }
4587 #if 0
4588 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4589 #endif
4590
4591 /* Set up checksum offload parameters. */
4592 reg = CSR_READ(sc, WMREG_RXCSUM);
4593 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4594 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4595 reg |= RXCSUM_IPOFL;
4596 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4597 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4598 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4599 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4600 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4601
4602 /* Set up MSI-X */
4603 if (sc->sc_nintrs > 1) {
4604 uint32_t ivar;
4605
4606 if (sc->sc_type == WM_T_82575) {
4607 /* Interrupt control */
4608 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4609 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4610 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4611
4612 /* TX */
4613 for (i = 0; i < sc->sc_ntxqueues; i++) {
4614 struct wm_txqueue *txq = &sc->sc_txq[i];
4615 CSR_WRITE(sc, WMREG_MSIXBM(txq->txq_intr_idx),
4616 EITR_TX_QUEUE(txq->txq_id));
4617 }
4618 /* RX */
4619 for (i = 0; i < sc->sc_nrxqueues; i++) {
4620 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4621 CSR_WRITE(sc, WMREG_MSIXBM(rxq->rxq_intr_idx),
4622 EITR_RX_QUEUE(rxq->rxq_id));
4623 }
4624 /* Link status */
4625 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
4626 EITR_OTHER);
4627 } else if (sc->sc_type == WM_T_82574) {
4628 /* Interrupt control */
4629 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4630 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4631 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4632
4633 ivar = 0;
4634 /* TX */
4635 for (i = 0; i < sc->sc_ntxqueues; i++) {
4636 struct wm_txqueue *txq = &sc->sc_txq[i];
4637 ivar |= __SHIFTIN((IVAR_VALID_82574|txq->txq_intr_idx),
4638 IVAR_TX_MASK_Q_82574(txq->txq_id));
4639 }
4640 /* RX */
4641 for (i = 0; i < sc->sc_nrxqueues; i++) {
4642 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4643 ivar |= __SHIFTIN((IVAR_VALID_82574|rxq->rxq_intr_idx),
4644 IVAR_RX_MASK_Q_82574(rxq->rxq_id));
4645 }
4646 /* Link status */
4647 ivar |= __SHIFTIN((IVAR_VALID_82574|sc->sc_link_intr_idx),
4648 IVAR_OTHER_MASK);
4649 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4650 } else {
4651 /* Interrupt control */
4652 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR
4653 | GPIE_MULTI_MSIX | GPIE_EIAME
4654 | GPIE_PBA);
4655
4656 switch (sc->sc_type) {
4657 case WM_T_82580:
4658 case WM_T_I350:
4659 case WM_T_I354:
4660 case WM_T_I210:
4661 case WM_T_I211:
4662 /* TX */
4663 for (i = 0; i < sc->sc_ntxqueues; i++) {
4664 struct wm_txqueue *txq = &sc->sc_txq[i];
4665 int qid = txq->txq_id;
4666 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4667 ivar &= ~IVAR_TX_MASK_Q(qid);
4668 ivar |= __SHIFTIN(
4669 (txq->txq_intr_idx | IVAR_VALID),
4670 IVAR_TX_MASK_Q(qid));
4671 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
4672 }
4673
4674 /* RX */
4675 for (i = 0; i < sc->sc_nrxqueues; i++) {
4676 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4677 int qid = rxq->rxq_id;
4678 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4679 ivar &= ~IVAR_RX_MASK_Q(qid);
4680 ivar |= __SHIFTIN(
4681 (rxq->rxq_intr_idx | IVAR_VALID),
4682 IVAR_RX_MASK_Q(qid));
4683 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
4684 }
4685 break;
4686 case WM_T_82576:
4687 /* TX */
4688 for (i = 0; i < sc->sc_ntxqueues; i++) {
4689 struct wm_txqueue *txq = &sc->sc_txq[i];
4690 int qid = txq->txq_id;
4691 ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(qid));
4692 ivar &= ~IVAR_TX_MASK_Q_82576(qid);
4693 ivar |= __SHIFTIN(
4694 (txq->txq_intr_idx | IVAR_VALID),
4695 IVAR_TX_MASK_Q_82576(qid));
4696 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), ivar);
4697 }
4698
4699 /* RX */
4700 for (i = 0; i < sc->sc_nrxqueues; i++) {
4701 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4702 int qid = rxq->rxq_id;
4703 ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(qid));
4704 ivar &= ~IVAR_RX_MASK_Q_82576(qid);
4705 ivar |= __SHIFTIN(
4706 (rxq->rxq_intr_idx | IVAR_VALID),
4707 IVAR_RX_MASK_Q_82576(qid));
4708 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), ivar);
4709 }
4710 break;
4711 default:
4712 break;
4713 }
4714
4715 /* Link status */
4716 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
4717 IVAR_MISC_OTHER);
4718 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
4719 }
4720
4721 if (sc->sc_nrxqueues > 1) {
4722 wm_init_rss(sc);
4723
4724 /*
4725 			 * NOTE: Receive Full-Packet Checksum Offload
4726 			 * is mutually exclusive with Multiqueue. However,
4727 			 * this is not the same as TCP/IP checksums, which
4728 			 * still work.
4729 */
4730 reg = CSR_READ(sc, WMREG_RXCSUM);
4731 reg |= RXCSUM_PCSD;
4732 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4733 }
4734 }
4735
4736 /* Set up the interrupt registers. */
4737 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4738 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4739 ICR_RXO | ICR_RXT0;
4740 if (sc->sc_nintrs > 1) {
4741 uint32_t mask;
4742 switch (sc->sc_type) {
4743 case WM_T_82574:
4744 CSR_WRITE(sc, WMREG_EIAC_82574,
4745 WMREG_EIAC_82574_MSIX_MASK);
4746 sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
4747 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4748 break;
4749 default:
4750 if (sc->sc_type == WM_T_82575) {
4751 mask = 0;
4752 for (i = 0; i < sc->sc_ntxqueues; i++) {
4753 struct wm_txqueue *txq = &sc->sc_txq[i];
4754 mask |= EITR_TX_QUEUE(txq->txq_id);
4755 }
4756 for (i = 0; i < sc->sc_nrxqueues; i++) {
4757 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4758 mask |= EITR_RX_QUEUE(rxq->rxq_id);
4759 }
4760 mask |= EITR_OTHER;
4761 } else {
4762 mask = 0;
4763 for (i = 0; i < sc->sc_ntxqueues; i++) {
4764 struct wm_txqueue *txq = &sc->sc_txq[i];
4765 mask |= 1 << txq->txq_intr_idx;
4766 }
4767 for (i = 0; i < sc->sc_nrxqueues; i++) {
4768 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4769 mask |= 1 << rxq->rxq_intr_idx;
4770 }
4771 mask |= 1 << sc->sc_link_intr_idx;
4772 }
4773 CSR_WRITE(sc, WMREG_EIAC, mask);
4774 CSR_WRITE(sc, WMREG_EIAM, mask);
4775 CSR_WRITE(sc, WMREG_EIMS, mask);
4776 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
4777 break;
4778 }
4779 } else
4780 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4781
4782 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4783 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4784 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4785 reg = CSR_READ(sc, WMREG_KABGTXD);
4786 reg |= KABGTXD_BGSQLBIAS;
4787 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4788 }
4789
4790 /* Set up the inter-packet gap. */
4791 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4792
4793 if (sc->sc_type >= WM_T_82543) {
4794 /*
4795 * XXX 82574 has both ITR and EITR. SET EITR when we use
4796 * the multi queue function with MSI-X.
4797 */
4798 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4799 int qidx;
4800 for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
4801 struct wm_txqueue *txq = &sc->sc_txq[qidx];
4802 CSR_WRITE(sc, WMREG_EITR(txq->txq_intr_idx),
4803 sc->sc_itr);
4804 }
4805 for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
4806 struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4807 CSR_WRITE(sc, WMREG_EITR(rxq->rxq_intr_idx),
4808 sc->sc_itr);
4809 }
4810 /*
4811 * Link interrupts occur much less than TX
4812 * interrupts and RX interrupts. So, we don't
4813 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
4814 * FreeBSD's if_igb.
4815 */
4816 } else
4817 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4818 }
4819
4820 /* Set the VLAN ethernetype. */
4821 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4822
4823 /*
4824 * Set up the transmit control register; we start out with
4825 	 * a collision distance suitable for FDX, but update it when
4826 * we resolve the media type.
4827 */
4828 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4829 | TCTL_CT(TX_COLLISION_THRESHOLD)
4830 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4831 if (sc->sc_type >= WM_T_82571)
4832 sc->sc_tctl |= TCTL_MULR;
4833 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4834
4835 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4836 /* Write TDT after TCTL.EN is set. See the document. */
4837 CSR_WRITE(sc, WMREG_TDT(0), 0);
4838 }
4839
4840 if (sc->sc_type == WM_T_80003) {
4841 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4842 reg &= ~TCTL_EXT_GCEX_MASK;
4843 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4844 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4845 }
4846
4847 /* Set the media. */
4848 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4849 goto out;
4850
4851 /* Configure for OS presence */
4852 wm_init_manageability(sc);
4853
4854 /*
4855 * Set up the receive control register; we actually program
4856 * the register when we set the receive filter. Use multicast
4857 * address offset type 0.
4858 *
4859 * Only the i82544 has the ability to strip the incoming
4860 * CRC, so we don't enable that feature.
4861 */
4862 sc->sc_mchash_type = 0;
4863 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4864 | RCTL_MO(sc->sc_mchash_type);
4865
4866 /*
4867 * The I350 has a bug where it always strips the CRC whether
4868 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
4869 */
4870 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4871 || (sc->sc_type == WM_T_I210))
4872 sc->sc_rctl |= RCTL_SECRC;
4873
4874 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4875 && (ifp->if_mtu > ETHERMTU)) {
4876 sc->sc_rctl |= RCTL_LPE;
4877 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4878 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4879 }
4880
4881 if (MCLBYTES == 2048) {
4882 sc->sc_rctl |= RCTL_2k;
4883 } else {
4884 if (sc->sc_type >= WM_T_82543) {
4885 switch (MCLBYTES) {
4886 case 4096:
4887 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4888 break;
4889 case 8192:
4890 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4891 break;
4892 case 16384:
4893 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4894 break;
4895 default:
4896 panic("wm_init: MCLBYTES %d unsupported",
4897 MCLBYTES);
4898 break;
4899 }
4900 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4901 }
4902
4903 /* Set the receive filter. */
4904 wm_set_filter(sc);
4905
4906 /* Enable ECC */
4907 switch (sc->sc_type) {
4908 case WM_T_82571:
4909 reg = CSR_READ(sc, WMREG_PBA_ECC);
4910 reg |= PBA_ECC_CORR_EN;
4911 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4912 break;
4913 case WM_T_PCH_LPT:
4914 reg = CSR_READ(sc, WMREG_PBECCSTS);
4915 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4916 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4917
4918 reg = CSR_READ(sc, WMREG_CTRL);
4919 reg |= CTRL_MEHE;
4920 CSR_WRITE(sc, WMREG_CTRL, reg);
4921 break;
4922 default:
4923 break;
4924 }
4925
4926 /* On 575 and later set RDT only if RX enabled */
4927 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4928 int qidx;
4929 for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
4930 struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4931 for (i = 0; i < WM_NRXDESC; i++) {
4932 WM_RX_LOCK(rxq);
4933 wm_init_rxdesc(rxq, i);
4934 WM_RX_UNLOCK(rxq);
4936 }
4937 }
4938 }
4939
4940 sc->sc_stopping = false;
4941
4942 /* Start the one second link check clock. */
4943 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4944
4945 /* ...all done! */
4946 ifp->if_flags |= IFF_RUNNING;
4947 ifp->if_flags &= ~IFF_OACTIVE;
4948
4949 out:
4950 sc->sc_if_flags = ifp->if_flags;
4951 if (error)
4952 log(LOG_ERR, "%s: interface not running\n",
4953 device_xname(sc->sc_dev));
4954 return error;
4955 }
4956
4957 /*
4958 * wm_stop: [ifnet interface function]
4959 *
4960 * Stop transmission on the interface.
4961 */
4962 static void
4963 wm_stop(struct ifnet *ifp, int disable)
4964 {
4965 struct wm_softc *sc = ifp->if_softc;
4966
4967 WM_CORE_LOCK(sc);
4968 wm_stop_locked(ifp, disable);
4969 WM_CORE_UNLOCK(sc);
4970 }
4971
4972 static void
4973 wm_stop_locked(struct ifnet *ifp, int disable)
4974 {
4975 struct wm_softc *sc = ifp->if_softc;
4976 struct wm_txsoft *txs;
4977 int i, qidx;
4978
4979 KASSERT(WM_CORE_LOCKED(sc));
4980
4981 sc->sc_stopping = true;
4982
4983 /* Stop the one second clock. */
4984 callout_stop(&sc->sc_tick_ch);
4985
4986 /* Stop the 82547 Tx FIFO stall check timer. */
4987 if (sc->sc_type == WM_T_82547)
4988 callout_stop(&sc->sc_txfifo_ch);
4989
4990 if (sc->sc_flags & WM_F_HAS_MII) {
4991 /* Down the MII. */
4992 mii_down(&sc->sc_mii);
4993 } else {
4994 #if 0
4995 /* Should we clear PHY's status properly? */
4996 wm_reset(sc);
4997 #endif
4998 }
4999
5000 /* Stop the transmit and receive processes. */
5001 CSR_WRITE(sc, WMREG_TCTL, 0);
5002 CSR_WRITE(sc, WMREG_RCTL, 0);
5003 sc->sc_rctl &= ~RCTL_EN;
5004
5005 /*
5006 * Clear the interrupt mask to ensure the device cannot assert its
5007 * interrupt line.
5008 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
5009 * service any currently pending or shared interrupt.
5010 */
5011 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5012 sc->sc_icr = 0;
5013 if (sc->sc_nintrs > 1) {
5014 if (sc->sc_type != WM_T_82574) {
5015 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5016 CSR_WRITE(sc, WMREG_EIAC, 0);
5017 } else
5018 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5019 }
5020
5021 /* Release any queued transmit buffers. */
5022 for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
5023 struct wm_txqueue *txq = &sc->sc_txq[qidx];
5024 WM_TX_LOCK(txq);
5025 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5026 txs = &txq->txq_soft[i];
5027 if (txs->txs_mbuf != NULL) {
5028 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5029 m_freem(txs->txs_mbuf);
5030 txs->txs_mbuf = NULL;
5031 }
5032 }
5033 WM_TX_UNLOCK(txq);
5034 }
5035
5036 /* Mark the interface as down and cancel the watchdog timer. */
5037 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5038 ifp->if_timer = 0;
5039
5040 if (disable) {
5041 for (i = 0; i < sc->sc_nrxqueues; i++) {
5042 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5043 WM_RX_LOCK(rxq);
5044 wm_rxdrain(rxq);
5045 WM_RX_UNLOCK(rxq);
5046 }
5047 }
5048
5049 #if 0 /* notyet */
5050 if (sc->sc_type >= WM_T_82544)
5051 CSR_WRITE(sc, WMREG_WUC, 0);
5052 #endif
5053 }
5054
5055 static void
5056 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5057 {
5058 struct mbuf *m;
5059 int i;
5060
5061 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5062 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5063 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5064 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5065 m->m_data, m->m_len, m->m_flags);
5066 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5067 i, i == 1 ? "" : "s");
5068 }
5069
5070 /*
5071 * wm_82547_txfifo_stall:
5072 *
5073 * Callout used to wait for the 82547 Tx FIFO to drain,
5074 * reset the FIFO pointers, and restart packet transmission.
5075 */
5076 static void
5077 wm_82547_txfifo_stall(void *arg)
5078 {
5079 struct wm_softc *sc = arg;
5080 struct wm_txqueue *txq = sc->sc_txq;
5081 #ifndef WM_MPSAFE
5082 int s;
5083
5084 s = splnet();
5085 #endif
5086 WM_TX_LOCK(txq);
5087
5088 if (sc->sc_stopping)
5089 goto out;
5090
5091 if (txq->txq_fifo_stall) {
5092 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
5093 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5094 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5095 /*
5096 * Packets have drained. Stop transmitter, reset
5097 * FIFO pointers, restart transmitter, and kick
5098 * the packet queue.
5099 */
5100 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5101 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5102 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
5103 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
5104 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
5105 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
5106 CSR_WRITE(sc, WMREG_TCTL, tctl);
5107 CSR_WRITE_FLUSH(sc);
5108
5109 txq->txq_fifo_head = 0;
5110 txq->txq_fifo_stall = 0;
5111 wm_start_locked(&sc->sc_ethercom.ec_if);
5112 } else {
5113 /*
5114 * Still waiting for packets to drain; try again in
5115 * another tick.
5116 */
5117 callout_schedule(&sc->sc_txfifo_ch, 1);
5118 }
5119 }
5120
5121 out:
5122 WM_TX_UNLOCK(txq);
5123 #ifndef WM_MPSAFE
5124 splx(s);
5125 #endif
5126 }
5127
5128 /*
5129 * wm_82547_txfifo_bugchk:
5130 *
5131 * Check for bug condition in the 82547 Tx FIFO. We need to
5132 * prevent enqueueing a packet that would wrap around the end
5133  * of the Tx FIFO ring buffer, otherwise the chip will croak.
5134 *
5135 * We do this by checking the amount of space before the end
5136 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
5137 * the Tx FIFO, wait for all remaining packets to drain, reset
5138 * the internal FIFO pointers to the beginning, and restart
5139 * transmission on the interface.
5140 */
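/*
 * Worked example of the check below: a maximum-size 1514 byte frame
 * occupies roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of
 * FIFO space, and transmission is stalled when that length exceeds
 * the space left before the end of the FIFO by WM_82547_PAD_LEN
 * (0x3e0) bytes or more.
 */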
5141 #define WM_FIFO_HDR 0x10
5142 #define WM_82547_PAD_LEN 0x3e0
5143 static int
5144 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5145 {
5146 struct wm_txqueue *txq = &sc->sc_txq[0];
5147 int space = txq->txq_fifo_size - txq->txq_fifo_head;
5148 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
5149
5150 /* Just return if already stalled. */
5151 if (txq->txq_fifo_stall)
5152 return 1;
5153
5154 if (sc->sc_mii.mii_media_active & IFM_FDX) {
5155 /* Stall only occurs in half-duplex mode. */
5156 goto send_packet;
5157 }
5158
5159 if (len >= WM_82547_PAD_LEN + space) {
5160 txq->txq_fifo_stall = 1;
5161 callout_schedule(&sc->sc_txfifo_ch, 1);
5162 return 1;
5163 }
5164
5165 send_packet:
5166 txq->txq_fifo_head += len;
5167 if (txq->txq_fifo_head >= txq->txq_fifo_size)
5168 txq->txq_fifo_head -= txq->txq_fifo_size;
5169
5170 return 0;
5171 }
5172
5173 static int
5174 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5175 {
5176 int error;
5177
5178 /*
5179 * Allocate the control data structures, and create and load the
5180 * DMA map for it.
5181 *
5182 * NOTE: All Tx descriptors must be in the same 4G segment of
5183 * memory. So must Rx descriptors. We simplify by allocating
5184 * both sets within the same 4G segment.
5185 */
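	/*
	 * The 0x100000000ULL boundary argument to bus_dmamem_alloc()
	 * below is what enforces this: the allocation may not cross a
	 * 4G boundary, so every descriptor shares the same upper 32
	 * address bits.
	 */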
5186 if (sc->sc_type < WM_T_82544) {
5187 WM_NTXDESC(txq) = WM_NTXDESC_82542;
5188 txq->txq_desc_size = sizeof(wiseman_txdesc_t) * WM_NTXDESC(txq);
5189 } else {
5190 WM_NTXDESC(txq) = WM_NTXDESC_82544;
5191 txq->txq_desc_size = sizeof(txdescs_t);
5192 }
5193
5194 if ((error = bus_dmamem_alloc(sc->sc_dmat, txq->txq_desc_size, PAGE_SIZE,
5195 (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg, 1,
5196 &txq->txq_desc_rseg, 0)) != 0) {
5197 aprint_error_dev(sc->sc_dev,
5198 "unable to allocate TX control data, error = %d\n",
5199 error);
5200 goto fail_0;
5201 }
5202
5203 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
5204 txq->txq_desc_rseg, txq->txq_desc_size,
5205 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
5206 aprint_error_dev(sc->sc_dev,
5207 "unable to map TX control data, error = %d\n", error);
5208 goto fail_1;
5209 }
5210
5211 if ((error = bus_dmamap_create(sc->sc_dmat, txq->txq_desc_size, 1,
5212 txq->txq_desc_size, 0, 0, &txq->txq_desc_dmamap)) != 0) {
5213 aprint_error_dev(sc->sc_dev,
5214 "unable to create TX control data DMA map, error = %d\n",
5215 error);
5216 goto fail_2;
5217 }
5218
5219 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
5220 txq->txq_descs_u, txq->txq_desc_size, NULL, 0)) != 0) {
5221 aprint_error_dev(sc->sc_dev,
5222 "unable to load TX control data DMA map, error = %d\n",
5223 error);
5224 goto fail_3;
5225 }
5226
5227 return 0;
5228
5229 fail_3:
5230 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5231 fail_2:
5232 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5233 txq->txq_desc_size);
5234 fail_1:
5235 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5236 fail_0:
5237 return error;
5238 }
5239
5240 static void
5241 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5242 {
5243
5244 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
5245 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5246 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5247 txq->txq_desc_size);
5248 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5249 }
5250
5251 static int
5252 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5253 {
5254 int error;
5255
5256 /*
5257 * Allocate the control data structures, and create and load the
5258 * DMA map for it.
5259 *
5260 * NOTE: All Tx descriptors must be in the same 4G segment of
5261 * memory. So must Rx descriptors. We simplify by allocating
5262 * both sets within the same 4G segment.
5263 */
5264 rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
5265 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size, PAGE_SIZE,
5266 (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg, 1,
5267 &rxq->rxq_desc_rseg, 0)) != 0) {
5268 aprint_error_dev(sc->sc_dev,
5269 "unable to allocate RX control data, error = %d\n",
5270 error);
5271 goto fail_0;
5272 }
5273
5274 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
5275 rxq->rxq_desc_rseg, rxq->rxq_desc_size,
5276 (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
5277 aprint_error_dev(sc->sc_dev,
5278 "unable to map RX control data, error = %d\n", error);
5279 goto fail_1;
5280 }
5281
5282 if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
5283 rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
5284 aprint_error_dev(sc->sc_dev,
5285 "unable to create RX control data DMA map, error = %d\n",
5286 error);
5287 goto fail_2;
5288 }
5289
5290 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
5291 rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
5292 aprint_error_dev(sc->sc_dev,
5293 "unable to load RX control data DMA map, error = %d\n",
5294 error);
5295 goto fail_3;
5296 }
5297
5298 return 0;
5299
5300 fail_3:
5301 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5302 fail_2:
5303 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5304 rxq->rxq_desc_size);
5305 fail_1:
5306 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5307 fail_0:
5308 return error;
5309 }
5310
5311 static void
5312 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5313 {
5314
5315 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
5316 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5317 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5318 rxq->rxq_desc_size);
5319 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5320 }
5321
5322
5323 static int
5324 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5325 {
5326 int i, error;
5327
5328 /* Create the transmit buffer DMA maps. */
5329 WM_TXQUEUELEN(txq) =
5330 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
5331 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
5332 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5333 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
5334 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
5335 &txq->txq_soft[i].txs_dmamap)) != 0) {
5336 aprint_error_dev(sc->sc_dev,
5337 "unable to create Tx DMA map %d, error = %d\n",
5338 i, error);
5339 goto fail;
5340 }
5341 }
5342
5343 return 0;
5344
5345 fail:
5346 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5347 if (txq->txq_soft[i].txs_dmamap != NULL)
5348 bus_dmamap_destroy(sc->sc_dmat,
5349 txq->txq_soft[i].txs_dmamap);
5350 }
5351 return error;
5352 }
5353
5354 static void
5355 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5356 {
5357 int i;
5358
5359 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5360 if (txq->txq_soft[i].txs_dmamap != NULL)
5361 bus_dmamap_destroy(sc->sc_dmat,
5362 txq->txq_soft[i].txs_dmamap);
5363 }
5364 }
5365
5366 static int
5367 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5368 {
5369 int i, error;
5370
5371 /* Create the receive buffer DMA maps. */
5372 for (i = 0; i < WM_NRXDESC; i++) {
5373 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
5374 MCLBYTES, 0, 0,
5375 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
5376 aprint_error_dev(sc->sc_dev,
5377 			    "unable to create Rx DMA map %d, error = %d\n",
5378 i, error);
5379 goto fail;
5380 }
5381 rxq->rxq_soft[i].rxs_mbuf = NULL;
5382 }
5383
5384 return 0;
5385
5386 fail:
5387 for (i = 0; i < WM_NRXDESC; i++) {
5388 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5389 bus_dmamap_destroy(sc->sc_dmat,
5390 rxq->rxq_soft[i].rxs_dmamap);
5391 }
5392 return error;
5393 }
5394
5395 static void
5396 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5397 {
5398 int i;
5399
5400 for (i = 0; i < WM_NRXDESC; i++) {
5401 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5402 bus_dmamap_destroy(sc->sc_dmat,
5403 rxq->rxq_soft[i].rxs_dmamap);
5404 }
5405 }
5406
5407 /*
5408  * wm_alloc_txrx_queues:
5409  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
5410 */
5411 static int
5412 wm_alloc_txrx_queues(struct wm_softc *sc)
5413 {
5414 int i, error, tx_done, rx_done;
5415
5416 /*
5417 * For transmission
5418 */
5419 sc->sc_txq = kmem_zalloc(sizeof(struct wm_txqueue) * sc->sc_ntxqueues,
5420 KM_SLEEP);
5421 if (sc->sc_txq == NULL) {
5422 aprint_error_dev(sc->sc_dev, "unable to allocate wm_txqueue\n");
5423 error = ENOMEM;
5424 goto fail_0;
5425 }
5426
5427 error = 0;
5428 tx_done = 0;
5429 for (i = 0; i < sc->sc_ntxqueues; i++) {
5430 struct wm_txqueue *txq = &sc->sc_txq[i];
5431 txq->txq_sc = sc;
5432 #ifdef WM_MPSAFE
5433 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5434 #else
5435 txq->txq_lock = NULL;
5436 #endif
5437 error = wm_alloc_tx_descs(sc, txq);
5438 if (error)
5439 break;
5440 error = wm_alloc_tx_buffer(sc, txq);
5441 if (error) {
5442 wm_free_tx_descs(sc, txq);
5443 break;
5444 }
5445 tx_done++;
5446 }
5447 if (error)
5448 goto fail_1;
5449
5450 /*
5451 	 * For receive
5452 */
5453 sc->sc_rxq = kmem_zalloc(sizeof(struct wm_rxqueue) * sc->sc_nrxqueues,
5454 KM_SLEEP);
5455 if (sc->sc_rxq == NULL) {
5456 aprint_error_dev(sc->sc_dev, "unable to allocate wm_rxqueue\n");
5457 error = ENOMEM;
5458 goto fail_1;
5459 }
5460
5461 error = 0;
5462 rx_done = 0;
5463 for (i = 0; i < sc->sc_nrxqueues; i++) {
5464 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5465 rxq->rxq_sc = sc;
5466 #ifdef WM_MPSAFE
5467 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5468 #else
5469 rxq->rxq_lock = NULL;
5470 #endif
5471 error = wm_alloc_rx_descs(sc, rxq);
5472 if (error)
5473 break;
5474
5475 error = wm_alloc_rx_buffer(sc, rxq);
5476 if (error) {
5477 wm_free_rx_descs(sc, rxq);
5478 break;
5479 }
5480
5481 rx_done++;
5482 }
5483 if (error)
5484 goto fail_2;
5485
5486 return 0;
5487
5488 fail_2:
5489 for (i = 0; i < rx_done; i++) {
5490 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5491 wm_free_rx_buffer(sc, rxq);
5492 wm_free_rx_descs(sc, rxq);
5493 if (rxq->rxq_lock)
5494 mutex_obj_free(rxq->rxq_lock);
5495 }
5496 kmem_free(sc->sc_rxq,
5497 sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
5498 fail_1:
5499 for (i = 0; i < tx_done; i++) {
5500 struct wm_txqueue *txq = &sc->sc_txq[i];
5501 wm_free_tx_buffer(sc, txq);
5502 wm_free_tx_descs(sc, txq);
5503 if (txq->txq_lock)
5504 mutex_obj_free(txq->txq_lock);
5505 }
5506 kmem_free(sc->sc_txq,
5507 sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
5508 fail_0:
5509 return error;
5510 }
5511
5512 /*
5513  * wm_free_txrx_queues:
5514  *	Free {tx,rx} descs and {tx,rx} buffers
5515 */
5516 static void
5517 wm_free_txrx_queues(struct wm_softc *sc)
5518 {
5519 int i;
5520
5521 for (i = 0; i < sc->sc_nrxqueues; i++) {
5522 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5523 wm_free_rx_buffer(sc, rxq);
5524 wm_free_rx_descs(sc, rxq);
5525 if (rxq->rxq_lock)
5526 mutex_obj_free(rxq->rxq_lock);
5527 }
5528 kmem_free(sc->sc_rxq, sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
5529
5530 for (i = 0; i < sc->sc_ntxqueues; i++) {
5531 struct wm_txqueue *txq = &sc->sc_txq[i];
5532 wm_free_tx_buffer(sc, txq);
5533 wm_free_tx_descs(sc, txq);
5534 if (txq->txq_lock)
5535 mutex_obj_free(txq->txq_lock);
5536 }
5537 kmem_free(sc->sc_txq, sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
5538 }
5539
5540 static void
5541 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5542 {
5543
5544 KASSERT(WM_TX_LOCKED(txq));
5545
5546 /* Initialize the transmit descriptor ring. */
5547 memset(txq->txq_descs, 0, WM_TXDESCSIZE(txq));
5548 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5549 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5550 txq->txq_free = WM_NTXDESC(txq);
5551 txq->txq_next = 0;
5552 }
5553
5554 static void
5555 wm_init_tx_regs(struct wm_softc *sc, struct wm_txqueue *txq)
5556 {
5557
5558 KASSERT(WM_TX_LOCKED(txq));
5559
5560 if (sc->sc_type < WM_T_82543) {
5561 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
5562 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
5563 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(txq));
5564 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
5565 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
5566 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
5567 } else {
5568 int qid = txq->txq_id;
5569
5570 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
5571 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
5572 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCSIZE(txq));
5573 CSR_WRITE(sc, WMREG_TDH(qid), 0);
5574
5575 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5576 /*
5577 * Don't write TDT before TCTL.EN is set.
5578			 * See the documentation.
5579 */
5580 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
5581 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
5582 | TXDCTL_WTHRESH(0));
5583 else {
5584 /* ITR / 4 */
5585 CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
5586 if (sc->sc_type >= WM_T_82540) {
5587 /* should be same */
5588 CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
5589 }
5590
5591 CSR_WRITE(sc, WMREG_TDT(qid), 0);
5592 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
5593 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
5594 }
5595 }
5596 }
5597
5598 static void
5599 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5600 {
5601 int i;
5602
5603 KASSERT(WM_TX_LOCKED(txq));
5604
5605 /* Initialize the transmit job descriptors. */
5606 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
5607 txq->txq_soft[i].txs_mbuf = NULL;
5608 txq->txq_sfree = WM_TXQUEUELEN(txq);
5609 txq->txq_snext = 0;
5610 txq->txq_sdirty = 0;
5611 }
5612
5613 static void
5614 wm_init_tx_queue(struct wm_softc *sc, struct wm_txqueue *txq)
5615 {
5616
5617 KASSERT(WM_TX_LOCKED(txq));
5618
5619 /*
5620 * Set up some register offsets that are different between
5621 * the i82542 and the i82543 and later chips.
5622 */
5623 if (sc->sc_type < WM_T_82543) {
5624 txq->txq_tdt_reg = WMREG_OLD_TDT;
5625 } else {
5626 txq->txq_tdt_reg = WMREG_TDT(0);
5627 }
5628
5629 wm_init_tx_descs(sc, txq);
5630 wm_init_tx_regs(sc, txq);
5631 wm_init_tx_buffer(sc, txq);
5632 }
5633
5634 static void
5635 wm_init_rx_regs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5636 {
5637
5638 KASSERT(WM_RX_LOCKED(rxq));
5639
5640 /*
5641 * Initialize the receive descriptor and receive job
5642 * descriptor rings.
5643 */
5644 if (sc->sc_type < WM_T_82543) {
5645 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
5646 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
5647 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
5648 sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
5649 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
5650 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
5651 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
5652
5653 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
5654 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
5655 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
5656 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
5657 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
5658 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
5659 } else {
5660 int qid = rxq->rxq_id;
5661
5662 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
5663 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
5664 CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
5665
5666 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5667 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
5668				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
5669 CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
5670 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
5671 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
5672 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
5673 | RXDCTL_WTHRESH(1));
5674 CSR_WRITE(sc, WMREG_RDH(qid), 0);
5675 CSR_WRITE(sc, WMREG_RDT(qid), 0);
5676 } else {
5677 CSR_WRITE(sc, WMREG_RDH(qid), 0);
5678 CSR_WRITE(sc, WMREG_RDT(qid), 0);
5679 /* ITR / 4 */
5680 CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
5681 /* MUST be same */
5682 CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
5683 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
5684 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
5685 }
5686 }
5687 }
5688
5689 static int
5690 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5691 {
5692 struct wm_rxsoft *rxs;
5693 int error, i;
5694
5695 KASSERT(WM_RX_LOCKED(rxq));
5696
5697 for (i = 0; i < WM_NRXDESC; i++) {
5698 rxs = &rxq->rxq_soft[i];
5699 if (rxs->rxs_mbuf == NULL) {
5700 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
5701 log(LOG_ERR, "%s: unable to allocate or map "
5702 "rx buffer %d, error = %d\n",
5703 device_xname(sc->sc_dev), i, error);
5704 /*
5705 * XXX Should attempt to run with fewer receive
5706 * XXX buffers instead of just failing.
5707 */
5708 wm_rxdrain(rxq);
5709 return ENOMEM;
5710 }
5711 } else {
5712 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5713 wm_init_rxdesc(rxq, i);
5714 /*
5715			 * For 82575 and newer devices, the RX descriptors
5716			 * must be initialized after RCTL.EN is set in
5717			 * wm_set_filter().
5718 */
5719 }
5720 }
5721 rxq->rxq_ptr = 0;
5722 rxq->rxq_discard = 0;
5723 WM_RXCHAIN_RESET(rxq);
5724
5725 return 0;
5726 }
5727
5728 static int
5729 wm_init_rx_queue(struct wm_softc *sc, struct wm_rxqueue *rxq)
5730 {
5731
5732 KASSERT(WM_RX_LOCKED(rxq));
5733
5734 /*
5735 * Set up some register offsets that are different between
5736 * the i82542 and the i82543 and later chips.
5737 */
5738 if (sc->sc_type < WM_T_82543) {
5739 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
5740 } else {
5741 rxq->rxq_rdt_reg = WMREG_RDT(rxq->rxq_id);
5742 }
5743
5744 wm_init_rx_regs(sc, rxq);
5745 return wm_init_rx_buffer(sc, rxq);
5746 }
5747
5748 /*
5749  * wm_init_txrx_queues:
5750  *	Initialize {tx,rx} descs and {tx,rx} buffers
5751 */
5752 static int
5753 wm_init_txrx_queues(struct wm_softc *sc)
5754 {
5755 int i, error;
5756
5757 for (i = 0; i < sc->sc_ntxqueues; i++) {
5758 struct wm_txqueue *txq = &sc->sc_txq[i];
5759 WM_TX_LOCK(txq);
5760 wm_init_tx_queue(sc, txq);
5761 WM_TX_UNLOCK(txq);
5762 }
5763
5764 error = 0;
5765 for (i = 0; i < sc->sc_nrxqueues; i++) {
5766 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5767 WM_RX_LOCK(rxq);
5768 error = wm_init_rx_queue(sc, rxq);
5769 WM_RX_UNLOCK(rxq);
5770 if (error)
5771 break;
5772 }
5773
5774 return error;
5775 }
5776
5777 /*
5778 * wm_tx_offload:
5779 *
5780 * Set up TCP/IP checksumming parameters for the
5781 * specified packet.
5782 */
5783 static int
5784 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
5785 uint8_t *fieldsp)
5786 {
5787 struct wm_txqueue *txq = &sc->sc_txq[0];
5788 struct mbuf *m0 = txs->txs_mbuf;
5789 struct livengood_tcpip_ctxdesc *t;
5790 uint32_t ipcs, tucs, cmd, cmdlen, seg;
5791 uint32_t ipcse;
5792 struct ether_header *eh;
5793 int offset, iphl;
5794 uint8_t fields;
5795
5796 /*
5797 * XXX It would be nice if the mbuf pkthdr had offset
5798 * fields for the protocol headers.
5799 */
5800
5801 eh = mtod(m0, struct ether_header *);
5802 switch (htons(eh->ether_type)) {
5803 case ETHERTYPE_IP:
5804 case ETHERTYPE_IPV6:
5805 offset = ETHER_HDR_LEN;
5806 break;
5807
5808 case ETHERTYPE_VLAN:
5809 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5810 break;
5811
5812 default:
5813 /*
5814 * Don't support this protocol or encapsulation.
5815 */
5816 *fieldsp = 0;
5817 *cmdp = 0;
5818 return 0;
5819 }
5820
5821 if ((m0->m_pkthdr.csum_flags &
5822 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
5823 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5824 } else {
5825 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5826 }
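	/*
	 * ipcse is the inclusive offset of the last byte of the IP
	 * header, i.e. where the hardware ends the IP checksum.
	 */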
5827 ipcse = offset + iphl - 1;
5828
5829 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
5830 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
5831 seg = 0;
5832 fields = 0;
5833
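	/*
	 * For TSO the hardware rewrites the headers for every segment,
	 * so we zero the IP length field here and seed th_sum with the
	 * pseudo-header checksum (without the length); the controller
	 * fills in the per-segment lengths and completes the checksum.
	 */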
5834 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5835 int hlen = offset + iphl;
5836 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5837
5838 if (__predict_false(m0->m_len <
5839 (hlen + sizeof(struct tcphdr)))) {
5840 /*
5841 * TCP/IP headers are not in the first mbuf; we need
5842 * to do this the slow and painful way. Let's just
5843 * hope this doesn't happen very often.
5844 */
5845 struct tcphdr th;
5846
5847 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5848
5849 m_copydata(m0, hlen, sizeof(th), &th);
5850 if (v4) {
5851 struct ip ip;
5852
5853 m_copydata(m0, offset, sizeof(ip), &ip);
5854 ip.ip_len = 0;
5855 m_copyback(m0,
5856 offset + offsetof(struct ip, ip_len),
5857 sizeof(ip.ip_len), &ip.ip_len);
5858 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5859 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5860 } else {
5861 struct ip6_hdr ip6;
5862
5863 m_copydata(m0, offset, sizeof(ip6), &ip6);
5864 ip6.ip6_plen = 0;
5865 m_copyback(m0,
5866 offset + offsetof(struct ip6_hdr, ip6_plen),
5867 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5868 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5869 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5870 }
5871 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5872 sizeof(th.th_sum), &th.th_sum);
5873
5874 hlen += th.th_off << 2;
5875 } else {
5876 /*
5877 * TCP/IP headers are in the first mbuf; we can do
5878 * this the easy way.
5879 */
5880 struct tcphdr *th;
5881
5882 if (v4) {
5883 struct ip *ip =
5884 (void *)(mtod(m0, char *) + offset);
5885 th = (void *)(mtod(m0, char *) + hlen);
5886
5887 ip->ip_len = 0;
5888 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5889 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5890 } else {
5891 struct ip6_hdr *ip6 =
5892 (void *)(mtod(m0, char *) + offset);
5893 th = (void *)(mtod(m0, char *) + hlen);
5894
5895 ip6->ip6_plen = 0;
5896 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5897 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5898 }
5899 hlen += th->th_off << 2;
5900 }
5901
5902 if (v4) {
5903 WM_EVCNT_INCR(&sc->sc_ev_txtso);
5904 cmdlen |= WTX_TCPIP_CMD_IP;
5905 } else {
5906 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5907 ipcse = 0;
5908 }
5909 cmd |= WTX_TCPIP_CMD_TSE;
5910 cmdlen |= WTX_TCPIP_CMD_TSE |
5911 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
5912 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
5913 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
5914 }
5915
5916 /*
5917 * NOTE: Even if we're not using the IP or TCP/UDP checksum
5918 * offload feature, if we load the context descriptor, we
5919 * MUST provide valid values for IPCSS and TUCSS fields.
5920 */
5921
5922 ipcs = WTX_TCPIP_IPCSS(offset) |
5923 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
5924 WTX_TCPIP_IPCSE(ipcse);
5925 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
5926 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
5927 fields |= WTX_IXSM;
5928 }
5929
5930 offset += iphl;
5931
5932 if (m0->m_pkthdr.csum_flags &
5933 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
5934 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5935 fields |= WTX_TXSM;
5936 tucs = WTX_TCPIP_TUCSS(offset) |
5937 WTX_TCPIP_TUCSO(offset +
5938 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
5939 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5940 } else if ((m0->m_pkthdr.csum_flags &
5941 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
5942 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5943 fields |= WTX_TXSM;
5944 tucs = WTX_TCPIP_TUCSS(offset) |
5945 WTX_TCPIP_TUCSO(offset +
5946 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
5947 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5948 } else {
5949 /* Just initialize it to a valid TCP context. */
5950 tucs = WTX_TCPIP_TUCSS(offset) |
5951 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
5952 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5953 }
5954
5955 /* Fill in the context descriptor. */
5956 t = (struct livengood_tcpip_ctxdesc *)
5957 &txq->txq_descs[txq->txq_next];
5958 t->tcpip_ipcs = htole32(ipcs);
5959 t->tcpip_tucs = htole32(tucs);
5960 t->tcpip_cmdlen = htole32(cmdlen);
5961 t->tcpip_seg = htole32(seg);
5962 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
5963
5964 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
5965 txs->txs_ndesc++;
5966
5967 *cmdp = cmd;
5968 *fieldsp = fields;
5969
5970 return 0;
5971 }
5972
5973 /*
5974 * wm_start: [ifnet interface function]
5975 *
5976 * Start packet transmission on the interface.
5977 */
5978 static void
5979 wm_start(struct ifnet *ifp)
5980 {
5981 struct wm_softc *sc = ifp->if_softc;
5982 struct wm_txqueue *txq = &sc->sc_txq[0];
5983
5984 WM_TX_LOCK(txq);
5985 if (!sc->sc_stopping)
5986 wm_start_locked(ifp);
5987 WM_TX_UNLOCK(txq);
5988 }
5989
5990 static void
5991 wm_start_locked(struct ifnet *ifp)
5992 {
5993 struct wm_softc *sc = ifp->if_softc;
5994 struct wm_txqueue *txq = &sc->sc_txq[0];
5995 struct mbuf *m0;
5996 struct m_tag *mtag;
5997 struct wm_txsoft *txs;
5998 bus_dmamap_t dmamap;
5999 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
6000 bus_addr_t curaddr;
6001 bus_size_t seglen, curlen;
6002 uint32_t cksumcmd;
6003 uint8_t cksumfields;
6004
6005 KASSERT(WM_TX_LOCKED(txq));
6006
6007 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
6008 return;
6009
6010 /* Remember the previous number of free descriptors. */
6011 ofree = txq->txq_free;
6012
6013 /*
6014 * Loop through the send queue, setting up transmit descriptors
6015 * until we drain the queue, or use up all available transmit
6016 * descriptors.
6017 */
6018 for (;;) {
6019 m0 = NULL;
6020
6021 /* Get a work queue entry. */
6022 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6023 wm_txeof(sc);
6024 if (txq->txq_sfree == 0) {
6025 DPRINTF(WM_DEBUG_TX,
6026 ("%s: TX: no free job descriptors\n",
6027 device_xname(sc->sc_dev)));
6028 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
6029 break;
6030 }
6031 }
6032
6033 /* Grab a packet off the queue. */
6034 IFQ_DEQUEUE(&ifp->if_snd, m0);
6035 if (m0 == NULL)
6036 break;
6037
6038 DPRINTF(WM_DEBUG_TX,
6039 ("%s: TX: have packet to transmit: %p\n",
6040 device_xname(sc->sc_dev), m0));
6041
6042 txs = &txq->txq_soft[txq->txq_snext];
6043 dmamap = txs->txs_dmamap;
6044
6045 use_tso = (m0->m_pkthdr.csum_flags &
6046 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
6047
6048 /*
6049 * So says the Linux driver:
6050 * The controller does a simple calculation to make sure
6051 * there is enough room in the FIFO before initiating the
6052 * DMA for each buffer. The calc is:
6053 * 4 = ceil(buffer len / MSS)
6054 * To make sure we don't overrun the FIFO, adjust the max
6055 * buffer len if the MSS drops.
6056 */
6057 dmamap->dm_maxsegsz =
6058 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
6059 ? m0->m_pkthdr.segsz << 2
6060 : WTX_MAX_LEN;
6061
6062 /*
6063 * Load the DMA map. If this fails, the packet either
6064 * didn't fit in the allotted number of segments, or we
6065 * were short on resources. For the too-many-segments
6066 * case, we simply report an error and drop the packet,
6067 * since we can't sanely copy a jumbo packet to a single
6068 * buffer.
6069 */
6070 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6071 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
6072 if (error) {
6073 if (error == EFBIG) {
6074 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6075 log(LOG_ERR, "%s: Tx packet consumes too many "
6076 "DMA segments, dropping...\n",
6077 device_xname(sc->sc_dev));
6078 wm_dump_mbuf_chain(sc, m0);
6079 m_freem(m0);
6080 continue;
6081 }
6082 /* Short on resources, just stop for now. */
6083 DPRINTF(WM_DEBUG_TX,
6084 ("%s: TX: dmamap load failed: %d\n",
6085 device_xname(sc->sc_dev), error));
6086 break;
6087 }
6088
6089 segs_needed = dmamap->dm_nsegs;
6090 if (use_tso) {
6091 /* For sentinel descriptor; see below. */
6092 segs_needed++;
6093 }
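		/*
		 * The sentinel is carved out of the last DMA segment in
		 * the descriptor loop below: when the final chunk is
		 * longer than 8 bytes, its last 4 bytes are emitted as a
		 * descriptor of their own, hence the extra descriptor
		 * reserved here.
		 */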
6094
6095 /*
6096 * Ensure we have enough descriptors free to describe
6097 * the packet. Note, we always reserve one descriptor
6098 * at the end of the ring due to the semantics of the
6099 * TDT register, plus one more in the event we need
6100 * to load offload context.
6101 */
6102 if (segs_needed > txq->txq_free - 2) {
6103 /*
6104 * Not enough free descriptors to transmit this
6105 * packet. We haven't committed anything yet,
6106 * so just unload the DMA map, put the packet
6107			 * back on the queue, and punt. Notify the upper
6108 * layer that there are no more slots left.
6109 */
6110 DPRINTF(WM_DEBUG_TX,
6111 ("%s: TX: need %d (%d) descriptors, have %d\n",
6112 device_xname(sc->sc_dev), dmamap->dm_nsegs,
6113 segs_needed, txq->txq_free - 1));
6114 ifp->if_flags |= IFF_OACTIVE;
6115 bus_dmamap_unload(sc->sc_dmat, dmamap);
6116 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
6117 break;
6118 }
6119
6120 /*
6121 * Check for 82547 Tx FIFO bug. We need to do this
6122 * once we know we can transmit the packet, since we
6123 * do some internal FIFO space accounting here.
6124 */
6125 if (sc->sc_type == WM_T_82547 &&
6126 wm_82547_txfifo_bugchk(sc, m0)) {
6127 DPRINTF(WM_DEBUG_TX,
6128 ("%s: TX: 82547 Tx FIFO bug detected\n",
6129 device_xname(sc->sc_dev)));
6130 ifp->if_flags |= IFF_OACTIVE;
6131 bus_dmamap_unload(sc->sc_dmat, dmamap);
6132 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
6133 break;
6134 }
6135
6136 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6137
6138 DPRINTF(WM_DEBUG_TX,
6139 ("%s: TX: packet has %d (%d) DMA segments\n",
6140 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6141
6142 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
6143
6144 /*
6145 * Store a pointer to the packet so that we can free it
6146 * later.
6147 *
6148		 * Initially, we consider the number of descriptors the
6149		 * packet uses to be the number of DMA segments. This may be
6150 * incremented by 1 if we do checksum offload (a descriptor
6151 * is used to set the checksum context).
6152 */
6153 txs->txs_mbuf = m0;
6154 txs->txs_firstdesc = txq->txq_next;
6155 txs->txs_ndesc = segs_needed;
6156
6157 /* Set up offload parameters for this packet. */
6158 if (m0->m_pkthdr.csum_flags &
6159 (M_CSUM_TSOv4|M_CSUM_TSOv6|
6160 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
6161 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
6162 if (wm_tx_offload(sc, txs, &cksumcmd,
6163 &cksumfields) != 0) {
6164 /* Error message already displayed. */
6165 bus_dmamap_unload(sc->sc_dmat, dmamap);
6166 continue;
6167 }
6168 } else {
6169 cksumcmd = 0;
6170 cksumfields = 0;
6171 }
6172
6173 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
6174
6175 /* Sync the DMA map. */
6176 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6177 BUS_DMASYNC_PREWRITE);
6178
6179 /* Initialize the transmit descriptor. */
6180 for (nexttx = txq->txq_next, seg = 0;
6181 seg < dmamap->dm_nsegs; seg++) {
6182 for (seglen = dmamap->dm_segs[seg].ds_len,
6183 curaddr = dmamap->dm_segs[seg].ds_addr;
6184 seglen != 0;
6185 curaddr += curlen, seglen -= curlen,
6186 nexttx = WM_NEXTTX(txq, nexttx)) {
6187 curlen = seglen;
6188
6189 /*
6190 * So says the Linux driver:
6191 * Work around for premature descriptor
6192 * write-backs in TSO mode. Append a
6193 * 4-byte sentinel descriptor.
6194 */
6195 if (use_tso &&
6196 seg == dmamap->dm_nsegs - 1 &&
6197 curlen > 8)
6198 curlen -= 4;
6199
6200 wm_set_dma_addr(
6201 &txq->txq_descs[nexttx].wtx_addr,
6202 curaddr);
6203 txq->txq_descs[nexttx].wtx_cmdlen =
6204 htole32(cksumcmd | curlen);
6205 txq->txq_descs[nexttx].wtx_fields.wtxu_status =
6206 0;
6207 txq->txq_descs[nexttx].wtx_fields.wtxu_options =
6208 cksumfields;
6209 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6210 lasttx = nexttx;
6211
6212 DPRINTF(WM_DEBUG_TX,
6213 ("%s: TX: desc %d: low %#" PRIx64 ", "
6214 "len %#04zx\n",
6215 device_xname(sc->sc_dev), nexttx,
6216 (uint64_t)curaddr, curlen));
6217 }
6218 }
6219
6220 KASSERT(lasttx != -1);
6221
6222 /*
6223 * Set up the command byte on the last descriptor of
6224 * the packet. If we're in the interrupt delay window,
6225 * delay the interrupt.
6226 */
6227 txq->txq_descs[lasttx].wtx_cmdlen |=
6228 htole32(WTX_CMD_EOP | WTX_CMD_RS);
6229
6230 /*
6231 * If VLANs are enabled and the packet has a VLAN tag, set
6232 * up the descriptor to encapsulate the packet for us.
6233 *
6234 * This is only valid on the last descriptor of the packet.
6235 */
6236 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6237 txq->txq_descs[lasttx].wtx_cmdlen |=
6238 htole32(WTX_CMD_VLE);
6239 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
6240 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6241 }
6242
6243 txs->txs_lastdesc = lasttx;
6244
6245 DPRINTF(WM_DEBUG_TX,
6246 ("%s: TX: desc %d: cmdlen 0x%08x\n",
6247 device_xname(sc->sc_dev),
6248 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6249
6250 /* Sync the descriptors we're using. */
6251 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6252 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
6253
6254 /* Give the packet to the chip. */
6255 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6256
6257 DPRINTF(WM_DEBUG_TX,
6258 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6259
6260 DPRINTF(WM_DEBUG_TX,
6261 ("%s: TX: finished transmitting packet, job %d\n",
6262 device_xname(sc->sc_dev), txq->txq_snext));
6263
6264 /* Advance the tx pointer. */
6265 txq->txq_free -= txs->txs_ndesc;
6266 txq->txq_next = nexttx;
6267
6268 txq->txq_sfree--;
6269 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6270
6271 /* Pass the packet to any BPF listeners. */
6272 bpf_mtap(ifp, m0);
6273 }
6274
6275 if (m0 != NULL) {
6276 ifp->if_flags |= IFF_OACTIVE;
6277 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6278 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
6279 m_freem(m0);
6280 }
6281
6282 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6283 /* No more slots; notify upper layer. */
6284 ifp->if_flags |= IFF_OACTIVE;
6285 }
6286
6287 if (txq->txq_free != ofree) {
6288 /* Set a watchdog timer in case the chip flakes out. */
6289 ifp->if_timer = 5;
6290 }
6291 }
6292
6293 /*
6294 * wm_nq_tx_offload:
6295 *
6296 * Set up TCP/IP checksumming parameters for the
6297 * specified packet, for NEWQUEUE devices
6298 */
6299 static int
6300 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
6301 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
6302 {
6303 struct wm_txqueue *txq = &sc->sc_txq[0];
6304 struct mbuf *m0 = txs->txs_mbuf;
6305 struct m_tag *mtag;
6306 uint32_t vl_len, mssidx, cmdc;
6307 struct ether_header *eh;
6308 int offset, iphl;
6309
6310 /*
6311 * XXX It would be nice if the mbuf pkthdr had offset
6312 * fields for the protocol headers.
6313 */
6314 *cmdlenp = 0;
6315 *fieldsp = 0;
6316
6317 eh = mtod(m0, struct ether_header *);
6318 switch (htons(eh->ether_type)) {
6319 case ETHERTYPE_IP:
6320 case ETHERTYPE_IPV6:
6321 offset = ETHER_HDR_LEN;
6322 break;
6323
6324 case ETHERTYPE_VLAN:
6325 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6326 break;
6327
6328 default:
6329 /* Don't support this protocol or encapsulation. */
6330 *do_csum = false;
6331 return 0;
6332 }
6333 *do_csum = true;
6334 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
6335 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
6336
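	/*
	 * The context descriptor's VL_LEN word packs the VLAN tag, the
	 * MAC header length and the IP header length, assembled below.
	 */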
6337 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
6338 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
6339
6340 if ((m0->m_pkthdr.csum_flags &
6341 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
6342 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6343 } else {
6344 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6345 }
6346 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
6347 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
6348
6349 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6350 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
6351 << NQTXC_VLLEN_VLAN_SHIFT);
6352 *cmdlenp |= NQTX_CMD_VLE;
6353 }
6354
6355 mssidx = 0;
6356
6357 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6358 int hlen = offset + iphl;
6359 int tcp_hlen;
6360 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6361
6362 if (__predict_false(m0->m_len <
6363 (hlen + sizeof(struct tcphdr)))) {
6364 /*
6365 * TCP/IP headers are not in the first mbuf; we need
6366 * to do this the slow and painful way. Let's just
6367 * hope this doesn't happen very often.
6368 */
6369 struct tcphdr th;
6370
6371 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
6372
6373 m_copydata(m0, hlen, sizeof(th), &th);
6374 if (v4) {
6375 struct ip ip;
6376
6377 m_copydata(m0, offset, sizeof(ip), &ip);
6378 ip.ip_len = 0;
6379 m_copyback(m0,
6380 offset + offsetof(struct ip, ip_len),
6381 sizeof(ip.ip_len), &ip.ip_len);
6382 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6383 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6384 } else {
6385 struct ip6_hdr ip6;
6386
6387 m_copydata(m0, offset, sizeof(ip6), &ip6);
6388 ip6.ip6_plen = 0;
6389 m_copyback(m0,
6390 offset + offsetof(struct ip6_hdr, ip6_plen),
6391 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6392 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6393 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6394 }
6395 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6396 sizeof(th.th_sum), &th.th_sum);
6397
6398 tcp_hlen = th.th_off << 2;
6399 } else {
6400 /*
6401 * TCP/IP headers are in the first mbuf; we can do
6402 * this the easy way.
6403 */
6404 struct tcphdr *th;
6405
6406 if (v4) {
6407 struct ip *ip =
6408 (void *)(mtod(m0, char *) + offset);
6409 th = (void *)(mtod(m0, char *) + hlen);
6410
6411 ip->ip_len = 0;
6412 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6413 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6414 } else {
6415 struct ip6_hdr *ip6 =
6416 (void *)(mtod(m0, char *) + offset);
6417 th = (void *)(mtod(m0, char *) + hlen);
6418
6419 ip6->ip6_plen = 0;
6420 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6421 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6422 }
6423 tcp_hlen = th->th_off << 2;
6424 }
6425 hlen += tcp_hlen;
6426 *cmdlenp |= NQTX_CMD_TSE;
6427
6428 if (v4) {
6429 WM_EVCNT_INCR(&sc->sc_ev_txtso);
6430 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
6431 } else {
6432 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
6433 *fieldsp |= NQTXD_FIELDS_TUXSM;
6434 }
6435 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
6436 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6437 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
6438 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
6439 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
6440 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
6441 } else {
6442 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
6443 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6444 }
6445
6446 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
6447 *fieldsp |= NQTXD_FIELDS_IXSM;
6448 cmdc |= NQTXC_CMD_IP4;
6449 }
6450
6451 if (m0->m_pkthdr.csum_flags &
6452 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6453 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
6454 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6455 cmdc |= NQTXC_CMD_TCP;
6456 } else {
6457 cmdc |= NQTXC_CMD_UDP;
6458 }
6459 cmdc |= NQTXC_CMD_IP4;
6460 *fieldsp |= NQTXD_FIELDS_TUXSM;
6461 }
6462 if (m0->m_pkthdr.csum_flags &
6463 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6464 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
6465 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6466 cmdc |= NQTXC_CMD_TCP;
6467 } else {
6468 cmdc |= NQTXC_CMD_UDP;
6469 }
6470 cmdc |= NQTXC_CMD_IP6;
6471 *fieldsp |= NQTXD_FIELDS_TUXSM;
6472 }
6473
6474 /* Fill in the context descriptor. */
6475 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
6476 htole32(vl_len);
6477 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
6478 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
6479 htole32(cmdc);
6480 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
6481 htole32(mssidx);
6482 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6483 DPRINTF(WM_DEBUG_TX,
6484 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
6485 txq->txq_next, 0, vl_len));
6486 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
6487 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6488 txs->txs_ndesc++;
6489 return 0;
6490 }
6491
6492 /*
6493 * wm_nq_start: [ifnet interface function]
6494 *
6495 * Start packet transmission on the interface for NEWQUEUE devices
6496 */
6497 static void
6498 wm_nq_start(struct ifnet *ifp)
6499 {
6500 struct wm_softc *sc = ifp->if_softc;
6501 struct wm_txqueue *txq = &sc->sc_txq[0];
6502
6503 WM_TX_LOCK(txq);
6504 if (!sc->sc_stopping)
6505 wm_nq_start_locked(ifp);
6506 WM_TX_UNLOCK(txq);
6507 }
6508
6509 static void
6510 wm_nq_start_locked(struct ifnet *ifp)
6511 {
6512 struct wm_softc *sc = ifp->if_softc;
6513 struct wm_txqueue *txq = &sc->sc_txq[0];
6514 struct mbuf *m0;
6515 struct m_tag *mtag;
6516 struct wm_txsoft *txs;
6517 bus_dmamap_t dmamap;
6518 int error, nexttx, lasttx = -1, seg, segs_needed;
6519 bool do_csum, sent;
6520
6521 KASSERT(WM_TX_LOCKED(txq));
6522
6523 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
6524 return;
6525
6526 sent = false;
6527
6528 /*
6529 * Loop through the send queue, setting up transmit descriptors
6530 * until we drain the queue, or use up all available transmit
6531 * descriptors.
6532 */
6533 for (;;) {
6534 m0 = NULL;
6535
6536 /* Get a work queue entry. */
6537 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6538 wm_txeof(sc);
6539 if (txq->txq_sfree == 0) {
6540 DPRINTF(WM_DEBUG_TX,
6541 ("%s: TX: no free job descriptors\n",
6542 device_xname(sc->sc_dev)));
6543 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
6544 break;
6545 }
6546 }
6547
6548 /* Grab a packet off the queue. */
6549 IFQ_DEQUEUE(&ifp->if_snd, m0);
6550 if (m0 == NULL)
6551 break;
6552
6553 DPRINTF(WM_DEBUG_TX,
6554 ("%s: TX: have packet to transmit: %p\n",
6555 device_xname(sc->sc_dev), m0));
6556
6557 txs = &txq->txq_soft[txq->txq_snext];
6558 dmamap = txs->txs_dmamap;
6559
6560 /*
6561 * Load the DMA map. If this fails, the packet either
6562 * didn't fit in the allotted number of segments, or we
6563 * were short on resources. For the too-many-segments
6564 * case, we simply report an error and drop the packet,
6565 * since we can't sanely copy a jumbo packet to a single
6566 * buffer.
6567 */
6568 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6569 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
6570 if (error) {
6571 if (error == EFBIG) {
6572 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6573 log(LOG_ERR, "%s: Tx packet consumes too many "
6574 "DMA segments, dropping...\n",
6575 device_xname(sc->sc_dev));
6576 wm_dump_mbuf_chain(sc, m0);
6577 m_freem(m0);
6578 continue;
6579 }
6580 /* Short on resources, just stop for now. */
6581 DPRINTF(WM_DEBUG_TX,
6582 ("%s: TX: dmamap load failed: %d\n",
6583 device_xname(sc->sc_dev), error));
6584 break;
6585 }
6586
6587 segs_needed = dmamap->dm_nsegs;
6588
6589 /*
6590 * Ensure we have enough descriptors free to describe
6591 * the packet. Note, we always reserve one descriptor
6592 * at the end of the ring due to the semantics of the
6593 * TDT register, plus one more in the event we need
6594 * to load offload context.
6595 */
6596 if (segs_needed > txq->txq_free - 2) {
6597 /*
6598 * Not enough free descriptors to transmit this
6599 * packet. We haven't committed anything yet,
6600 * so just unload the DMA map, put the packet
6601			 * back on the queue, and punt. Notify the upper
6602 * layer that there are no more slots left.
6603 */
6604 DPRINTF(WM_DEBUG_TX,
6605 ("%s: TX: need %d (%d) descriptors, have %d\n",
6606 device_xname(sc->sc_dev), dmamap->dm_nsegs,
6607 segs_needed, txq->txq_free - 1));
6608 ifp->if_flags |= IFF_OACTIVE;
6609 bus_dmamap_unload(sc->sc_dmat, dmamap);
6610 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
6611 break;
6612 }
6613
6614 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6615
6616 DPRINTF(WM_DEBUG_TX,
6617 ("%s: TX: packet has %d (%d) DMA segments\n",
6618 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6619
6620 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
6621
6622 /*
6623 * Store a pointer to the packet so that we can free it
6624 * later.
6625 *
6626		 * Initially, we consider the number of descriptors the
6627		 * packet uses to be the number of DMA segments. This may be
6628 * incremented by 1 if we do checksum offload (a descriptor
6629 * is used to set the checksum context).
6630 */
6631 txs->txs_mbuf = m0;
6632 txs->txs_firstdesc = txq->txq_next;
6633 txs->txs_ndesc = segs_needed;
6634
6635 /* Set up offload parameters for this packet. */
6636 uint32_t cmdlen, fields, dcmdlen;
6637 if (m0->m_pkthdr.csum_flags &
6638 (M_CSUM_TSOv4|M_CSUM_TSOv6|
6639 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
6640 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
6641 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
6642 &do_csum) != 0) {
6643 /* Error message already displayed. */
6644 bus_dmamap_unload(sc->sc_dmat, dmamap);
6645 continue;
6646 }
6647 } else {
6648 do_csum = false;
6649 cmdlen = 0;
6650 fields = 0;
6651 }
6652
6653 /* Sync the DMA map. */
6654 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6655 BUS_DMASYNC_PREWRITE);
6656
6657 /* Initialize the first transmit descriptor. */
6658 nexttx = txq->txq_next;
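		/*
		 * Without offload the first descriptor uses the legacy
		 * layout; with offload it uses the advanced data layout,
		 * following the context descriptor written by
		 * wm_nq_tx_offload() above.
		 */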
6659 if (!do_csum) {
6660 /* setup a legacy descriptor */
6661 wm_set_dma_addr(
6662 &txq->txq_descs[nexttx].wtx_addr,
6663 dmamap->dm_segs[0].ds_addr);
6664 txq->txq_descs[nexttx].wtx_cmdlen =
6665 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
6666 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
6667 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
6668 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
6669 NULL) {
6670 txq->txq_descs[nexttx].wtx_cmdlen |=
6671 htole32(WTX_CMD_VLE);
6672 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
6673 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6674 } else {
6675				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6676 }
6677 dcmdlen = 0;
6678 } else {
6679 /* setup an advanced data descriptor */
6680 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6681 htole64(dmamap->dm_segs[0].ds_addr);
6682 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
6683 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
6684			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
6685 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
6686 htole32(fields);
6687 DPRINTF(WM_DEBUG_TX,
6688 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
6689 device_xname(sc->sc_dev), nexttx,
6690 (uint64_t)dmamap->dm_segs[0].ds_addr));
6691 DPRINTF(WM_DEBUG_TX,
6692 ("\t 0x%08x%08x\n", fields,
6693 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
6694 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
6695 }
6696
6697 lasttx = nexttx;
6698 nexttx = WM_NEXTTX(txq, nexttx);
6699 /*
6700		 * Fill in the remaining descriptors. The legacy and
6701		 * advanced formats are identical from here on.
6702 */
6703 for (seg = 1; seg < dmamap->dm_nsegs;
6704 seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
6705 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6706 htole64(dmamap->dm_segs[seg].ds_addr);
6707 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
6708 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
6709 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
6710 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
6711 lasttx = nexttx;
6712
6713 DPRINTF(WM_DEBUG_TX,
6714 ("%s: TX: desc %d: %#" PRIx64 ", "
6715 "len %#04zx\n",
6716 device_xname(sc->sc_dev), nexttx,
6717 (uint64_t)dmamap->dm_segs[seg].ds_addr,
6718 dmamap->dm_segs[seg].ds_len));
6719 }
6720
6721 KASSERT(lasttx != -1);
6722
6723 /*
6724 * Set up the command byte on the last descriptor of
6725 * the packet. If we're in the interrupt delay window,
6726 * delay the interrupt.
6727 */
6728 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
6729 (NQTX_CMD_EOP | NQTX_CMD_RS));
6730 txq->txq_descs[lasttx].wtx_cmdlen |=
6731 htole32(WTX_CMD_EOP | WTX_CMD_RS);
6732
6733 txs->txs_lastdesc = lasttx;
6734
6735 DPRINTF(WM_DEBUG_TX,
6736 ("%s: TX: desc %d: cmdlen 0x%08x\n",
6737 device_xname(sc->sc_dev),
6738 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6739
6740 /* Sync the descriptors we're using. */
6741 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6742 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
6743
6744 /* Give the packet to the chip. */
6745 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6746 sent = true;
6747
6748 DPRINTF(WM_DEBUG_TX,
6749 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6750
6751 DPRINTF(WM_DEBUG_TX,
6752 ("%s: TX: finished transmitting packet, job %d\n",
6753 device_xname(sc->sc_dev), txq->txq_snext));
6754
6755 /* Advance the tx pointer. */
6756 txq->txq_free -= txs->txs_ndesc;
6757 txq->txq_next = nexttx;
6758
6759 txq->txq_sfree--;
6760 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6761
6762 /* Pass the packet to any BPF listeners. */
6763 bpf_mtap(ifp, m0);
6764 }
6765
6766 if (m0 != NULL) {
6767 ifp->if_flags |= IFF_OACTIVE;
6768 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6769 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
6770 m_freem(m0);
6771 }
6772
6773 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6774 /* No more slots; notify upper layer. */
6775 ifp->if_flags |= IFF_OACTIVE;
6776 }
6777
6778 if (sent) {
6779 /* Set a watchdog timer in case the chip flakes out. */
6780 ifp->if_timer = 5;
6781 }
6782 }
6783
6784 /* Interrupt */
6785
6786 /*
6787 * wm_txeof:
6788 *
6789 * Helper; handle transmit interrupts.
6790 */
6791 static int
6792 wm_txeof(struct wm_softc *sc)
6793 {
6794 struct wm_txqueue *txq = &sc->sc_txq[0];
6795 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6796 struct wm_txsoft *txs;
6797 bool processed = false;
6798 int count = 0;
6799 int i;
6800 uint8_t status;
6801
6802 if (sc->sc_stopping)
6803 return 0;
6804
6805 ifp->if_flags &= ~IFF_OACTIVE;
6806
6807 /*
6808 * Go through the Tx list and free mbufs for those
6809 * frames which have been transmitted.
6810 */
6811 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
6812 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
6813 txs = &txq->txq_soft[i];
6814
6815 DPRINTF(WM_DEBUG_TX,
6816 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
6817
6818 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
6819 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6820
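		/*
		 * The descriptor-done (DD) bit is written back by the
		 * hardware once it has processed the descriptor; if it
		 * is still clear, this job and everything after it is
		 * still pending.
		 */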
6821 status =
6822 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
6823 if ((status & WTX_ST_DD) == 0) {
6824 wm_cdtxsync(txq, txs->txs_lastdesc, 1,
6825 BUS_DMASYNC_PREREAD);
6826 break;
6827 }
6828
6829 processed = true;
6830 count++;
6831 DPRINTF(WM_DEBUG_TX,
6832 ("%s: TX: job %d done: descs %d..%d\n",
6833 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
6834 txs->txs_lastdesc));
6835
6836 /*
6837 * XXX We should probably be using the statistics
6838 * XXX registers, but I don't know if they exist
6839 * XXX on chips before the i82544.
6840 */
6841
6842 #ifdef WM_EVENT_COUNTERS
6843 if (status & WTX_ST_TU)
6844 WM_EVCNT_INCR(&sc->sc_ev_tu);
6845 #endif /* WM_EVENT_COUNTERS */
6846
6847 if (status & (WTX_ST_EC|WTX_ST_LC)) {
6848 ifp->if_oerrors++;
6849 if (status & WTX_ST_LC)
6850 log(LOG_WARNING, "%s: late collision\n",
6851 device_xname(sc->sc_dev));
6852 else if (status & WTX_ST_EC) {
6853 ifp->if_collisions += 16;
6854 log(LOG_WARNING, "%s: excessive collisions\n",
6855 device_xname(sc->sc_dev));
6856 }
6857 } else
6858 ifp->if_opackets++;
6859
6860 txq->txq_free += txs->txs_ndesc;
6861 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
6862 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6863 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
6864 m_freem(txs->txs_mbuf);
6865 txs->txs_mbuf = NULL;
6866 }
6867
6868 /* Update the dirty transmit buffer pointer. */
6869 txq->txq_sdirty = i;
6870 DPRINTF(WM_DEBUG_TX,
6871 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
6872
6873 if (count != 0)
6874 rnd_add_uint32(&sc->rnd_source, count);
6875
6876 /*
6877 * If there are no more pending transmissions, cancel the watchdog
6878 * timer.
6879 */
6880 if (txq->txq_sfree == WM_TXQUEUELEN(txq))
6881 ifp->if_timer = 0;
6882
6883 return processed;
6884 }
6885
6886 /*
6887 * wm_rxeof:
6888 *
6889 * Helper; handle receive interrupts.
6890 */
6891 static void
6892 wm_rxeof(struct wm_rxqueue *rxq)
6893 {
6894 struct wm_softc *sc = rxq->rxq_sc;
6895 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6896 struct wm_rxsoft *rxs;
6897 struct mbuf *m;
6898 int i, len;
6899 int count = 0;
6900 uint8_t status, errors;
6901 uint16_t vlantag;
6902
6903 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
6904 rxs = &rxq->rxq_soft[i];
6905
6906 DPRINTF(WM_DEBUG_RX,
6907 ("%s: RX: checking descriptor %d\n",
6908 device_xname(sc->sc_dev), i));
6909
6910 wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6911
6912 status = rxq->rxq_descs[i].wrx_status;
6913 errors = rxq->rxq_descs[i].wrx_errors;
6914 len = le16toh(rxq->rxq_descs[i].wrx_len);
6915 vlantag = rxq->rxq_descs[i].wrx_special;
6916
6917 if ((status & WRX_ST_DD) == 0) {
6918 /* We have processed all of the receive descriptors. */
6919 wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
6920 break;
6921 }
6922
6923 count++;
6924 if (__predict_false(rxq->rxq_discard)) {
6925 DPRINTF(WM_DEBUG_RX,
6926 ("%s: RX: discarding contents of descriptor %d\n",
6927 device_xname(sc->sc_dev), i));
6928 wm_init_rxdesc(rxq, i);
6929 if (status & WRX_ST_EOP) {
6930 /* Reset our state. */
6931 DPRINTF(WM_DEBUG_RX,
6932 ("%s: RX: resetting rxdiscard -> 0\n",
6933 device_xname(sc->sc_dev)));
6934 rxq->rxq_discard = 0;
6935 }
6936 continue;
6937 }
6938
6939 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6940 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
6941
6942 m = rxs->rxs_mbuf;
6943
6944 /*
6945 * Add a new receive buffer to the ring, unless of
6946 * course the length is zero. Treat the latter as a
6947 * failed mapping.
6948 */
6949 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
6950 /*
6951 * Failed, throw away what we've done so
6952 * far, and discard the rest of the packet.
6953 */
6954 ifp->if_ierrors++;
6955 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6956 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
6957 wm_init_rxdesc(rxq, i);
6958 if ((status & WRX_ST_EOP) == 0)
6959 rxq->rxq_discard = 1;
6960 if (rxq->rxq_head != NULL)
6961 m_freem(rxq->rxq_head);
6962 WM_RXCHAIN_RESET(rxq);
6963 DPRINTF(WM_DEBUG_RX,
6964 ("%s: RX: Rx buffer allocation failed, "
6965 "dropping packet%s\n", device_xname(sc->sc_dev),
6966 rxq->rxq_discard ? " (discard)" : ""));
6967 continue;
6968 }
6969
6970 m->m_len = len;
6971 rxq->rxq_len += len;
6972 DPRINTF(WM_DEBUG_RX,
6973 ("%s: RX: buffer at %p len %d\n",
6974 device_xname(sc->sc_dev), m->m_data, len));
6975
6976 /* If this is not the end of the packet, keep looking. */
6977 if ((status & WRX_ST_EOP) == 0) {
6978 WM_RXCHAIN_LINK(rxq, m);
6979 DPRINTF(WM_DEBUG_RX,
6980 ("%s: RX: not yet EOP, rxlen -> %d\n",
6981 device_xname(sc->sc_dev), rxq->rxq_len));
6982 continue;
6983 }
6984
6985 /*
6986		 * Okay, we have the entire packet now. The chip includes
6987		 * the FCS except on the I350, I354 and I21[01] (not all
6988		 * chips can be configured to strip it), so we need to trim
6989		 * it off here. We may need to adjust the length of the
6990		 * previous mbuf in the chain if the current mbuf is too
6991		 * short. Due to an erratum, the RCTL_SECRC bit in the
6992		 * RCTL register is always set on the I350, so the FCS is
6993		 * already stripped there and we don't trim it.
6994 */
6995 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
6996 && (sc->sc_type != WM_T_I210)
6997 && (sc->sc_type != WM_T_I211)) {
6998 if (m->m_len < ETHER_CRC_LEN) {
6999 rxq->rxq_tail->m_len
7000 -= (ETHER_CRC_LEN - m->m_len);
7001 m->m_len = 0;
7002 } else
7003 m->m_len -= ETHER_CRC_LEN;
7004 len = rxq->rxq_len - ETHER_CRC_LEN;
7005 } else
7006 len = rxq->rxq_len;
7007
7008 WM_RXCHAIN_LINK(rxq, m);
7009
7010 *rxq->rxq_tailp = NULL;
7011 m = rxq->rxq_head;
7012
7013 WM_RXCHAIN_RESET(rxq);
7014
7015 DPRINTF(WM_DEBUG_RX,
7016 ("%s: RX: have entire packet, len -> %d\n",
7017 device_xname(sc->sc_dev), len));
7018
7019 /* If an error occurred, update stats and drop the packet. */
7020 if (errors &
7021 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
7022 if (errors & WRX_ER_SE)
7023 log(LOG_WARNING, "%s: symbol error\n",
7024 device_xname(sc->sc_dev));
7025 else if (errors & WRX_ER_SEQ)
7026 log(LOG_WARNING, "%s: receive sequence error\n",
7027 device_xname(sc->sc_dev));
7028 else if (errors & WRX_ER_CE)
7029 log(LOG_WARNING, "%s: CRC error\n",
7030 device_xname(sc->sc_dev));
7031 m_freem(m);
7032 continue;
7033 }
7034
7035 /* No errors. Receive the packet. */
7036 m->m_pkthdr.rcvif = ifp;
7037 m->m_pkthdr.len = len;
7038
7039 /*
7040 * If VLANs are enabled, VLAN packets have been unwrapped
7041 * for us. Associate the tag with the packet.
7042 */
7043		/* XXX should check for i350 and i354 */
7044 if ((status & WRX_ST_VP) != 0) {
7045 VLAN_INPUT_TAG(ifp, m,
7046 le16toh(vlantag),
7047 continue);
7048 }
7049
7050 /* Set up checksum info for this packet. */
7051 if ((status & WRX_ST_IXSM) == 0) {
7052 if (status & WRX_ST_IPCS) {
7053 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
7054 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
7055 if (errors & WRX_ER_IPE)
7056 m->m_pkthdr.csum_flags |=
7057 M_CSUM_IPv4_BAD;
7058 }
7059 if (status & WRX_ST_TCPCS) {
7060 /*
7061 * Note: we don't know if this was TCP or UDP,
7062 * so we just set both bits, and expect the
7063 * upper layers to deal.
7064 */
7065 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
7066 m->m_pkthdr.csum_flags |=
7067 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7068 M_CSUM_TCPv6 | M_CSUM_UDPv6;
7069 if (errors & WRX_ER_TCPE)
7070 m->m_pkthdr.csum_flags |=
7071 M_CSUM_TCP_UDP_BAD;
7072 }
7073 }
7074
7075 ifp->if_ipackets++;
7076
7077 WM_RX_UNLOCK(rxq);
7078
7079 /* Pass this up to any BPF listeners. */
7080 bpf_mtap(ifp, m);
7081
7082 /* Pass it on. */
7083 (*ifp->if_input)(ifp, m);
7084
7085 WM_RX_LOCK(rxq);
7086
7087 if (sc->sc_stopping)
7088 break;
7089 }
7090
7091 /* Update the receive pointer. */
7092 rxq->rxq_ptr = i;
7093 if (count != 0)
7094 rnd_add_uint32(&sc->rnd_source, count);
7095
7096 DPRINTF(WM_DEBUG_RX,
7097 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
7098 }
7099
7100 /*
7101 * wm_linkintr_gmii:
7102 *
7103 * Helper; handle link interrupts for GMII.
7104 */
7105 static void
7106 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
7107 {
7108
7109 KASSERT(WM_CORE_LOCKED(sc));
7110
7111 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7112 __func__));
7113
7114 if (icr & ICR_LSC) {
7115 DPRINTF(WM_DEBUG_LINK,
7116 ("%s: LINK: LSC -> mii_pollstat\n",
7117 device_xname(sc->sc_dev)));
7118 mii_pollstat(&sc->sc_mii);
7119 if (sc->sc_type == WM_T_82543) {
7120 int miistatus, active;
7121
7122 /*
7123				 * With the 82543, we need to force the
7124				 * MAC's speed and duplex to match the PHY's
7125				 * speed and duplex configuration.
7126 */
7127 miistatus = sc->sc_mii.mii_media_status;
7128
7129 if (miistatus & IFM_ACTIVE) {
7130 active = sc->sc_mii.mii_media_active;
7131 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7132 switch (IFM_SUBTYPE(active)) {
7133 case IFM_10_T:
7134 sc->sc_ctrl |= CTRL_SPEED_10;
7135 break;
7136 case IFM_100_TX:
7137 sc->sc_ctrl |= CTRL_SPEED_100;
7138 break;
7139 case IFM_1000_T:
7140 sc->sc_ctrl |= CTRL_SPEED_1000;
7141 break;
7142 default:
7143 /*
7144					 * Fiber?
7145					 * Should not enter here.
7146 */
7147 printf("unknown media (%x)\n",
7148 active);
7149 break;
7150 }
7151 if (active & IFM_FDX)
7152 sc->sc_ctrl |= CTRL_FD;
7153 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7154 }
7155 } else if ((sc->sc_type == WM_T_ICH8)
7156 && (sc->sc_phytype == WMPHY_IGP_3)) {
7157 wm_kmrn_lock_loss_workaround_ich8lan(sc);
7158 } else if (sc->sc_type == WM_T_PCH) {
7159 wm_k1_gig_workaround_hv(sc,
7160 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
7161 }
7162
7163 if ((sc->sc_phytype == WMPHY_82578)
7164 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
7165 == IFM_1000_T)) {
7166
7167 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
7168 delay(200*1000); /* XXX too big */
7169
7170 /* Link stall fix for link up */
7171 wm_gmii_hv_writereg(sc->sc_dev, 1,
7172 HV_MUX_DATA_CTRL,
7173 HV_MUX_DATA_CTRL_GEN_TO_MAC
7174 | HV_MUX_DATA_CTRL_FORCE_SPEED);
7175 wm_gmii_hv_writereg(sc->sc_dev, 1,
7176 HV_MUX_DATA_CTRL,
7177 HV_MUX_DATA_CTRL_GEN_TO_MAC);
7178 }
7179 }
7180 } else if (icr & ICR_RXSEQ) {
7181 DPRINTF(WM_DEBUG_LINK,
7182		    ("%s: LINK: Receive sequence error\n",
7183 device_xname(sc->sc_dev)));
7184 }
7185 }
7186
7187 /*
7188 * wm_linkintr_tbi:
7189 *
7190 * Helper; handle link interrupts for TBI mode.
7191 */
7192 static void
7193 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
7194 {
7195 uint32_t status;
7196
7197 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7198 __func__));
7199
7200 status = CSR_READ(sc, WMREG_STATUS);
7201 if (icr & ICR_LSC) {
7202 if (status & STATUS_LU) {
7203 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
7204 device_xname(sc->sc_dev),
7205 (status & STATUS_FD) ? "FDX" : "HDX"));
7206 /*
7207 * NOTE: CTRL will update TFCE and RFCE automatically,
7208 * so we should update sc->sc_ctrl
7209 */
7210
7211 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7212 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7213 sc->sc_fcrtl &= ~FCRTL_XONE;
7214 if (status & STATUS_FD)
7215 sc->sc_tctl |=
7216 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7217 else
7218 sc->sc_tctl |=
7219 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7220 if (sc->sc_ctrl & CTRL_TFCE)
7221 sc->sc_fcrtl |= FCRTL_XONE;
7222 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7223 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7224 WMREG_OLD_FCRTL : WMREG_FCRTL,
7225 sc->sc_fcrtl);
7226 sc->sc_tbi_linkup = 1;
7227 } else {
7228 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
7229 device_xname(sc->sc_dev)));
7230 sc->sc_tbi_linkup = 0;
7231 }
7232 /* Update LED */
7233 wm_tbi_serdes_set_linkled(sc);
7234 } else if (icr & ICR_RXSEQ) {
7235 DPRINTF(WM_DEBUG_LINK,
7236 ("%s: LINK: Receive sequence error\n",
7237 device_xname(sc->sc_dev)));
7238 }
7239 }
7240
7241 /*
7242 * wm_linkintr_serdes:
7243 *
7244  *	Helper; handle link interrupts for SERDES mode.
7245 */
7246 static void
7247 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
7248 {
7249 struct mii_data *mii = &sc->sc_mii;
7250 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7251 uint32_t pcs_adv, pcs_lpab, reg;
7252
7253 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7254 __func__));
7255
7256 if (icr & ICR_LSC) {
7257 /* Check PCS */
7258 reg = CSR_READ(sc, WMREG_PCS_LSTS);
7259 if ((reg & PCS_LSTS_LINKOK) != 0) {
7260 mii->mii_media_status |= IFM_ACTIVE;
7261 sc->sc_tbi_linkup = 1;
7262 } else {
7263 mii->mii_media_status |= IFM_NONE;
7264 sc->sc_tbi_linkup = 0;
7265 wm_tbi_serdes_set_linkled(sc);
7266 return;
7267 }
7268 mii->mii_media_active |= IFM_1000_SX;
7269 if ((reg & PCS_LSTS_FDX) != 0)
7270 mii->mii_media_active |= IFM_FDX;
7271 else
7272 mii->mii_media_active |= IFM_HDX;
7273 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7274 /* Check flow */
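			/*
			 * This follows the 802.3 Annex 28B pause
			 * resolution: symmetric pause on both sides
			 * enables flow control in both directions;
			 * otherwise the asymmetric pause bits select at
			 * most one direction.
			 */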
7275 reg = CSR_READ(sc, WMREG_PCS_LSTS);
7276 if ((reg & PCS_LSTS_AN_COMP) == 0) {
7277 DPRINTF(WM_DEBUG_LINK,
7278 ("XXX LINKOK but not ACOMP\n"));
7279 return;
7280 }
7281 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
7282 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
7283 DPRINTF(WM_DEBUG_LINK,
7284 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
7285 if ((pcs_adv & TXCW_SYM_PAUSE)
7286 && (pcs_lpab & TXCW_SYM_PAUSE)) {
7287 mii->mii_media_active |= IFM_FLOW
7288 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
7289 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
7290 && (pcs_adv & TXCW_ASYM_PAUSE)
7291 && (pcs_lpab & TXCW_SYM_PAUSE)
7292 && (pcs_lpab & TXCW_ASYM_PAUSE))
7293 mii->mii_media_active |= IFM_FLOW
7294 | IFM_ETH_TXPAUSE;
7295 else if ((pcs_adv & TXCW_SYM_PAUSE)
7296 && (pcs_adv & TXCW_ASYM_PAUSE)
7297 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
7298 && (pcs_lpab & TXCW_ASYM_PAUSE))
7299 mii->mii_media_active |= IFM_FLOW
7300 | IFM_ETH_RXPAUSE;
7301 }
7302 /* Update LED */
7303 wm_tbi_serdes_set_linkled(sc);
7304 } else {
7305 DPRINTF(WM_DEBUG_LINK,
7306 ("%s: LINK: Receive sequence error\n",
7307 device_xname(sc->sc_dev)));
7308 }
7309 }
7310
7311 /*
7312 * wm_linkintr:
7313 *
7314 * Helper; handle link interrupts.
7315 */
7316 static void
7317 wm_linkintr(struct wm_softc *sc, uint32_t icr)
7318 {
7319
7320 KASSERT(WM_CORE_LOCKED(sc));
7321
7322 if (sc->sc_flags & WM_F_HAS_MII)
7323 wm_linkintr_gmii(sc, icr);
7324 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7325 && (sc->sc_type >= WM_T_82575))
7326 wm_linkintr_serdes(sc, icr);
7327 else
7328 wm_linkintr_tbi(sc, icr);
7329 }
7330
7331 /*
7332 * wm_intr_legacy:
7333 *
7334 * Interrupt service routine for INTx and MSI.
7335 */
7336 static int
7337 wm_intr_legacy(void *arg)
7338 {
7339 struct wm_softc *sc = arg;
7340 struct wm_txqueue *txq = &sc->sc_txq[0];
7341 struct wm_rxqueue *rxq = &sc->sc_rxq[0];
7342 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7343 uint32_t icr, rndval = 0;
7344 int handled = 0;
7345
7346 DPRINTF(WM_DEBUG_TX,
7347 ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
7348 while (1 /* CONSTCOND */) {
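		/*
		 * Reading ICR acknowledges (clears) the asserted causes,
		 * so keep reading until no cause we service remains set.
		 */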
7349 icr = CSR_READ(sc, WMREG_ICR);
7350 if ((icr & sc->sc_icr) == 0)
7351 break;
7352 if (rndval == 0)
7353 rndval = icr;
7354
7355 WM_RX_LOCK(rxq);
7356
7357 if (sc->sc_stopping) {
7358 WM_RX_UNLOCK(rxq);
7359 break;
7360 }
7361
7362 handled = 1;
7363
7364 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7365 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
7366 DPRINTF(WM_DEBUG_RX,
7367 ("%s: RX: got Rx intr 0x%08x\n",
7368 device_xname(sc->sc_dev),
7369 icr & (ICR_RXDMT0|ICR_RXT0)));
7370 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
7371 }
7372 #endif
7373 wm_rxeof(rxq);
7374
7375 WM_RX_UNLOCK(rxq);
7376 WM_TX_LOCK(txq);
7377
7378 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7379 if (icr & ICR_TXDW) {
7380 DPRINTF(WM_DEBUG_TX,
7381 ("%s: TX: got TXDW interrupt\n",
7382 device_xname(sc->sc_dev)));
7383 WM_EVCNT_INCR(&sc->sc_ev_txdw);
7384 }
7385 #endif
7386 wm_txeof(sc);
7387
7388 WM_TX_UNLOCK(txq);
7389 WM_CORE_LOCK(sc);
7390
7391 if (icr & (ICR_LSC|ICR_RXSEQ)) {
7392 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7393 wm_linkintr(sc, icr);
7394 }
7395
7396 WM_CORE_UNLOCK(sc);
7397
7398 if (icr & ICR_RXO) {
7399 #if defined(WM_DEBUG)
7400 log(LOG_WARNING, "%s: Receive overrun\n",
7401 device_xname(sc->sc_dev));
7402 #endif /* defined(WM_DEBUG) */
7403 }
7404 }
7405
7406 rnd_add_uint32(&sc->rnd_source, rndval);
7407
7408 if (handled) {
7409 /* Try to get more packets going. */
7410 ifp->if_start(ifp);
7411 }
7412
7413 return handled;
7414 }
7415
7416 /*
7417 * wm_txintr_msix:
7418 *
7419 * Interrupt service routine for TX complete interrupt for MSI-X.
7420 */
7421 static int
7422 wm_txintr_msix(void *arg)
7423 {
7424 struct wm_txqueue *txq = arg;
7425 struct wm_softc *sc = txq->txq_sc;
7426 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7427 int handled = 0;
7428
7429 DPRINTF(WM_DEBUG_TX,
7430 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
7431
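	/*
	 * Mask this queue's interrupt while it is being serviced;
	 * it is re-enabled on the way out below.
	 */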
7432 if (sc->sc_type == WM_T_82574)
7433 CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(txq->txq_id)); /* 82574 only */
7434 else if (sc->sc_type == WM_T_82575)
7435 CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(txq->txq_id));
7436 else
7437 CSR_WRITE(sc, WMREG_EIMC, 1 << txq->txq_intr_idx);
7438
7439 WM_TX_LOCK(txq);
7440
7441 if (sc->sc_stopping)
7442 goto out;
7443
7444 WM_EVCNT_INCR(&sc->sc_ev_txdw);
7445 handled = wm_txeof(sc);
7446
7447 out:
7448 WM_TX_UNLOCK(txq);
7449
7450 if (sc->sc_type == WM_T_82574)
7451 CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(txq->txq_id)); /* 82574 only */
7452 else if (sc->sc_type == WM_T_82575)
7453 CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(txq->txq_id));
7454 else
7455 CSR_WRITE(sc, WMREG_EIMS, 1 << txq->txq_intr_idx);
7456
7457 if (handled) {
7458 /* Try to get more packets going. */
7459 ifp->if_start(ifp);
7460 }
7461
7462 return handled;
7463 }
7464
7465 /*
7466 * wm_rxintr_msix:
7467 *
7468 * Interrupt service routine for RX interrupt for MSI-X.
7469 */
7470 static int
7471 wm_rxintr_msix(void *arg)
7472 {
7473 struct wm_rxqueue *rxq = arg;
7474 struct wm_softc *sc = rxq->rxq_sc;
7475
7476 DPRINTF(WM_DEBUG_RX,
7477 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
7478
7479 if (sc->sc_type == WM_T_82574)
7480 CSR_WRITE(sc, WMREG_IMC, ICR_RXQ(rxq->rxq_id)); /* 82574 only */
7481 else if (sc->sc_type == WM_T_82575)
7482 CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE(rxq->rxq_id));
7483 else
7484 CSR_WRITE(sc, WMREG_EIMC, 1 << rxq->rxq_intr_idx);
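	/* Masked above, unmasked after servicing; same pattern as Tx. */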
7485
7486 WM_RX_LOCK(rxq);
7487
7488 if (sc->sc_stopping)
7489 goto out;
7490
7491 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
7492 wm_rxeof(rxq);
7493
7494 out:
7495 WM_RX_UNLOCK(rxq);
7496
7497 if (sc->sc_type == WM_T_82574)
7498 CSR_WRITE(sc, WMREG_IMS, ICR_RXQ(rxq->rxq_id));
7499 else if (sc->sc_type == WM_T_82575)
7500 CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE(rxq->rxq_id));
7501 else
7502 CSR_WRITE(sc, WMREG_EIMS, 1 << rxq->rxq_intr_idx);
7503
7504 return 1;
7505 }
7506
7507 /*
7508 * wm_linkintr_msix:
7509 *
7510 * Interrupt service routine for link status change for MSI-X.
7511 */
7512 static int
7513 wm_linkintr_msix(void *arg)
7514 {
7515 struct wm_softc *sc = arg;
7516 uint32_t reg;
7517
7518 DPRINTF(WM_DEBUG_LINK,
7519 ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
7520
7521 reg = CSR_READ(sc, WMREG_ICR);
7522 WM_CORE_LOCK(sc);
7523 if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
7524 goto out;
7525
7526 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7527 wm_linkintr(sc, ICR_LSC);
7528
7529 out:
7530 WM_CORE_UNLOCK(sc);
7531
7532 if (sc->sc_type == WM_T_82574)
7533 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); /* 82574 only */
7534 else if (sc->sc_type == WM_T_82575)
7535 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
7536 else
7537 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
7538
7539 return 1;
7540 }
7541
7542 /*
7543 * Media related.
7544 * GMII, SGMII, TBI (and SERDES)
7545 */
7546
7547 /* Common */
7548
7549 /*
7550 * wm_tbi_serdes_set_linkled:
7551 *
7552 * Update the link LED on TBI and SERDES devices.
7553 */
7554 static void
7555 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
7556 {
7557
7558 if (sc->sc_tbi_linkup)
7559 sc->sc_ctrl |= CTRL_SWDPIN(0);
7560 else
7561 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7562
7563 /* 82540 or newer devices are active low */
7564 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7565
7566 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7567 }
7568
7569 /* GMII related */
7570
7571 /*
7572 * wm_gmii_reset:
7573 *
7574 * Reset the PHY.
7575 */
7576 static void
7577 wm_gmii_reset(struct wm_softc *sc)
7578 {
7579 uint32_t reg;
7580 int rv;
7581
7582 /* get phy semaphore */
7583 switch (sc->sc_type) {
7584 case WM_T_82571:
7585 case WM_T_82572:
7586 case WM_T_82573:
7587 case WM_T_82574:
7588 case WM_T_82583:
7589 /* XXX should get sw semaphore, too */
7590 rv = wm_get_swsm_semaphore(sc);
7591 break;
7592 case WM_T_82575:
7593 case WM_T_82576:
7594 case WM_T_82580:
7595 case WM_T_I350:
7596 case WM_T_I354:
7597 case WM_T_I210:
7598 case WM_T_I211:
7599 case WM_T_80003:
7600 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7601 break;
7602 case WM_T_ICH8:
7603 case WM_T_ICH9:
7604 case WM_T_ICH10:
7605 case WM_T_PCH:
7606 case WM_T_PCH2:
7607 case WM_T_PCH_LPT:
7608 rv = wm_get_swfwhw_semaphore(sc);
7609 break;
7610 default:
7611 		/* nothing to do */
7612 rv = 0;
7613 break;
7614 }
7615 if (rv != 0) {
7616 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7617 __func__);
7618 return;
7619 }
7620
7621 switch (sc->sc_type) {
7622 case WM_T_82542_2_0:
7623 case WM_T_82542_2_1:
7624 /* null */
7625 break;
7626 case WM_T_82543:
7627 /*
7628 * With 82543, we need to force speed and duplex on the MAC
7629 * equal to what the PHY speed and duplex configuration is.
7630 * In addition, we need to perform a hardware reset on the PHY
7631 * to take it out of reset.
7632 */
7633 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
7634 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7635
7636 /* The PHY reset pin is active-low. */
7637 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7638 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
7639 CTRL_EXT_SWDPIN(4));
7640 reg |= CTRL_EXT_SWDPIO(4);
7641
7642 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7643 CSR_WRITE_FLUSH(sc);
7644 delay(10*1000);
7645
7646 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
7647 CSR_WRITE_FLUSH(sc);
7648 delay(150);
7649 #if 0
7650 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
7651 #endif
7652 delay(20*1000); /* XXX extra delay to get PHY ID? */
7653 break;
7654 case WM_T_82544: /* reset 10000us */
7655 case WM_T_82540:
7656 case WM_T_82545:
7657 case WM_T_82545_3:
7658 case WM_T_82546:
7659 case WM_T_82546_3:
7660 case WM_T_82541:
7661 case WM_T_82541_2:
7662 case WM_T_82547:
7663 case WM_T_82547_2:
7664 case WM_T_82571: /* reset 100us */
7665 case WM_T_82572:
7666 case WM_T_82573:
7667 case WM_T_82574:
7668 case WM_T_82575:
7669 case WM_T_82576:
7670 case WM_T_82580:
7671 case WM_T_I350:
7672 case WM_T_I354:
7673 case WM_T_I210:
7674 case WM_T_I211:
7675 case WM_T_82583:
7676 case WM_T_80003:
7677 /* generic reset */
7678 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7679 CSR_WRITE_FLUSH(sc);
7680 delay(20000);
7681 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7682 CSR_WRITE_FLUSH(sc);
7683 delay(20000);
7684
7685 if ((sc->sc_type == WM_T_82541)
7686 || (sc->sc_type == WM_T_82541_2)
7687 || (sc->sc_type == WM_T_82547)
7688 || (sc->sc_type == WM_T_82547_2)) {
7689 			/* workarounds for igp are done in igp_reset() */
7690 /* XXX add code to set LED after phy reset */
7691 }
7692 break;
7693 case WM_T_ICH8:
7694 case WM_T_ICH9:
7695 case WM_T_ICH10:
7696 case WM_T_PCH:
7697 case WM_T_PCH2:
7698 case WM_T_PCH_LPT:
7699 /* generic reset */
7700 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7701 CSR_WRITE_FLUSH(sc);
7702 delay(100);
7703 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7704 CSR_WRITE_FLUSH(sc);
7705 delay(150);
7706 break;
7707 default:
7708 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
7709 __func__);
7710 break;
7711 }
7712
7713 /* release PHY semaphore */
7714 switch (sc->sc_type) {
7715 case WM_T_82571:
7716 case WM_T_82572:
7717 case WM_T_82573:
7718 case WM_T_82574:
7719 case WM_T_82583:
7720 /* XXX should put sw semaphore, too */
7721 wm_put_swsm_semaphore(sc);
7722 break;
7723 case WM_T_82575:
7724 case WM_T_82576:
7725 case WM_T_82580:
7726 case WM_T_I350:
7727 case WM_T_I354:
7728 case WM_T_I210:
7729 case WM_T_I211:
7730 case WM_T_80003:
7731 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7732 break;
7733 case WM_T_ICH8:
7734 case WM_T_ICH9:
7735 case WM_T_ICH10:
7736 case WM_T_PCH:
7737 case WM_T_PCH2:
7738 case WM_T_PCH_LPT:
7739 wm_put_swfwhw_semaphore(sc);
7740 break;
7741 default:
7742 		/* nothing to do */
7743 rv = 0;
7744 break;
7745 }
7746
7747 /* get_cfg_done */
7748 wm_get_cfg_done(sc);
7749
7750 /* extra setup */
7751 switch (sc->sc_type) {
7752 case WM_T_82542_2_0:
7753 case WM_T_82542_2_1:
7754 case WM_T_82543:
7755 case WM_T_82544:
7756 case WM_T_82540:
7757 case WM_T_82545:
7758 case WM_T_82545_3:
7759 case WM_T_82546:
7760 case WM_T_82546_3:
7761 case WM_T_82541_2:
7762 case WM_T_82547_2:
7763 case WM_T_82571:
7764 case WM_T_82572:
7765 case WM_T_82573:
7766 case WM_T_82574:
7767 case WM_T_82575:
7768 case WM_T_82576:
7769 case WM_T_82580:
7770 case WM_T_I350:
7771 case WM_T_I354:
7772 case WM_T_I210:
7773 case WM_T_I211:
7774 case WM_T_82583:
7775 case WM_T_80003:
7776 /* null */
7777 break;
7778 case WM_T_82541:
7779 case WM_T_82547:
7780 		/* XXX Actively configure the LED after PHY reset */
7781 break;
7782 case WM_T_ICH8:
7783 case WM_T_ICH9:
7784 case WM_T_ICH10:
7785 case WM_T_PCH:
7786 case WM_T_PCH2:
7787 case WM_T_PCH_LPT:
7788 		/* Allow time for h/w to get to a quiescent state after reset */
7789 delay(10*1000);
7790
7791 if (sc->sc_type == WM_T_PCH)
7792 wm_hv_phy_workaround_ich8lan(sc);
7793
7794 if (sc->sc_type == WM_T_PCH2)
7795 wm_lv_phy_workaround_ich8lan(sc);
7796
7797 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
7798 /*
7799 * dummy read to clear the phy wakeup bit after lcd
7800 * reset
7801 */
7802 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
7803 }
7804
7805 /*
7806 		 * XXX Configure the LCD with the extended configuration region
7807 * in NVM
7808 */
7809
7810 /* Configure the LCD with the OEM bits in NVM */
7811 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
7812 || (sc->sc_type == WM_T_PCH_LPT)) {
7813 /*
7814 * Disable LPLU.
7815 * XXX It seems that 82567 has LPLU, too.
7816 */
7817 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
7818 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
7819 reg |= HV_OEM_BITS_ANEGNOW;
7820 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
7821 }
7822 break;
7823 default:
7824 panic("%s: unknown type\n", __func__);
7825 break;
7826 }
7827 }
7828
7829 /*
7830 * wm_get_phy_id_82575:
7831 *
7832 * Return PHY ID. Return -1 if it failed.
7833 */
7834 static int
7835 wm_get_phy_id_82575(struct wm_softc *sc)
7836 {
7837 uint32_t reg;
7838 int phyid = -1;
7839
7840 /* XXX */
7841 if ((sc->sc_flags & WM_F_SGMII) == 0)
7842 return -1;
7843
7844 if (wm_sgmii_uses_mdio(sc)) {
7845 switch (sc->sc_type) {
7846 case WM_T_82575:
7847 case WM_T_82576:
7848 reg = CSR_READ(sc, WMREG_MDIC);
7849 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
7850 break;
7851 case WM_T_82580:
7852 case WM_T_I350:
7853 case WM_T_I354:
7854 case WM_T_I210:
7855 case WM_T_I211:
7856 reg = CSR_READ(sc, WMREG_MDICNFG);
7857 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
7858 break;
7859 default:
7860 return -1;
7861 }
7862 }
7863
7864 return phyid;
7865 }
7866
7867
7868 /*
7869 * wm_gmii_mediainit:
7870 *
7871 * Initialize media for use on 1000BASE-T devices.
7872 */
7873 static void
7874 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
7875 {
7876 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7877 struct mii_data *mii = &sc->sc_mii;
7878 uint32_t reg;
7879
7880 /* We have GMII. */
7881 sc->sc_flags |= WM_F_HAS_MII;
7882
7883 if (sc->sc_type == WM_T_80003)
7884 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7885 else
7886 sc->sc_tipg = TIPG_1000T_DFLT;
7887
7888 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
7889 if ((sc->sc_type == WM_T_82580)
7890 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
7891 || (sc->sc_type == WM_T_I211)) {
7892 reg = CSR_READ(sc, WMREG_PHPM);
7893 reg &= ~PHPM_GO_LINK_D;
7894 CSR_WRITE(sc, WMREG_PHPM, reg);
7895 }
7896
7897 /*
7898 * Let the chip set speed/duplex on its own based on
7899 * signals from the PHY.
7900 * XXXbouyer - I'm not sure this is right for the 80003,
7901 * the em driver only sets CTRL_SLU here - but it seems to work.
7902 */
7903 sc->sc_ctrl |= CTRL_SLU;
7904 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7905
7906 /* Initialize our media structures and probe the GMII. */
7907 mii->mii_ifp = ifp;
7908
7909 /*
7910 * Determine the PHY access method.
7911 *
7912 * For SGMII, use SGMII specific method.
7913 *
7914 * For some devices, we can determine the PHY access method
7915 * from sc_type.
7916 *
7917 * For ICH and PCH variants, it's difficult to determine the PHY
7918 * access method by sc_type, so use the PCI product ID for some
7919 * devices.
7920 	 * For other ICH8 variants, try igp's method first.  If the PHY
7921 	 * can't be detected, fall back to bm's method.
7922 */
7923 switch (prodid) {
7924 case PCI_PRODUCT_INTEL_PCH_M_LM:
7925 case PCI_PRODUCT_INTEL_PCH_M_LC:
7926 /* 82577 */
7927 sc->sc_phytype = WMPHY_82577;
7928 break;
7929 case PCI_PRODUCT_INTEL_PCH_D_DM:
7930 case PCI_PRODUCT_INTEL_PCH_D_DC:
7931 /* 82578 */
7932 sc->sc_phytype = WMPHY_82578;
7933 break;
7934 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
7935 case PCI_PRODUCT_INTEL_PCH2_LV_V:
7936 /* 82579 */
7937 sc->sc_phytype = WMPHY_82579;
7938 break;
7939 case PCI_PRODUCT_INTEL_82801I_BM:
7940 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
7941 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
7942 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
7943 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
7944 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
7945 /* 82567 */
7946 sc->sc_phytype = WMPHY_BM;
7947 mii->mii_readreg = wm_gmii_bm_readreg;
7948 mii->mii_writereg = wm_gmii_bm_writereg;
7949 break;
7950 default:
7951 if (((sc->sc_flags & WM_F_SGMII) != 0)
7952 && !wm_sgmii_uses_mdio(sc)){
7953 /* SGMII */
7954 mii->mii_readreg = wm_sgmii_readreg;
7955 mii->mii_writereg = wm_sgmii_writereg;
7956 } else if (sc->sc_type >= WM_T_80003) {
7957 /* 80003 */
7958 mii->mii_readreg = wm_gmii_i80003_readreg;
7959 mii->mii_writereg = wm_gmii_i80003_writereg;
7960 } else if (sc->sc_type >= WM_T_I210) {
7961 /* I210 and I211 */
7962 mii->mii_readreg = wm_gmii_gs40g_readreg;
7963 mii->mii_writereg = wm_gmii_gs40g_writereg;
7964 } else if (sc->sc_type >= WM_T_82580) {
7965 /* 82580, I350 and I354 */
7966 sc->sc_phytype = WMPHY_82580;
7967 mii->mii_readreg = wm_gmii_82580_readreg;
7968 mii->mii_writereg = wm_gmii_82580_writereg;
7969 } else if (sc->sc_type >= WM_T_82544) {
7970 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
7971 mii->mii_readreg = wm_gmii_i82544_readreg;
7972 mii->mii_writereg = wm_gmii_i82544_writereg;
7973 } else {
7974 mii->mii_readreg = wm_gmii_i82543_readreg;
7975 mii->mii_writereg = wm_gmii_i82543_writereg;
7976 }
7977 break;
7978 }
7979 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
7980 /* All PCH* use _hv_ */
7981 mii->mii_readreg = wm_gmii_hv_readreg;
7982 mii->mii_writereg = wm_gmii_hv_writereg;
7983 }
7984 mii->mii_statchg = wm_gmii_statchg;
7985
7986 wm_gmii_reset(sc);
7987
7988 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7989 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
7990 wm_gmii_mediastatus);
7991
7992 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
7993 || (sc->sc_type == WM_T_82580)
7994 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
7995 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
7996 if ((sc->sc_flags & WM_F_SGMII) == 0) {
7997 /* Attach only one port */
7998 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
7999 MII_OFFSET_ANY, MIIF_DOPAUSE);
8000 } else {
8001 int i, id;
8002 uint32_t ctrl_ext;
8003
8004 id = wm_get_phy_id_82575(sc);
8005 if (id != -1) {
8006 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
8007 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
8008 }
8009 if ((id == -1)
8010 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
8011 /* Power on sgmii phy if it is disabled */
8012 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8013 CSR_WRITE(sc, WMREG_CTRL_EXT,
8014 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
8015 CSR_WRITE_FLUSH(sc);
8016 delay(300*1000); /* XXX too long */
8017
8018 				/* Try PHY addresses 1 through 7 */
8019 for (i = 1; i < 8; i++)
8020 mii_attach(sc->sc_dev, &sc->sc_mii,
8021 0xffffffff, i, MII_OFFSET_ANY,
8022 MIIF_DOPAUSE);
8023
8024 /* restore previous sfp cage power state */
8025 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8026 }
8027 }
8028 } else {
8029 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8030 MII_OFFSET_ANY, MIIF_DOPAUSE);
8031 }
8032
8033 /*
8034  * If the MAC is a PCH2 or PCH_LPT and no MII PHY was detected, call
8035  * wm_set_mdio_slow_mode_hv() as a workaround and retry.
8036 */
8037 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
8038 (LIST_FIRST(&mii->mii_phys) == NULL)) {
8039 wm_set_mdio_slow_mode_hv(sc);
8040 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8041 MII_OFFSET_ANY, MIIF_DOPAUSE);
8042 }
8043
8044 /*
8045 * (For ICH8 variants)
8046 * If PHY detection failed, use BM's r/w function and retry.
8047 */
8048 if (LIST_FIRST(&mii->mii_phys) == NULL) {
8049 /* if failed, retry with *_bm_* */
8050 mii->mii_readreg = wm_gmii_bm_readreg;
8051 mii->mii_writereg = wm_gmii_bm_writereg;
8052
8053 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8054 MII_OFFSET_ANY, MIIF_DOPAUSE);
8055 }
8056
8057 if (LIST_FIRST(&mii->mii_phys) == NULL) {
8058 		/* No PHY was found */
8059 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
8060 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
8061 sc->sc_phytype = WMPHY_NONE;
8062 } else {
8063 /*
8064 * PHY Found!
8065 * Check PHY type.
8066 */
8067 uint32_t model;
8068 struct mii_softc *child;
8069
8070 child = LIST_FIRST(&mii->mii_phys);
8071 model = child->mii_mpd_model;
8072 if (model == MII_MODEL_yyINTEL_I82566)
8073 sc->sc_phytype = WMPHY_IGP_3;
8074
8075 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
8076 }
8077 }
8078
8079 /*
8080 * wm_gmii_mediachange: [ifmedia interface function]
8081 *
8082 * Set hardware to newly-selected media on a 1000BASE-T device.
8083 */
8084 static int
8085 wm_gmii_mediachange(struct ifnet *ifp)
8086 {
8087 struct wm_softc *sc = ifp->if_softc;
8088 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8089 int rc;
8090
8091 if ((ifp->if_flags & IFF_UP) == 0)
8092 return 0;
8093
8094 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
8095 sc->sc_ctrl |= CTRL_SLU;
8096 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8097 || (sc->sc_type > WM_T_82543)) {
8098 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
8099 } else {
8100 sc->sc_ctrl &= ~CTRL_ASDE;
8101 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
8102 if (ife->ifm_media & IFM_FDX)
8103 sc->sc_ctrl |= CTRL_FD;
8104 switch (IFM_SUBTYPE(ife->ifm_media)) {
8105 case IFM_10_T:
8106 sc->sc_ctrl |= CTRL_SPEED_10;
8107 break;
8108 case IFM_100_TX:
8109 sc->sc_ctrl |= CTRL_SPEED_100;
8110 break;
8111 case IFM_1000_T:
8112 sc->sc_ctrl |= CTRL_SPEED_1000;
8113 break;
8114 default:
8115 panic("wm_gmii_mediachange: bad media 0x%x",
8116 ife->ifm_media);
8117 }
8118 }
8119 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8120 if (sc->sc_type <= WM_T_82543)
8121 wm_gmii_reset(sc);
8122
8123 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
8124 return 0;
8125 return rc;
8126 }
8127
8128 /*
8129 * wm_gmii_mediastatus: [ifmedia interface function]
8130 *
8131 * Get the current interface media status on a 1000BASE-T device.
8132 */
8133 static void
8134 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8135 {
8136 struct wm_softc *sc = ifp->if_softc;
8137
8138 ether_mediastatus(ifp, ifmr);
8139 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8140 | sc->sc_flowflags;
8141 }
8142
8143 #define MDI_IO CTRL_SWDPIN(2)
8144 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
8145 #define MDI_CLK CTRL_SWDPIN(3)
8146
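/*
 * wm_i82543_mii_sendbits:
 *
 *	Bit-bang a value out the i82543's MII management interface:
 *	each bit is driven onto MDI_IO (MSB first) and latched into the
 *	PHY with a low-high-low pulse on MDI_CLK, ~10us per phase.
 */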
8147 static void
8148 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
8149 {
8150 uint32_t i, v;
8151
8152 v = CSR_READ(sc, WMREG_CTRL);
8153 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8154 v |= MDI_DIR | CTRL_SWDPIO(3);
8155
8156 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
8157 if (data & i)
8158 v |= MDI_IO;
8159 else
8160 v &= ~MDI_IO;
8161 CSR_WRITE(sc, WMREG_CTRL, v);
8162 CSR_WRITE_FLUSH(sc);
8163 delay(10);
8164 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8165 CSR_WRITE_FLUSH(sc);
8166 delay(10);
8167 CSR_WRITE(sc, WMREG_CTRL, v);
8168 CSR_WRITE_FLUSH(sc);
8169 delay(10);
8170 }
8171 }
8172
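/*
 * wm_i82543_mii_recvbits:
 *
 *	Bit-bang 16 bits of data in from the i82543's MII management
 *	interface: after a turnaround clock cycle, each MDI_CLK pulse
 *	samples one bit from MDI_IO, MSB first.
 */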
8173 static uint32_t
8174 wm_i82543_mii_recvbits(struct wm_softc *sc)
8175 {
8176 uint32_t v, i, data = 0;
8177
8178 v = CSR_READ(sc, WMREG_CTRL);
8179 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8180 v |= CTRL_SWDPIO(3);
8181
8182 CSR_WRITE(sc, WMREG_CTRL, v);
8183 CSR_WRITE_FLUSH(sc);
8184 delay(10);
8185 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8186 CSR_WRITE_FLUSH(sc);
8187 delay(10);
8188 CSR_WRITE(sc, WMREG_CTRL, v);
8189 CSR_WRITE_FLUSH(sc);
8190 delay(10);
8191
8192 for (i = 0; i < 16; i++) {
8193 data <<= 1;
8194 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8195 CSR_WRITE_FLUSH(sc);
8196 delay(10);
8197 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
8198 data |= 1;
8199 CSR_WRITE(sc, WMREG_CTRL, v);
8200 CSR_WRITE_FLUSH(sc);
8201 delay(10);
8202 }
8203
8204 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8205 CSR_WRITE_FLUSH(sc);
8206 delay(10);
8207 CSR_WRITE(sc, WMREG_CTRL, v);
8208 CSR_WRITE_FLUSH(sc);
8209 delay(10);
8210
8211 return data;
8212 }
8213
8214 #undef MDI_IO
8215 #undef MDI_DIR
8216 #undef MDI_CLK
8217
8218 /*
8219 * wm_gmii_i82543_readreg: [mii interface function]
8220 *
8221 * Read a PHY register on the GMII (i82543 version).
8222 */
8223 static int
8224 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
8225 {
8226 struct wm_softc *sc = device_private(self);
8227 int rv;
8228
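	/*
	 * 32 bits of all-ones is the MDIO frame preamble; it is followed
	 * by the 14-bit read command (start, opcode, PHY and register
	 * address) before the PHY drives the data back in recvbits().
	 */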
8229 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8230 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
8231 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
8232 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
8233
8234 DPRINTF(WM_DEBUG_GMII,
8235 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
8236 device_xname(sc->sc_dev), phy, reg, rv));
8237
8238 return rv;
8239 }
8240
8241 /*
8242 * wm_gmii_i82543_writereg: [mii interface function]
8243 *
8244 * Write a PHY register on the GMII (i82543 version).
8245 */
8246 static void
8247 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
8248 {
8249 struct wm_softc *sc = device_private(self);
8250
8251 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8252 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
8253 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
8254 (MII_COMMAND_START << 30), 32);
8255 }
8256
8257 /*
8258 * wm_gmii_i82544_readreg: [mii interface function]
8259 *
8260 * Read a PHY register on the GMII.
8261 */
8262 static int
8263 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
8264 {
8265 struct wm_softc *sc = device_private(self);
8266 uint32_t mdic = 0;
8267 int i, rv;
8268
8269 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
8270 MDIC_REGADD(reg));
8271
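	/*
	 * The MDIC register runs the serial transaction in hardware;
	 * poll its READY bit, waiting 50us per iteration.
	 */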
8272 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8273 mdic = CSR_READ(sc, WMREG_MDIC);
8274 if (mdic & MDIC_READY)
8275 break;
8276 delay(50);
8277 }
8278
8279 if ((mdic & MDIC_READY) == 0) {
8280 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
8281 device_xname(sc->sc_dev), phy, reg);
8282 rv = 0;
8283 } else if (mdic & MDIC_E) {
8284 #if 0 /* This is normal if no PHY is present. */
8285 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
8286 device_xname(sc->sc_dev), phy, reg);
8287 #endif
8288 rv = 0;
8289 } else {
8290 rv = MDIC_DATA(mdic);
8291 if (rv == 0xffff)
8292 rv = 0;
8293 }
8294
8295 return rv;
8296 }
8297
8298 /*
8299 * wm_gmii_i82544_writereg: [mii interface function]
8300 *
8301 * Write a PHY register on the GMII.
8302 */
8303 static void
8304 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
8305 {
8306 struct wm_softc *sc = device_private(self);
8307 uint32_t mdic = 0;
8308 int i;
8309
8310 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
8311 MDIC_REGADD(reg) | MDIC_DATA(val));
8312
8313 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8314 mdic = CSR_READ(sc, WMREG_MDIC);
8315 if (mdic & MDIC_READY)
8316 break;
8317 delay(50);
8318 }
8319
8320 if ((mdic & MDIC_READY) == 0)
8321 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
8322 device_xname(sc->sc_dev), phy, reg);
8323 else if (mdic & MDIC_E)
8324 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
8325 device_xname(sc->sc_dev), phy, reg);
8326 }
8327
8328 /*
8329 * wm_gmii_i80003_readreg: [mii interface function]
8330 *
8331  *	Read a PHY register on the kumeran bus.
8332  *	This could be handled by the PHY layer if we didn't have to lock the
8333  *	resource ...
8334 */
8335 static int
8336 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
8337 {
8338 struct wm_softc *sc = device_private(self);
8339 int sem;
8340 int rv;
8341
8342 if (phy != 1) /* only one PHY on kumeran bus */
8343 return 0;
8344
8345 sem = swfwphysem[sc->sc_funcid];
8346 if (wm_get_swfw_semaphore(sc, sem)) {
8347 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8348 __func__);
8349 return 0;
8350 }
8351
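	/*
	 * The GG82563 maps its registers into pages: the page number is
	 * the high bits of the register number, and registers at or
	 * above GG82563_MIN_ALT_REG must be selected through the
	 * alternate page-select register.
	 */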
8352 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8353 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8354 reg >> GG82563_PAGE_SHIFT);
8355 } else {
8356 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8357 reg >> GG82563_PAGE_SHIFT);
8358 }
8359 	/* Wait another 200us to work around a bug in the MDIC ready bit */
8360 delay(200);
8361 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8362 delay(200);
8363
8364 wm_put_swfw_semaphore(sc, sem);
8365 return rv;
8366 }
8367
8368 /*
8369 * wm_gmii_i80003_writereg: [mii interface function]
8370 *
8371  *	Write a PHY register on the kumeran bus.
8372  *	This could be handled by the PHY layer if we didn't have to lock the
8373  *	resource ...
8374 */
8375 static void
8376 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
8377 {
8378 struct wm_softc *sc = device_private(self);
8379 int sem;
8380
8381 if (phy != 1) /* only one PHY on kumeran bus */
8382 return;
8383
8384 sem = swfwphysem[sc->sc_funcid];
8385 if (wm_get_swfw_semaphore(sc, sem)) {
8386 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8387 __func__);
8388 return;
8389 }
8390
8391 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8392 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8393 reg >> GG82563_PAGE_SHIFT);
8394 } else {
8395 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8396 reg >> GG82563_PAGE_SHIFT);
8397 }
8398 	/* Wait another 200us to work around a bug in the MDIC ready bit */
8399 delay(200);
8400 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8401 delay(200);
8402
8403 wm_put_swfw_semaphore(sc, sem);
8404 }
8405
8406 /*
8407 * wm_gmii_bm_readreg: [mii interface function]
8408 *
8409  *	Read a PHY register on the BM PHY (82567 on ICH9/ICH10 variants).
8410  *	This could be handled by the PHY layer if we didn't have to lock the
8411  *	resource ...
8412 */
8413 static int
8414 wm_gmii_bm_readreg(device_t self, int phy, int reg)
8415 {
8416 struct wm_softc *sc = device_private(self);
8417 int sem;
8418 int rv;
8419
8420 sem = swfwphysem[sc->sc_funcid];
8421 if (wm_get_swfw_semaphore(sc, sem)) {
8422 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8423 __func__);
8424 return 0;
8425 }
8426
8427 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8428 if (phy == 1)
8429 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
8430 reg);
8431 else
8432 wm_gmii_i82544_writereg(self, phy,
8433 GG82563_PHY_PAGE_SELECT,
8434 reg >> GG82563_PAGE_SHIFT);
8435 }
8436
8437 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8438 wm_put_swfw_semaphore(sc, sem);
8439 return rv;
8440 }
8441
8442 /*
8443 * wm_gmii_bm_writereg: [mii interface function]
8444 *
8445  *	Write a PHY register on the BM PHY (82567 on ICH9/ICH10 variants).
8446  *	This could be handled by the PHY layer if we didn't have to lock the
8447  *	resource ...
8448 */
8449 static void
8450 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
8451 {
8452 struct wm_softc *sc = device_private(self);
8453 int sem;
8454
8455 sem = swfwphysem[sc->sc_funcid];
8456 if (wm_get_swfw_semaphore(sc, sem)) {
8457 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8458 __func__);
8459 return;
8460 }
8461
8462 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8463 if (phy == 1)
8464 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
8465 reg);
8466 else
8467 wm_gmii_i82544_writereg(self, phy,
8468 GG82563_PHY_PAGE_SELECT,
8469 reg >> GG82563_PAGE_SHIFT);
8470 }
8471
8472 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8473 wm_put_swfw_semaphore(sc, sem);
8474 }
8475
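/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register (page 800).  The sequence
 *	is: select page 769 and set the WUC enable bit, select page 800
 *	and issue the address and data opcodes, then restore page 769's
 *	enable register to its original value.
 */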
8476 static void
8477 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
8478 {
8479 struct wm_softc *sc = device_private(self);
8480 uint16_t regnum = BM_PHY_REG_NUM(offset);
8481 uint16_t wuce;
8482
8483 /* XXX Gig must be disabled for MDIO accesses to page 800 */
8484 if (sc->sc_type == WM_T_PCH) {
8485 		/* XXX the e1000 driver does nothing here... why? */
8486 }
8487
8488 /* Set page 769 */
8489 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8490 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8491
8492 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
8493
8494 wuce &= ~BM_WUC_HOST_WU_BIT;
8495 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
8496 wuce | BM_WUC_ENABLE_BIT);
8497
8498 /* Select page 800 */
8499 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8500 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
8501
8502 /* Write page 800 */
8503 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
8504
8505 if (rd)
8506 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
8507 else
8508 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
8509
8510 /* Set page 769 */
8511 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8512 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8513
8514 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
8515 }
8516
8517 /*
8518 * wm_gmii_hv_readreg: [mii interface function]
8519 *
8520  *	Read a PHY register on the HV (PCH family) PHY.
8521  *	This could be handled by the PHY layer if we didn't have to lock the
8522  *	resource ...
8523 */
8524 static int
8525 wm_gmii_hv_readreg(device_t self, int phy, int reg)
8526 {
8527 struct wm_softc *sc = device_private(self);
8528 uint16_t page = BM_PHY_REG_PAGE(reg);
8529 uint16_t regnum = BM_PHY_REG_NUM(reg);
8530 uint16_t val;
8531 int rv;
8532
8533 if (wm_get_swfwhw_semaphore(sc)) {
8534 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8535 __func__);
8536 return 0;
8537 }
8538
8539 /* XXX Workaround failure in MDIO access while cable is disconnected */
8540 if (sc->sc_phytype == WMPHY_82577) {
8541 /* XXX must write */
8542 }
8543
8544 /* Page 800 works differently than the rest so it has its own func */
8545 	if (page == BM_WUC_PAGE) {
8546 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
8547 		return val;
8548 	}
8549
8550 /*
8551 	 * Pages lower than 768 work differently than the rest and are
8552 	 * not handled here
8553 */
8554 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8555 		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
8556 		return 0;
8557 	}
8558
8559 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8560 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8561 page << BME1000_PAGE_SHIFT);
8562 }
8563
8564 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
8565 wm_put_swfwhw_semaphore(sc);
8566 return rv;
8567 }
8568
8569 /*
8570 * wm_gmii_hv_writereg: [mii interface function]
8571 *
8572  *	Write a PHY register on the HV (PCH family) PHY.
8573  *	This could be handled by the PHY layer if we didn't have to lock the
8574  *	resource ...
8575 */
8576 static void
8577 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
8578 {
8579 struct wm_softc *sc = device_private(self);
8580 uint16_t page = BM_PHY_REG_PAGE(reg);
8581 uint16_t regnum = BM_PHY_REG_NUM(reg);
8582
8583 if (wm_get_swfwhw_semaphore(sc)) {
8584 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8585 __func__);
8586 return;
8587 }
8588
8589 /* XXX Workaround failure in MDIO access while cable is disconnected */
8590
8591 /* Page 800 works differently than the rest so it has its own func */
8592 	if (page == BM_WUC_PAGE) {
8593 		uint16_t tmp;
8594 
8595 		tmp = val;
8596 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
8597 		return;
8598 	}
8599
8600 /*
8601 	 * Pages lower than 768 work differently than the rest and are
8602 	 * not handled here
8603 */
8604 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8605 		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
8606 		return;
8607 	}
8608
8609 /*
8610 * XXX Workaround MDIO accesses being disabled after entering IEEE
8611 * Power Down (whenever bit 11 of the PHY control register is set)
8612 */
8613
8614 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8615 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8616 page << BME1000_PAGE_SHIFT);
8617 }
8618
8619 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
8620 wm_put_swfwhw_semaphore(sc);
8621 }
8622
8623 /*
8624 * wm_gmii_82580_readreg: [mii interface function]
8625 *
8626 * Read a PHY register on the 82580 and I350.
8627 * This could be handled by the PHY layer if we didn't have to lock the
8628  *	resource ...
8629 */
8630 static int
8631 wm_gmii_82580_readreg(device_t self, int phy, int reg)
8632 {
8633 struct wm_softc *sc = device_private(self);
8634 int sem;
8635 int rv;
8636
8637 sem = swfwphysem[sc->sc_funcid];
8638 if (wm_get_swfw_semaphore(sc, sem)) {
8639 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8640 __func__);
8641 return 0;
8642 }
8643
8644 rv = wm_gmii_i82544_readreg(self, phy, reg);
8645
8646 wm_put_swfw_semaphore(sc, sem);
8647 return rv;
8648 }
8649
8650 /*
8651 * wm_gmii_82580_writereg: [mii interface function]
8652 *
8653 * Write a PHY register on the 82580 and I350.
8654 * This could be handled by the PHY layer if we didn't have to lock the
8655  *	resource ...
8656 */
8657 static void
8658 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
8659 {
8660 struct wm_softc *sc = device_private(self);
8661 int sem;
8662
8663 sem = swfwphysem[sc->sc_funcid];
8664 if (wm_get_swfw_semaphore(sc, sem)) {
8665 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8666 __func__);
8667 return;
8668 }
8669
8670 wm_gmii_i82544_writereg(self, phy, reg, val);
8671
8672 wm_put_swfw_semaphore(sc, sem);
8673 }
8674
8675 /*
8676 * wm_gmii_gs40g_readreg: [mii interface function]
8677 *
8678  *	Read a PHY register on the I210 and I211.
8679  *	This could be handled by the PHY layer if we didn't have to lock the
8680  *	resource ...
8681 */
8682 static int
8683 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
8684 {
8685 struct wm_softc *sc = device_private(self);
8686 int sem;
8687 int page, offset;
8688 int rv;
8689
8690 /* Acquire semaphore */
8691 sem = swfwphysem[sc->sc_funcid];
8692 if (wm_get_swfw_semaphore(sc, sem)) {
8693 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8694 __func__);
8695 return 0;
8696 }
8697
8698 /* Page select */
8699 page = reg >> GS40G_PAGE_SHIFT;
8700 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8701
8702 /* Read reg */
8703 offset = reg & GS40G_OFFSET_MASK;
8704 rv = wm_gmii_i82544_readreg(self, phy, offset);
8705
8706 wm_put_swfw_semaphore(sc, sem);
8707 return rv;
8708 }
8709
8710 /*
8711 * wm_gmii_gs40g_writereg: [mii interface function]
8712 *
8713 * Write a PHY register on the I210 and I211.
8714 * This could be handled by the PHY layer if we didn't have to lock the
8715  *	resource ...
8716 */
8717 static void
8718 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
8719 {
8720 struct wm_softc *sc = device_private(self);
8721 int sem;
8722 int page, offset;
8723
8724 /* Acquire semaphore */
8725 sem = swfwphysem[sc->sc_funcid];
8726 if (wm_get_swfw_semaphore(sc, sem)) {
8727 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8728 __func__);
8729 return;
8730 }
8731
8732 /* Page select */
8733 page = reg >> GS40G_PAGE_SHIFT;
8734 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8735
8736 /* Write reg */
8737 offset = reg & GS40G_OFFSET_MASK;
8738 wm_gmii_i82544_writereg(self, phy, offset, val);
8739
8740 /* Release semaphore */
8741 wm_put_swfw_semaphore(sc, sem);
8742 }
8743
8744 /*
8745 * wm_gmii_statchg: [mii interface function]
8746 *
8747 * Callback from MII layer when media changes.
8748 */
8749 static void
8750 wm_gmii_statchg(struct ifnet *ifp)
8751 {
8752 struct wm_softc *sc = ifp->if_softc;
8753 struct mii_data *mii = &sc->sc_mii;
8754
8755 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
8756 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8757 sc->sc_fcrtl &= ~FCRTL_XONE;
8758
8759 /*
8760 * Get flow control negotiation result.
8761 */
8762 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
8763 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
8764 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
8765 mii->mii_media_active &= ~IFM_ETH_FMASK;
8766 }
8767
8768 if (sc->sc_flowflags & IFM_FLOW) {
8769 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
8770 sc->sc_ctrl |= CTRL_TFCE;
8771 sc->sc_fcrtl |= FCRTL_XONE;
8772 }
8773 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
8774 sc->sc_ctrl |= CTRL_RFCE;
8775 }
8776
8777 if (sc->sc_mii.mii_media_active & IFM_FDX) {
8778 DPRINTF(WM_DEBUG_LINK,
8779 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
8780 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8781 } else {
8782 DPRINTF(WM_DEBUG_LINK,
8783 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
8784 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8785 }
8786
8787 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8788 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8789 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
8790 : WMREG_FCRTL, sc->sc_fcrtl);
8791 if (sc->sc_type == WM_T_80003) {
8792 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
8793 case IFM_1000_T:
8794 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8795 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
8796 sc->sc_tipg = TIPG_1000T_80003_DFLT;
8797 break;
8798 default:
8799 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8800 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
8801 sc->sc_tipg = TIPG_10_100_80003_DFLT;
8802 break;
8803 }
8804 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
8805 }
8806 }
8807
8808 /*
8809 * wm_kmrn_readreg:
8810 *
8811 * Read a kumeran register
8812 */
8813 static int
8814 wm_kmrn_readreg(struct wm_softc *sc, int reg)
8815 {
8816 int rv;
8817
8818 if (sc->sc_flags & WM_F_LOCK_SWFW) {
8819 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8820 aprint_error_dev(sc->sc_dev,
8821 "%s: failed to get semaphore\n", __func__);
8822 return 0;
8823 }
8824 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8825 if (wm_get_swfwhw_semaphore(sc)) {
8826 aprint_error_dev(sc->sc_dev,
8827 "%s: failed to get semaphore\n", __func__);
8828 return 0;
8829 }
8830 }
8831
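	/*
	 * Write the register offset with the read-enable bit set; the
	 * hardware then latches the register's contents into the low
	 * bits of KUMCTRLSTA, which we read back after a short delay.
	 */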
8832 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8833 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8834 KUMCTRLSTA_REN);
8835 CSR_WRITE_FLUSH(sc);
8836 delay(2);
8837
8838 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
8839
8840 if (sc->sc_flags & WM_F_LOCK_SWFW)
8841 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8842 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8843 wm_put_swfwhw_semaphore(sc);
8844
8845 return rv;
8846 }
8847
8848 /*
8849 * wm_kmrn_writereg:
8850 *
8851 * Write a kumeran register
8852 */
8853 static void
8854 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
8855 {
8856
8857 if (sc->sc_flags & WM_F_LOCK_SWFW) {
8858 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8859 aprint_error_dev(sc->sc_dev,
8860 "%s: failed to get semaphore\n", __func__);
8861 return;
8862 }
8863 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8864 if (wm_get_swfwhw_semaphore(sc)) {
8865 aprint_error_dev(sc->sc_dev,
8866 "%s: failed to get semaphore\n", __func__);
8867 return;
8868 }
8869 }
8870
8871 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8872 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8873 (val & KUMCTRLSTA_MASK));
8874
8875 if (sc->sc_flags & WM_F_LOCK_SWFW)
8876 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8877 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8878 wm_put_swfwhw_semaphore(sc);
8879 }
8880
8881 /* SGMII related */
8882
8883 /*
8884  * wm_sgmii_uses_mdio:
8885 *
8886 * Check whether the transaction is to the internal PHY or the external
8887 * MDIO interface. Return true if it's MDIO.
8888 */
8889 static bool
8890 wm_sgmii_uses_mdio(struct wm_softc *sc)
8891 {
8892 uint32_t reg;
8893 bool ismdio = false;
8894
8895 switch (sc->sc_type) {
8896 case WM_T_82575:
8897 case WM_T_82576:
8898 reg = CSR_READ(sc, WMREG_MDIC);
8899 ismdio = ((reg & MDIC_DEST) != 0);
8900 break;
8901 case WM_T_82580:
8902 case WM_T_I350:
8903 case WM_T_I354:
8904 case WM_T_I210:
8905 case WM_T_I211:
8906 reg = CSR_READ(sc, WMREG_MDICNFG);
8907 ismdio = ((reg & MDICNFG_DEST) != 0);
8908 break;
8909 default:
8910 break;
8911 }
8912
8913 return ismdio;
8914 }
8915
8916 /*
8917 * wm_sgmii_readreg: [mii interface function]
8918 *
8919  *	Read a PHY register on the SGMII.
8920  *	This could be handled by the PHY layer if we didn't have to lock the
8921  *	resource ...
8922 */
8923 static int
8924 wm_sgmii_readreg(device_t self, int phy, int reg)
8925 {
8926 struct wm_softc *sc = device_private(self);
8927 uint32_t i2ccmd;
8928 int i, rv;
8929
8930 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
8931 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8932 __func__);
8933 return 0;
8934 }
8935
8936 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
8937 | (phy << I2CCMD_PHY_ADDR_SHIFT)
8938 | I2CCMD_OPCODE_READ;
8939 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8940
8941 /* Poll the ready bit */
8942 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8943 delay(50);
8944 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8945 if (i2ccmd & I2CCMD_READY)
8946 break;
8947 }
8948 if ((i2ccmd & I2CCMD_READY) == 0)
8949 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
8950 if ((i2ccmd & I2CCMD_ERROR) != 0)
8951 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
8952
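	/* The I2C interface returns the two data bytes swapped. */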
8953 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
8954
8955 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
8956 return rv;
8957 }
8958
8959 /*
8960 * wm_sgmii_writereg: [mii interface function]
8961 *
8962 * Write a PHY register on the SGMII.
8963 * This could be handled by the PHY layer if we didn't have to lock the
8964  *	resource ...
8965 */
8966 static void
8967 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
8968 {
8969 struct wm_softc *sc = device_private(self);
8970 uint32_t i2ccmd;
8971 int i;
8972 int val_swapped;
8973
8974 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
8975 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8976 __func__);
8977 return;
8978 }
8979 /* Swap the data bytes for the I2C interface */
8980 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
8981 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
8982 | (phy << I2CCMD_PHY_ADDR_SHIFT)
8983 | I2CCMD_OPCODE_WRITE | val_swapped;
8984 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8985
8986 /* Poll the ready bit */
8987 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8988 delay(50);
8989 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8990 if (i2ccmd & I2CCMD_READY)
8991 break;
8992 }
8993 if ((i2ccmd & I2CCMD_READY) == 0)
8994 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
8995 if ((i2ccmd & I2CCMD_ERROR) != 0)
8996 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
8997
8998 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
8999 }
9000
9001 /* TBI related */
9002
9003 /*
9004 * wm_tbi_mediainit:
9005 *
9006 * Initialize media for use on 1000BASE-X devices.
9007 */
9008 static void
9009 wm_tbi_mediainit(struct wm_softc *sc)
9010 {
9011 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9012 const char *sep = "";
9013
9014 if (sc->sc_type < WM_T_82543)
9015 sc->sc_tipg = TIPG_WM_DFLT;
9016 else
9017 sc->sc_tipg = TIPG_LG_DFLT;
9018
9019 sc->sc_tbi_serdes_anegticks = 5;
9020
9021 /* Initialize our media structures */
9022 sc->sc_mii.mii_ifp = ifp;
9023 sc->sc_ethercom.ec_mii = &sc->sc_mii;
9024
9025 if ((sc->sc_type >= WM_T_82575)
9026 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
9027 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9028 wm_serdes_mediachange, wm_serdes_mediastatus);
9029 else
9030 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9031 wm_tbi_mediachange, wm_tbi_mediastatus);
9032
9033 /*
9034 * SWD Pins:
9035 *
9036 * 0 = Link LED (output)
9037 * 1 = Loss Of Signal (input)
9038 */
9039 sc->sc_ctrl |= CTRL_SWDPIO(0);
9040
9041 /* XXX Perhaps this is only for TBI */
9042 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9043 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
9044
9045 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9046 sc->sc_ctrl &= ~CTRL_LRST;
9047
9048 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9049
9050 #define ADD(ss, mm, dd) \
9051 do { \
9052 aprint_normal("%s%s", sep, ss); \
9053 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
9054 sep = ", "; \
9055 } while (/*CONSTCOND*/0)
9056
9057 aprint_normal_dev(sc->sc_dev, "");
9058
9059 /* Only 82545 is LX */
9060 if (sc->sc_type == WM_T_82545) {
9061 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
9062 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
9063 } else {
9064 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
9065 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
9066 }
9067 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
9068 aprint_normal("\n");
9069
9070 #undef ADD
9071
9072 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
9073 }
9074
9075 /*
9076 * wm_tbi_mediachange: [ifmedia interface function]
9077 *
9078 * Set hardware to newly-selected media on a 1000BASE-X device.
9079 */
9080 static int
9081 wm_tbi_mediachange(struct ifnet *ifp)
9082 {
9083 struct wm_softc *sc = ifp->if_softc;
9084 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9085 uint32_t status;
9086 int i;
9087
9088 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9089 /* XXX need some work for >= 82571 and < 82575 */
9090 if (sc->sc_type < WM_T_82575)
9091 return 0;
9092 }
9093
9094 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9095 || (sc->sc_type >= WM_T_82575))
9096 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9097
9098 sc->sc_ctrl &= ~CTRL_LRST;
9099 sc->sc_txcw = TXCW_ANE;
9100 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9101 sc->sc_txcw |= TXCW_FD | TXCW_HD;
9102 else if (ife->ifm_media & IFM_FDX)
9103 sc->sc_txcw |= TXCW_FD;
9104 else
9105 sc->sc_txcw |= TXCW_HD;
9106
9107 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
9108 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
9109
9110 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
9111 device_xname(sc->sc_dev), sc->sc_txcw));
9112 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9113 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9114 CSR_WRITE_FLUSH(sc);
9115 delay(1000);
9116
9117 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
9118 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
9119
9120 /*
9121 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
9122 	 * optics detect a signal; on older chips it reads 0 when there is one.
9123 */
9124 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
9125 /* Have signal; wait for the link to come up. */
9126 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
9127 delay(10000);
9128 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
9129 break;
9130 }
9131
9132 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
9133 device_xname(sc->sc_dev),i));
9134
9135 status = CSR_READ(sc, WMREG_STATUS);
9136 DPRINTF(WM_DEBUG_LINK,
9137 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
9138 device_xname(sc->sc_dev),status, STATUS_LU));
9139 if (status & STATUS_LU) {
9140 /* Link is up. */
9141 DPRINTF(WM_DEBUG_LINK,
9142 ("%s: LINK: set media -> link up %s\n",
9143 device_xname(sc->sc_dev),
9144 (status & STATUS_FD) ? "FDX" : "HDX"));
9145
9146 /*
9147 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
9148 			 * automatically, so re-read CTRL into sc->sc_ctrl.
9149 */
9150 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9151 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9152 sc->sc_fcrtl &= ~FCRTL_XONE;
9153 if (status & STATUS_FD)
9154 sc->sc_tctl |=
9155 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9156 else
9157 sc->sc_tctl |=
9158 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9159 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
9160 sc->sc_fcrtl |= FCRTL_XONE;
9161 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9162 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9163 WMREG_OLD_FCRTL : WMREG_FCRTL,
9164 sc->sc_fcrtl);
9165 sc->sc_tbi_linkup = 1;
9166 } else {
9167 if (i == WM_LINKUP_TIMEOUT)
9168 wm_check_for_link(sc);
9169 /* Link is down. */
9170 DPRINTF(WM_DEBUG_LINK,
9171 ("%s: LINK: set media -> link down\n",
9172 device_xname(sc->sc_dev)));
9173 sc->sc_tbi_linkup = 0;
9174 }
9175 } else {
9176 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
9177 device_xname(sc->sc_dev)));
9178 sc->sc_tbi_linkup = 0;
9179 }
9180
9181 wm_tbi_serdes_set_linkled(sc);
9182
9183 return 0;
9184 }
9185
9186 /*
9187 * wm_tbi_mediastatus: [ifmedia interface function]
9188 *
9189 * Get the current interface media status on a 1000BASE-X device.
9190 */
9191 static void
9192 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9193 {
9194 struct wm_softc *sc = ifp->if_softc;
9195 uint32_t ctrl, status;
9196
9197 ifmr->ifm_status = IFM_AVALID;
9198 ifmr->ifm_active = IFM_ETHER;
9199
9200 status = CSR_READ(sc, WMREG_STATUS);
9201 if ((status & STATUS_LU) == 0) {
9202 ifmr->ifm_active |= IFM_NONE;
9203 return;
9204 }
9205
9206 ifmr->ifm_status |= IFM_ACTIVE;
9207 /* Only 82545 is LX */
9208 if (sc->sc_type == WM_T_82545)
9209 ifmr->ifm_active |= IFM_1000_LX;
9210 else
9211 ifmr->ifm_active |= IFM_1000_SX;
9212 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
9213 ifmr->ifm_active |= IFM_FDX;
9214 else
9215 ifmr->ifm_active |= IFM_HDX;
9216 ctrl = CSR_READ(sc, WMREG_CTRL);
9217 if (ctrl & CTRL_RFCE)
9218 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
9219 if (ctrl & CTRL_TFCE)
9220 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
9221 }
9222
9223 /* XXX TBI only */
9224 static int
9225 wm_check_for_link(struct wm_softc *sc)
9226 {
9227 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9228 uint32_t rxcw;
9229 uint32_t ctrl;
9230 uint32_t status;
9231 uint32_t sig;
9232
9233 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9234 /* XXX need some work for >= 82571 */
9235 if (sc->sc_type >= WM_T_82571) {
9236 sc->sc_tbi_linkup = 1;
9237 return 0;
9238 }
9239 }
9240
9241 rxcw = CSR_READ(sc, WMREG_RXCW);
9242 ctrl = CSR_READ(sc, WMREG_CTRL);
9243 status = CSR_READ(sc, WMREG_STATUS);
9244
9245 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
9246
9247 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
9248 device_xname(sc->sc_dev), __func__,
9249 ((ctrl & CTRL_SWDPIN(1)) == sig),
9250 ((status & STATUS_LU) != 0),
9251 ((rxcw & RXCW_C) != 0)
9252 ));
9253
9254 /*
9255 * SWDPIN LU RXCW
9256 * 0 0 0
9257 * 0 0 1 (should not happen)
9258 * 0 1 0 (should not happen)
9259 * 0 1 1 (should not happen)
9260 * 1 0 0 Disable autonego and force linkup
9261 * 1 0 1 got /C/ but not linkup yet
9262 * 1 1 0 (linkup)
9263 * 1 1 1 If IFM_AUTO, back to autonego
9264 *
9265 */
9266 if (((ctrl & CTRL_SWDPIN(1)) == sig)
9267 && ((status & STATUS_LU) == 0)
9268 && ((rxcw & RXCW_C) == 0)) {
9269 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
9270 __func__));
9271 sc->sc_tbi_linkup = 0;
9272 /* Disable auto-negotiation in the TXCW register */
9273 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
9274
9275 /*
9276 * Force link-up and also force full-duplex.
9277 *
9278 		 * NOTE: the hardware updated TFCE and RFCE in CTRL
9279 		 * automatically, so update sc->sc_ctrl from it.
9280 */
9281 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
9282 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9283 } else if (((status & STATUS_LU) != 0)
9284 && ((rxcw & RXCW_C) != 0)
9285 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
9286 sc->sc_tbi_linkup = 1;
9287 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
9288 __func__));
9289 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9290 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
9291 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
9292 && ((rxcw & RXCW_C) != 0)) {
9293 DPRINTF(WM_DEBUG_LINK, ("/C/"));
9294 } else {
9295 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
9296 status));
9297 }
9298
9299 return 0;
9300 }
9301
9302 /*
9303 * wm_tbi_tick:
9304 *
9305 * Check the link on TBI devices.
9306 * This function acts as mii_tick().
9307 */
9308 static void
9309 wm_tbi_tick(struct wm_softc *sc)
9310 {
9311 struct wm_txqueue *txq __diagused = &sc->sc_txq[0];
9312 struct mii_data *mii = &sc->sc_mii;
9313 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9314 uint32_t status;
9315
9316 KASSERT(WM_TX_LOCKED(txq));
9317
9318 status = CSR_READ(sc, WMREG_STATUS);
9319
9320 /* XXX is this needed? */
9321 (void)CSR_READ(sc, WMREG_RXCW);
9322 (void)CSR_READ(sc, WMREG_CTRL);
9323
9324 /* set link status */
9325 if ((status & STATUS_LU) == 0) {
9326 DPRINTF(WM_DEBUG_LINK,
9327 ("%s: LINK: checklink -> down\n",
9328 device_xname(sc->sc_dev)));
9329 sc->sc_tbi_linkup = 0;
9330 } else if (sc->sc_tbi_linkup == 0) {
9331 DPRINTF(WM_DEBUG_LINK,
9332 ("%s: LINK: checklink -> up %s\n",
9333 device_xname(sc->sc_dev),
9334 (status & STATUS_FD) ? "FDX" : "HDX"));
9335 sc->sc_tbi_linkup = 1;
9336 sc->sc_tbi_serdes_ticks = 0;
9337 }
9338
9339 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
9340 goto setled;
9341
9342 if ((status & STATUS_LU) == 0) {
9343 sc->sc_tbi_linkup = 0;
9344 /* If the timer expired, retry autonegotiation */
9345 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9346 && (++sc->sc_tbi_serdes_ticks
9347 >= sc->sc_tbi_serdes_anegticks)) {
9348 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9349 sc->sc_tbi_serdes_ticks = 0;
9350 /*
9351 * Reset the link, and let autonegotiation do
9352 * its thing
9353 */
9354 sc->sc_ctrl |= CTRL_LRST;
9355 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9356 CSR_WRITE_FLUSH(sc);
9357 delay(1000);
9358 sc->sc_ctrl &= ~CTRL_LRST;
9359 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9360 CSR_WRITE_FLUSH(sc);
9361 delay(1000);
9362 CSR_WRITE(sc, WMREG_TXCW,
9363 sc->sc_txcw & ~TXCW_ANE);
9364 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9365 }
9366 }
9367
9368 setled:
9369 wm_tbi_serdes_set_linkled(sc);
9370 }
9371
9372 /* SERDES related */
9373 static void
9374 wm_serdes_power_up_link_82575(struct wm_softc *sc)
9375 {
9376 uint32_t reg;
9377
9378 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9379 && ((sc->sc_flags & WM_F_SGMII) == 0))
9380 return;
9381
9382 reg = CSR_READ(sc, WMREG_PCS_CFG);
9383 reg |= PCS_CFG_PCS_EN;
9384 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
9385
9386 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9387 reg &= ~CTRL_EXT_SWDPIN(3);
9388 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9389 CSR_WRITE_FLUSH(sc);
9390 }
9391
9392 static int
9393 wm_serdes_mediachange(struct ifnet *ifp)
9394 {
9395 struct wm_softc *sc = ifp->if_softc;
9396 bool pcs_autoneg = true; /* XXX */
9397 uint32_t ctrl_ext, pcs_lctl, reg;
9398
9399 /* XXX Currently, this function is not called on 8257[12] */
9400 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9401 || (sc->sc_type >= WM_T_82575))
9402 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9403
9404 wm_serdes_power_up_link_82575(sc);
9405
9406 sc->sc_ctrl |= CTRL_SLU;
9407
9408 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
9409 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
9410
9411 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9412 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
9413 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
9414 case CTRL_EXT_LINK_MODE_SGMII:
9415 pcs_autoneg = true;
9416 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
9417 break;
9418 case CTRL_EXT_LINK_MODE_1000KX:
9419 pcs_autoneg = false;
9420 /* FALLTHROUGH */
9421 default:
9422 		if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
9423 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
9424 pcs_autoneg = false;
9425 }
9426 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
9427 | CTRL_FRCFDX;
9428 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
9429 }
9430 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9431
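	/*
	 * With PCS autonegotiation, advertise both symmetric and
	 * asymmetric pause and restart negotiation; otherwise force
	 * the speed/duplex and flow-control settings chosen above.
	 */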
9432 if (pcs_autoneg) {
9433 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
9434 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
9435
9436 reg = CSR_READ(sc, WMREG_PCS_ANADV);
9437 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
9438 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
9439 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
9440 } else
9441 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
9442
9443 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
9444
9445
9446 return 0;
9447 }
9448
9449 static void
9450 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9451 {
9452 struct wm_softc *sc = ifp->if_softc;
9453 struct mii_data *mii = &sc->sc_mii;
9454 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9455 uint32_t pcs_adv, pcs_lpab, reg;
9456
9457 ifmr->ifm_status = IFM_AVALID;
9458 ifmr->ifm_active = IFM_ETHER;
9459
9460 /* Check PCS */
9461 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9462 if ((reg & PCS_LSTS_LINKOK) == 0) {
9463 ifmr->ifm_active |= IFM_NONE;
9464 sc->sc_tbi_linkup = 0;
9465 goto setled;
9466 }
9467
9468 sc->sc_tbi_linkup = 1;
9469 ifmr->ifm_status |= IFM_ACTIVE;
9470 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
9471 if ((reg & PCS_LSTS_FDX) != 0)
9472 ifmr->ifm_active |= IFM_FDX;
9473 else
9474 ifmr->ifm_active |= IFM_HDX;
9475 mii->mii_media_active &= ~IFM_ETH_FMASK;
9476 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
9477 /* Check flow */
9478 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9479 if ((reg & PCS_LSTS_AN_COMP) == 0) {
9480 printf("XXX LINKOK but not ACOMP\n");
9481 goto setled;
9482 }
9483 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
9484 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
9485 printf("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab);
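		/*
		 * Resolve flow control from our advertisement and the
		 * link partner ability, following the IEEE 802.3 pause
		 * resolution rules: symmetric pause on both sides
		 * enables both directions; the asymmetric combinations
		 * below select Tx-only or Rx-only pause.
		 */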
9486 if ((pcs_adv & TXCW_SYM_PAUSE)
9487 && (pcs_lpab & TXCW_SYM_PAUSE)) {
9488 mii->mii_media_active |= IFM_FLOW
9489 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
9490 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
9491 && (pcs_adv & TXCW_ASYM_PAUSE)
9492 && (pcs_lpab & TXCW_SYM_PAUSE)
9493 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9494 mii->mii_media_active |= IFM_FLOW
9495 | IFM_ETH_TXPAUSE;
9496 } else if ((pcs_adv & TXCW_SYM_PAUSE)
9497 && (pcs_adv & TXCW_ASYM_PAUSE)
9498 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
9499 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9500 mii->mii_media_active |= IFM_FLOW
9501 | IFM_ETH_RXPAUSE;
9502 } else {
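			/* No pause ability matched; leave flow control off */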
9503 }
9504 }
9505 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
9506 | (mii->mii_media_active & IFM_ETH_FMASK);
9507 setled:
9508 wm_tbi_serdes_set_linkled(sc);
9509 }
9510
9511 /*
9512 * wm_serdes_tick:
9513 *
9514 * Check the link on serdes devices.
9515 */
9516 static void
9517 wm_serdes_tick(struct wm_softc *sc)
9518 {
9519 struct wm_txqueue *txq __diagused = &sc->sc_txq[0];
9520 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9521 struct mii_data *mii = &sc->sc_mii;
9522 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9523 uint32_t reg;
9524
9525 KASSERT(WM_TX_LOCKED(txq));
9526
9527 mii->mii_media_status = IFM_AVALID;
9528 mii->mii_media_active = IFM_ETHER;
9529
9530 /* Check PCS */
9531 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9532 if ((reg & PCS_LSTS_LINKOK) != 0) {
9533 mii->mii_media_status |= IFM_ACTIVE;
9534 sc->sc_tbi_linkup = 1;
9535 sc->sc_tbi_serdes_ticks = 0;
9536 mii->mii_media_active |= IFM_1000_SX; /* XXX */
9537 if ((reg & PCS_LSTS_FDX) != 0)
9538 mii->mii_media_active |= IFM_FDX;
9539 else
9540 mii->mii_media_active |= IFM_HDX;
9541 } else {
9542 		mii->mii_media_active |= IFM_NONE;
9543 sc->sc_tbi_linkup = 0;
9544 /* If the timer expired, retry autonegotiation */
9545 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9546 && (++sc->sc_tbi_serdes_ticks
9547 >= sc->sc_tbi_serdes_anegticks)) {
9548 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9549 sc->sc_tbi_serdes_ticks = 0;
9550 /* XXX */
9551 wm_serdes_mediachange(ifp);
9552 }
9553 }
9554
9555 wm_tbi_serdes_set_linkled(sc);
9556 }
9557
9558 /* SFP related */
9559
9560 static int
9561 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
9562 {
9563 uint32_t i2ccmd;
9564 int i;
9565
9566 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
9567 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9568
9569 /* Poll the ready bit */
9570 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9571 delay(50);
9572 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9573 if (i2ccmd & I2CCMD_READY)
9574 break;
9575 }
9576 if ((i2ccmd & I2CCMD_READY) == 0)
9577 return -1;
9578 if ((i2ccmd & I2CCMD_ERROR) != 0)
9579 return -1;
9580
9581 *data = i2ccmd & 0x00ff;
9582
9583 return 0;
9584 }
9585
9586 static uint32_t
9587 wm_sfp_get_media_type(struct wm_softc *sc)
9588 {
9589 uint32_t ctrl_ext;
9590 uint8_t val = 0;
9591 int timeout = 3;
9592 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
9593 int rv = -1;
9594
9595 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9596 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
9597 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
9598 CSR_WRITE_FLUSH(sc);
9599
9600 /* Read SFP module data */
9601 while (timeout) {
9602 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
9603 if (rv == 0)
9604 break;
9605 delay(100*1000); /* XXX too big */
9606 timeout--;
9607 }
9608 if (rv != 0)
9609 goto out;
9610 switch (val) {
9611 case SFF_SFP_ID_SFF:
9612 aprint_normal_dev(sc->sc_dev,
9613 "Module/Connector soldered to board\n");
9614 break;
9615 case SFF_SFP_ID_SFP:
9616 aprint_normal_dev(sc->sc_dev, "SFP\n");
9617 break;
9618 case SFF_SFP_ID_UNKNOWN:
9619 goto out;
9620 default:
9621 break;
9622 }
9623
9624 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
9625 if (rv != 0) {
9626 goto out;
9627 }
9628
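	/*
	 * The Ethernet compliance flags byte (cf. SFF-8472) tells us
	 * whether the module is 1000BASE-SX/LX (SerDes), 1000BASE-T
	 * (SGMII to a copper PHY) or 100BASE-FX.
	 */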
9629 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
9630 mediatype = WM_MEDIATYPE_SERDES;
9631 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
9632 		sc->sc_flags |= WM_F_SGMII;
9633 		mediatype = WM_MEDIATYPE_COPPER;
9634 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
9635 sc->sc_flags |= WM_F_SGMII;
9636 mediatype = WM_MEDIATYPE_SERDES;
9637 }
9638
9639 out:
9640 /* Restore I2C interface setting */
9641 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9642
9643 return mediatype;
9644 }

9645 /*
9646 * NVM related.
9647 * Microwire, SPI (w/wo EERD) and Flash.
9648 */
9649
9650 /* Both spi and uwire */
9651
9652 /*
9653 * wm_eeprom_sendbits:
9654 *
9655 * Send a series of bits to the EEPROM.
9656 */
9657 static void
9658 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
9659 {
9660 uint32_t reg;
9661 int x;
9662
9663 reg = CSR_READ(sc, WMREG_EECD);
9664
9665 for (x = nbits; x > 0; x--) {
9666 if (bits & (1U << (x - 1)))
9667 reg |= EECD_DI;
9668 else
9669 reg &= ~EECD_DI;
9670 CSR_WRITE(sc, WMREG_EECD, reg);
9671 CSR_WRITE_FLUSH(sc);
9672 delay(2);
9673 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9674 CSR_WRITE_FLUSH(sc);
9675 delay(2);
9676 CSR_WRITE(sc, WMREG_EECD, reg);
9677 CSR_WRITE_FLUSH(sc);
9678 delay(2);
9679 }
9680 }
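
/*
 * Illustrative example: shifting in the 3-bit Microwire READ opcode
 * (binary 110) with wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3) drives
 * DI high, high, then low, with one SK clock pulse per bit.
 */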
9681
9682 /*
9683 * wm_eeprom_recvbits:
9684 *
9685 * Receive a series of bits from the EEPROM.
9686 */
9687 static void
9688 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
9689 {
9690 uint32_t reg, val;
9691 int x;
9692
9693 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
9694
9695 val = 0;
9696 for (x = nbits; x > 0; x--) {
9697 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9698 CSR_WRITE_FLUSH(sc);
9699 delay(2);
9700 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
9701 val |= (1U << (x - 1));
9702 CSR_WRITE(sc, WMREG_EECD, reg);
9703 CSR_WRITE_FLUSH(sc);
9704 delay(2);
9705 }
9706 *valp = val;
9707 }
9708
9709 /* Microwire */
9710
9711 /*
9712 * wm_nvm_read_uwire:
9713 *
9714 * Read a word from the EEPROM using the MicroWire protocol.
9715 */
9716 static int
9717 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9718 {
9719 uint32_t reg, val;
9720 int i;
9721
9722 for (i = 0; i < wordcnt; i++) {
9723 /* Clear SK and DI. */
9724 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
9725 CSR_WRITE(sc, WMREG_EECD, reg);
9726
9727 /*
9728 * XXX: workaround for a bug in qemu-0.12.x and prior
9729 * and Xen.
9730 *
9731 * We use this workaround only for 82540 because qemu's
9732 		 * e1000 acts as an 82540.
9733 */
9734 if (sc->sc_type == WM_T_82540) {
9735 reg |= EECD_SK;
9736 CSR_WRITE(sc, WMREG_EECD, reg);
9737 reg &= ~EECD_SK;
9738 CSR_WRITE(sc, WMREG_EECD, reg);
9739 CSR_WRITE_FLUSH(sc);
9740 delay(2);
9741 }
9742 /* XXX: end of workaround */
9743
9744 /* Set CHIP SELECT. */
9745 reg |= EECD_CS;
9746 CSR_WRITE(sc, WMREG_EECD, reg);
9747 CSR_WRITE_FLUSH(sc);
9748 delay(2);
9749
9750 /* Shift in the READ command. */
9751 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
9752
9753 /* Shift in address. */
9754 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
9755
9756 /* Shift out the data. */
9757 wm_eeprom_recvbits(sc, &val, 16);
9758 data[i] = val & 0xffff;
9759
9760 /* Clear CHIP SELECT. */
9761 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
9762 CSR_WRITE(sc, WMREG_EECD, reg);
9763 CSR_WRITE_FLUSH(sc);
9764 delay(2);
9765 }
9766
9767 return 0;
9768 }
9769
9770 /* SPI */
9771
9772 /*
9773 * Set SPI and FLASH related information from the EECD register.
9774 * For 82541 and 82547, the word size is taken from EEPROM.
9775 */
9776 static int
9777 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
9778 {
9779 int size;
9780 uint32_t reg;
9781 uint16_t data;
9782
9783 reg = CSR_READ(sc, WMREG_EECD);
9784 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
9785
9786 /* Read the size of NVM from EECD by default */
9787 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9788 switch (sc->sc_type) {
9789 case WM_T_82541:
9790 case WM_T_82541_2:
9791 case WM_T_82547:
9792 case WM_T_82547_2:
9793 /* Set dummy value to access EEPROM */
9794 sc->sc_nvm_wordsize = 64;
9795 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
9796 reg = data;
9797 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9798 if (size == 0)
9799 size = 6; /* 64 word size */
9800 else
9801 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
9802 break;
9803 case WM_T_80003:
9804 case WM_T_82571:
9805 case WM_T_82572:
9806 case WM_T_82573: /* SPI case */
9807 case WM_T_82574: /* SPI case */
9808 case WM_T_82583: /* SPI case */
9809 size += NVM_WORD_SIZE_BASE_SHIFT;
9810 if (size > 14)
9811 size = 14;
9812 break;
9813 case WM_T_82575:
9814 case WM_T_82576:
9815 case WM_T_82580:
9816 case WM_T_I350:
9817 case WM_T_I354:
9818 case WM_T_I210:
9819 case WM_T_I211:
9820 size += NVM_WORD_SIZE_BASE_SHIFT;
9821 if (size > 15)
9822 size = 15;
9823 break;
9824 default:
9825 aprint_error_dev(sc->sc_dev,
9826 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
9827 		return -1;
9829 }
9830
9831 sc->sc_nvm_wordsize = 1 << size;
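	/*
	 * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, as in
	 * Intel's shared code): an EECD size field of 2 yields
	 * size = 2 + 6 = 8, i.e. a 1 << 8 = 256 word NVM.
	 */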
9832
9833 return 0;
9834 }
9835
9836 /*
9837 * wm_nvm_ready_spi:
9838 *
9839 * Wait for a SPI EEPROM to be ready for commands.
9840 */
9841 static int
9842 wm_nvm_ready_spi(struct wm_softc *sc)
9843 {
9844 uint32_t val;
9845 int usec;
9846
9847 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
9848 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
9849 wm_eeprom_recvbits(sc, &val, 8);
9850 if ((val & SPI_SR_RDY) == 0)
9851 break;
9852 }
9853 if (usec >= SPI_MAX_RETRIES) {
9854 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
9855 return 1;
9856 }
9857 return 0;
9858 }
9859
9860 /*
9861 * wm_nvm_read_spi:
9862 *
9863  *	Read a word from the EEPROM using the SPI protocol.
9864 */
9865 static int
9866 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9867 {
9868 uint32_t reg, val;
9869 int i;
9870 uint8_t opc;
9871
9872 /* Clear SK and CS. */
9873 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
9874 CSR_WRITE(sc, WMREG_EECD, reg);
9875 CSR_WRITE_FLUSH(sc);
9876 delay(2);
9877
9878 if (wm_nvm_ready_spi(sc))
9879 return 1;
9880
9881 /* Toggle CS to flush commands. */
9882 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
9883 CSR_WRITE_FLUSH(sc);
9884 delay(2);
9885 CSR_WRITE(sc, WMREG_EECD, reg);
9886 CSR_WRITE_FLUSH(sc);
9887 delay(2);
9888
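	/*
	 * For small parts with 8-bit addressing, the ninth address bit
	 * (A8) travels in the opcode itself (SPI_OPC_A8); the word
	 * address is shifted left by one below to form a byte address.
	 */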
9889 opc = SPI_OPC_READ;
9890 if (sc->sc_nvm_addrbits == 8 && word >= 128)
9891 opc |= SPI_OPC_A8;
9892
9893 wm_eeprom_sendbits(sc, opc, 8);
9894 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
9895
9896 for (i = 0; i < wordcnt; i++) {
9897 wm_eeprom_recvbits(sc, &val, 16);
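		/* The EEPROM returns the high byte first; swap to host order */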
9898 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
9899 }
9900
9901 /* Raise CS and clear SK. */
9902 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
9903 CSR_WRITE(sc, WMREG_EECD, reg);
9904 CSR_WRITE_FLUSH(sc);
9905 delay(2);
9906
9907 return 0;
9908 }
9909
9910 /* Reading via the EERD register */
9911
9912 static int
9913 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
9914 {
9915 uint32_t attempts = 100000;
9916 uint32_t i, reg = 0;
9917 int32_t done = -1;
9918
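	/* Poll for up to 100000 * 5us = 0.5 seconds */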
9919 for (i = 0; i < attempts; i++) {
9920 reg = CSR_READ(sc, rw);
9921
9922 if (reg & EERD_DONE) {
9923 done = 0;
9924 break;
9925 }
9926 delay(5);
9927 }
9928
9929 return done;
9930 }
9931
9932 static int
9933 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
9934 uint16_t *data)
9935 {
9936 int i, eerd = 0;
9937 int error = 0;
9938
9939 for (i = 0; i < wordcnt; i++) {
9940 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
9941
9942 CSR_WRITE(sc, WMREG_EERD, eerd);
9943 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
9944 if (error != 0)
9945 break;
9946
9947 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
9948 }
9949
9950 return error;
9951 }
9952
9953 /* Flash */
9954
9955 static int
9956 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
9957 {
9958 uint32_t eecd;
9959 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
9960 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
9961 uint8_t sig_byte = 0;
9962
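	/*
	 * A bank is considered valid when the high byte of its
	 * ICH_NVM_SIG_WORD word carries the flash signature; on
	 * ICH8/ICH9 the EECD SEC1VAL bits may report the valid bank
	 * directly.
	 */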
9963 switch (sc->sc_type) {
9964 case WM_T_ICH8:
9965 case WM_T_ICH9:
9966 eecd = CSR_READ(sc, WMREG_EECD);
9967 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
9968 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
9969 return 0;
9970 }
9971 /* FALLTHROUGH */
9972 default:
9973 /* Default to 0 */
9974 *bank = 0;
9975
9976 /* Check bank 0 */
9977 wm_read_ich8_byte(sc, act_offset, &sig_byte);
9978 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
9979 *bank = 0;
9980 return 0;
9981 }
9982
9983 /* Check bank 1 */
9984 wm_read_ich8_byte(sc, act_offset + bank1_offset,
9985 &sig_byte);
9986 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
9987 *bank = 1;
9988 return 0;
9989 }
9990 }
9991
9992 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
9993 device_xname(sc->sc_dev)));
9994 return -1;
9995 }
9996
9997 /******************************************************************************
9998 * This function does initial flash setup so that a new read/write/erase cycle
9999 * can be started.
10000 *
10001 * sc - The pointer to the hw structure
10002 ****************************************************************************/
10003 static int32_t
10004 wm_ich8_cycle_init(struct wm_softc *sc)
10005 {
10006 uint16_t hsfsts;
10007 int32_t error = 1;
10008 int32_t i = 0;
10009
10010 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10011
10012 	/* Check the Flash Descriptor Valid bit in the HW status register */
10013 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
10014 return error;
10015 }
10016
10017 	/* Clear FCERR and DAEL in the HW status register by writing 1s */
10019 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
10020
10021 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10022
10023 	/*
10024 	 * Ideally the hardware would provide either a cycle-in-progress
10025 	 * bit to check before starting a new cycle, or an FDONE bit that
10026 	 * reads as 1 after a hardware reset so that it can serve as an
10027 	 * indication of whether a cycle is in progress or has completed.
10028 	 * We should also have a software semaphore guarding FDONE and the
10029 	 * cycle-in-progress bit, so that accesses to those bits by two
10030 	 * threads are serialized and two threads don't start a cycle at
10031 	 * the same time.
10032 	 */
10033
10034 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10035 /*
10036 * There is no cycle running at present, so we can start a
10037 * cycle
10038 */
10039
10040 /* Begin by setting Flash Cycle Done. */
10041 hsfsts |= HSFSTS_DONE;
10042 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10043 error = 0;
10044 } else {
10045 /*
10046 		 * Otherwise poll for some time so the current cycle has a
10047 * chance to end before giving up.
10048 */
10049 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
10050 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10051 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10052 error = 0;
10053 break;
10054 }
10055 delay(1);
10056 }
10057 if (error == 0) {
10058 /*
10059 			 * The previous cycle ended within the timeout; now
10060 			 * set the Flash Cycle Done bit.
10061 */
10062 hsfsts |= HSFSTS_DONE;
10063 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10064 }
10065 }
10066 return error;
10067 }
10068
10069 /******************************************************************************
10070 * This function starts a flash cycle and waits for its completion
10071 *
10072 * sc - The pointer to the hw structure
10073 ****************************************************************************/
10074 static int32_t
10075 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
10076 {
10077 uint16_t hsflctl;
10078 uint16_t hsfsts;
10079 int32_t error = 1;
10080 uint32_t i = 0;
10081
10082 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
10083 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10084 hsflctl |= HSFCTL_GO;
10085 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10086
10087 /* Wait till FDONE bit is set to 1 */
10088 do {
10089 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10090 if (hsfsts & HSFSTS_DONE)
10091 break;
10092 delay(1);
10093 i++;
10094 } while (i < timeout);
10095 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
10096 error = 0;
10097
10098 return error;
10099 }
10100
10101 /******************************************************************************
10102 * Reads a byte or word from the NVM using the ICH8 flash access registers.
10103 *
10104 * sc - The pointer to the hw structure
10105 * index - The index of the byte or word to read.
10106 * size - Size of data to read, 1=byte 2=word
10107 * data - Pointer to the word to store the value read.
10108 *****************************************************************************/
10109 static int32_t
10110 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
10111 uint32_t size, uint16_t *data)
10112 {
10113 uint16_t hsfsts;
10114 uint16_t hsflctl;
10115 uint32_t flash_linear_address;
10116 uint32_t flash_data = 0;
10117 int32_t error = 1;
10118 int32_t count = 0;
10119
10120 	if (size < 1 || size > 2 || data == NULL ||
10121 index > ICH_FLASH_LINEAR_ADDR_MASK)
10122 return error;
10123
10124 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
10125 sc->sc_ich8_flash_base;
10126
10127 do {
10128 delay(1);
10129 /* Steps */
10130 error = wm_ich8_cycle_init(sc);
10131 if (error)
10132 break;
10133
10134 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10135 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
10136 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
10137 & HSFCTL_BCOUNT_MASK;
10138 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
10139 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10140
10141 /*
10142 * Write the last 24 bits of index into Flash Linear address
10143 * field in Flash Address
10144 */
10145 /* TODO: TBD maybe check the index against the size of flash */
10146
10147 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
10148
10149 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
10150
10151 		/*
10152 		 * Check whether FCERR is set; if it is, clear it and try
10153 		 * the whole sequence a few more times.  Otherwise read
10154 		 * (shift in) the Flash Data0 register, which returns the
10155 		 * data least significant byte first.
10156 		 */
10157 if (error == 0) {
10158 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
10159 if (size == 1)
10160 *data = (uint8_t)(flash_data & 0x000000FF);
10161 else if (size == 2)
10162 *data = (uint16_t)(flash_data & 0x0000FFFF);
10163 break;
10164 } else {
10165 /*
10166 * If we've gotten here, then things are probably
10167 * completely hosed, but if the error condition is
10168 * detected, it won't hurt to give it another try...
10169 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
10170 */
10171 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10172 if (hsfsts & HSFSTS_ERR) {
10173 /* Repeat for some time before giving up. */
10174 continue;
10175 } else if ((hsfsts & HSFSTS_DONE) == 0)
10176 break;
10177 }
10178 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
10179
10180 return error;
10181 }
10182
10183 /******************************************************************************
10184 * Reads a single byte from the NVM using the ICH8 flash access registers.
10185 *
10186 * sc - pointer to wm_hw structure
10187 * index - The index of the byte to read.
10188 * data - Pointer to a byte to store the value read.
10189 *****************************************************************************/
10190 static int32_t
10191 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
10192 {
10193 int32_t status;
10194 uint16_t word = 0;
10195
10196 status = wm_read_ich8_data(sc, index, 1, &word);
10197 if (status == 0)
10198 *data = (uint8_t)word;
10199 else
10200 *data = 0;
10201
10202 return status;
10203 }
10204
10205 /******************************************************************************
10206 * Reads a word from the NVM using the ICH8 flash access registers.
10207 *
10208 * sc - pointer to wm_hw structure
10209 * index - The starting byte index of the word to read.
10210 * data - Pointer to a word to store the value read.
10211 *****************************************************************************/
10212 static int32_t
10213 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
10214 {
10215 int32_t status;
10216
10217 status = wm_read_ich8_data(sc, index, 2, data);
10218 return status;
10219 }
10220
10221 /******************************************************************************
10222 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
10223 * register.
10224 *
10225 * sc - Struct containing variables accessed by shared code
10226 * offset - offset of word in the EEPROM to read
10227 * data - word read from the EEPROM
10228 * words - number of words to read
10229 *****************************************************************************/
10230 static int
10231 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
10232 {
10233 int32_t error = 0;
10234 uint32_t flash_bank = 0;
10235 uint32_t act_offset = 0;
10236 uint32_t bank_offset = 0;
10237 uint16_t word = 0;
10238 uint16_t i = 0;
10239
10240 /*
10241 * We need to know which is the valid flash bank. In the event
10242 * that we didn't allocate eeprom_shadow_ram, we may not be
10243 * managing flash_bank. So it cannot be trusted and needs
10244 * to be updated with each read.
10245 */
10246 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10247 if (error) {
10248 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10249 device_xname(sc->sc_dev)));
10250 flash_bank = 0;
10251 }
10252
10253 /*
10254 * Adjust offset appropriately if we're on bank 1 - adjust for word
10255 * size
10256 */
10257 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10258
10259 error = wm_get_swfwhw_semaphore(sc);
10260 if (error) {
10261 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10262 __func__);
10263 return error;
10264 }
10265
10266 for (i = 0; i < words; i++) {
10267 /* The NVM part needs a byte offset, hence * 2 */
10268 act_offset = bank_offset + ((offset + i) * 2);
10269 error = wm_read_ich8_word(sc, act_offset, &word);
10270 if (error) {
10271 aprint_error_dev(sc->sc_dev,
10272 "%s: failed to read NVM\n", __func__);
10273 break;
10274 }
10275 data[i] = word;
10276 }
10277
10278 wm_put_swfwhw_semaphore(sc);
10279 return error;
10280 }
10281
10282 /* iNVM */
10283
10284 static int
10285 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
10286 {
10287 	int32_t rv = -1;
10288 uint32_t invm_dword;
10289 uint16_t i;
10290 uint8_t record_type, word_address;
10291
10292 for (i = 0; i < INVM_SIZE; i++) {
10293 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
10294 /* Get record type */
10295 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
10296 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
10297 break;
10298 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
10299 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
10300 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
10301 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
10302 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
10303 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
10304 if (word_address == address) {
10305 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
10306 rv = 0;
10307 break;
10308 }
10309 }
10310 }
10311
10312 return rv;
10313 }
10314
10315 static int
10316 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
10317 {
10318 int rv = 0;
10319 int i;
10320
10321 for (i = 0; i < words; i++) {
10322 switch (offset + i) {
10323 case NVM_OFF_MACADDR:
10324 case NVM_OFF_MACADDR1:
10325 case NVM_OFF_MACADDR2:
10326 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
10327 if (rv != 0) {
10328 data[i] = 0xffff;
10329 rv = -1;
10330 }
10331 break;
10332 case NVM_OFF_CFG2:
10333 rv = wm_nvm_read_word_invm(sc, offset, data);
10334 if (rv != 0) {
10335 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
10336 rv = 0;
10337 }
10338 break;
10339 case NVM_OFF_CFG4:
10340 rv = wm_nvm_read_word_invm(sc, offset, data);
10341 if (rv != 0) {
10342 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
10343 rv = 0;
10344 }
10345 break;
10346 case NVM_OFF_LED_1_CFG:
10347 rv = wm_nvm_read_word_invm(sc, offset, data);
10348 if (rv != 0) {
10349 *data = NVM_LED_1_CFG_DEFAULT_I211;
10350 rv = 0;
10351 }
10352 break;
10353 case NVM_OFF_LED_0_2_CFG:
10354 rv = wm_nvm_read_word_invm(sc, offset, data);
10355 if (rv != 0) {
10356 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
10357 rv = 0;
10358 }
10359 break;
10360 case NVM_OFF_ID_LED_SETTINGS:
10361 rv = wm_nvm_read_word_invm(sc, offset, data);
10362 if (rv != 0) {
10363 *data = ID_LED_RESERVED_FFFF;
10364 rv = 0;
10365 }
10366 break;
10367 default:
10368 DPRINTF(WM_DEBUG_NVM,
10369 ("NVM word 0x%02x is not mapped.\n", offset));
10370 *data = NVM_RESERVED_WORD;
10371 break;
10372 }
10373 }
10374
10375 return rv;
10376 }
10377
10378 /* Lock, detecting NVM type, validate checksum, version and read */
10379
10380 /*
10381 * wm_nvm_acquire:
10382 *
10383 * Perform the EEPROM handshake required on some chips.
10384 */
10385 static int
10386 wm_nvm_acquire(struct wm_softc *sc)
10387 {
10388 uint32_t reg;
10389 int x;
10390 int ret = 0;
10391
10392 	/* Flash-type NVM needs no handshake; always succeeds */
10393 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
10394 return 0;
10395
10396 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
10397 ret = wm_get_swfwhw_semaphore(sc);
10398 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
10399 /* This will also do wm_get_swsm_semaphore() if needed */
10400 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
10401 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
10402 ret = wm_get_swsm_semaphore(sc);
10403 }
10404
10405 if (ret) {
10406 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10407 __func__);
10408 return 1;
10409 }
10410
10411 if (sc->sc_flags & WM_F_LOCK_EECD) {
10412 reg = CSR_READ(sc, WMREG_EECD);
10413
10414 /* Request EEPROM access. */
10415 reg |= EECD_EE_REQ;
10416 CSR_WRITE(sc, WMREG_EECD, reg);
10417
10418 /* ..and wait for it to be granted. */
10419 for (x = 0; x < 1000; x++) {
10420 reg = CSR_READ(sc, WMREG_EECD);
10421 if (reg & EECD_EE_GNT)
10422 break;
10423 delay(5);
10424 }
10425 if ((reg & EECD_EE_GNT) == 0) {
10426 aprint_error_dev(sc->sc_dev,
10427 "could not acquire EEPROM GNT\n");
10428 reg &= ~EECD_EE_REQ;
10429 CSR_WRITE(sc, WMREG_EECD, reg);
10430 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10431 wm_put_swfwhw_semaphore(sc);
10432 if (sc->sc_flags & WM_F_LOCK_SWFW)
10433 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10434 else if (sc->sc_flags & WM_F_LOCK_SWSM)
10435 wm_put_swsm_semaphore(sc);
10436 return 1;
10437 }
10438 }
10439
10440 return 0;
10441 }
10442
10443 /*
10444 * wm_nvm_release:
10445 *
10446 * Release the EEPROM mutex.
10447 */
10448 static void
10449 wm_nvm_release(struct wm_softc *sc)
10450 {
10451 uint32_t reg;
10452
10453 	/* Flash-type NVM needs no handshake; nothing to release */
10454 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
10455 return;
10456
10457 if (sc->sc_flags & WM_F_LOCK_EECD) {
10458 reg = CSR_READ(sc, WMREG_EECD);
10459 reg &= ~EECD_EE_REQ;
10460 CSR_WRITE(sc, WMREG_EECD, reg);
10461 }
10462
10463 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10464 wm_put_swfwhw_semaphore(sc);
10465 if (sc->sc_flags & WM_F_LOCK_SWFW)
10466 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10467 else if (sc->sc_flags & WM_F_LOCK_SWSM)
10468 wm_put_swsm_semaphore(sc);
10469 }
10470
10471 static int
10472 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
10473 {
10474 uint32_t eecd = 0;
10475
10476 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
10477 || sc->sc_type == WM_T_82583) {
10478 eecd = CSR_READ(sc, WMREG_EECD);
10479
10480 /* Isolate bits 15 & 16 */
10481 eecd = ((eecd >> 15) & 0x03);
10482
10483 /* If both bits are set, device is Flash type */
10484 if (eecd == 0x03)
10485 return 0;
10486 }
10487 return 1;
10488 }
10489
10490 static int
10491 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
10492 {
10493 uint32_t eec;
10494
10495 eec = CSR_READ(sc, WMREG_EEC);
10496 if ((eec & EEC_FLASH_DETECTED) != 0)
10497 return 1;
10498
10499 return 0;
10500 }
10501
10502 /*
10503 * wm_nvm_validate_checksum
10504 *
10505 * The checksum is defined as the sum of the first 64 (16 bit) words.
10506 */
10507 static int
10508 wm_nvm_validate_checksum(struct wm_softc *sc)
10509 {
10510 uint16_t checksum;
10511 uint16_t eeprom_data;
10512 #ifdef WM_DEBUG
10513 uint16_t csum_wordaddr, valid_checksum;
10514 #endif
10515 int i;
10516
10517 checksum = 0;
10518
10519 /* Don't check for I211 */
10520 if (sc->sc_type == WM_T_I211)
10521 return 0;
10522
10523 #ifdef WM_DEBUG
10524 if (sc->sc_type == WM_T_PCH_LPT) {
10525 csum_wordaddr = NVM_OFF_COMPAT;
10526 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
10527 } else {
10528 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
10529 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
10530 }
10531
10532 /* Dump EEPROM image for debug */
10533 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10534 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10535 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
10536 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
10537 if ((eeprom_data & valid_checksum) == 0) {
10538 DPRINTF(WM_DEBUG_NVM,
10539 ("%s: NVM need to be updated (%04x != %04x)\n",
10540 device_xname(sc->sc_dev), eeprom_data,
10541 valid_checksum));
10542 }
10543 }
10544
10545 if ((wm_debug & WM_DEBUG_NVM) != 0) {
10546 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
10547 for (i = 0; i < NVM_SIZE; i++) {
10548 if (wm_nvm_read(sc, i, 1, &eeprom_data))
10549 printf("XXXX ");
10550 else
10551 printf("%04hx ", eeprom_data);
10552 if (i % 8 == 7)
10553 printf("\n");
10554 }
10555 }
10556
10557 #endif /* WM_DEBUG */
10558
10559 for (i = 0; i < NVM_SIZE; i++) {
10560 if (wm_nvm_read(sc, i, 1, &eeprom_data))
10561 return 1;
10562 checksum += eeprom_data;
10563 }
10564
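	/*
	 * All NVM_SIZE words, including the checksum word itself, should
	 * sum to NVM_CHECKSUM modulo 2^16; a mismatch is only logged,
	 * not treated as fatal.
	 */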
10565 if (checksum != (uint16_t) NVM_CHECKSUM) {
10566 #ifdef WM_DEBUG
10567 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
10568 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
10569 #endif
10570 }
10571
10572 return 0;
10573 }
10574
10575 static void
10576 wm_nvm_version_invm(struct wm_softc *sc)
10577 {
10578 uint32_t dword;
10579
10580 /*
10581 	 * Linux's code to decode the version is very strange, so we don't
10582 	 * follow that algorithm and instead use word 61 as the
10583 	 * documentation describes.  Perhaps it's not perfect though...
10584 *
10585 * Example:
10586 *
10587 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
10588 */
10589 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
10590 dword = __SHIFTOUT(dword, INVM_VER_1);
10591 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
10592 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
10593 }
10594
10595 static void
10596 wm_nvm_version(struct wm_softc *sc)
10597 {
10598 uint16_t major, minor, build, patch;
10599 uint16_t uid0, uid1;
10600 uint16_t nvm_data;
10601 uint16_t off;
10602 bool check_version = false;
10603 bool check_optionrom = false;
10604 bool have_build = false;
10605
10606 /*
10607 * Version format:
10608 *
10609 * XYYZ
10610 * X0YZ
10611 * X0YY
10612 *
10613 * Example:
10614 *
10615 * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10)
10616 * 82571 0x50a6 5.10.6?
10617 * 82572 0x506a 5.6.10?
10618 * 82572EI 0x5069 5.6.9?
10619 * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4)
10620 * 0x2013 2.1.3?
10621 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
10622 */
10623 wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
10624 switch (sc->sc_type) {
10625 case WM_T_82571:
10626 case WM_T_82572:
10627 case WM_T_82574:
10628 case WM_T_82583:
10629 check_version = true;
10630 check_optionrom = true;
10631 have_build = true;
10632 break;
10633 case WM_T_82575:
10634 case WM_T_82576:
10635 case WM_T_82580:
10636 if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
10637 check_version = true;
10638 break;
10639 case WM_T_I211:
10640 wm_nvm_version_invm(sc);
10641 goto printver;
10642 case WM_T_I210:
10643 if (!wm_nvm_get_flash_presence_i210(sc)) {
10644 wm_nvm_version_invm(sc);
10645 goto printver;
10646 }
10647 /* FALLTHROUGH */
10648 case WM_T_I350:
10649 case WM_T_I354:
10650 check_version = true;
10651 check_optionrom = true;
10652 break;
10653 default:
10654 return;
10655 }
10656 if (check_version) {
10657 wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
10658 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
10659 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
10660 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
10661 build = nvm_data & NVM_BUILD_MASK;
10662 have_build = true;
10663 } else
10664 minor = nvm_data & 0x00ff;
10665
10666 		/* Convert the BCD-style minor number to decimal */
10667 minor = (minor / 16) * 10 + (minor % 16);
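		/* e.g. the BCD-style raw minor 0x10 is reported as decimal 10 */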
10668 sc->sc_nvm_ver_major = major;
10669 sc->sc_nvm_ver_minor = minor;
10670
10671 printver:
10672 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
10673 sc->sc_nvm_ver_minor);
10674 if (have_build) {
10675 sc->sc_nvm_ver_build = build;
10676 aprint_verbose(".%d", build);
10677 }
10678 }
10679 if (check_optionrom) {
10680 wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
10681 /* Option ROM Version */
10682 if ((off != 0x0000) && (off != 0xffff)) {
10683 off += NVM_COMBO_VER_OFF;
10684 wm_nvm_read(sc, off + 1, 1, &uid1);
10685 wm_nvm_read(sc, off, 1, &uid0);
10686 if ((uid0 != 0) && (uid0 != 0xffff)
10687 && (uid1 != 0) && (uid1 != 0xffff)) {
10688 /* 16bits */
10689 major = uid0 >> 8;
10690 build = (uid0 << 8) | (uid1 >> 8);
10691 patch = uid1 & 0x00ff;
10692 aprint_verbose(", option ROM Version %d.%d.%d",
10693 major, build, patch);
10694 }
10695 }
10696 }
10697
10698 wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
10699 aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
10700 }
10701
10702 /*
10703 * wm_nvm_read:
10704 *
10705 * Read data from the serial EEPROM.
10706 */
10707 static int
10708 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10709 {
10710 int rv;
10711
10712 if (sc->sc_flags & WM_F_EEPROM_INVALID)
10713 return 1;
10714
10715 if (wm_nvm_acquire(sc))
10716 return 1;
10717
10718 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10719 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10720 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
10721 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
10722 else if (sc->sc_flags & WM_F_EEPROM_INVM)
10723 rv = wm_nvm_read_invm(sc, word, wordcnt, data);
10724 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
10725 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
10726 else if (sc->sc_flags & WM_F_EEPROM_SPI)
10727 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
10728 else
10729 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
10730
10731 wm_nvm_release(sc);
10732 return rv;
10733 }
10734
10735 /*
10736 * Hardware semaphores.
10737  * Very complex...
10738 */
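
/*
 * Typical usage, as an illustrative sketch (SWFW_EEP_SM guards
 * NVM access):
 *
 *	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM) == 0) {
 *		... access the shared resource ...
 *		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
 *	}
 */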
10739
10740 static int
10741 wm_get_swsm_semaphore(struct wm_softc *sc)
10742 {
10743 int32_t timeout;
10744 uint32_t swsm;
10745
10746 if (sc->sc_flags & WM_F_LOCK_SWSM) {
10747 /* Get the SW semaphore. */
10748 timeout = sc->sc_nvm_wordsize + 1;
10749 while (timeout) {
10750 swsm = CSR_READ(sc, WMREG_SWSM);
10751
10752 if ((swsm & SWSM_SMBI) == 0)
10753 break;
10754
10755 delay(50);
10756 timeout--;
10757 }
10758
10759 if (timeout == 0) {
10760 aprint_error_dev(sc->sc_dev,
10761 "could not acquire SWSM SMBI\n");
10762 return 1;
10763 }
10764 }
10765
10766 /* Get the FW semaphore. */
10767 timeout = sc->sc_nvm_wordsize + 1;
10768 while (timeout) {
10769 swsm = CSR_READ(sc, WMREG_SWSM);
10770 swsm |= SWSM_SWESMBI;
10771 CSR_WRITE(sc, WMREG_SWSM, swsm);
10772 /* If we managed to set the bit we got the semaphore. */
10773 swsm = CSR_READ(sc, WMREG_SWSM);
10774 if (swsm & SWSM_SWESMBI)
10775 break;
10776
10777 delay(50);
10778 timeout--;
10779 }
10780
10781 if (timeout == 0) {
10782 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
10783 /* Release semaphores */
10784 wm_put_swsm_semaphore(sc);
10785 return 1;
10786 }
10787 return 0;
10788 }
10789
10790 static void
10791 wm_put_swsm_semaphore(struct wm_softc *sc)
10792 {
10793 uint32_t swsm;
10794
10795 swsm = CSR_READ(sc, WMREG_SWSM);
10796 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
10797 CSR_WRITE(sc, WMREG_SWSM, swsm);
10798 }
10799
10800 static int
10801 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
10802 {
10803 uint32_t swfw_sync;
10804 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
10805 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
10806 	int timeout;
10807
10808 for (timeout = 0; timeout < 200; timeout++) {
10809 if (sc->sc_flags & WM_F_LOCK_SWSM) {
10810 if (wm_get_swsm_semaphore(sc)) {
10811 aprint_error_dev(sc->sc_dev,
10812 "%s: failed to get semaphore\n",
10813 __func__);
10814 return 1;
10815 }
10816 }
10817 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
10818 if ((swfw_sync & (swmask | fwmask)) == 0) {
10819 swfw_sync |= swmask;
10820 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
10821 if (sc->sc_flags & WM_F_LOCK_SWSM)
10822 wm_put_swsm_semaphore(sc);
10823 return 0;
10824 }
10825 if (sc->sc_flags & WM_F_LOCK_SWSM)
10826 wm_put_swsm_semaphore(sc);
10827 delay(5000);
10828 }
10829 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
10830 device_xname(sc->sc_dev), mask, swfw_sync);
10831 return 1;
10832 }
10833
10834 static void
10835 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
10836 {
10837 uint32_t swfw_sync;
10838
10839 if (sc->sc_flags & WM_F_LOCK_SWSM) {
10840 while (wm_get_swsm_semaphore(sc) != 0)
10841 continue;
10842 }
10843 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
10844 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
10845 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
10846 if (sc->sc_flags & WM_F_LOCK_SWSM)
10847 wm_put_swsm_semaphore(sc);
10848 }
10849
10850 static int
10851 wm_get_swfwhw_semaphore(struct wm_softc *sc)
10852 {
10853 uint32_t ext_ctrl;
10854 	int timeout;
10855
10856 for (timeout = 0; timeout < 200; timeout++) {
10857 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10858 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
10859 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10860
10861 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10862 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
10863 return 0;
10864 delay(5000);
10865 }
10866 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
10867 device_xname(sc->sc_dev), ext_ctrl);
10868 return 1;
10869 }
10870
10871 static void
10872 wm_put_swfwhw_semaphore(struct wm_softc *sc)
10873 {
10874 uint32_t ext_ctrl;
10875 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10876 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10877 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10878 }
10879
10880 static int
10881 wm_get_hw_semaphore_82573(struct wm_softc *sc)
10882 {
10883 int i = 0;
10884 uint32_t reg;
10885
10886 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10887 do {
10888 CSR_WRITE(sc, WMREG_EXTCNFCTR,
10889 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
10890 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10891 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
10892 break;
10893 delay(2*1000);
10894 i++;
10895 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
10896
10897 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
10898 wm_put_hw_semaphore_82573(sc);
10899 log(LOG_ERR, "%s: Driver can't access the PHY\n",
10900 device_xname(sc->sc_dev));
10901 return -1;
10902 }
10903
10904 return 0;
10905 }
10906
10907 static void
10908 wm_put_hw_semaphore_82573(struct wm_softc *sc)
10909 {
10910 uint32_t reg;
10911
10912 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10913 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10914 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
10915 }
10916
10917 /*
10918 * Management mode and power management related subroutines.
10919 * BMC, AMT, suspend/resume and EEE.
10920 */
10921
10922 static int
10923 wm_check_mng_mode(struct wm_softc *sc)
10924 {
10925 int rv;
10926
10927 switch (sc->sc_type) {
10928 case WM_T_ICH8:
10929 case WM_T_ICH9:
10930 case WM_T_ICH10:
10931 case WM_T_PCH:
10932 case WM_T_PCH2:
10933 case WM_T_PCH_LPT:
10934 rv = wm_check_mng_mode_ich8lan(sc);
10935 break;
10936 case WM_T_82574:
10937 case WM_T_82583:
10938 rv = wm_check_mng_mode_82574(sc);
10939 break;
10940 case WM_T_82571:
10941 case WM_T_82572:
10942 case WM_T_82573:
10943 case WM_T_80003:
10944 rv = wm_check_mng_mode_generic(sc);
10945 break;
10946 default:
10947 		/* Nothing to do */
10948 rv = 0;
10949 break;
10950 }
10951
10952 return rv;
10953 }
10954
10955 static int
10956 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
10957 {
10958 uint32_t fwsm;
10959
10960 fwsm = CSR_READ(sc, WMREG_FWSM);
10961
10962 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
10963 return 1;
10964
10965 return 0;
10966 }
10967
10968 static int
10969 wm_check_mng_mode_82574(struct wm_softc *sc)
10970 {
10971 uint16_t data;
10972
10973 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
10974
10975 if ((data & NVM_CFG2_MNGM_MASK) != 0)
10976 return 1;
10977
10978 return 0;
10979 }
10980
10981 static int
10982 wm_check_mng_mode_generic(struct wm_softc *sc)
10983 {
10984 uint32_t fwsm;
10985
10986 fwsm = CSR_READ(sc, WMREG_FWSM);
10987
10988 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
10989 return 1;
10990
10991 return 0;
10992 }
10993
10994 static int
10995 wm_enable_mng_pass_thru(struct wm_softc *sc)
10996 {
10997 uint32_t manc, fwsm, factps;
10998
10999 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
11000 return 0;
11001
11002 manc = CSR_READ(sc, WMREG_MANC);
11003
11004 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
11005 device_xname(sc->sc_dev), manc));
11006 if ((manc & MANC_RECV_TCO_EN) == 0)
11007 return 0;
11008
11009 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
11010 fwsm = CSR_READ(sc, WMREG_FWSM);
11011 factps = CSR_READ(sc, WMREG_FACTPS);
11012 if (((factps & FACTPS_MNGCG) == 0)
11013 && ((fwsm & FWSM_MODE_MASK)
11014 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
11015 return 1;
11016 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
11017 uint16_t data;
11018
11019 factps = CSR_READ(sc, WMREG_FACTPS);
11020 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11021 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
11022 device_xname(sc->sc_dev), factps, data));
11023 if (((factps & FACTPS_MNGCG) == 0)
11024 && ((data & NVM_CFG2_MNGM_MASK)
11025 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
11026 return 1;
11027 } else if (((manc & MANC_SMBUS_EN) != 0)
11028 && ((manc & MANC_ASF_EN) == 0))
11029 return 1;
11030
11031 return 0;
11032 }
11033
11034 static int
11035 wm_check_reset_block(struct wm_softc *sc)
11036 {
11037 uint32_t reg;
11038
11039 switch (sc->sc_type) {
11040 case WM_T_ICH8:
11041 case WM_T_ICH9:
11042 case WM_T_ICH10:
11043 case WM_T_PCH:
11044 case WM_T_PCH2:
11045 case WM_T_PCH_LPT:
11046 reg = CSR_READ(sc, WMREG_FWSM);
11047 if ((reg & FWSM_RSPCIPHY) != 0)
11048 return 0;
11049 else
11050 return -1;
11051 break;
11052 case WM_T_82571:
11053 case WM_T_82572:
11054 case WM_T_82573:
11055 case WM_T_82574:
11056 case WM_T_82583:
11057 case WM_T_80003:
11058 reg = CSR_READ(sc, WMREG_MANC);
11059 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
11060 return -1;
11061 else
11062 return 0;
11063 break;
11064 default:
11065 /* no problem */
11066 break;
11067 }
11068
11069 return 0;
11070 }
11071
11072 static void
11073 wm_get_hw_control(struct wm_softc *sc)
11074 {
11075 uint32_t reg;
11076
11077 switch (sc->sc_type) {
11078 case WM_T_82573:
11079 reg = CSR_READ(sc, WMREG_SWSM);
11080 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
11081 break;
11082 case WM_T_82571:
11083 case WM_T_82572:
11084 case WM_T_82574:
11085 case WM_T_82583:
11086 case WM_T_80003:
11087 case WM_T_ICH8:
11088 case WM_T_ICH9:
11089 case WM_T_ICH10:
11090 case WM_T_PCH:
11091 case WM_T_PCH2:
11092 case WM_T_PCH_LPT:
11093 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11094 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
11095 break;
11096 default:
11097 break;
11098 }
11099 }
11100
11101 static void
11102 wm_release_hw_control(struct wm_softc *sc)
11103 {
11104 uint32_t reg;
11105
11106 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
11107 return;
11108
11109 if (sc->sc_type == WM_T_82573) {
11110 reg = CSR_READ(sc, WMREG_SWSM);
11111 		reg &= ~SWSM_DRV_LOAD;
11112 		CSR_WRITE(sc, WMREG_SWSM, reg);
11113 } else {
11114 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11115 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
11116 }
11117 }
11118
11119 static void
11120 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
11121 {
11122 uint32_t reg;
11123
11124 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11125
11126 if (on != 0)
11127 reg |= EXTCNFCTR_GATE_PHY_CFG;
11128 else
11129 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
11130
11131 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11132 }
11133
11134 static void
11135 wm_smbustopci(struct wm_softc *sc)
11136 {
11137 uint32_t fwsm;
11138
11139 fwsm = CSR_READ(sc, WMREG_FWSM);
11140 if (((fwsm & FWSM_FW_VALID) == 0)
11141 	    && (wm_check_reset_block(sc) == 0)) {
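		/*
		 * Toggle the LANPHYPC value pin so that the PHY is forced
		 * out of SMBus mode and onto the PCIe interface (this
		 * mirrors Intel's shared code).
		 */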
11142 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
11143 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
11144 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11145 CSR_WRITE_FLUSH(sc);
11146 delay(10);
11147 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
11148 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11149 CSR_WRITE_FLUSH(sc);
11150 delay(50*1000);
11151
11152 /*
11153 * Gate automatic PHY configuration by hardware on non-managed
11154 * 82579
11155 */
11156 if (sc->sc_type == WM_T_PCH2)
11157 wm_gate_hw_phy_config_ich8lan(sc, 1);
11158 }
11159 }
11160
11161 static void
11162 wm_init_manageability(struct wm_softc *sc)
11163 {
11164
11165 if (sc->sc_flags & WM_F_HAS_MANAGE) {
11166 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
11167 uint32_t manc = CSR_READ(sc, WMREG_MANC);
11168
11169 /* Disable hardware interception of ARP */
11170 manc &= ~MANC_ARP_EN;
11171
11172 /* Enable receiving management packets to the host */
11173 if (sc->sc_type >= WM_T_82571) {
11174 manc |= MANC_EN_MNG2HOST;
11175 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
11176 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
11177 }
11178
11179 CSR_WRITE(sc, WMREG_MANC, manc);
11180 }
11181 }
11182
11183 static void
11184 wm_release_manageability(struct wm_softc *sc)
11185 {
11186
11187 if (sc->sc_flags & WM_F_HAS_MANAGE) {
11188 uint32_t manc = CSR_READ(sc, WMREG_MANC);
11189
11190 manc |= MANC_ARP_EN;
11191 if (sc->sc_type >= WM_T_82571)
11192 manc &= ~MANC_EN_MNG2HOST;
11193
11194 CSR_WRITE(sc, WMREG_MANC, manc);
11195 }
11196 }
11197
11198 static void
11199 wm_get_wakeup(struct wm_softc *sc)
11200 {
11201
11202 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
11203 switch (sc->sc_type) {
11204 case WM_T_82573:
11205 case WM_T_82583:
11206 sc->sc_flags |= WM_F_HAS_AMT;
11207 /* FALLTHROUGH */
11208 case WM_T_80003:
11209 case WM_T_82541:
11210 case WM_T_82547:
11211 case WM_T_82571:
11212 case WM_T_82572:
11213 case WM_T_82574:
11214 case WM_T_82575:
11215 case WM_T_82576:
11216 case WM_T_82580:
11217 case WM_T_I350:
11218 case WM_T_I354:
11219 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
11220 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
11221 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11222 break;
11223 case WM_T_ICH8:
11224 case WM_T_ICH9:
11225 case WM_T_ICH10:
11226 case WM_T_PCH:
11227 case WM_T_PCH2:
11228 case WM_T_PCH_LPT:
11229 sc->sc_flags |= WM_F_HAS_AMT;
11230 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11231 break;
11232 default:
11233 break;
11234 }
11235
11236 /* 1: HAS_MANAGE */
11237 if (wm_enable_mng_pass_thru(sc) != 0)
11238 sc->sc_flags |= WM_F_HAS_MANAGE;
11239
11240 #ifdef WM_DEBUG
11241 printf("\n");
11242 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
11243 printf("HAS_AMT,");
11244 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
11245 printf("ARC_SUBSYS_VALID,");
11246 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
11247 printf("ASF_FIRMWARE_PRES,");
11248 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
11249 printf("HAS_MANAGE,");
11250 printf("\n");
11251 #endif
11252 /*
11253 	 * Note that the WOL flags are set after the EEPROM stuff has
11254 	 * been reset.
11255 */
11256 }
11257
11258 #ifdef WM_WOL
11259 /* WOL in the newer chipset interfaces (pchlan) */
11260 static void
11261 wm_enable_phy_wakeup(struct wm_softc *sc)
11262 {
11263 #if 0
11264 uint16_t preg;
11265
11266 /* Copy MAC RARs to PHY RARs */
11267
11268 /* Copy MAC MTA to PHY MTA */
11269
11270 /* Configure PHY Rx Control register */
11271
11272 /* Enable PHY wakeup in MAC register */
11273
11274 /* Configure and enable PHY wakeup in PHY registers */
11275
11276 /* Activate PHY wakeup */
11277
11278 /* XXX */
11279 #endif
11280 }
11281
11282 /* Power down workaround on D3 */
11283 static void
11284 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
11285 {
11286 uint32_t reg;
11287 int i;
11288
11289 for (i = 0; i < 2; i++) {
11290 /* Disable link */
11291 reg = CSR_READ(sc, WMREG_PHY_CTRL);
11292 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
11293 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11294
11295 /*
11296 * Call gig speed drop workaround on Gig disable before
11297 * accessing any PHY registers
11298 */
11299 if (sc->sc_type == WM_T_ICH8)
11300 wm_gig_downshift_workaround_ich8lan(sc);
11301
11302 /* Write VR power-down enable */
11303 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
11304 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
11305 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
11306 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
11307
11308 /* Read it back and test */
11309 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
11310 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
11311 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
11312 break;
11313
11314 /* Issue PHY reset and repeat at most one more time */
11315 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
11316 }
11317 }
11318
11319 static void
11320 wm_enable_wakeup(struct wm_softc *sc)
11321 {
11322 uint32_t reg, pmreg;
11323 pcireg_t pmode;
11324
11325 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
11326 &pmreg, NULL) == 0)
11327 return;
11328
11329 /* Advertise the wakeup capability */
11330 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
11331 | CTRL_SWDPIN(3));
11332 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
11333
11334 /* ICH workaround */
11335 switch (sc->sc_type) {
11336 case WM_T_ICH8:
11337 case WM_T_ICH9:
11338 case WM_T_ICH10:
11339 case WM_T_PCH:
11340 case WM_T_PCH2:
11341 case WM_T_PCH_LPT:
11342 /* Disable gig during WOL */
11343 reg = CSR_READ(sc, WMREG_PHY_CTRL);
11344 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
11345 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11346 if (sc->sc_type == WM_T_PCH)
11347 wm_gmii_reset(sc);
11348
11349 /* Power down workaround */
11350 if (sc->sc_phytype == WMPHY_82577) {
11351 struct mii_softc *child;
11352
11353 /* Assume that the PHY is copper */
11354 child = LIST_FIRST(&sc->sc_mii.mii_phys);
11355 if (child->mii_mpd_rev <= 2)
11356 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
11357 (768 << 5) | 25, 0x0444); /* magic num */
11358 }
11359 break;
11360 default:
11361 break;
11362 }
11363
11364 /* Keep the laser running on fiber adapters */
11365 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
11366 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
11367 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11368 reg |= CTRL_EXT_SWDPIN(3);
11369 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11370 }
11371
11372 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
11373 #if 0 /* for the multicast packet */
11374 reg |= WUFC_MC;
11375 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
11376 #endif
11377
11378 if (sc->sc_type == WM_T_PCH) {
11379 wm_enable_phy_wakeup(sc);
11380 } else {
11381 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
11382 CSR_WRITE(sc, WMREG_WUFC, reg);
11383 }
11384
11385 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11386 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11387 || (sc->sc_type == WM_T_PCH2))
11388 && (sc->sc_phytype == WMPHY_IGP_3))
11389 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
11390
11391 /* Request PME */
11392 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
11393 #if 0
11394 /* Disable WOL */
11395 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
11396 #else
11397 /* For WOL */
11398 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
11399 #endif
11400 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
11401 }
11402 #endif /* WM_WOL */
11403
11404 /* EEE */
11405
11406 static void
11407 wm_set_eee_i350(struct wm_softc *sc)
11408 {
11409 uint32_t ipcnfg, eeer;
11410
11411 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
11412 eeer = CSR_READ(sc, WMREG_EEER);
11413
11414 if ((sc->sc_flags & WM_F_EEE) != 0) {
11415 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
11416 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
11417 | EEER_LPI_FC);
11418 } else {
11419 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
11420 ipcnfg &= ~IPCNFG_10BASE_TE;
11421 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
11422 | EEER_LPI_FC);
11423 }
11424
11425 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
11426 CSR_WRITE(sc, WMREG_EEER, eeer);
11427 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
11428 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
11429 }
11430
11431 /*
11432 * Workarounds (mainly PHY related).
11433 * Basically, PHY's workarounds are in the PHY drivers.
11434 */
11435
11436 /* Work-around for 82566 Kumeran PCS lock loss */
11437 static void
11438 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
11439 {
11440 int miistatus, active, i;
11441 int reg;
11442
11443 miistatus = sc->sc_mii.mii_media_status;
11444
11445 /* If the link is not up, do nothing */
11446 	if ((miistatus & IFM_ACTIVE) == 0)
11447 return;
11448
11449 active = sc->sc_mii.mii_media_active;
11450
11451 	/* Nothing to do if the link speed is other than 1Gbps */
11452 if (IFM_SUBTYPE(active) != IFM_1000_T)
11453 return;
11454
11455 for (i = 0; i < 10; i++) {
11456 /* read twice */
11457 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
11458 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
11459 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
11460 goto out; /* GOOD! */
11461
11462 /* Reset the PHY */
11463 wm_gmii_reset(sc);
11464 delay(5*1000);
11465 }
11466
11467 /* Disable GigE link negotiation */
11468 reg = CSR_READ(sc, WMREG_PHY_CTRL);
11469 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
11470 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11471
11472 /*
11473 * Call gig speed drop workaround on Gig disable before accessing
11474 * any PHY registers.
11475 */
11476 wm_gig_downshift_workaround_ich8lan(sc);
11477
11478 out:
11479 return;
11480 }
11481
11482 /* WOL from S5 stops working */
11483 static void
11484 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
11485 {
11486 uint16_t kmrn_reg;
11487
11488 /* Only for igp3 */
11489 if (sc->sc_phytype == WMPHY_IGP_3) {
11490 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
11491 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
11492 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
11493 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
11494 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
11495 }
11496 }
11497
11498 /*
11499 * Workaround for pch's PHYs
11500 * XXX should be moved to new PHY driver?
11501 */
11502 static void
11503 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
11504 {
11505 if (sc->sc_phytype == WMPHY_82577)
11506 wm_set_mdio_slow_mode_hv(sc);
11507
11508 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
11509
11510 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
11511
11512 /* 82578 */
11513 if (sc->sc_phytype == WMPHY_82578) {
11514 /* PCH rev. < 3 */
11515 if (sc->sc_rev < 3) {
11516 /* XXX 6 bit shift? Why? Is it page2? */
11517 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
11518 0x66c0);
11519 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
11520 0xffff);
11521 }
11522
11523 /* XXX phy rev. < 2 */
11524 }
11525
11526 /* Select page 0 */
11527
11528 /* XXX acquire semaphore */
11529 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
11530 /* XXX release semaphore */
11531
11532 /*
11533 * Configure the K1 Si workaround during phy reset assuming there is
11534 * link so that it disables K1 if link is in 1Gbps.
11535 */
11536 wm_k1_gig_workaround_hv(sc, 1);
11537 }
11538
11539 static void
11540 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
11541 {
11542
11543 wm_set_mdio_slow_mode_hv(sc);
11544 }
11545
11546 static void
11547 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
11548 {
11549 int k1_enable = sc->sc_nvm_k1_enabled;
11550
11551 /* XXX acquire semaphore */
11552
11553 if (link) {
11554 k1_enable = 0;
11555
11556 /* Link stall fix for link up */
11557 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
11558 } else {
11559 /* Link stall fix for link down */
11560 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
11561 }
11562
11563 wm_configure_k1_ich8lan(sc, k1_enable);
11564
11565 /* XXX release semaphore */
11566 }
11567
11568 static void
11569 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
11570 {
11571 uint32_t reg;
11572
11573 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
11574 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
11575 reg | HV_KMRN_MDIO_SLOW);
11576 }
11577
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

	/* Save CTRL/CTRL_EXT, then force the speed with bypass enabled */
	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	/* Restore the original values */
	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);
}

/* Special case: the 82575 needs this manual post-reset init script */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without an
	 * EEPROM. It is the same setup as used in the FreeBSD driver
	 * for the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}
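
/*
 * Each wm_82575_write_8bit_ctlr_reg() call above stores an 8-bit value
 * at an 8-bit offset behind a single indirection register. A minimal
 * sketch of that helper's core, assuming the Intel "generic 8-bit
 * control register" scheme (data in bits 7:0, offset shifted above it,
 * completion signalled by a ready bit); the macro names here are
 * illustrative, the real helper lives elsewhere in this file:
 *
 *	regval = data | (offset << SCTL_CTL_ADDR_SHIFT);
 *	CSR_WRITE(sc, reg, regval);
 *	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
 *		delay(5);
 *		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
 *			break;
 *	}
 */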

/*
 * After reset, re-derive the 82580 MDICNFG PHY-routing bits (external
 * vs. internal MDIO, shared MDIO line) from this function's CFG3 word
 * in the NVM. Only needed when running in SGMII mode.
 */
static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

/*
 * I210 Errata 25 and I211 Errata 10: "Slow System Clock" - the
 * internal PHY's PLL may come up unconfigured and must be kicked by
 * resetting the PHY until it reads back as configured.
 */
static void
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	int phyval;
	bool wa_done = false;
	int i;

	/* Save the WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	/* Temporarily direct MDIO accesses to the internal PHY */
	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	/* Get the Power Management capability offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return;
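
	/*
	 * Retry up to WM_MAX_PLL_TRIES times: while the PHY's PLL-frequency
	 * register still reads back as unconfigured, pulse the PHY through
	 * a reset and a D3->D0 power-state cycle with a patched iNVM
	 * autoload word, then re-check.
	 */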
	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF)
			break; /* OK */

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		/* Write the patched autoload word */
		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Bounce the device through D3 and back to D0 */
		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		/* Restore the unpatched autoload word */
		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore the saved MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
}