/*	$NetBSD: if_wm.c,v 1.387 2015/12/25 05:45:40 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX Multi queue
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.387 2015/12/25 05:45:40 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)							\
	do { if (wm_debug & (x)) printf y; } while (/*CONSTCOND*/0)
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define	WM_MPSAFE	1
#endif

/*
 * The maximum number of interrupts this device driver uses.
 */
#define	WM_MAX_NTXINTR		16
#define	WM_MAX_NRXINTR		16
#define	WM_MAX_NINTR		(WM_MAX_NTXINTR + WM_MAX_NRXINTR + 1)
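
/*
 * With the defaults above, the driver can use at most
 * 16 Tx + 16 Rx + 1 link = 33 MSI-X vectors per device.
 */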

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCSIZE(txq)	(WM_NTXDESC(txq) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
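
/*
 * Note that the ring-index macros above rely on txq_ndesc and txq_num
 * being powers of two (as the struct comments below require):
 * "(x + 1) & (n - 1)" equals "(x + 1) % n" only in that case.  E.g.
 * with WM_NTXDESC == 4096, WM_NEXTTX(txq, 4095) wraps around to 0.
 */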

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
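
/*
 * The arithmetic behind the comment above (assuming a 9018-byte jumbo
 * frame, i.e. a 9000-byte payload plus header and CRC): such a frame
 * spans ceil(9018 / 2048) = 5 MCLBYTES buffers, so 256 descriptors
 * hold floor(256 / 5) = 51, i.e. roughly 50 full-sized jumbo packets.
 */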

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

#define	WM_CDTXOFF(x)	(sizeof(wiseman_txdesc_t) * (x))
#define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;

	int txq_id;			/* index of transmit queues */
	int txq_intr_idx;		/* index of MSI-X tables */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
	size_t txq_desc_size;		/* control data size */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/* XXX which event counter is required? */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;

	int rxq_id;			/* index of receive queues */
	int rxq_intr_idx;		/* index of MSI-X tables */

	/* Software state for the receive descriptors. */
	wiseman_rxdesc_t *rxq_descs;

	/* RX control data structures. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
	size_t rxq_desc_size;		/* control data size */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx descriptor/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	/* XXX which event counter is required? */
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_ntxqueues;
	struct wm_txqueue *sc_txq;

	int sc_nrxqueues;
	struct wm_rxqueue *sc_rxq;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
};

#define	WM_TX_LOCK(_txq)	if ((_txq)->txq_lock) mutex_enter((_txq)->txq_lock)
#define	WM_TX_UNLOCK(_txq)	if ((_txq)->txq_lock) mutex_exit((_txq)->txq_lock)
#define	WM_TX_LOCKED(_txq)	(!(_txq)->txq_lock || mutex_owned((_txq)->txq_lock))
#define	WM_RX_LOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_enter((_rxq)->rxq_lock)
#define	WM_RX_UNLOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_exit((_rxq)->rxq_lock)
#define	WM_RX_LOCKED(_rxq)	(!(_rxq)->rxq_lock || mutex_owned((_rxq)->rxq_lock))
#define	WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define	WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define	WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#ifdef WM_MPSAFE
#define	CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define	CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
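
/*
 * A sketch of how the two macros above cooperate (hypothetical caller;
 * the real user is the Rx path below): the chain is reset once, then
 * each received buffer is appended in O(1) via the tail
 * pointer-to-pointer:
 *
 *	WM_RXCHAIN_RESET(rxq);		// rxq_head = NULL, rxq_len = 0
 *	WM_RXCHAIN_LINK(rxq, m1);	// rxq_head = m1, tail = m1
 *	WM_RXCHAIN_LINK(rxq, m2);	// m1->m_next = m2, tail = m2
 */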

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
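
/*
 * CSR_WRITE_FLUSH() works because PCI writes are posted: reading any
 * register (STATUS here, since reading it has no side effects) forces
 * all previously posted writes out to the device before we continue.
 */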

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
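
/*
 * The _LO/_HI pairs split a descriptor-ring DMA address into the two
 * 32-bit halves the hardware expects; e.g. the 64-bit address
 * 0x0000000123456000 is programmed as HI = 0x1, LO = 0x23456000.
 * On platforms with a 32-bit bus_addr_t the high half is always 0.
 */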

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static int	wm_txintr_msix(void *);
static int	wm_rxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#ifdef WM_WOL
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define	WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

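/*
 * Indirect register access through the I/O BAR: offset 0 in I/O space
 * is the address window and offset 4 is the data window (IOADDR and
 * IODATA in Intel's documentation; the driver just hardcodes the
 * offsets).  Writing the CSR offset to the address window selects the
 * register, after which the data window reads or writes its contents,
 * as wm_io_write() below does.
 */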
static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

/*
 * Descriptor sync/init functions.
 */
static inline void
wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
{
	struct wm_softc *sc = txq->txq_sc;

	/* If it will wrap around, sync to the end of the ring. */
	if ((start + num) > WM_NTXDESC(txq)) {
		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
		    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) *
		    (WM_NTXDESC(txq) - start), ops);
		num -= (WM_NTXDESC(txq) - start);
		start = 0;
	}

	/* Now sync whatever is left. */
	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
	    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) * num, ops);
}
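
/*
 * A worked example of the wrap handling above: with a 4096-entry ring,
 * syncing start = 4090, num = 10 first syncs descriptors 4090-4095
 * (the 6 entries up to the end of the ring), then falls through to
 * sync the remaining 4 descriptors starting at index 0.
 */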

static inline void
wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
{
	struct wm_softc *sc = rxq->rxq_sc;

	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
}

static inline void
wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
{
	struct wm_softc *sc = rxq->rxq_sc;
	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
	struct mbuf *m = rxs->rxs_mbuf;

	/*
	 * Note: We scoot the packet forward 2 bytes in the buffer
	 * so that the payload after the Ethernet header is aligned
	 * to a 4-byte boundary.
	 *
	 * XXX BRAINDAMAGE ALERT!
	 * The stupid chip uses the same size for every buffer, which
	 * is set in the Receive Control register.  We are using the 2K
	 * size option, but what we REALLY want is (2K - 2)!  For this
	 * reason, we can't "scoot" packets longer than the standard
	 * Ethernet MTU.  On strict-alignment platforms, if the total
	 * size exceeds (2K - 2) we set align_tweak to 0 and let
	 * the upper layer copy the headers.
	 */
	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;

	wm_set_dma_addr(&rxd->wrx_addr,
	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
	rxd->wrx_len = 0;
	rxd->wrx_cksum = 0;
	rxd->wrx_status = 0;
	rxd->wrx_errors = 0;
	rxd->wrx_special = 0;
	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
}
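
/*
 * Note that wm_init_rxdesc() finishes by writing the ring index to the
 * RDT (Receive Descriptor Tail) register, which is how the freshly
 * initialized descriptor is handed back to the hardware: the tail
 * register tells the chip how far software has refilled the ring.
 */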

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, nvmword;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Disable MSI for Errata:
	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
	 *
	 *  82544: Errata 25
	 *  82540: Errata  6 (easy to reproduce device timeout)
	 *  82545: Errata  4 (easy to reproduce device timeout)
	 *  82546: Errata 26 (easy to reproduce device timeout)
	 *  82541: Errata  7 (easy to reproduce device timeout)
	 *
	 * "Byte Enables 2 and 3 are not set on MSI writes"
	 *
	 *  82571 & 82572: Errata 63
	 */
	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
	    || (sc->sc_type == WM_T_82572))
		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

1539 /*
1540 	 * Map the device. All devices support memory-mapped access,
1541 * and it is really required for normal operation.
1542 */
1543 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1544 switch (memtype) {
1545 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1546 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1547 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1548 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1549 break;
1550 default:
1551 memh_valid = 0;
1552 break;
1553 }
1554
1555 if (memh_valid) {
1556 sc->sc_st = memt;
1557 sc->sc_sh = memh;
1558 sc->sc_ss = memsize;
1559 } else {
1560 aprint_error_dev(sc->sc_dev,
1561 "unable to map device registers\n");
1562 return;
1563 }
1564
1565 /*
1566 * In addition, i82544 and later support I/O mapped indirect
1567 * register access. It is not desirable (nor supported in
1568 * this driver) to use it for normal operation, though it is
1569 * required to work around bugs in some chip versions.
1570 */
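	/*
	 * A sketch of the scan below: BARs sit at 4-byte intervals
	 * starting at PCI_MAPREG_START, so the loop steps i by 4; a
	 * 64-bit memory BAR consumes two consecutive registers, which
	 * is why an extra 4 bytes are skipped when one is seen.
	 */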
1571 if (sc->sc_type >= WM_T_82544) {
1572 /* First we have to find the I/O BAR. */
1573 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1574 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1575 if (memtype == PCI_MAPREG_TYPE_IO)
1576 break;
1577 if (PCI_MAPREG_MEM_TYPE(memtype) ==
1578 PCI_MAPREG_MEM_TYPE_64BIT)
1579 i += 4; /* skip high bits, too */
1580 }
1581 if (i < PCI_MAPREG_END) {
1582 /*
1583 		 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1584 		 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1585 		 * That's no problem, because newer chips don't have
1586 		 * this bug.
1587 		 *
1588 		 * The i8254x apparently doesn't respond when the
1589 		 * I/O BAR is 0, which looks somewhat like it hasn't
1590 		 * been configured.
1591 */
1592 preg = pci_conf_read(pc, pa->pa_tag, i);
1593 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1594 aprint_error_dev(sc->sc_dev,
1595 "WARNING: I/O BAR at zero.\n");
1596 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1597 0, &sc->sc_iot, &sc->sc_ioh,
1598 NULL, &sc->sc_ios) == 0) {
1599 sc->sc_flags |= WM_F_IOH_VALID;
1600 } else {
1601 aprint_error_dev(sc->sc_dev,
1602 "WARNING: unable to map I/O space\n");
1603 }
1604 }
1605
1606 }
1607
1608 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1609 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1610 preg |= PCI_COMMAND_MASTER_ENABLE;
1611 if (sc->sc_type < WM_T_82542_2_1)
1612 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1613 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1614
1615 /* power up chip */
1616 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1617 NULL)) && error != EOPNOTSUPP) {
1618 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1619 return;
1620 }
1621
1622 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1623
1624 /* Allocation settings */
1625 max_type = PCI_INTR_TYPE_MSIX;
1626 counts[PCI_INTR_TYPE_MSIX] = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
1627 counts[PCI_INTR_TYPE_MSI] = 1;
1628 counts[PCI_INTR_TYPE_INTX] = 1;
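	/*
	 * The allocation below tries MSI-X first and, via the
	 * alloc_retry label, falls back to MSI and then INTx.  As a
	 * worked example, a configuration with 4 Tx and 4 Rx queues
	 * requests 4 + 4 + 1 = 9 MSI-X vectors: one per queue plus
	 * one for link state interrupts.
	 */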
1629
1630 alloc_retry:
1631 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1632 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1633 return;
1634 }
1635
1636 if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1637 error = wm_setup_msix(sc);
1638 if (error) {
1639 pci_intr_release(pc, sc->sc_intrs,
1640 counts[PCI_INTR_TYPE_MSIX]);
1641
1642 /* Setup for MSI: Disable MSI-X */
1643 max_type = PCI_INTR_TYPE_MSI;
1644 counts[PCI_INTR_TYPE_MSI] = 1;
1645 counts[PCI_INTR_TYPE_INTX] = 1;
1646 goto alloc_retry;
1647 }
1648 } else if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1649 wm_adjust_qnum(sc, 0); /* must not use multiqueue */
1650 error = wm_setup_legacy(sc);
1651 if (error) {
1652 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1653 counts[PCI_INTR_TYPE_MSI]);
1654
1655 /* The next try is for INTx: Disable MSI */
1656 max_type = PCI_INTR_TYPE_INTX;
1657 counts[PCI_INTR_TYPE_INTX] = 1;
1658 goto alloc_retry;
1659 }
1660 } else {
1661 wm_adjust_qnum(sc, 0); /* must not use multiqueue */
1662 error = wm_setup_legacy(sc);
1663 if (error) {
1664 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1665 counts[PCI_INTR_TYPE_INTX]);
1666 return;
1667 }
1668 }
1669
1670 /*
1671 * Check the function ID (unit number of the chip).
1672 */
1673 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1674 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1675 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1676 || (sc->sc_type == WM_T_82580)
1677 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1678 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1679 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1680 else
1681 sc->sc_funcid = 0;
1682
1683 /*
1684 * Determine a few things about the bus we're connected to.
1685 */
1686 if (sc->sc_type < WM_T_82543) {
1687 /* We don't really know the bus characteristics here. */
1688 sc->sc_bus_speed = 33;
1689 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1690 /*
1691 		 * CSA (Communication Streaming Architecture) is about as
1692 		 * fast as a 32-bit 66MHz PCI bus.
1693 */
1694 sc->sc_flags |= WM_F_CSA;
1695 sc->sc_bus_speed = 66;
1696 aprint_verbose_dev(sc->sc_dev,
1697 "Communication Streaming Architecture\n");
1698 if (sc->sc_type == WM_T_82547) {
1699 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1700 callout_setfunc(&sc->sc_txfifo_ch,
1701 wm_82547_txfifo_stall, sc);
1702 aprint_verbose_dev(sc->sc_dev,
1703 "using 82547 Tx FIFO stall work-around\n");
1704 }
1705 } else if (sc->sc_type >= WM_T_82571) {
1706 sc->sc_flags |= WM_F_PCIE;
1707 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1708 && (sc->sc_type != WM_T_ICH10)
1709 && (sc->sc_type != WM_T_PCH)
1710 && (sc->sc_type != WM_T_PCH2)
1711 && (sc->sc_type != WM_T_PCH_LPT)) {
1712 /* ICH* and PCH* have no PCIe capability registers */
1713 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1714 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1715 NULL) == 0)
1716 aprint_error_dev(sc->sc_dev,
1717 "unable to find PCIe capability\n");
1718 }
1719 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1720 } else {
1721 reg = CSR_READ(sc, WMREG_STATUS);
1722 if (reg & STATUS_BUS64)
1723 sc->sc_flags |= WM_F_BUS64;
1724 if ((reg & STATUS_PCIX_MODE) != 0) {
1725 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1726
1727 sc->sc_flags |= WM_F_PCIX;
1728 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1729 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1730 aprint_error_dev(sc->sc_dev,
1731 "unable to find PCIX capability\n");
1732 else if (sc->sc_type != WM_T_82545_3 &&
1733 sc->sc_type != WM_T_82546_3) {
1734 /*
1735 * Work around a problem caused by the BIOS
1736 * setting the max memory read byte count
1737 * incorrectly.
1738 */
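				/*
				 * Note the encoding: bytecnt and maxb
				 * values 0..3 stand for 512, 1024,
				 * 2048 and 4096 bytes (hence the
				 * "512 << n" in the message below), so
				 * bytecnt > maxb means the BIOS
				 * programmed a larger MMRBC than the
				 * device reports as its maximum.
				 */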
1739 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1740 sc->sc_pcixe_capoff + PCIX_CMD);
1741 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1742 sc->sc_pcixe_capoff + PCIX_STATUS);
1743
1744 bytecnt =
1745 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1746 PCIX_CMD_BYTECNT_SHIFT;
1747 maxb =
1748 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1749 PCIX_STATUS_MAXB_SHIFT;
1750 if (bytecnt > maxb) {
1751 aprint_verbose_dev(sc->sc_dev,
1752 "resetting PCI-X MMRBC: %d -> %d\n",
1753 512 << bytecnt, 512 << maxb);
1754 pcix_cmd = (pcix_cmd &
1755 ~PCIX_CMD_BYTECNT_MASK) |
1756 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1757 pci_conf_write(pa->pa_pc, pa->pa_tag,
1758 sc->sc_pcixe_capoff + PCIX_CMD,
1759 pcix_cmd);
1760 }
1761 }
1762 }
1763 /*
1764 * The quad port adapter is special; it has a PCIX-PCIX
1765 * bridge on the board, and can run the secondary bus at
1766 * a higher speed.
1767 */
1768 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1769 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1770 : 66;
1771 } else if (sc->sc_flags & WM_F_PCIX) {
1772 switch (reg & STATUS_PCIXSPD_MASK) {
1773 case STATUS_PCIXSPD_50_66:
1774 sc->sc_bus_speed = 66;
1775 break;
1776 case STATUS_PCIXSPD_66_100:
1777 sc->sc_bus_speed = 100;
1778 break;
1779 case STATUS_PCIXSPD_100_133:
1780 sc->sc_bus_speed = 133;
1781 break;
1782 default:
1783 aprint_error_dev(sc->sc_dev,
1784 "unknown PCIXSPD %d; assuming 66MHz\n",
1785 reg & STATUS_PCIXSPD_MASK);
1786 sc->sc_bus_speed = 66;
1787 break;
1788 }
1789 } else
1790 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1791 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1792 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1793 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1794 }
1795
1796 /* clear interesting stat counters */
1797 CSR_READ(sc, WMREG_COLC);
1798 CSR_READ(sc, WMREG_RXERRC);
1799
1800 /* get PHY control from SMBus to PCIe */
1801 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1802 || (sc->sc_type == WM_T_PCH_LPT))
1803 wm_smbustopci(sc);
1804
1805 /* Reset the chip to a known state. */
1806 wm_reset(sc);
1807
1808 /* Get some information about the EEPROM. */
1809 switch (sc->sc_type) {
1810 case WM_T_82542_2_0:
1811 case WM_T_82542_2_1:
1812 case WM_T_82543:
1813 case WM_T_82544:
1814 /* Microwire */
1815 sc->sc_nvm_wordsize = 64;
1816 sc->sc_nvm_addrbits = 6;
1817 break;
1818 case WM_T_82540:
1819 case WM_T_82545:
1820 case WM_T_82545_3:
1821 case WM_T_82546:
1822 case WM_T_82546_3:
1823 /* Microwire */
1824 reg = CSR_READ(sc, WMREG_EECD);
1825 if (reg & EECD_EE_SIZE) {
1826 sc->sc_nvm_wordsize = 256;
1827 sc->sc_nvm_addrbits = 8;
1828 } else {
1829 sc->sc_nvm_wordsize = 64;
1830 sc->sc_nvm_addrbits = 6;
1831 }
1832 sc->sc_flags |= WM_F_LOCK_EECD;
1833 break;
1834 case WM_T_82541:
1835 case WM_T_82541_2:
1836 case WM_T_82547:
1837 case WM_T_82547_2:
1838 sc->sc_flags |= WM_F_LOCK_EECD;
1839 reg = CSR_READ(sc, WMREG_EECD);
1840 if (reg & EECD_EE_TYPE) {
1841 /* SPI */
1842 sc->sc_flags |= WM_F_EEPROM_SPI;
1843 wm_nvm_set_addrbits_size_eecd(sc);
1844 } else {
1845 /* Microwire */
1846 if ((reg & EECD_EE_ABITS) != 0) {
1847 sc->sc_nvm_wordsize = 256;
1848 sc->sc_nvm_addrbits = 8;
1849 } else {
1850 sc->sc_nvm_wordsize = 64;
1851 sc->sc_nvm_addrbits = 6;
1852 }
1853 }
1854 break;
1855 case WM_T_82571:
1856 case WM_T_82572:
1857 /* SPI */
1858 sc->sc_flags |= WM_F_EEPROM_SPI;
1859 wm_nvm_set_addrbits_size_eecd(sc);
1860 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1861 break;
1862 case WM_T_82573:
1863 sc->sc_flags |= WM_F_LOCK_SWSM;
1864 /* FALLTHROUGH */
1865 case WM_T_82574:
1866 case WM_T_82583:
1867 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1868 sc->sc_flags |= WM_F_EEPROM_FLASH;
1869 sc->sc_nvm_wordsize = 2048;
1870 } else {
1871 /* SPI */
1872 sc->sc_flags |= WM_F_EEPROM_SPI;
1873 wm_nvm_set_addrbits_size_eecd(sc);
1874 }
1875 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1876 break;
1877 case WM_T_82575:
1878 case WM_T_82576:
1879 case WM_T_82580:
1880 case WM_T_I350:
1881 case WM_T_I354:
1882 case WM_T_80003:
1883 /* SPI */
1884 sc->sc_flags |= WM_F_EEPROM_SPI;
1885 wm_nvm_set_addrbits_size_eecd(sc);
1886 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1887 | WM_F_LOCK_SWSM;
1888 break;
1889 case WM_T_ICH8:
1890 case WM_T_ICH9:
1891 case WM_T_ICH10:
1892 case WM_T_PCH:
1893 case WM_T_PCH2:
1894 case WM_T_PCH_LPT:
1895 /* FLASH */
1896 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1897 sc->sc_nvm_wordsize = 2048;
1898 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1899 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1900 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
1901 aprint_error_dev(sc->sc_dev,
1902 "can't map FLASH registers\n");
1903 goto out;
1904 }
1905 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1906 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1907 ICH_FLASH_SECTOR_SIZE;
1908 sc->sc_ich8_flash_bank_size =
1909 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1910 sc->sc_ich8_flash_bank_size -=
1911 (reg & ICH_GFPREG_BASE_MASK);
1912 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1913 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
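		/*
		 * Worked example, assuming ICH_FLASH_SECTOR_SIZE is
		 * 4 KB: a GFPREG base of 1 and limit of 8 describe
		 * 9 - 1 = 8 sectors, i.e. 32768 bytes holding both NVM
		 * banks; dividing by the two banks and by two bytes per
		 * word gives a bank size of 8192 16-bit words.
		 */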
1914 break;
1915 case WM_T_I210:
1916 case WM_T_I211:
1917 if (wm_nvm_get_flash_presence_i210(sc)) {
1918 wm_nvm_set_addrbits_size_eecd(sc);
1919 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1920 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1921 } else {
1922 sc->sc_nvm_wordsize = INVM_SIZE;
1923 sc->sc_flags |= WM_F_EEPROM_INVM;
1924 sc->sc_flags |= WM_F_LOCK_SWFW;
1925 }
1926 break;
1927 default:
1928 break;
1929 }
1930
1931 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1932 switch (sc->sc_type) {
1933 case WM_T_82571:
1934 case WM_T_82572:
1935 reg = CSR_READ(sc, WMREG_SWSM2);
1936 if ((reg & SWSM2_LOCK) == 0) {
1937 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1938 force_clear_smbi = true;
1939 } else
1940 force_clear_smbi = false;
1941 break;
1942 case WM_T_82573:
1943 case WM_T_82574:
1944 case WM_T_82583:
1945 force_clear_smbi = true;
1946 break;
1947 default:
1948 force_clear_smbi = false;
1949 break;
1950 }
1951 if (force_clear_smbi) {
1952 reg = CSR_READ(sc, WMREG_SWSM);
1953 if ((reg & SWSM_SMBI) != 0)
1954 aprint_error_dev(sc->sc_dev,
1955 "Please update the Bootagent\n");
1956 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1957 }
1958
1959 /*
1960 	 * Defer printing the EEPROM type until after verifying the checksum.
1961 * This allows the EEPROM type to be printed correctly in the case
1962 * that no EEPROM is attached.
1963 */
1964 /*
1965 * Validate the EEPROM checksum. If the checksum fails, flag
1966 * this for later, so we can fail future reads from the EEPROM.
1967 */
1968 if (wm_nvm_validate_checksum(sc)) {
1969 /*
1970 		 * Validate again, because some PCI-e parts fail the first
1971 		 * check due to the link being in a sleep state.
1972 */
1973 if (wm_nvm_validate_checksum(sc))
1974 sc->sc_flags |= WM_F_EEPROM_INVALID;
1975 }
1976
1977 /* Set device properties (macflags) */
1978 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1979
1980 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1981 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
1982 else {
1983 aprint_verbose_dev(sc->sc_dev, "%u words ",
1984 sc->sc_nvm_wordsize);
1985 if (sc->sc_flags & WM_F_EEPROM_INVM)
1986 aprint_verbose("iNVM");
1987 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
1988 aprint_verbose("FLASH(HW)");
1989 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
1990 aprint_verbose("FLASH");
1991 else {
1992 if (sc->sc_flags & WM_F_EEPROM_SPI)
1993 eetype = "SPI";
1994 else
1995 eetype = "MicroWire";
1996 aprint_verbose("(%d address bits) %s EEPROM",
1997 sc->sc_nvm_addrbits, eetype);
1998 }
1999 }
2000 wm_nvm_version(sc);
2001 aprint_verbose("\n");
2002
2003 /* Check for I21[01] PLL workaround */
2004 if (sc->sc_type == WM_T_I210)
2005 sc->sc_flags |= WM_F_PLL_WA_I210;
2006 if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2007 		/* NVM image release 3.25 and newer include a workaround */
2008 if ((sc->sc_nvm_ver_major < 3)
2009 || ((sc->sc_nvm_ver_major == 3)
2010 && (sc->sc_nvm_ver_minor < 25))) {
2011 aprint_verbose_dev(sc->sc_dev,
2012 "ROM image version %d.%d is older than 3.25\n",
2013 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2014 sc->sc_flags |= WM_F_PLL_WA_I210;
2015 }
2016 }
2017 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2018 wm_pll_workaround_i210(sc);
2019
2020 wm_get_wakeup(sc);
2021 switch (sc->sc_type) {
2022 case WM_T_82571:
2023 case WM_T_82572:
2024 case WM_T_82573:
2025 case WM_T_82574:
2026 case WM_T_82583:
2027 case WM_T_80003:
2028 case WM_T_ICH8:
2029 case WM_T_ICH9:
2030 case WM_T_ICH10:
2031 case WM_T_PCH:
2032 case WM_T_PCH2:
2033 case WM_T_PCH_LPT:
2034 /* Non-AMT based hardware can now take control from firmware */
2035 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2036 wm_get_hw_control(sc);
2037 break;
2038 default:
2039 break;
2040 }
2041
2042 /*
2043 	 * Read the Ethernet address from the EEPROM, unless it was
2044 	 * found first in the device properties.
2045 */
2046 ea = prop_dictionary_get(dict, "mac-address");
2047 if (ea != NULL) {
2048 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2049 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2050 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2051 } else {
2052 if (wm_read_mac_addr(sc, enaddr) != 0) {
2053 aprint_error_dev(sc->sc_dev,
2054 "unable to read Ethernet address\n");
2055 goto out;
2056 }
2057 }
2058
2059 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2060 ether_sprintf(enaddr));
2061
2062 /*
2063 * Read the config info from the EEPROM, and set up various
2064 * bits in the control registers based on their contents.
2065 */
2066 pn = prop_dictionary_get(dict, "i82543-cfg1");
2067 if (pn != NULL) {
2068 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2069 cfg1 = (uint16_t) prop_number_integer_value(pn);
2070 } else {
2071 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2072 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2073 goto out;
2074 }
2075 }
2076
2077 pn = prop_dictionary_get(dict, "i82543-cfg2");
2078 if (pn != NULL) {
2079 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2080 cfg2 = (uint16_t) prop_number_integer_value(pn);
2081 } else {
2082 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2083 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2084 goto out;
2085 }
2086 }
2087
2088 /* check for WM_F_WOL */
2089 switch (sc->sc_type) {
2090 case WM_T_82542_2_0:
2091 case WM_T_82542_2_1:
2092 case WM_T_82543:
2093 /* dummy? */
2094 eeprom_data = 0;
2095 apme_mask = NVM_CFG3_APME;
2096 break;
2097 case WM_T_82544:
2098 apme_mask = NVM_CFG2_82544_APM_EN;
2099 eeprom_data = cfg2;
2100 break;
2101 case WM_T_82546:
2102 case WM_T_82546_3:
2103 case WM_T_82571:
2104 case WM_T_82572:
2105 case WM_T_82573:
2106 case WM_T_82574:
2107 case WM_T_82583:
2108 case WM_T_80003:
2109 default:
2110 apme_mask = NVM_CFG3_APME;
2111 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2112 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2113 break;
2114 case WM_T_82575:
2115 case WM_T_82576:
2116 case WM_T_82580:
2117 case WM_T_I350:
2118 case WM_T_I354: /* XXX ok? */
2119 case WM_T_ICH8:
2120 case WM_T_ICH9:
2121 case WM_T_ICH10:
2122 case WM_T_PCH:
2123 case WM_T_PCH2:
2124 case WM_T_PCH_LPT:
2125 /* XXX The funcid should be checked on some devices */
2126 apme_mask = WUC_APME;
2127 eeprom_data = CSR_READ(sc, WMREG_WUC);
2128 break;
2129 }
2130
2131 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2132 if ((eeprom_data & apme_mask) != 0)
2133 sc->sc_flags |= WM_F_WOL;
2134 #ifdef WM_DEBUG
2135 if ((sc->sc_flags & WM_F_WOL) != 0)
2136 printf("WOL\n");
2137 #endif
2138
2139 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2140 /* Check NVM for autonegotiation */
2141 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2142 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2143 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2144 }
2145 }
2146
2147 /*
2148 	 * XXX need special handling for some multiple port cards
2149 	 * to disable a particular port.
2150 */
2151
2152 if (sc->sc_type >= WM_T_82544) {
2153 pn = prop_dictionary_get(dict, "i82543-swdpin");
2154 if (pn != NULL) {
2155 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2156 swdpin = (uint16_t) prop_number_integer_value(pn);
2157 } else {
2158 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2159 aprint_error_dev(sc->sc_dev,
2160 "unable to read SWDPIN\n");
2161 goto out;
2162 }
2163 }
2164 }
2165
2166 if (cfg1 & NVM_CFG1_ILOS)
2167 sc->sc_ctrl |= CTRL_ILOS;
2168
2169 /*
2170 * XXX
2171 	 * This code isn't correct, because pins 2 and 3 are located
2172 	 * at different positions on newer chips. Check all datasheets.
2173 	 *
2174 	 * Until this problem is resolved, only handle chips up to the 82580.
2175 */
2176 if (sc->sc_type <= WM_T_82580) {
2177 if (sc->sc_type >= WM_T_82544) {
2178 sc->sc_ctrl |=
2179 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2180 CTRL_SWDPIO_SHIFT;
2181 sc->sc_ctrl |=
2182 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2183 CTRL_SWDPINS_SHIFT;
2184 } else {
2185 sc->sc_ctrl |=
2186 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2187 CTRL_SWDPIO_SHIFT;
2188 }
2189 }
2190
2191 /* XXX For other than 82580? */
2192 if (sc->sc_type == WM_T_82580) {
2193 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2194 printf("CFG3 = %08x\n", (uint32_t)nvmword);
2195 if (nvmword & __BIT(13)) {
2196 printf("SET ILOS\n");
2197 sc->sc_ctrl |= CTRL_ILOS;
2198 }
2199 }
2200
2201 #if 0
2202 if (sc->sc_type >= WM_T_82544) {
2203 if (cfg1 & NVM_CFG1_IPS0)
2204 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2205 if (cfg1 & NVM_CFG1_IPS1)
2206 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2207 sc->sc_ctrl_ext |=
2208 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2209 CTRL_EXT_SWDPIO_SHIFT;
2210 sc->sc_ctrl_ext |=
2211 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2212 CTRL_EXT_SWDPINS_SHIFT;
2213 } else {
2214 sc->sc_ctrl_ext |=
2215 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2216 CTRL_EXT_SWDPIO_SHIFT;
2217 }
2218 #endif
2219
2220 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2221 #if 0
2222 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2223 #endif
2224
2225 if (sc->sc_type == WM_T_PCH) {
2226 uint16_t val;
2227
2228 /* Save the NVM K1 bit setting */
2229 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2230
2231 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2232 sc->sc_nvm_k1_enabled = 1;
2233 else
2234 sc->sc_nvm_k1_enabled = 0;
2235 }
2236
2237 /*
2238 * Determine if we're TBI,GMII or SGMII mode, and initialize the
2239 * media structures accordingly.
2240 */
2241 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2242 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2243 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2244 || sc->sc_type == WM_T_82573
2245 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2246 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2247 wm_gmii_mediainit(sc, wmp->wmp_product);
2248 } else if (sc->sc_type < WM_T_82543 ||
2249 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2250 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2251 aprint_error_dev(sc->sc_dev,
2252 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2253 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2254 }
2255 wm_tbi_mediainit(sc);
2256 } else {
2257 switch (sc->sc_type) {
2258 case WM_T_82575:
2259 case WM_T_82576:
2260 case WM_T_82580:
2261 case WM_T_I350:
2262 case WM_T_I354:
2263 case WM_T_I210:
2264 case WM_T_I211:
2265 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2266 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2267 switch (link_mode) {
2268 case CTRL_EXT_LINK_MODE_1000KX:
2269 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2270 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2271 break;
2272 case CTRL_EXT_LINK_MODE_SGMII:
2273 if (wm_sgmii_uses_mdio(sc)) {
2274 aprint_verbose_dev(sc->sc_dev,
2275 "SGMII(MDIO)\n");
2276 sc->sc_flags |= WM_F_SGMII;
2277 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2278 break;
2279 }
2280 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2281 /*FALLTHROUGH*/
2282 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2283 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2284 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2285 if (link_mode
2286 == CTRL_EXT_LINK_MODE_SGMII) {
2287 sc->sc_mediatype
2288 = WM_MEDIATYPE_COPPER;
2289 sc->sc_flags |= WM_F_SGMII;
2290 } else {
2291 sc->sc_mediatype
2292 = WM_MEDIATYPE_SERDES;
2293 aprint_verbose_dev(sc->sc_dev,
2294 "SERDES\n");
2295 }
2296 break;
2297 }
2298 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2299 aprint_verbose_dev(sc->sc_dev,
2300 "SERDES\n");
2301
2302 /* Change current link mode setting */
2303 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2304 switch (sc->sc_mediatype) {
2305 case WM_MEDIATYPE_COPPER:
2306 reg |= CTRL_EXT_LINK_MODE_SGMII;
2307 break;
2308 case WM_MEDIATYPE_SERDES:
2309 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2310 break;
2311 default:
2312 break;
2313 }
2314 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2315 break;
2316 case CTRL_EXT_LINK_MODE_GMII:
2317 default:
2318 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2319 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2320 break;
2321 }
2322
2324 			if ((sc->sc_flags & WM_F_SGMII) != 0)
2325 				reg |= CTRL_EXT_I2C_ENA;
2326 			else
2327 				reg &= ~CTRL_EXT_I2C_ENA;
2328 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2329
2330 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2331 wm_gmii_mediainit(sc, wmp->wmp_product);
2332 else
2333 wm_tbi_mediainit(sc);
2334 break;
2335 default:
2336 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2337 aprint_error_dev(sc->sc_dev,
2338 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2339 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2340 wm_gmii_mediainit(sc, wmp->wmp_product);
2341 }
2342 }
2343
2344 ifp = &sc->sc_ethercom.ec_if;
2345 xname = device_xname(sc->sc_dev);
2346 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2347 ifp->if_softc = sc;
2348 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2349 ifp->if_ioctl = wm_ioctl;
2350 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2351 ifp->if_start = wm_nq_start;
2352 else
2353 ifp->if_start = wm_start;
2354 ifp->if_watchdog = wm_watchdog;
2355 ifp->if_init = wm_init;
2356 ifp->if_stop = wm_stop;
2357 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2358 IFQ_SET_READY(&ifp->if_snd);
2359
2360 /* Check for jumbo frame */
2361 switch (sc->sc_type) {
2362 case WM_T_82573:
2363 /* XXX limited to 9234 if ASPM is disabled */
2364 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2365 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2366 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2367 break;
2368 case WM_T_82571:
2369 case WM_T_82572:
2370 case WM_T_82574:
2371 case WM_T_82575:
2372 case WM_T_82576:
2373 case WM_T_82580:
2374 case WM_T_I350:
2375 case WM_T_I354: /* XXXX ok? */
2376 case WM_T_I210:
2377 case WM_T_I211:
2378 case WM_T_80003:
2379 case WM_T_ICH9:
2380 case WM_T_ICH10:
2381 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2382 case WM_T_PCH_LPT:
2383 /* XXX limited to 9234 */
2384 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2385 break;
2386 case WM_T_PCH:
2387 /* XXX limited to 4096 */
2388 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2389 break;
2390 case WM_T_82542_2_0:
2391 case WM_T_82542_2_1:
2392 case WM_T_82583:
2393 case WM_T_ICH8:
2394 /* No support for jumbo frame */
2395 break;
2396 default:
2397 /* ETHER_MAX_LEN_JUMBO */
2398 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2399 break;
2400 }
2401
2402 /* If we're a i82543 or greater, we can support VLANs. */
2403 if (sc->sc_type >= WM_T_82543)
2404 sc->sc_ethercom.ec_capabilities |=
2405 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2406
2407 /*
2408 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
2409 	 * on the i82543 and later.
2410 */
2411 if (sc->sc_type >= WM_T_82543) {
2412 ifp->if_capabilities |=
2413 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2414 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2415 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2416 IFCAP_CSUM_TCPv6_Tx |
2417 IFCAP_CSUM_UDPv6_Tx;
2418 }
2419
2420 /*
2421 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2422 *
2423 * 82541GI (8086:1076) ... no
2424 * 82572EI (8086:10b9) ... yes
2425 */
2426 if (sc->sc_type >= WM_T_82571) {
2427 ifp->if_capabilities |=
2428 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2429 }
2430
2431 /*
2432 * If we're a i82544 or greater (except i82547), we can do
2433 * TCP segmentation offload.
2434 */
2435 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2436 ifp->if_capabilities |= IFCAP_TSOv4;
2437 }
2438
2439 if (sc->sc_type >= WM_T_82571) {
2440 ifp->if_capabilities |= IFCAP_TSOv6;
2441 }
2442
2443 #ifdef WM_MPSAFE
2444 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2445 #else
2446 sc->sc_core_lock = NULL;
2447 #endif
2448
2449 /* Attach the interface. */
2450 if_attach(ifp);
2451 ether_ifattach(ifp, enaddr);
2452 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2453 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2454 RND_FLAG_DEFAULT);
2455
2456 #ifdef WM_EVENT_COUNTERS
2457 /* Attach event counters. */
2458 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2459 NULL, xname, "txsstall");
2460 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2461 NULL, xname, "txdstall");
2462 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2463 NULL, xname, "txfifo_stall");
2464 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2465 NULL, xname, "txdw");
2466 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2467 NULL, xname, "txqe");
2468 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2469 NULL, xname, "rxintr");
2470 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2471 NULL, xname, "linkintr");
2472
2473 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2474 NULL, xname, "rxipsum");
2475 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2476 NULL, xname, "rxtusum");
2477 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2478 NULL, xname, "txipsum");
2479 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2480 NULL, xname, "txtusum");
2481 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2482 NULL, xname, "txtusum6");
2483
2484 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2485 NULL, xname, "txtso");
2486 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2487 NULL, xname, "txtso6");
2488 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2489 NULL, xname, "txtsopain");
2490
2491 for (i = 0; i < WM_NTXSEGS; i++) {
2492 snprintf(wm_txseg_evcnt_names[i],
2493 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2494 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2495 NULL, xname, wm_txseg_evcnt_names[i]);
2496 }
2497
2498 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2499 NULL, xname, "txdrop");
2500
2501 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2502 NULL, xname, "tu");
2503
2504 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2505 NULL, xname, "tx_xoff");
2506 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2507 NULL, xname, "tx_xon");
2508 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2509 NULL, xname, "rx_xoff");
2510 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2511 NULL, xname, "rx_xon");
2512 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2513 NULL, xname, "rx_macctl");
2514 #endif /* WM_EVENT_COUNTERS */
2515
2516 if (pmf_device_register(self, wm_suspend, wm_resume))
2517 pmf_class_network_register(self, ifp);
2518 else
2519 aprint_error_dev(self, "couldn't establish power handler\n");
2520
2521 sc->sc_flags |= WM_F_ATTACHED;
2522 out:
2523 return;
2524 }
2525
2526 /* The detach function (ca_detach) */
2527 static int
2528 wm_detach(device_t self, int flags __unused)
2529 {
2530 struct wm_softc *sc = device_private(self);
2531 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2532 int i;
2533 #ifndef WM_MPSAFE
2534 int s;
2535 #endif
2536
2537 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2538 return 0;
2539
2540 #ifndef WM_MPSAFE
2541 s = splnet();
2542 #endif
2543 /* Stop the interface. Callouts are stopped in it. */
2544 wm_stop(ifp, 1);
2545
2546 #ifndef WM_MPSAFE
2547 splx(s);
2548 #endif
2549
2550 pmf_device_deregister(self);
2551
2552 /* Tell the firmware about the release */
2553 WM_CORE_LOCK(sc);
2554 wm_release_manageability(sc);
2555 wm_release_hw_control(sc);
2556 WM_CORE_UNLOCK(sc);
2557
2558 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2559
2560 /* Delete all remaining media. */
2561 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2562
2563 ether_ifdetach(ifp);
2564 if_detach(ifp);
2565
2567 /* Unload RX dmamaps and free mbufs */
2568 for (i = 0; i < sc->sc_nrxqueues; i++) {
2569 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
2570 WM_RX_LOCK(rxq);
2571 wm_rxdrain(rxq);
2572 WM_RX_UNLOCK(rxq);
2573 }
2574 /* Must unlock here */
2575
2576 wm_free_txrx_queues(sc);
2577
2578 /* Disestablish the interrupt handler */
2579 for (i = 0; i < sc->sc_nintrs; i++) {
2580 if (sc->sc_ihs[i] != NULL) {
2581 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2582 sc->sc_ihs[i] = NULL;
2583 }
2584 }
2585 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2586
2587 /* Unmap the registers */
2588 if (sc->sc_ss) {
2589 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2590 sc->sc_ss = 0;
2591 }
2592 if (sc->sc_ios) {
2593 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2594 sc->sc_ios = 0;
2595 }
2596 if (sc->sc_flashs) {
2597 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2598 sc->sc_flashs = 0;
2599 }
2600
2601 if (sc->sc_core_lock)
2602 mutex_obj_free(sc->sc_core_lock);
2603
2604 return 0;
2605 }
2606
2607 static bool
2608 wm_suspend(device_t self, const pmf_qual_t *qual)
2609 {
2610 struct wm_softc *sc = device_private(self);
2611
2612 wm_release_manageability(sc);
2613 wm_release_hw_control(sc);
2614 #ifdef WM_WOL
2615 wm_enable_wakeup(sc);
2616 #endif
2617
2618 return true;
2619 }
2620
2621 static bool
2622 wm_resume(device_t self, const pmf_qual_t *qual)
2623 {
2624 struct wm_softc *sc = device_private(self);
2625
2626 wm_init_manageability(sc);
2627
2628 return true;
2629 }
2630
2631 /*
2632 * wm_watchdog: [ifnet interface function]
2633 *
2634 * Watchdog timer handler.
2635 */
2636 static void
2637 wm_watchdog(struct ifnet *ifp)
2638 {
2639 struct wm_softc *sc = ifp->if_softc;
2640 struct wm_txqueue *txq = &sc->sc_txq[0];
2641
2642 /*
2643 * Since we're using delayed interrupts, sweep up
2644 * before we report an error.
2645 */
2646 WM_TX_LOCK(txq);
2647 wm_txeof(sc);
2648 WM_TX_UNLOCK(txq);
2649
2650 if (txq->txq_free != WM_NTXDESC(txq)) {
2651 #ifdef WM_DEBUG
2652 int i, j;
2653 struct wm_txsoft *txs;
2654 #endif
2655 log(LOG_ERR,
2656 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2657 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
2658 txq->txq_next);
2659 ifp->if_oerrors++;
2660 #ifdef WM_DEBUG
2661 for (i = txq->txq_sdirty; i != txq->txq_snext ;
2662 i = WM_NEXTTXS(txq, i)) {
2663 txs = &txq->txq_soft[i];
2664 printf("txs %d tx %d -> %d\n",
2665 i, txs->txs_firstdesc, txs->txs_lastdesc);
2666 for (j = txs->txs_firstdesc; ;
2667 j = WM_NEXTTX(txq, j)) {
2668 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2669 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
2670 printf("\t %#08x%08x\n",
2671 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
2672 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
2673 if (j == txs->txs_lastdesc)
2674 break;
2675 }
2676 }
2677 #endif
2678 /* Reset the interface. */
2679 (void) wm_init(ifp);
2680 }
2681
2682 /* Try to get more packets going. */
2683 ifp->if_start(ifp);
2684 }
2685
2686 /*
2687 * wm_tick:
2688 *
2689 * One second timer, used to check link status, sweep up
2690 * completed transmit jobs, etc.
2691 */
2692 static void
2693 wm_tick(void *arg)
2694 {
2695 struct wm_softc *sc = arg;
2696 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2697 #ifndef WM_MPSAFE
2698 int s;
2699
2700 s = splnet();
2701 #endif
2702
2703 WM_CORE_LOCK(sc);
2704
2705 if (sc->sc_stopping)
2706 goto out;
2707
2708 if (sc->sc_type >= WM_T_82542_2_1) {
2709 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2710 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2711 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2712 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2713 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2714 }
2715
2716 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2717 	ifp->if_ierrors += 0ULL /* ensure quad_t */
2718 	    + CSR_READ(sc, WMREG_CRCERRS)
2719 + CSR_READ(sc, WMREG_ALGNERRC)
2720 + CSR_READ(sc, WMREG_SYMERRC)
2721 + CSR_READ(sc, WMREG_RXERRC)
2722 + CSR_READ(sc, WMREG_SEC)
2723 + CSR_READ(sc, WMREG_CEXTERR)
2724 + CSR_READ(sc, WMREG_RLEC);
2725 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2726
2727 if (sc->sc_flags & WM_F_HAS_MII)
2728 mii_tick(&sc->sc_mii);
2729 else if ((sc->sc_type >= WM_T_82575)
2730 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2731 wm_serdes_tick(sc);
2732 else
2733 wm_tbi_tick(sc);
2734
2735 out:
2736 WM_CORE_UNLOCK(sc);
2737 #ifndef WM_MPSAFE
2738 splx(s);
2739 #endif
2740
2741 if (!sc->sc_stopping)
2742 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2743 }
2744
2745 static int
2746 wm_ifflags_cb(struct ethercom *ec)
2747 {
2748 struct ifnet *ifp = &ec->ec_if;
2749 struct wm_softc *sc = ifp->if_softc;
2750 int change = ifp->if_flags ^ sc->sc_if_flags;
2751 int rc = 0;
2752
2753 WM_CORE_LOCK(sc);
2754
2755 if (change != 0)
2756 sc->sc_if_flags = ifp->if_flags;
2757
2758 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2759 rc = ENETRESET;
2760 goto out;
2761 }
2762
2763 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2764 wm_set_filter(sc);
2765
2766 wm_set_vlan(sc);
2767
2768 out:
2769 WM_CORE_UNLOCK(sc);
2770
2771 return rc;
2772 }
2773
2774 /*
2775 * wm_ioctl: [ifnet interface function]
2776 *
2777 * Handle control requests from the operator.
2778 */
2779 static int
2780 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2781 {
2782 struct wm_softc *sc = ifp->if_softc;
2783 struct ifreq *ifr = (struct ifreq *) data;
2784 struct ifaddr *ifa = (struct ifaddr *)data;
2785 struct sockaddr_dl *sdl;
2786 int s, error;
2787
2788 #ifndef WM_MPSAFE
2789 s = splnet();
2790 #endif
2791 switch (cmd) {
2792 case SIOCSIFMEDIA:
2793 case SIOCGIFMEDIA:
2794 WM_CORE_LOCK(sc);
2795 /* Flow control requires full-duplex mode. */
2796 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2797 (ifr->ifr_media & IFM_FDX) == 0)
2798 ifr->ifr_media &= ~IFM_ETH_FMASK;
2799 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2800 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2801 /* We can do both TXPAUSE and RXPAUSE. */
2802 ifr->ifr_media |=
2803 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2804 }
2805 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2806 }
2807 WM_CORE_UNLOCK(sc);
2808 #ifdef WM_MPSAFE
2809 s = splnet();
2810 #endif
2811 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2812 #ifdef WM_MPSAFE
2813 splx(s);
2814 #endif
2815 break;
2816 case SIOCINITIFADDR:
2817 WM_CORE_LOCK(sc);
2818 if (ifa->ifa_addr->sa_family == AF_LINK) {
2819 sdl = satosdl(ifp->if_dl->ifa_addr);
2820 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2821 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2822 /* unicast address is first multicast entry */
2823 wm_set_filter(sc);
2824 error = 0;
2825 WM_CORE_UNLOCK(sc);
2826 break;
2827 }
2828 WM_CORE_UNLOCK(sc);
2829 /*FALLTHROUGH*/
2830 default:
2831 #ifdef WM_MPSAFE
2832 s = splnet();
2833 #endif
2834 /* It may call wm_start, so unlock here */
2835 error = ether_ioctl(ifp, cmd, data);
2836 #ifdef WM_MPSAFE
2837 splx(s);
2838 #endif
2839 if (error != ENETRESET)
2840 break;
2841
2842 error = 0;
2843
2844 if (cmd == SIOCSIFCAP) {
2845 error = (*ifp->if_init)(ifp);
2846 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2847 ;
2848 else if (ifp->if_flags & IFF_RUNNING) {
2849 /*
2850 * Multicast list has changed; set the hardware filter
2851 * accordingly.
2852 */
2853 WM_CORE_LOCK(sc);
2854 wm_set_filter(sc);
2855 WM_CORE_UNLOCK(sc);
2856 }
2857 break;
2858 }
2859
2860 #ifndef WM_MPSAFE
2861 splx(s);
2862 #endif
2863 return error;
2864 }
2865
2866 /* MAC address related */
2867
2868 /*
2869  * Get the offset of the MAC address and return it.
2870  * If an error occurs, offset 0 is used.
2871 */
2872 static uint16_t
2873 wm_check_alt_mac_addr(struct wm_softc *sc)
2874 {
2875 uint16_t myea[ETHER_ADDR_LEN / 2];
2876 uint16_t offset = NVM_OFF_MACADDR;
2877
2878 /* Try to read alternative MAC address pointer */
2879 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2880 return 0;
2881
2882 	/* Check whether the pointer is valid or not. */
2883 if ((offset == 0x0000) || (offset == 0xffff))
2884 return 0;
2885
2886 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
2887 /*
2888 	 * Check whether the alternative MAC address is valid or not.
2889 	 * Some cards have a non-0xffff pointer but don't actually use
2890 	 * an alternative MAC address.
2891 *
2892 * Check whether the broadcast bit is set or not.
2893 */
2894 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2895 if (((myea[0] & 0xff) & 0x01) == 0)
2896 return offset; /* Found */
2897
2898 /* Not found */
2899 return 0;
2900 }
2901
2902 static int
2903 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2904 {
2905 uint16_t myea[ETHER_ADDR_LEN / 2];
2906 uint16_t offset = NVM_OFF_MACADDR;
2907 int do_invert = 0;
2908
2909 switch (sc->sc_type) {
2910 case WM_T_82580:
2911 case WM_T_I350:
2912 case WM_T_I354:
2913 /* EEPROM Top Level Partitioning */
2914 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
2915 break;
2916 case WM_T_82571:
2917 case WM_T_82575:
2918 case WM_T_82576:
2919 case WM_T_80003:
2920 case WM_T_I210:
2921 case WM_T_I211:
2922 offset = wm_check_alt_mac_addr(sc);
2923 if (offset == 0)
2924 if ((sc->sc_funcid & 0x01) == 1)
2925 do_invert = 1;
2926 break;
2927 default:
2928 if ((sc->sc_funcid & 0x01) == 1)
2929 do_invert = 1;
2930 break;
2931 }
2932
2933 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2934 myea) != 0)
2935 goto bad;
2936
2937 enaddr[0] = myea[0] & 0xff;
2938 enaddr[1] = myea[0] >> 8;
2939 enaddr[2] = myea[1] & 0xff;
2940 enaddr[3] = myea[1] >> 8;
2941 enaddr[4] = myea[2] & 0xff;
2942 enaddr[5] = myea[2] >> 8;
2943
2944 /*
2945 * Toggle the LSB of the MAC address on the second port
2946 * of some dual port cards.
2947 */
2948 if (do_invert != 0)
2949 enaddr[5] ^= 1;
2950
2951 return 0;
2952
2953 bad:
2954 return -1;
2955 }
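/*
 * A worked example of the unpacking above: NVM words hold the MAC
 * address low byte first, so myea[] = { 0x1100, 0x3322, 0x5544 }
 * becomes 00:11:22:33:44:55.  On the affected dual port cards the
 * second port then flips the least significant bit, turning a
 * ...:55 first port into a ...:54 second port.
 */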
2956
2957 /*
2958 * wm_set_ral:
2959 *
2960  *	Set an entry in the receive address list.
2961 */
2962 static void
2963 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2964 {
2965 uint32_t ral_lo, ral_hi;
2966
2967 if (enaddr != NULL) {
2968 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2969 (enaddr[3] << 24);
2970 ral_hi = enaddr[4] | (enaddr[5] << 8);
2971 ral_hi |= RAL_AV;
2972 } else {
2973 ral_lo = 0;
2974 ral_hi = 0;
2975 }
2976
2977 if (sc->sc_type >= WM_T_82544) {
2978 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2979 ral_lo);
2980 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2981 ral_hi);
2982 } else {
2983 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2984 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2985 }
2986 }
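/*
 * For instance, wm_set_ral() packs 00:11:22:33:44:55 as
 * ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV: the bytes
 * go in low byte first, with the Address Valid bit set in the
 * high register.
 */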
2987
2988 /*
2989 * wm_mchash:
2990 *
2991 * Compute the hash of the multicast address for the 4096-bit
2992 * multicast filter.
2993 */
2994 static uint32_t
2995 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2996 {
2997 static const int lo_shift[4] = { 4, 3, 2, 0 };
2998 static const int hi_shift[4] = { 4, 5, 6, 8 };
2999 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3000 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3001 uint32_t hash;
3002
3003 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3004 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3005 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3006 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3007 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3008 return (hash & 0x3ff);
3009 }
3010 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3011 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3012
3013 return (hash & 0xfff);
3014 }
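/*
 * Worked example for the default hash type (sc_mchash_type == 0) on
 * a non-ICH/PCH chip: for 01:00:5e:00:00:01, enaddr[4] == 0x00 and
 * enaddr[5] == 0x01, so hash = (0x00 >> 4) | (0x01 << 8) = 0x100.
 * wm_set_filter() then uses hash >> 5 (8) as the MTA register index
 * and hash & 0x1f (0) as the bit number within that register.
 */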
3015
3016 /*
3017 * wm_set_filter:
3018 *
3019 * Set up the receive filter.
3020 */
3021 static void
3022 wm_set_filter(struct wm_softc *sc)
3023 {
3024 struct ethercom *ec = &sc->sc_ethercom;
3025 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3026 struct ether_multi *enm;
3027 struct ether_multistep step;
3028 bus_addr_t mta_reg;
3029 uint32_t hash, reg, bit;
3030 int i, size, max;
3031
3032 if (sc->sc_type >= WM_T_82544)
3033 mta_reg = WMREG_CORDOVA_MTA;
3034 else
3035 mta_reg = WMREG_MTA;
3036
3037 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3038
3039 if (ifp->if_flags & IFF_BROADCAST)
3040 sc->sc_rctl |= RCTL_BAM;
3041 if (ifp->if_flags & IFF_PROMISC) {
3042 sc->sc_rctl |= RCTL_UPE;
3043 goto allmulti;
3044 }
3045
3046 /*
3047 * Set the station address in the first RAL slot, and
3048 * clear the remaining slots.
3049 */
3050 if (sc->sc_type == WM_T_ICH8)
3051 		size = WM_RAL_TABSIZE_ICH8 - 1;
3052 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3053 || (sc->sc_type == WM_T_PCH))
3054 size = WM_RAL_TABSIZE_ICH8;
3055 else if (sc->sc_type == WM_T_PCH2)
3056 size = WM_RAL_TABSIZE_PCH2;
3057 else if (sc->sc_type == WM_T_PCH_LPT)
3058 size = WM_RAL_TABSIZE_PCH_LPT;
3059 else if (sc->sc_type == WM_T_82575)
3060 size = WM_RAL_TABSIZE_82575;
3061 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3062 size = WM_RAL_TABSIZE_82576;
3063 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3064 size = WM_RAL_TABSIZE_I350;
3065 else
3066 size = WM_RAL_TABSIZE;
3067 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3068
3069 if (sc->sc_type == WM_T_PCH_LPT) {
3070 i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3071 switch (i) {
3072 case 0:
3073 /* We can use all entries */
3074 max = size;
3075 break;
3076 case 1:
3077 /* Only RAR[0] */
3078 max = 1;
3079 break;
3080 default:
3081 /* available SHRA + RAR[0] */
3082 max = i + 1;
3083 }
3084 } else
3085 max = size;
3086 for (i = 1; i < size; i++) {
3087 if (i < max)
3088 wm_set_ral(sc, NULL, i);
3089 }
3090
3091 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3092 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3093 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
3094 size = WM_ICH8_MC_TABSIZE;
3095 else
3096 size = WM_MC_TABSIZE;
3097 /* Clear out the multicast table. */
3098 for (i = 0; i < size; i++)
3099 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3100
3101 ETHER_FIRST_MULTI(step, ec, enm);
3102 while (enm != NULL) {
3103 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3104 /*
3105 * We must listen to a range of multicast addresses.
3106 * For now, just accept all multicasts, rather than
3107 * trying to set only those filter bits needed to match
3108 * the range. (At this time, the only use of address
3109 * ranges is for IP multicast routing, for which the
3110 * range is big enough to require all bits set.)
3111 */
3112 goto allmulti;
3113 }
3114
3115 hash = wm_mchash(sc, enm->enm_addrlo);
3116
3117 reg = (hash >> 5);
3118 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3119 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3120 || (sc->sc_type == WM_T_PCH2)
3121 || (sc->sc_type == WM_T_PCH_LPT))
3122 reg &= 0x1f;
3123 else
3124 reg &= 0x7f;
3125 bit = hash & 0x1f;
3126
3127 hash = CSR_READ(sc, mta_reg + (reg << 2));
3128 hash |= 1U << bit;
3129
3130 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3131 /*
3132 * 82544 Errata 9: Certain register cannot be written
3133 * with particular alignments in PCI-X bus operation
3134 * (FCAH, MTA and VFTA).
3135 */
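			/*
			 * Hence the sequence below: read the adjacent
			 * even-numbered MTA register, write the target
			 * register, then rewrite the neighbour with
			 * its old value, presumably so the update
			 * completes with a write at a safe alignment.
			 */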
3136 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3137 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3138 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3139 } else
3140 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3141
3142 ETHER_NEXT_MULTI(step, enm);
3143 }
3144
3145 ifp->if_flags &= ~IFF_ALLMULTI;
3146 goto setit;
3147
3148 allmulti:
3149 ifp->if_flags |= IFF_ALLMULTI;
3150 sc->sc_rctl |= RCTL_MPE;
3151
3152 setit:
3153 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3154 }
3155
3156 /* Reset and init related */
3157
3158 static void
3159 wm_set_vlan(struct wm_softc *sc)
3160 {
3161 /* Deal with VLAN enables. */
3162 if (VLAN_ATTACHED(&sc->sc_ethercom))
3163 sc->sc_ctrl |= CTRL_VME;
3164 else
3165 sc->sc_ctrl &= ~CTRL_VME;
3166
3167 /* Write the control registers. */
3168 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3169 }
3170
3171 static void
3172 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3173 {
3174 uint32_t gcr;
3175 pcireg_t ctrl2;
3176
3177 gcr = CSR_READ(sc, WMREG_GCR);
3178
3179 /* Only take action if timeout value is defaulted to 0 */
3180 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3181 goto out;
3182
3183 if ((gcr & GCR_CAP_VER2) == 0) {
3184 gcr |= GCR_CMPL_TMOUT_10MS;
3185 goto out;
3186 }
3187
3188 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3189 sc->sc_pcixe_capoff + PCIE_DCSR2);
3190 ctrl2 |= WM_PCIE_DCSR2_16MS;
3191 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3192 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3193
3194 out:
3195 /* Disable completion timeout resend */
3196 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3197
3198 CSR_WRITE(sc, WMREG_GCR, gcr);
3199 }
3200
3201 void
3202 wm_get_auto_rd_done(struct wm_softc *sc)
3203 {
3204 int i;
3205
3206 /* wait for eeprom to reload */
3207 switch (sc->sc_type) {
3208 case WM_T_82571:
3209 case WM_T_82572:
3210 case WM_T_82573:
3211 case WM_T_82574:
3212 case WM_T_82583:
3213 case WM_T_82575:
3214 case WM_T_82576:
3215 case WM_T_82580:
3216 case WM_T_I350:
3217 case WM_T_I354:
3218 case WM_T_I210:
3219 case WM_T_I211:
3220 case WM_T_80003:
3221 case WM_T_ICH8:
3222 case WM_T_ICH9:
3223 for (i = 0; i < 10; i++) {
3224 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3225 break;
3226 delay(1000);
3227 }
3228 if (i == 10) {
3229 log(LOG_ERR, "%s: auto read from eeprom failed to "
3230 "complete\n", device_xname(sc->sc_dev));
3231 }
3232 break;
3233 default:
3234 break;
3235 }
3236 }
3237
3238 void
3239 wm_lan_init_done(struct wm_softc *sc)
3240 {
3241 uint32_t reg = 0;
3242 int i;
3243
3244 /* wait for eeprom to reload */
3245 switch (sc->sc_type) {
3246 case WM_T_ICH10:
3247 case WM_T_PCH:
3248 case WM_T_PCH2:
3249 case WM_T_PCH_LPT:
3250 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3251 reg = CSR_READ(sc, WMREG_STATUS);
3252 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3253 break;
3254 delay(100);
3255 }
3256 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3257 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3258 "complete\n", device_xname(sc->sc_dev), __func__);
3259 }
3260 break;
3261 default:
3262 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3263 __func__);
3264 break;
3265 }
3266
3267 reg &= ~STATUS_LAN_INIT_DONE;
3268 CSR_WRITE(sc, WMREG_STATUS, reg);
3269 }
3270
3271 void
3272 wm_get_cfg_done(struct wm_softc *sc)
3273 {
3274 int mask;
3275 uint32_t reg;
3276 int i;
3277
3278 /* wait for eeprom to reload */
3279 switch (sc->sc_type) {
3280 case WM_T_82542_2_0:
3281 case WM_T_82542_2_1:
3282 /* null */
3283 break;
3284 case WM_T_82543:
3285 case WM_T_82544:
3286 case WM_T_82540:
3287 case WM_T_82545:
3288 case WM_T_82545_3:
3289 case WM_T_82546:
3290 case WM_T_82546_3:
3291 case WM_T_82541:
3292 case WM_T_82541_2:
3293 case WM_T_82547:
3294 case WM_T_82547_2:
3295 case WM_T_82573:
3296 case WM_T_82574:
3297 case WM_T_82583:
3298 /* generic */
3299 delay(10*1000);
3300 break;
3301 case WM_T_80003:
3302 case WM_T_82571:
3303 case WM_T_82572:
3304 case WM_T_82575:
3305 case WM_T_82576:
3306 case WM_T_82580:
3307 case WM_T_I350:
3308 case WM_T_I354:
3309 case WM_T_I210:
3310 case WM_T_I211:
3311 if (sc->sc_type == WM_T_82571) {
3312 /* Only 82571 shares port 0 */
3313 mask = EEMNGCTL_CFGDONE_0;
3314 } else
3315 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3316 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3317 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3318 break;
3319 delay(1000);
3320 }
3321 if (i >= WM_PHY_CFG_TIMEOUT) {
3322 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3323 device_xname(sc->sc_dev), __func__));
3324 }
3325 break;
3326 case WM_T_ICH8:
3327 case WM_T_ICH9:
3328 case WM_T_ICH10:
3329 case WM_T_PCH:
3330 case WM_T_PCH2:
3331 case WM_T_PCH_LPT:
3332 delay(10*1000);
3333 if (sc->sc_type >= WM_T_ICH10)
3334 wm_lan_init_done(sc);
3335 else
3336 wm_get_auto_rd_done(sc);
3337
3338 reg = CSR_READ(sc, WMREG_STATUS);
3339 if ((reg & STATUS_PHYRA) != 0)
3340 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3341 break;
3342 default:
3343 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3344 __func__);
3345 break;
3346 }
3347 }
3348
3349 /* Init hardware bits */
3350 void
3351 wm_initialize_hardware_bits(struct wm_softc *sc)
3352 {
3353 uint32_t tarc0, tarc1, reg;
3354
3355 /* For 82571 variant, 80003 and ICHs */
3356 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3357 || (sc->sc_type >= WM_T_80003)) {
3358
3359 /* Transmit Descriptor Control 0 */
3360 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3361 reg |= TXDCTL_COUNT_DESC;
3362 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3363
3364 /* Transmit Descriptor Control 1 */
3365 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3366 reg |= TXDCTL_COUNT_DESC;
3367 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3368
3369 /* TARC0 */
3370 tarc0 = CSR_READ(sc, WMREG_TARC0);
3371 switch (sc->sc_type) {
3372 case WM_T_82571:
3373 case WM_T_82572:
3374 case WM_T_82573:
3375 case WM_T_82574:
3376 case WM_T_82583:
3377 case WM_T_80003:
3378 /* Clear bits 30..27 */
3379 tarc0 &= ~__BITS(30, 27);
3380 break;
3381 default:
3382 break;
3383 }
3384
3385 switch (sc->sc_type) {
3386 case WM_T_82571:
3387 case WM_T_82572:
3388 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3389
3390 tarc1 = CSR_READ(sc, WMREG_TARC1);
3391 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3392 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3393 /* 8257[12] Errata No.7 */
3394 			tarc1 |= __BIT(22);	/* TARC1 bit 22 */
3395
3396 /* TARC1 bit 28 */
3397 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3398 tarc1 &= ~__BIT(28);
3399 else
3400 tarc1 |= __BIT(28);
3401 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3402
3403 /*
3404 * 8257[12] Errata No.13
3405 			 * Disable Dynamic Clock Gating.
3406 */
3407 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3408 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3409 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3410 break;
3411 case WM_T_82573:
3412 case WM_T_82574:
3413 case WM_T_82583:
3414 if ((sc->sc_type == WM_T_82574)
3415 || (sc->sc_type == WM_T_82583))
3416 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3417
3418 /* Extended Device Control */
3419 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3420 reg &= ~__BIT(23); /* Clear bit 23 */
3421 reg |= __BIT(22); /* Set bit 22 */
3422 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3423
3424 /* Device Control */
3425 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3426 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3427
3428 /* PCIe Control Register */
3429 /*
3430 * 82573 Errata (unknown).
3431 *
3432 * 82574 Errata 25 and 82583 Errata 12
3433 * "Dropped Rx Packets":
3434 			 * NVM image version 2.1.4 and newer don't have this bug.
3435 */
3436 reg = CSR_READ(sc, WMREG_GCR);
3437 reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
3438 CSR_WRITE(sc, WMREG_GCR, reg);
3439
3440 if ((sc->sc_type == WM_T_82574)
3441 || (sc->sc_type == WM_T_82583)) {
3442 /*
3443 * Document says this bit must be set for
3444 * proper operation.
3445 */
3446 reg = CSR_READ(sc, WMREG_GCR);
3447 reg |= __BIT(22);
3448 CSR_WRITE(sc, WMREG_GCR, reg);
3449
3450 /*
3451 				 * Apply a workaround for a hardware erratum
3452 				 * documented in the errata docs. It fixes an
3453 				 * issue where some error-prone or unreliable
3454 				 * PCIe completions occur, particularly with
3455 				 * ASPM enabled. Without the fix, the issue
3456 				 * can cause Tx timeouts.
3457 */
3458 reg = CSR_READ(sc, WMREG_GCR2);
3459 reg |= __BIT(0);
3460 CSR_WRITE(sc, WMREG_GCR2, reg);
3461 }
3462 break;
3463 case WM_T_80003:
3464 /* TARC0 */
3465 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3466 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3467 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3468
3469 /* TARC1 bit 28 */
3470 tarc1 = CSR_READ(sc, WMREG_TARC1);
3471 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3472 tarc1 &= ~__BIT(28);
3473 else
3474 tarc1 |= __BIT(28);
3475 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3476 break;
3477 case WM_T_ICH8:
3478 case WM_T_ICH9:
3479 case WM_T_ICH10:
3480 case WM_T_PCH:
3481 case WM_T_PCH2:
3482 case WM_T_PCH_LPT:
3483 /* TARC 0 */
3484 if (sc->sc_type == WM_T_ICH8) {
3485 /* Set TARC0 bits 29 and 28 */
3486 tarc0 |= __BITS(29, 28);
3487 }
3488 /* Set TARC0 bits 23,24,26,27 */
3489 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3490
3491 /* CTRL_EXT */
3492 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3493 reg |= __BIT(22); /* Set bit 22 */
3494 /*
3495 * Enable PHY low-power state when MAC is at D3
3496 * w/o WoL
3497 */
3498 if (sc->sc_type >= WM_T_PCH)
3499 reg |= CTRL_EXT_PHYPDEN;
3500 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3501
3502 /* TARC1 */
3503 tarc1 = CSR_READ(sc, WMREG_TARC1);
3504 /* bit 28 */
3505 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3506 tarc1 &= ~__BIT(28);
3507 else
3508 tarc1 |= __BIT(28);
3509 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3510 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3511
3512 /* Device Status */
3513 if (sc->sc_type == WM_T_ICH8) {
3514 reg = CSR_READ(sc, WMREG_STATUS);
3515 reg &= ~__BIT(31);
3516 CSR_WRITE(sc, WMREG_STATUS, reg);
3518 		}
3519
3520 /*
3521 		 * To work around a descriptor data corruption issue during
3522 		 * NFS v2 UDP traffic, just disable the NFS filtering
3523 * capability.
3524 */
3525 reg = CSR_READ(sc, WMREG_RFCTL);
3526 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3527 CSR_WRITE(sc, WMREG_RFCTL, reg);
3528 break;
3529 default:
3530 break;
3531 }
3532 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3533
3534 /*
3535 * 8257[12] Errata No.52 and some others.
3536 * Avoid RSS Hash Value bug.
3537 */
3538 switch (sc->sc_type) {
3539 case WM_T_82571:
3540 case WM_T_82572:
3541 case WM_T_82573:
3542 case WM_T_80003:
3543 case WM_T_ICH8:
3544 reg = CSR_READ(sc, WMREG_RFCTL);
3545 		reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
3546 CSR_WRITE(sc, WMREG_RFCTL, reg);
3547 break;
3548 default:
3549 break;
3550 }
3551 }
3552 }
3553
3554 static uint32_t
3555 wm_rxpbs_adjust_82580(uint32_t val)
3556 {
3557 uint32_t rv = 0;
3558
3559 if (val < __arraycount(wm_82580_rxpbs_table))
3560 rv = wm_82580_rxpbs_table[val];
3561
3562 return rv;
3563 }
3564
3565 /*
3566 * wm_reset:
3567 *
3568  *	Reset the chip to a known state.
3569 */
3570 static void
3571 wm_reset(struct wm_softc *sc)
3572 {
3573 int phy_reset = 0;
3574 int i, error = 0;
3575 uint32_t reg, mask;
3576
3577 /*
3578 * Allocate on-chip memory according to the MTU size.
3579 * The Packet Buffer Allocation register must be written
3580 * before the chip is reset.
3581 */
3582 switch (sc->sc_type) {
3583 case WM_T_82547:
3584 case WM_T_82547_2:
3585 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3586 PBA_22K : PBA_30K;
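		/*
		 * The 82547 splits a 40KB packet buffer between Rx and Tx:
		 * sc_pba is the Rx share, so with PBA_30K the Tx FIFO set
		 * up below is the remaining (40K - 30K) = 10KB, starting
		 * right after the Rx area.
		 */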
3587 for (i = 0; i < sc->sc_ntxqueues; i++) {
3588 struct wm_txqueue *txq = &sc->sc_txq[i];
3589 txq->txq_fifo_head = 0;
3590 txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3591 txq->txq_fifo_size =
3592 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3593 txq->txq_fifo_stall = 0;
3594 }
3595 break;
3596 case WM_T_82571:
3597 case WM_T_82572:
3598 	case WM_T_82575: /* XXX need special handling for jumbo frames */
3599 case WM_T_80003:
3600 sc->sc_pba = PBA_32K;
3601 break;
3602 case WM_T_82573:
3603 sc->sc_pba = PBA_12K;
3604 break;
3605 case WM_T_82574:
3606 case WM_T_82583:
3607 sc->sc_pba = PBA_20K;
3608 break;
3609 case WM_T_82576:
3610 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3611 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3612 break;
3613 case WM_T_82580:
3614 case WM_T_I350:
3615 case WM_T_I354:
3616 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3617 break;
3618 case WM_T_I210:
3619 case WM_T_I211:
3620 sc->sc_pba = PBA_34K;
3621 break;
3622 case WM_T_ICH8:
3623 /* Workaround for a bit corruption issue in FIFO memory */
3624 sc->sc_pba = PBA_8K;
3625 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3626 break;
3627 case WM_T_ICH9:
3628 case WM_T_ICH10:
3629 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3630 PBA_14K : PBA_10K;
3631 break;
3632 case WM_T_PCH:
3633 case WM_T_PCH2:
3634 case WM_T_PCH_LPT:
3635 sc->sc_pba = PBA_26K;
3636 break;
3637 default:
3638 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3639 PBA_40K : PBA_48K;
3640 break;
3641 }
3642 /*
3643 * Only old or non-multiqueue devices have the PBA register
3644 * XXX Need special handling for 82575.
3645 */
3646 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3647 || (sc->sc_type == WM_T_82575))
3648 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3649
3650 /* Prevent the PCI-E bus from sticking */
3651 if (sc->sc_flags & WM_F_PCIE) {
3652 int timeout = 800;
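		/* 800 iterations of delay(100) bounds the wait at ~80ms. */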
3653
3654 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3655 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3656
3657 while (timeout--) {
3658 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3659 == 0)
3660 break;
3661 delay(100);
3662 }
3663 }
3664
3665 /* Set the completion timeout for interface */
3666 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3667 || (sc->sc_type == WM_T_82580)
3668 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3669 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3670 wm_set_pcie_completion_timeout(sc);
3671
3672 /* Clear interrupt */
3673 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3674 if (sc->sc_nintrs > 1) {
3675 if (sc->sc_type != WM_T_82574) {
3676 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3677 CSR_WRITE(sc, WMREG_EIAC, 0);
3678 } else {
3679 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3680 }
3681 }
3682
3683 /* Stop the transmit and receive processes. */
3684 CSR_WRITE(sc, WMREG_RCTL, 0);
3685 sc->sc_rctl &= ~RCTL_EN;
3686 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3687 CSR_WRITE_FLUSH(sc);
3688
3689 /* XXX set_tbi_sbp_82543() */
3690
3691 delay(10*1000);
3692
3693 /* Must acquire the MDIO ownership before MAC reset */
3694 switch (sc->sc_type) {
3695 case WM_T_82573:
3696 case WM_T_82574:
3697 case WM_T_82583:
3698 error = wm_get_hw_semaphore_82573(sc);
3699 break;
3700 default:
3701 break;
3702 }
3703
3704 /*
3705 * 82541 Errata 29? & 82547 Errata 28?
3706 * See also the description about PHY_RST bit in CTRL register
3707 * in 8254x_GBe_SDM.pdf.
3708 */
3709 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3710 CSR_WRITE(sc, WMREG_CTRL,
3711 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3712 CSR_WRITE_FLUSH(sc);
3713 delay(5000);
3714 }
3715
3716 switch (sc->sc_type) {
3717 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3718 case WM_T_82541:
3719 case WM_T_82541_2:
3720 case WM_T_82547:
3721 case WM_T_82547_2:
3722 /*
3723 * On some chipsets, a reset through a memory-mapped write
3724 * cycle can cause the chip to reset before completing the
3725 	 * write cycle. This causes a major headache that can be
3726 * avoided by issuing the reset via indirect register writes
3727 * through I/O space.
3728 *
3729 * So, if we successfully mapped the I/O BAR at attach time,
3730 * use that. Otherwise, try our luck with a memory-mapped
3731 * reset.
3732 */
3733 if (sc->sc_flags & WM_F_IOH_VALID)
3734 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3735 else
3736 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3737 break;
3738 case WM_T_82545_3:
3739 case WM_T_82546_3:
3740 /* Use the shadow control register on these chips. */
3741 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3742 break;
3743 case WM_T_80003:
3744 mask = swfwphysem[sc->sc_funcid];
3745 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3746 wm_get_swfw_semaphore(sc, mask);
3747 CSR_WRITE(sc, WMREG_CTRL, reg);
3748 wm_put_swfw_semaphore(sc, mask);
3749 break;
3750 case WM_T_ICH8:
3751 case WM_T_ICH9:
3752 case WM_T_ICH10:
3753 case WM_T_PCH:
3754 case WM_T_PCH2:
3755 case WM_T_PCH_LPT:
3756 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3757 if (wm_phy_resetisblocked(sc) == false) {
3758 /*
3759 * Gate automatic PHY configuration by hardware on
3760 * non-managed 82579
3761 */
3762 if ((sc->sc_type == WM_T_PCH2)
3763 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3764 == 0))
3765 wm_gate_hw_phy_config_ich8lan(sc, 1);
3766
3767 reg |= CTRL_PHY_RESET;
3768 phy_reset = 1;
3769 }
3770 wm_get_swfwhw_semaphore(sc);
3771 CSR_WRITE(sc, WMREG_CTRL, reg);
3772 		/* Don't insert a completion barrier when resetting */
3773 delay(20*1000);
3774 wm_put_swfwhw_semaphore(sc);
3775 break;
3776 case WM_T_82580:
3777 case WM_T_I350:
3778 case WM_T_I354:
3779 case WM_T_I210:
3780 case WM_T_I211:
3781 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3782 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3783 CSR_WRITE_FLUSH(sc);
3784 delay(5000);
3785 break;
3786 case WM_T_82542_2_0:
3787 case WM_T_82542_2_1:
3788 case WM_T_82543:
3789 case WM_T_82540:
3790 case WM_T_82545:
3791 case WM_T_82546:
3792 case WM_T_82571:
3793 case WM_T_82572:
3794 case WM_T_82573:
3795 case WM_T_82574:
3796 case WM_T_82575:
3797 case WM_T_82576:
3798 case WM_T_82583:
3799 default:
3800 /* Everything else can safely use the documented method. */
3801 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3802 break;
3803 }
3804
3805 /* Must release the MDIO ownership after MAC reset */
3806 switch (sc->sc_type) {
3807 case WM_T_82573:
3808 case WM_T_82574:
3809 case WM_T_82583:
3810 if (error == 0)
3811 wm_put_hw_semaphore_82573(sc);
3812 break;
3813 default:
3814 break;
3815 }
3816
3817 if (phy_reset != 0)
3818 wm_get_cfg_done(sc);
3819
3820 /* reload EEPROM */
3821 switch (sc->sc_type) {
3822 case WM_T_82542_2_0:
3823 case WM_T_82542_2_1:
3824 case WM_T_82543:
3825 case WM_T_82544:
3826 delay(10);
3827 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3828 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3829 CSR_WRITE_FLUSH(sc);
3830 delay(2000);
3831 break;
3832 case WM_T_82540:
3833 case WM_T_82545:
3834 case WM_T_82545_3:
3835 case WM_T_82546:
3836 case WM_T_82546_3:
3837 delay(5*1000);
3838 /* XXX Disable HW ARPs on ASF enabled adapters */
3839 break;
3840 case WM_T_82541:
3841 case WM_T_82541_2:
3842 case WM_T_82547:
3843 case WM_T_82547_2:
3844 delay(20000);
3845 /* XXX Disable HW ARPs on ASF enabled adapters */
3846 break;
3847 case WM_T_82571:
3848 case WM_T_82572:
3849 case WM_T_82573:
3850 case WM_T_82574:
3851 case WM_T_82583:
3852 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3853 delay(10);
3854 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3855 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3856 CSR_WRITE_FLUSH(sc);
3857 }
3858 /* check EECD_EE_AUTORD */
3859 wm_get_auto_rd_done(sc);
3860 /*
3861 		 * PHY configuration from the NVM starts only after
3862 		 * EECD_AUTO_RD is set.
3863 */
3864 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3865 || (sc->sc_type == WM_T_82583))
3866 delay(25*1000);
3867 break;
3868 case WM_T_82575:
3869 case WM_T_82576:
3870 case WM_T_82580:
3871 case WM_T_I350:
3872 case WM_T_I354:
3873 case WM_T_I210:
3874 case WM_T_I211:
3875 case WM_T_80003:
3876 /* check EECD_EE_AUTORD */
3877 wm_get_auto_rd_done(sc);
3878 break;
3879 case WM_T_ICH8:
3880 case WM_T_ICH9:
3881 case WM_T_ICH10:
3882 case WM_T_PCH:
3883 case WM_T_PCH2:
3884 case WM_T_PCH_LPT:
3885 break;
3886 default:
3887 panic("%s: unknown type\n", __func__);
3888 }
3889
3890 /* Check whether EEPROM is present or not */
3891 switch (sc->sc_type) {
3892 case WM_T_82575:
3893 case WM_T_82576:
3894 case WM_T_82580:
3895 case WM_T_I350:
3896 case WM_T_I354:
3897 case WM_T_ICH8:
3898 case WM_T_ICH9:
3899 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3900 /* Not found */
3901 sc->sc_flags |= WM_F_EEPROM_INVALID;
3902 if (sc->sc_type == WM_T_82575)
3903 wm_reset_init_script_82575(sc);
3904 }
3905 break;
3906 default:
3907 break;
3908 }
3909
3910 if ((sc->sc_type == WM_T_82580)
3911 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3912 /* clear global device reset status bit */
3913 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3914 }
3915
3916 /* Clear any pending interrupt events. */
3917 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3918 reg = CSR_READ(sc, WMREG_ICR);
3919 if (sc->sc_nintrs > 1) {
3920 if (sc->sc_type != WM_T_82574) {
3921 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3922 CSR_WRITE(sc, WMREG_EIAC, 0);
3923 } else
3924 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3925 }
3926
3927 /* reload sc_ctrl */
3928 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3929
3930 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
3931 wm_set_eee_i350(sc);
3932
3933 /* dummy read from WUC */
3934 if (sc->sc_type == WM_T_PCH)
3935 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3936 /*
3937 * For PCH, this write will make sure that any noise will be detected
3938 * as a CRC error and be dropped rather than show up as a bad packet
3939 * to the DMA engine
3940 */
3941 if (sc->sc_type == WM_T_PCH)
3942 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3943
3944 if (sc->sc_type >= WM_T_82544)
3945 CSR_WRITE(sc, WMREG_WUC, 0);
3946
3947 wm_reset_mdicnfg_82580(sc);
3948
3949 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
3950 wm_pll_workaround_i210(sc);
3951 }
3952
3953 /*
3954 * wm_add_rxbuf:
3955 *
3956  *	Add a receive buffer to the indicated descriptor.
3957 */
3958 static int
3959 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
3960 {
3961 struct wm_softc *sc = rxq->rxq_sc;
3962 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
3963 struct mbuf *m;
3964 int error;
3965
3966 KASSERT(WM_RX_LOCKED(rxq));
3967
3968 MGETHDR(m, M_DONTWAIT, MT_DATA);
3969 if (m == NULL)
3970 return ENOBUFS;
3971
3972 MCLGET(m, M_DONTWAIT);
3973 if ((m->m_flags & M_EXT) == 0) {
3974 m_freem(m);
3975 return ENOBUFS;
3976 }
3977
3978 if (rxs->rxs_mbuf != NULL)
3979 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3980
3981 rxs->rxs_mbuf = m;
3982
3983 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3984 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3985 BUS_DMA_READ|BUS_DMA_NOWAIT);
3986 if (error) {
3987 /* XXX XXX XXX */
3988 aprint_error_dev(sc->sc_dev,
3989 "unable to load rx DMA map %d, error = %d\n",
3990 idx, error);
3991 panic("wm_add_rxbuf");
3992 }
3993
3994 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3995 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3996
3997 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3998 if ((sc->sc_rctl & RCTL_EN) != 0)
3999 wm_init_rxdesc(rxq, idx);
4000 } else
4001 wm_init_rxdesc(rxq, idx);
4002
4003 return 0;
4004 }
4005
4006 /*
4007 * wm_rxdrain:
4008 *
4009 * Drain the receive queue.
4010 */
4011 static void
4012 wm_rxdrain(struct wm_rxqueue *rxq)
4013 {
4014 struct wm_softc *sc = rxq->rxq_sc;
4015 struct wm_rxsoft *rxs;
4016 int i;
4017
4018 KASSERT(WM_RX_LOCKED(rxq));
4019
4020 for (i = 0; i < WM_NRXDESC; i++) {
4021 rxs = &rxq->rxq_soft[i];
4022 if (rxs->rxs_mbuf != NULL) {
4023 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4024 m_freem(rxs->rxs_mbuf);
4025 rxs->rxs_mbuf = NULL;
4026 }
4027 }
4028 }
4029
4030
4031 /*
4032 * XXX copy from FreeBSD's sys/net/rss_config.c
4033 */
4034 /*
4035 * RSS secret key, intended to prevent attacks on load-balancing. Its
4036 * effectiveness may be limited by algorithm choice and available entropy
4037 * during the boot.
4038 *
4039 * XXXRW: And that we don't randomize it yet!
4040 *
4041 * This is the default Microsoft RSS specification key which is also
4042 * the Chelsio T5 firmware default key.
4043 */
4044 #define RSS_KEYSIZE 40
4045 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
4046 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
4047 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
4048 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
4049 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
4050 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
4051 };
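/*
 * The CTASSERT in wm_init_rss() checks that this 40-byte key exactly
 * fills RSSRK_NUM_REGS 32-bit registers (i.e. ten of them).
 */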
4052
4053 /*
4054 * Caller must pass an array of size sizeof(rss_key).
4055 *
4056 * XXX
4057  * As if_ixgbe may use this function, it should not be an
4058  * if_wm-specific function.
4059 */
4060 static void
4061 wm_rss_getkey(uint8_t *key)
4062 {
4063
4064 memcpy(key, wm_rss_key, sizeof(wm_rss_key));
4065 }
4066
4067 /*
4068 * Setup registers for RSS.
4069 *
4070  * XXX VMDq is not yet supported.
4071 */
4072 static void
4073 wm_init_rss(struct wm_softc *sc)
4074 {
4075 uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4076 int i;
4077
4078 CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
4079
4080 for (i = 0; i < RETA_NUM_ENTRIES; i++) {
4081 int qid, reta_ent;
4082
4083 qid = i % sc->sc_nrxqueues;
4084 		switch (sc->sc_type) {
4085 case WM_T_82574:
4086 reta_ent = __SHIFTIN(qid,
4087 RETA_ENT_QINDEX_MASK_82574);
4088 break;
4089 case WM_T_82575:
4090 reta_ent = __SHIFTIN(qid,
4091 RETA_ENT_QINDEX1_MASK_82575);
4092 break;
4093 default:
4094 reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
4095 break;
4096 }
4097
4098 reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
4099 reta_reg &= ~RETA_ENTRY_MASK_Q(i);
4100 reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
4101 CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
4102 }
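	/*
	 * With, say, sc_nrxqueues == 4, the loop above fills the redirection
	 * table with the repeating queue pattern 0,1,2,3,... so hashed flows
	 * are spread round-robin across the Rx queues.
	 */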
4103
4104 wm_rss_getkey((uint8_t *)rss_key);
4105 for (i = 0; i < RSSRK_NUM_REGS; i++)
4106 CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
4107
4108 if (sc->sc_type == WM_T_82574)
4109 mrqc = MRQC_ENABLE_RSS_MQ_82574;
4110 else
4111 mrqc = MRQC_ENABLE_RSS_MQ;
4112
4113 	/*
4114 	 * XXX The same as FreeBSD's igb.
4115 	 * Why isn't MRQC_RSS_FIELD_IPV6_EX used?
4116 */
4117 mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
4118 mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
4119 mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
4120 mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
4121
4122 CSR_WRITE(sc, WMREG_MRQC, mrqc);
4123 }
4124
4125 /*
4126  * Adjust the TX and RX queue numbers which the system actually uses.
4127  *
4128  * The numbers are affected by the parameters below:
4129  * - The number of hardware queues
4130 * - The number of MSI-X vectors (= "nvectors" argument)
4131 * - ncpu
4132 */
4133 static void
4134 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
4135 {
4136 int hw_ntxqueues, hw_nrxqueues;
4137
4138 if (nvectors < 3) {
4139 sc->sc_ntxqueues = 1;
4140 sc->sc_nrxqueues = 1;
4141 return;
4142 }
4143
4144 	switch (sc->sc_type) {
4145 case WM_T_82572:
4146 hw_ntxqueues = 2;
4147 hw_nrxqueues = 2;
4148 break;
4149 case WM_T_82574:
4150 hw_ntxqueues = 2;
4151 hw_nrxqueues = 2;
4152 break;
4153 case WM_T_82575:
4154 hw_ntxqueues = 4;
4155 hw_nrxqueues = 4;
4156 break;
4157 case WM_T_82576:
4158 hw_ntxqueues = 16;
4159 hw_nrxqueues = 16;
4160 break;
4161 case WM_T_82580:
4162 case WM_T_I350:
4163 case WM_T_I354:
4164 hw_ntxqueues = 8;
4165 hw_nrxqueues = 8;
4166 break;
4167 case WM_T_I210:
4168 hw_ntxqueues = 4;
4169 hw_nrxqueues = 4;
4170 break;
4171 case WM_T_I211:
4172 hw_ntxqueues = 2;
4173 hw_nrxqueues = 2;
4174 break;
4175 /*
4176 	 * As the Ethernet controllers below do not support MSI-X,
4177 	 * this driver does not use multiqueue on them.
4178 * - WM_T_80003
4179 * - WM_T_ICH8
4180 * - WM_T_ICH9
4181 * - WM_T_ICH10
4182 * - WM_T_PCH
4183 * - WM_T_PCH2
4184 * - WM_T_PCH_LPT
4185 */
4186 default:
4187 hw_ntxqueues = 1;
4188 hw_nrxqueues = 1;
4189 break;
4190 }
4191
4192 /*
4193 	 * As more queues than MSI-X vectors cannot improve scaling, we
4194 	 * limit the number of queues actually used.
4195 	 *
4196 	 * XXX
4197 	 * Currently, we separate TX queue interrupts and RX queue interrupts.
4198 	 * However, the number of MSI-X vectors of recent controllers (such as
4199 	 * the I354) expects that drivers bundle a TX queue interrupt and an RX
4200 	 * queue interrupt into one interrupt, e.g. FreeBSD's igb handles
4201 	 * interrupts in such a way.
4202 */
4203 if (nvectors < hw_ntxqueues + hw_nrxqueues + 1) {
4204 sc->sc_ntxqueues = (nvectors - 1) / 2;
4205 sc->sc_nrxqueues = (nvectors - 1) / 2;
4206 } else {
4207 sc->sc_ntxqueues = hw_ntxqueues;
4208 sc->sc_nrxqueues = hw_nrxqueues;
4209 }
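	/*
	 * Example: an 82576 (16 hardware queues) probed with nvectors == 5
	 * takes the first branch and gets (5 - 1) / 2 == 2 TX and 2 RX
	 * queues; the clamps below may reduce those numbers further.
	 */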
4210
4211 /*
4212 	 * As more queues than CPUs cannot improve scaling, we limit
4213 	 * the number of queues actually used.
4214 */
4215 if (ncpu < sc->sc_ntxqueues)
4216 sc->sc_ntxqueues = ncpu;
4217 if (ncpu < sc->sc_nrxqueues)
4218 sc->sc_nrxqueues = ncpu;
4219
4220 /* XXX Currently, this driver supports RX multiqueue only. */
4221 sc->sc_ntxqueues = 1;
4222 }
4223
4224 /*
4225 * Both single interrupt MSI and INTx can use this function.
4226 */
4227 static int
4228 wm_setup_legacy(struct wm_softc *sc)
4229 {
4230 pci_chipset_tag_t pc = sc->sc_pc;
4231 const char *intrstr = NULL;
4232 char intrbuf[PCI_INTRSTR_LEN];
4233 int error;
4234
4235 error = wm_alloc_txrx_queues(sc);
4236 if (error) {
4237 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4238 error);
4239 return ENOMEM;
4240 }
4241 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
4242 sizeof(intrbuf));
4243 #ifdef WM_MPSAFE
4244 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
4245 #endif
4246 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
4247 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
4248 if (sc->sc_ihs[0] == NULL) {
4249 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
4250 (pci_intr_type(sc->sc_intrs[0])
4251 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
4252 return ENOMEM;
4253 }
4254
4255 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4256 sc->sc_nintrs = 1;
4257 return 0;
4258 }
4259
4260 static int
4261 wm_setup_msix(struct wm_softc *sc)
4262 {
4263 void *vih;
4264 kcpuset_t *affinity;
4265 int qidx, error, intr_idx, tx_established, rx_established;
4266 pci_chipset_tag_t pc = sc->sc_pc;
4267 const char *intrstr = NULL;
4268 char intrbuf[PCI_INTRSTR_LEN];
4269 char intr_xname[INTRDEVNAMEBUF];
4270 /*
4271 	 * To avoid other devices' interrupts, the affinity of Tx/Rx
4272 	 * interrupts starts from CPU#1.
4273 */
4274 int affinity_offset = 1;
4275
4276 error = wm_alloc_txrx_queues(sc);
4277 if (error) {
4278 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4279 error);
4280 return ENOMEM;
4281 }
4282
4283 kcpuset_create(&affinity, false);
4284 intr_idx = 0;
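	/*
	 * Vector layout: indices 0..sc_ntxqueues-1 serve TX, the next
	 * sc_nrxqueues serve RX, and the final vector handles link status,
	 * matching the sc_nintrs computation near the end of this function.
	 */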
4285
4286 /*
4287 * TX
4288 */
4289 tx_established = 0;
4290 for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
4291 struct wm_txqueue *txq = &sc->sc_txq[qidx];
4292 int affinity_to = (affinity_offset + intr_idx) % ncpu;
4293
4294 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4295 sizeof(intrbuf));
4296 #ifdef WM_MPSAFE
4297 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4298 PCI_INTR_MPSAFE, true);
4299 #endif
4300 memset(intr_xname, 0, sizeof(intr_xname));
4301 snprintf(intr_xname, sizeof(intr_xname), "%sTX%d",
4302 device_xname(sc->sc_dev), qidx);
4303 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4304 IPL_NET, wm_txintr_msix, txq, intr_xname);
4305 if (vih == NULL) {
4306 aprint_error_dev(sc->sc_dev,
4307 "unable to establish MSI-X(for TX)%s%s\n",
4308 intrstr ? " at " : "",
4309 intrstr ? intrstr : "");
4310
4311 goto fail_0;
4312 }
4313 kcpuset_zero(affinity);
4314 /* Round-robin affinity */
4315 kcpuset_set(affinity, affinity_to);
4316 error = interrupt_distribute(vih, affinity, NULL);
4317 if (error == 0) {
4318 aprint_normal_dev(sc->sc_dev,
4319 "for TX interrupting at %s affinity to %u\n",
4320 intrstr, affinity_to);
4321 } else {
4322 aprint_normal_dev(sc->sc_dev,
4323 "for TX interrupting at %s\n", intrstr);
4324 }
4325 sc->sc_ihs[intr_idx] = vih;
4326 txq->txq_id = qidx;
4327 txq->txq_intr_idx = intr_idx;
4328
4329 tx_established++;
4330 intr_idx++;
4331 }
4332
4333 /*
4334 * RX
4335 */
4336 rx_established = 0;
4337 for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
4338 struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4339 int affinity_to = (affinity_offset + intr_idx) % ncpu;
4340
4341 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4342 sizeof(intrbuf));
4343 #ifdef WM_MPSAFE
4344 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4345 PCI_INTR_MPSAFE, true);
4346 #endif
4347 memset(intr_xname, 0, sizeof(intr_xname));
4348 snprintf(intr_xname, sizeof(intr_xname), "%sRX%d",
4349 device_xname(sc->sc_dev), qidx);
4350 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4351 IPL_NET, wm_rxintr_msix, rxq, intr_xname);
4352 if (vih == NULL) {
4353 aprint_error_dev(sc->sc_dev,
4354 "unable to establish MSI-X(for RX)%s%s\n",
4355 intrstr ? " at " : "",
4356 intrstr ? intrstr : "");
4357
4358 goto fail_1;
4359 }
4360 kcpuset_zero(affinity);
4361 /* Round-robin affinity */
4362 kcpuset_set(affinity, affinity_to);
4363 error = interrupt_distribute(vih, affinity, NULL);
4364 if (error == 0) {
4365 aprint_normal_dev(sc->sc_dev,
4366 "for RX interrupting at %s affinity to %u\n",
4367 intrstr, affinity_to);
4368 } else {
4369 aprint_normal_dev(sc->sc_dev,
4370 "for RX interrupting at %s\n", intrstr);
4371 }
4372 sc->sc_ihs[intr_idx] = vih;
4373 rxq->rxq_id = qidx;
4374 rxq->rxq_intr_idx = intr_idx;
4375
4376 rx_established++;
4377 intr_idx++;
4378 }
4379
4380 /*
4381 * LINK
4382 */
4383 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4384 sizeof(intrbuf));
4385 #ifdef WM_MPSAFE
4386 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4387 PCI_INTR_MPSAFE, true);
4388 #endif
4389 memset(intr_xname, 0, sizeof(intr_xname));
4390 snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
4391 device_xname(sc->sc_dev));
4392 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4393 IPL_NET, wm_linkintr_msix, sc, intr_xname);
4394 if (vih == NULL) {
4395 aprint_error_dev(sc->sc_dev,
4396 "unable to establish MSI-X(for LINK)%s%s\n",
4397 intrstr ? " at " : "",
4398 intrstr ? intrstr : "");
4399
4400 goto fail_1;
4401 }
4402 	/* Keep the default affinity for the LINK interrupt */
4403 aprint_normal_dev(sc->sc_dev,
4404 "for LINK interrupting at %s\n", intrstr);
4405 sc->sc_ihs[intr_idx] = vih;
4406 sc->sc_link_intr_idx = intr_idx;
4407
4408 sc->sc_nintrs = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
4409 kcpuset_destroy(affinity);
4410 return 0;
4411
4412 fail_1:
4413 for (qidx = 0; qidx < rx_established; qidx++) {
4414 struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4415 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[rxq->rxq_intr_idx]);
4416 sc->sc_ihs[rxq->rxq_intr_idx] = NULL;
4417 }
4418 fail_0:
4419 for (qidx = 0; qidx < tx_established; qidx++) {
4420 struct wm_txqueue *txq = &sc->sc_txq[qidx];
4421 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[txq->txq_intr_idx]);
4422 sc->sc_ihs[txq->txq_intr_idx] = NULL;
4423 }
4424
4425 kcpuset_destroy(affinity);
4426 return ENOMEM;
4427 }
4428
4429 /*
4430 * wm_init: [ifnet interface function]
4431 *
4432 * Initialize the interface.
4433 */
4434 static int
4435 wm_init(struct ifnet *ifp)
4436 {
4437 struct wm_softc *sc = ifp->if_softc;
4438 int ret;
4439
4440 WM_CORE_LOCK(sc);
4441 ret = wm_init_locked(ifp);
4442 WM_CORE_UNLOCK(sc);
4443
4444 return ret;
4445 }
4446
4447 static int
4448 wm_init_locked(struct ifnet *ifp)
4449 {
4450 struct wm_softc *sc = ifp->if_softc;
4451 int i, j, trynum, error = 0;
4452 uint32_t reg;
4453
4454 KASSERT(WM_CORE_LOCKED(sc));
4455 /*
4456 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4457 	 * There is a small but measurable benefit to avoiding the
4458 	 * adjustment of the descriptor so that the headers are aligned,
4459 	 * for a normal MTU, on such platforms. One possibility is that
4460 	 * the DMA itself is slightly more efficient if the front of the
4461 	 * entire packet (instead of the front of the headers) is aligned.
4462 *
4463 * Note we must always set align_tweak to 0 if we are using
4464 * jumbo frames.
4465 */
4466 #ifdef __NO_STRICT_ALIGNMENT
4467 sc->sc_align_tweak = 0;
4468 #else
4469 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4470 sc->sc_align_tweak = 0;
4471 else
4472 sc->sc_align_tweak = 2;
4473 #endif /* __NO_STRICT_ALIGNMENT */
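	/*
	 * The 2-byte tweak presumably shifts the 14-byte Ethernet header
	 * so that the IP header behind it ends up 4-byte aligned, which
	 * strict-alignment platforms require.
	 */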
4474
4475 /* Cancel any pending I/O. */
4476 wm_stop_locked(ifp, 0);
4477
4478 /* update statistics before reset */
4479 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4480 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4481
4482 /* Reset the chip to a known state. */
4483 wm_reset(sc);
4484
4485 switch (sc->sc_type) {
4486 case WM_T_82571:
4487 case WM_T_82572:
4488 case WM_T_82573:
4489 case WM_T_82574:
4490 case WM_T_82583:
4491 case WM_T_80003:
4492 case WM_T_ICH8:
4493 case WM_T_ICH9:
4494 case WM_T_ICH10:
4495 case WM_T_PCH:
4496 case WM_T_PCH2:
4497 case WM_T_PCH_LPT:
4498 /* AMT based hardware can now take control from firmware */
4499 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
4500 wm_get_hw_control(sc);
4501 break;
4502 default:
4503 break;
4504 }
4505
4506 /* Init hardware bits */
4507 wm_initialize_hardware_bits(sc);
4508
4509 /* Reset the PHY. */
4510 if (sc->sc_flags & WM_F_HAS_MII)
4511 wm_gmii_reset(sc);
4512
4513 /* Calculate (E)ITR value */
4514 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4515 sc->sc_itr = 450; /* For EITR */
4516 } else if (sc->sc_type >= WM_T_82543) {
4517 /*
4518 * Set up the interrupt throttling register (units of 256ns)
4519 * Note that a footnote in Intel's documentation says this
4520 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4521 * or 10Mbit mode. Empirically, it appears to be the case
4522 		 * or 10Mbit mode. Empirically, this also appears to be
4523 		 * true for the 1024ns units of the other
4524 * to divide this value by 4 when the link speed is low.
4525 *
4526 * XXX implement this division at link speed change!
4527 */
4528
4529 /*
4530 * For N interrupts/sec, set this value to:
4531 * 1000000000 / (N * 256). Note that we set the
4532 * absolute and packet timer values to this value
4533 * divided by 4 to get "simple timer" behavior.
4534 */
4535
4536 sc->sc_itr = 1500; /* 2604 ints/sec */
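		/* 1000000000 / (1500 * 256) ~= 2604 interrupts/sec. */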
4537 }
4538
4539 error = wm_init_txrx_queues(sc);
4540 if (error)
4541 goto out;
4542
4543 /*
4544 * Clear out the VLAN table -- we don't use it (yet).
4545 */
4546 CSR_WRITE(sc, WMREG_VET, 0);
4547 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4548 trynum = 10; /* Due to hw errata */
4549 else
4550 trynum = 1;
4551 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4552 for (j = 0; j < trynum; j++)
4553 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4554
4555 /*
4556 * Set up flow-control parameters.
4557 *
4558 * XXX Values could probably stand some tuning.
4559 */
4560 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4561 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4562 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4563 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4564 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4565 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4566 }
4567
4568 sc->sc_fcrtl = FCRTL_DFLT;
4569 if (sc->sc_type < WM_T_82543) {
4570 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4571 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4572 } else {
4573 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4574 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4575 }
4576
4577 if (sc->sc_type == WM_T_80003)
4578 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4579 else
4580 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4581
4582 /* Writes the control register. */
4583 wm_set_vlan(sc);
4584
4585 if (sc->sc_flags & WM_F_HAS_MII) {
4586 int val;
4587
4588 switch (sc->sc_type) {
4589 case WM_T_80003:
4590 case WM_T_ICH8:
4591 case WM_T_ICH9:
4592 case WM_T_ICH10:
4593 case WM_T_PCH:
4594 case WM_T_PCH2:
4595 case WM_T_PCH_LPT:
4596 /*
4597 			 * Set the MAC to wait the maximum time between each
4598 			 * iteration and increase the max iterations when
4599 			 * polling the PHY; this fixes erroneous timeouts at
4600 * 10Mbps.
4601 */
4602 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4603 0xFFFF);
4604 val = wm_kmrn_readreg(sc,
4605 KUMCTRLSTA_OFFSET_INB_PARAM);
4606 val |= 0x3F;
4607 wm_kmrn_writereg(sc,
4608 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4609 break;
4610 default:
4611 break;
4612 }
4613
4614 if (sc->sc_type == WM_T_80003) {
4615 val = CSR_READ(sc, WMREG_CTRL_EXT);
4616 val &= ~CTRL_EXT_LINK_MODE_MASK;
4617 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4618
4619 /* Bypass RX and TX FIFO's */
4620 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4621 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4622 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4623 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4624 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4625 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4626 }
4627 }
4628 #if 0
4629 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4630 #endif
4631
4632 /* Set up checksum offload parameters. */
4633 reg = CSR_READ(sc, WMREG_RXCSUM);
4634 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4635 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4636 reg |= RXCSUM_IPOFL;
4637 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4638 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4639 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4640 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4641 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4642
4643 /* Set up MSI-X */
4644 if (sc->sc_nintrs > 1) {
4645 uint32_t ivar;
4646
4647 if (sc->sc_type == WM_T_82575) {
4648 /* Interrupt control */
4649 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4650 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4651 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4652
4653 /* TX */
4654 for (i = 0; i < sc->sc_ntxqueues; i++) {
4655 struct wm_txqueue *txq = &sc->sc_txq[i];
4656 CSR_WRITE(sc, WMREG_MSIXBM(txq->txq_intr_idx),
4657 EITR_TX_QUEUE(txq->txq_id));
4658 }
4659 /* RX */
4660 for (i = 0; i < sc->sc_nrxqueues; i++) {
4661 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4662 CSR_WRITE(sc, WMREG_MSIXBM(rxq->rxq_intr_idx),
4663 EITR_RX_QUEUE(rxq->rxq_id));
4664 }
4665 /* Link status */
4666 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
4667 EITR_OTHER);
4668 } else if (sc->sc_type == WM_T_82574) {
4669 /* Interrupt control */
4670 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4671 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4672 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4673
4674 ivar = 0;
4675 /* TX */
4676 for (i = 0; i < sc->sc_ntxqueues; i++) {
4677 struct wm_txqueue *txq = &sc->sc_txq[i];
4678 ivar |= __SHIFTIN((IVAR_VALID_82574|txq->txq_intr_idx),
4679 IVAR_TX_MASK_Q_82574(txq->txq_id));
4680 }
4681 /* RX */
4682 for (i = 0; i < sc->sc_nrxqueues; i++) {
4683 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4684 ivar |= __SHIFTIN((IVAR_VALID_82574|rxq->rxq_intr_idx),
4685 IVAR_RX_MASK_Q_82574(rxq->rxq_id));
4686 }
4687 /* Link status */
4688 ivar |= __SHIFTIN((IVAR_VALID_82574|sc->sc_link_intr_idx),
4689 IVAR_OTHER_MASK);
4690 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4691 } else {
4692 /* Interrupt control */
4693 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR
4694 | GPIE_MULTI_MSIX | GPIE_EIAME
4695 | GPIE_PBA);
4696
4697 switch (sc->sc_type) {
4698 case WM_T_82580:
4699 case WM_T_I350:
4700 case WM_T_I354:
4701 case WM_T_I210:
4702 case WM_T_I211:
4703 /* TX */
4704 for (i = 0; i < sc->sc_ntxqueues; i++) {
4705 struct wm_txqueue *txq = &sc->sc_txq[i];
4706 int qid = txq->txq_id;
4707 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4708 ivar &= ~IVAR_TX_MASK_Q(qid);
4709 ivar |= __SHIFTIN(
4710 (txq->txq_intr_idx | IVAR_VALID),
4711 IVAR_TX_MASK_Q(qid));
4712 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
4713 }
4714
4715 /* RX */
4716 for (i = 0; i < sc->sc_nrxqueues; i++) {
4717 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4718 int qid = rxq->rxq_id;
4719 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4720 ivar &= ~IVAR_RX_MASK_Q(qid);
4721 ivar |= __SHIFTIN(
4722 (rxq->rxq_intr_idx | IVAR_VALID),
4723 IVAR_RX_MASK_Q(qid));
4724 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
4725 }
4726 break;
4727 case WM_T_82576:
4728 /* TX */
4729 for (i = 0; i < sc->sc_ntxqueues; i++) {
4730 struct wm_txqueue *txq = &sc->sc_txq[i];
4731 int qid = txq->txq_id;
4732 ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(qid));
4733 ivar &= ~IVAR_TX_MASK_Q_82576(qid);
4734 ivar |= __SHIFTIN(
4735 (txq->txq_intr_idx | IVAR_VALID),
4736 IVAR_TX_MASK_Q_82576(qid));
4737 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), ivar);
4738 }
4739
4740 /* RX */
4741 for (i = 0; i < sc->sc_nrxqueues; i++) {
4742 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4743 int qid = rxq->rxq_id;
4744 ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(qid));
4745 ivar &= ~IVAR_RX_MASK_Q_82576(qid);
4746 ivar |= __SHIFTIN(
4747 (rxq->rxq_intr_idx | IVAR_VALID),
4748 IVAR_RX_MASK_Q_82576(qid));
4749 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), ivar);
4750 }
4751 break;
4752 default:
4753 break;
4754 }
4755
4756 /* Link status */
4757 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
4758 IVAR_MISC_OTHER);
4759 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
4760 }
4761
4762 if (sc->sc_nrxqueues > 1) {
4763 wm_init_rss(sc);
4764
4765 /*
4766 			 * NOTE: Receive Full-Packet Checksum Offload
4767 			 * is mutually exclusive with Multiqueue. However,
4768 			 * this is not the same as TCP/IP checksums, which
4769 			 * still work.
4770 */
4771 reg = CSR_READ(sc, WMREG_RXCSUM);
4772 reg |= RXCSUM_PCSD;
4773 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4774 }
4775 }
4776
4777 /* Set up the interrupt registers. */
4778 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4779 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4780 ICR_RXO | ICR_RXT0;
4781 if (sc->sc_nintrs > 1) {
4782 uint32_t mask;
4783 switch (sc->sc_type) {
4784 case WM_T_82574:
4785 CSR_WRITE(sc, WMREG_EIAC_82574,
4786 WMREG_EIAC_82574_MSIX_MASK);
4787 sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
4788 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4789 break;
4790 default:
4791 if (sc->sc_type == WM_T_82575) {
4792 mask = 0;
4793 for (i = 0; i < sc->sc_ntxqueues; i++) {
4794 struct wm_txqueue *txq = &sc->sc_txq[i];
4795 mask |= EITR_TX_QUEUE(txq->txq_id);
4796 }
4797 for (i = 0; i < sc->sc_nrxqueues; i++) {
4798 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4799 mask |= EITR_RX_QUEUE(rxq->rxq_id);
4800 }
4801 mask |= EITR_OTHER;
4802 } else {
4803 mask = 0;
4804 for (i = 0; i < sc->sc_ntxqueues; i++) {
4805 struct wm_txqueue *txq = &sc->sc_txq[i];
4806 mask |= 1 << txq->txq_intr_idx;
4807 }
4808 for (i = 0; i < sc->sc_nrxqueues; i++) {
4809 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4810 mask |= 1 << rxq->rxq_intr_idx;
4811 }
4812 mask |= 1 << sc->sc_link_intr_idx;
4813 }
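			/*
			 * E.g. one TX, two RX queues and a link vector
			 * assigned sequentially by wm_setup_msix() yield
			 * mask == 0xf (interrupt indices 0 through 3).
			 */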
4814 CSR_WRITE(sc, WMREG_EIAC, mask);
4815 CSR_WRITE(sc, WMREG_EIAM, mask);
4816 CSR_WRITE(sc, WMREG_EIMS, mask);
4817 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
4818 break;
4819 }
4820 } else
4821 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4822
4823 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4824 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4825 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4826 reg = CSR_READ(sc, WMREG_KABGTXD);
4827 reg |= KABGTXD_BGSQLBIAS;
4828 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4829 }
4830
4831 /* Set up the inter-packet gap. */
4832 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4833
4834 if (sc->sc_type >= WM_T_82543) {
4835 /*
4836 		 * XXX 82574 has both ITR and EITR. Set EITR when we use
4837 		 * the multiqueue function with MSI-X.
4838 */
4839 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4840 int qidx;
4841 for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
4842 struct wm_txqueue *txq = &sc->sc_txq[qidx];
4843 CSR_WRITE(sc, WMREG_EITR(txq->txq_intr_idx),
4844 sc->sc_itr);
4845 }
4846 for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
4847 struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4848 CSR_WRITE(sc, WMREG_EITR(rxq->rxq_intr_idx),
4849 sc->sc_itr);
4850 }
4851 /*
4852 			 * Link interrupts occur much less often than TX
4853 			 * and RX interrupts, so we don't tune the
4854 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
4855 			 * FreeBSD's if_igb does.
4856 */
4857 } else
4858 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4859 }
4860
4861 /* Set the VLAN ethernetype. */
4862 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4863
4864 /*
4865 * Set up the transmit control register; we start out with
4866 	 * a collision distance suitable for FDX, but update it when
4867 * we resolve the media type.
4868 */
4869 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4870 | TCTL_CT(TX_COLLISION_THRESHOLD)
4871 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4872 if (sc->sc_type >= WM_T_82571)
4873 sc->sc_tctl |= TCTL_MULR;
4874 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4875
4876 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4877 /* Write TDT after TCTL.EN is set. See the document. */
4878 CSR_WRITE(sc, WMREG_TDT(0), 0);
4879 }
4880
4881 if (sc->sc_type == WM_T_80003) {
4882 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4883 reg &= ~TCTL_EXT_GCEX_MASK;
4884 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4885 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4886 }
4887
4888 /* Set the media. */
4889 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4890 goto out;
4891
4892 /* Configure for OS presence */
4893 wm_init_manageability(sc);
4894
4895 /*
4896 * Set up the receive control register; we actually program
4897 * the register when we set the receive filter. Use multicast
4898 * address offset type 0.
4899 *
4900 * Only the i82544 has the ability to strip the incoming
4901 * CRC, so we don't enable that feature.
4902 */
4903 sc->sc_mchash_type = 0;
4904 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4905 | RCTL_MO(sc->sc_mchash_type);
4906
4907 /*
4908 * The I350 has a bug where it always strips the CRC whether
4909 	 * asked to or not, so ask for stripped CRC here and cope in rxeof.
4910 */
4911 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4912 || (sc->sc_type == WM_T_I210))
4913 sc->sc_rctl |= RCTL_SECRC;
4914
4915 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4916 && (ifp->if_mtu > ETHERMTU)) {
4917 sc->sc_rctl |= RCTL_LPE;
4918 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4919 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4920 }
4921
4922 if (MCLBYTES == 2048) {
4923 sc->sc_rctl |= RCTL_2k;
4924 } else {
4925 if (sc->sc_type >= WM_T_82543) {
4926 switch (MCLBYTES) {
4927 case 4096:
4928 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4929 break;
4930 case 8192:
4931 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4932 break;
4933 case 16384:
4934 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4935 break;
4936 default:
4937 panic("wm_init: MCLBYTES %d unsupported",
4938 MCLBYTES);
4939 break;
4940 }
4941 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4942 }
4943
4944 /* Set the receive filter. */
4945 wm_set_filter(sc);
4946
4947 /* Enable ECC */
4948 switch (sc->sc_type) {
4949 case WM_T_82571:
4950 reg = CSR_READ(sc, WMREG_PBA_ECC);
4951 reg |= PBA_ECC_CORR_EN;
4952 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4953 break;
4954 case WM_T_PCH_LPT:
4955 reg = CSR_READ(sc, WMREG_PBECCSTS);
4956 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4957 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4958
4959 reg = CSR_READ(sc, WMREG_CTRL);
4960 reg |= CTRL_MEHE;
4961 CSR_WRITE(sc, WMREG_CTRL, reg);
4962 break;
4963 default:
4964 break;
4965 }
4966
4967 /* On 575 and later set RDT only if RX enabled */
4968 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4969 int qidx;
4970 for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
4971 struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4972 for (i = 0; i < WM_NRXDESC; i++) {
4973 WM_RX_LOCK(rxq);
4974 wm_init_rxdesc(rxq, i);
4975 WM_RX_UNLOCK(rxq);
4977 			}
4978 }
4979 }
4980
4981 sc->sc_stopping = false;
4982
4983 /* Start the one second link check clock. */
4984 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4985
4986 /* ...all done! */
4987 ifp->if_flags |= IFF_RUNNING;
4988 ifp->if_flags &= ~IFF_OACTIVE;
4989
4990 out:
4991 sc->sc_if_flags = ifp->if_flags;
4992 if (error)
4993 log(LOG_ERR, "%s: interface not running\n",
4994 device_xname(sc->sc_dev));
4995 return error;
4996 }
4997
4998 /*
4999 * wm_stop: [ifnet interface function]
5000 *
5001 * Stop transmission on the interface.
5002 */
5003 static void
5004 wm_stop(struct ifnet *ifp, int disable)
5005 {
5006 struct wm_softc *sc = ifp->if_softc;
5007
5008 WM_CORE_LOCK(sc);
5009 wm_stop_locked(ifp, disable);
5010 WM_CORE_UNLOCK(sc);
5011 }
5012
5013 static void
5014 wm_stop_locked(struct ifnet *ifp, int disable)
5015 {
5016 struct wm_softc *sc = ifp->if_softc;
5017 struct wm_txsoft *txs;
5018 int i, qidx;
5019
5020 KASSERT(WM_CORE_LOCKED(sc));
5021
5022 sc->sc_stopping = true;
5023
5024 /* Stop the one second clock. */
5025 callout_stop(&sc->sc_tick_ch);
5026
5027 /* Stop the 82547 Tx FIFO stall check timer. */
5028 if (sc->sc_type == WM_T_82547)
5029 callout_stop(&sc->sc_txfifo_ch);
5030
5031 if (sc->sc_flags & WM_F_HAS_MII) {
5032 /* Down the MII. */
5033 mii_down(&sc->sc_mii);
5034 } else {
5035 #if 0
5036 /* Should we clear PHY's status properly? */
5037 wm_reset(sc);
5038 #endif
5039 }
5040
5041 /* Stop the transmit and receive processes. */
5042 CSR_WRITE(sc, WMREG_TCTL, 0);
5043 CSR_WRITE(sc, WMREG_RCTL, 0);
5044 sc->sc_rctl &= ~RCTL_EN;
5045
5046 /*
5047 * Clear the interrupt mask to ensure the device cannot assert its
5048 * interrupt line.
5049 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
5050 * service any currently pending or shared interrupt.
5051 */
5052 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5053 sc->sc_icr = 0;
5054 if (sc->sc_nintrs > 1) {
5055 if (sc->sc_type != WM_T_82574) {
5056 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5057 CSR_WRITE(sc, WMREG_EIAC, 0);
5058 } else
5059 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5060 }
5061
5062 /* Release any queued transmit buffers. */
5063 for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
5064 struct wm_txqueue *txq = &sc->sc_txq[qidx];
5065 WM_TX_LOCK(txq);
5066 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5067 txs = &txq->txq_soft[i];
5068 if (txs->txs_mbuf != NULL) {
5069 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5070 m_freem(txs->txs_mbuf);
5071 txs->txs_mbuf = NULL;
5072 }
5073 }
5074 WM_TX_UNLOCK(txq);
5075 }
5076
5077 /* Mark the interface as down and cancel the watchdog timer. */
5078 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5079 ifp->if_timer = 0;
5080
5081 if (disable) {
5082 for (i = 0; i < sc->sc_nrxqueues; i++) {
5083 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5084 WM_RX_LOCK(rxq);
5085 wm_rxdrain(rxq);
5086 WM_RX_UNLOCK(rxq);
5087 }
5088 }
5089
5090 #if 0 /* notyet */
5091 if (sc->sc_type >= WM_T_82544)
5092 CSR_WRITE(sc, WMREG_WUC, 0);
5093 #endif
5094 }
5095
5096 static void
5097 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5098 {
5099 struct mbuf *m;
5100 int i;
5101
5102 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5103 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5104 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5105 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5106 m->m_data, m->m_len, m->m_flags);
5107 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5108 i, i == 1 ? "" : "s");
5109 }
5110
5111 /*
5112 * wm_82547_txfifo_stall:
5113 *
5114 * Callout used to wait for the 82547 Tx FIFO to drain,
5115 * reset the FIFO pointers, and restart packet transmission.
5116 */
5117 static void
5118 wm_82547_txfifo_stall(void *arg)
5119 {
5120 struct wm_softc *sc = arg;
5121 struct wm_txqueue *txq = sc->sc_txq;
5122 #ifndef WM_MPSAFE
5123 int s;
5124
5125 s = splnet();
5126 #endif
5127 WM_TX_LOCK(txq);
5128
5129 if (sc->sc_stopping)
5130 goto out;
5131
5132 if (txq->txq_fifo_stall) {
5133 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
5134 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5135 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5136 /*
5137 * Packets have drained. Stop transmitter, reset
5138 * FIFO pointers, restart transmitter, and kick
5139 * the packet queue.
5140 */
5141 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5142 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5143 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
5144 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
5145 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
5146 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
5147 CSR_WRITE(sc, WMREG_TCTL, tctl);
5148 CSR_WRITE_FLUSH(sc);
5149
5150 txq->txq_fifo_head = 0;
5151 txq->txq_fifo_stall = 0;
5152 wm_start_locked(&sc->sc_ethercom.ec_if);
5153 } else {
5154 /*
5155 * Still waiting for packets to drain; try again in
5156 * another tick.
5157 */
5158 callout_schedule(&sc->sc_txfifo_ch, 1);
5159 }
5160 }
5161
5162 out:
5163 WM_TX_UNLOCK(txq);
5164 #ifndef WM_MPSAFE
5165 splx(s);
5166 #endif
5167 }
5168
5169 /*
5170 * wm_82547_txfifo_bugchk:
5171 *
5172 * Check for bug condition in the 82547 Tx FIFO. We need to
5173 * prevent enqueueing a packet that would wrap around the end
5174  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
5175 *
5176 * We do this by checking the amount of space before the end
5177 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
5178 * the Tx FIFO, wait for all remaining packets to drain, reset
5179 * the internal FIFO pointers to the beginning, and restart
5180 * transmission on the interface.
5181 */
5182 #define WM_FIFO_HDR 0x10
5183 #define WM_82547_PAD_LEN 0x3e0
5184 static int
5185 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5186 {
5187 struct wm_txqueue *txq = &sc->sc_txq[0];
5188 int space = txq->txq_fifo_size - txq->txq_fifo_head;
5189 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
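	/*
	 * E.g. a 1514-byte frame consumes roundup(1514 + 0x10, 0x10) ==
	 * 1536 bytes of FIFO space, including the 16-byte FIFO header.
	 */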
5190
5191 /* Just return if already stalled. */
5192 if (txq->txq_fifo_stall)
5193 return 1;
5194
5195 if (sc->sc_mii.mii_media_active & IFM_FDX) {
5196 /* Stall only occurs in half-duplex mode. */
5197 goto send_packet;
5198 }
5199
5200 if (len >= WM_82547_PAD_LEN + space) {
5201 txq->txq_fifo_stall = 1;
5202 callout_schedule(&sc->sc_txfifo_ch, 1);
5203 return 1;
5204 }
5205
5206 send_packet:
5207 txq->txq_fifo_head += len;
5208 if (txq->txq_fifo_head >= txq->txq_fifo_size)
5209 txq->txq_fifo_head -= txq->txq_fifo_size;
5210
5211 return 0;
5212 }
5213
5214 static int
5215 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5216 {
5217 int error;
5218
5219 /*
5220 * Allocate the control data structures, and create and load the
5221 * DMA map for it.
5222 *
5223 * NOTE: All Tx descriptors must be in the same 4G segment of
5224 * memory. So must Rx descriptors. We simplify by allocating
5225 * both sets within the same 4G segment.
5226 */
5227 if (sc->sc_type < WM_T_82544) {
5228 WM_NTXDESC(txq) = WM_NTXDESC_82542;
5229 txq->txq_desc_size = sizeof(wiseman_txdesc_t) * WM_NTXDESC(txq);
5230 } else {
5231 WM_NTXDESC(txq) = WM_NTXDESC_82544;
5232 txq->txq_desc_size = sizeof(txdescs_t);
5233 }
5234
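	/*
	 * The 0x100000000ULL boundary argument below keeps the allocation
	 * from crossing a 4GB boundary, enforcing the same-4G-segment
	 * requirement noted above.
	 */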
5235 if ((error = bus_dmamem_alloc(sc->sc_dmat, txq->txq_desc_size, PAGE_SIZE,
5236 (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg, 1,
5237 &txq->txq_desc_rseg, 0)) != 0) {
5238 aprint_error_dev(sc->sc_dev,
5239 "unable to allocate TX control data, error = %d\n",
5240 error);
5241 goto fail_0;
5242 }
5243
5244 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
5245 txq->txq_desc_rseg, txq->txq_desc_size,
5246 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
5247 aprint_error_dev(sc->sc_dev,
5248 "unable to map TX control data, error = %d\n", error);
5249 goto fail_1;
5250 }
5251
5252 if ((error = bus_dmamap_create(sc->sc_dmat, txq->txq_desc_size, 1,
5253 txq->txq_desc_size, 0, 0, &txq->txq_desc_dmamap)) != 0) {
5254 aprint_error_dev(sc->sc_dev,
5255 "unable to create TX control data DMA map, error = %d\n",
5256 error);
5257 goto fail_2;
5258 }
5259
5260 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
5261 txq->txq_descs_u, txq->txq_desc_size, NULL, 0)) != 0) {
5262 aprint_error_dev(sc->sc_dev,
5263 "unable to load TX control data DMA map, error = %d\n",
5264 error);
5265 goto fail_3;
5266 }
5267
5268 return 0;
5269
5270 fail_3:
5271 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5272 fail_2:
5273 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5274 txq->txq_desc_size);
5275 fail_1:
5276 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5277 fail_0:
5278 return error;
5279 }
5280
5281 static void
5282 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5283 {
5284
5285 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
5286 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5287 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5288 txq->txq_desc_size);
5289 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5290 }
5291
5292 static int
5293 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5294 {
5295 int error;
5296
5297 /*
5298 * Allocate the control data structures, and create and load the
5299 * DMA map for it.
5300 *
5301 * NOTE: All Tx descriptors must be in the same 4G segment of
5302 * memory. So must Rx descriptors. We simplify by allocating
5303 * both sets within the same 4G segment.
5304 */
5305 rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
5306 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size, PAGE_SIZE,
5307 (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg, 1,
5308 &rxq->rxq_desc_rseg, 0)) != 0) {
5309 aprint_error_dev(sc->sc_dev,
5310 "unable to allocate RX control data, error = %d\n",
5311 error);
5312 goto fail_0;
5313 }
5314
5315 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
5316 rxq->rxq_desc_rseg, rxq->rxq_desc_size,
5317 (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
5318 aprint_error_dev(sc->sc_dev,
5319 "unable to map RX control data, error = %d\n", error);
5320 goto fail_1;
5321 }
5322
5323 if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
5324 rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
5325 aprint_error_dev(sc->sc_dev,
5326 "unable to create RX control data DMA map, error = %d\n",
5327 error);
5328 goto fail_2;
5329 }
5330
5331 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
5332 rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
5333 aprint_error_dev(sc->sc_dev,
5334 "unable to load RX control data DMA map, error = %d\n",
5335 error);
5336 goto fail_3;
5337 }
5338
5339 return 0;
5340
5341 fail_3:
5342 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5343 fail_2:
5344 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5345 rxq->rxq_desc_size);
5346 fail_1:
5347 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5348 fail_0:
5349 return error;
5350 }
5351
5352 static void
5353 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5354 {
5355
5356 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
5357 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5358 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5359 rxq->rxq_desc_size);
5360 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5361 }
5362
5363
5364 static int
5365 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5366 {
5367 int i, error;
5368
5369 /* Create the transmit buffer DMA maps. */
5370 WM_TXQUEUELEN(txq) =
5371 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
5372 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
5373 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5374 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
5375 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
5376 &txq->txq_soft[i].txs_dmamap)) != 0) {
5377 aprint_error_dev(sc->sc_dev,
5378 "unable to create Tx DMA map %d, error = %d\n",
5379 i, error);
5380 goto fail;
5381 }
5382 }
5383
5384 return 0;
5385
5386 fail:
5387 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5388 if (txq->txq_soft[i].txs_dmamap != NULL)
5389 bus_dmamap_destroy(sc->sc_dmat,
5390 txq->txq_soft[i].txs_dmamap);
5391 }
5392 return error;
5393 }
5394
5395 static void
5396 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5397 {
5398 int i;
5399
5400 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5401 if (txq->txq_soft[i].txs_dmamap != NULL)
5402 bus_dmamap_destroy(sc->sc_dmat,
5403 txq->txq_soft[i].txs_dmamap);
5404 }
5405 }
5406
5407 static int
5408 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5409 {
5410 int i, error;
5411
5412 /* Create the receive buffer DMA maps. */
5413 for (i = 0; i < WM_NRXDESC; i++) {
5414 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
5415 MCLBYTES, 0, 0,
5416 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
5417 aprint_error_dev(sc->sc_dev,
5418 "unable to create Rx DMA map %d error = %d\n",
5419 i, error);
5420 goto fail;
5421 }
5422 rxq->rxq_soft[i].rxs_mbuf = NULL;
5423 }
5424
5425 return 0;
5426
5427 fail:
5428 for (i = 0; i < WM_NRXDESC; i++) {
5429 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5430 bus_dmamap_destroy(sc->sc_dmat,
5431 rxq->rxq_soft[i].rxs_dmamap);
5432 }
5433 return error;
5434 }
5435
5436 static void
5437 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5438 {
5439 int i;
5440
5441 for (i = 0; i < WM_NRXDESC; i++) {
5442 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5443 bus_dmamap_destroy(sc->sc_dmat,
5444 rxq->rxq_soft[i].rxs_dmamap);
5445 }
5446 }
5447
5448 /*
5449  * wm_alloc_txrx_queues:
5450 * Allocate {tx,rx}descs and {tx,rx} buffers
5451 */
5452 static int
5453 wm_alloc_txrx_queues(struct wm_softc *sc)
5454 {
5455 int i, error, tx_done, rx_done;
5456
5457 /*
5458 * For transmission
5459 */
5460 sc->sc_txq = kmem_zalloc(sizeof(struct wm_txqueue) * sc->sc_ntxqueues,
5461 KM_SLEEP);
5462 if (sc->sc_txq == NULL) {
5463 aprint_error_dev(sc->sc_dev, "unable to allocate wm_txqueue\n");
5464 error = ENOMEM;
5465 goto fail_0;
5466 }
5467
5468 error = 0;
5469 tx_done = 0;
5470 for (i = 0; i < sc->sc_ntxqueues; i++) {
5471 struct wm_txqueue *txq = &sc->sc_txq[i];
5472 txq->txq_sc = sc;
5473 #ifdef WM_MPSAFE
5474 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5475 #else
5476 txq->txq_lock = NULL;
5477 #endif
5478 error = wm_alloc_tx_descs(sc, txq);
5479 if (error)
5480 break;
5481 error = wm_alloc_tx_buffer(sc, txq);
5482 if (error) {
5483 wm_free_tx_descs(sc, txq);
5484 break;
5485 }
5486 tx_done++;
5487 }
5488 if (error)
5489 goto fail_1;
5490
5491 /*
5492 	 * For receive
5493 */
5494 sc->sc_rxq = kmem_zalloc(sizeof(struct wm_rxqueue) * sc->sc_nrxqueues,
5495 KM_SLEEP);
5496 if (sc->sc_rxq == NULL) {
5497 aprint_error_dev(sc->sc_dev, "unable to allocate wm_rxqueue\n");
5498 error = ENOMEM;
5499 goto fail_1;
5500 }
5501
5502 error = 0;
5503 rx_done = 0;
5504 for (i = 0; i < sc->sc_nrxqueues; i++) {
5505 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5506 rxq->rxq_sc = sc;
5507 #ifdef WM_MPSAFE
5508 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5509 #else
5510 rxq->rxq_lock = NULL;
5511 #endif
5512 error = wm_alloc_rx_descs(sc, rxq);
5513 if (error)
5514 break;
5515
5516 error = wm_alloc_rx_buffer(sc, rxq);
5517 if (error) {
5518 wm_free_rx_descs(sc, rxq);
5519 break;
5520 }
5521
5522 rx_done++;
5523 }
5524 if (error)
5525 goto fail_2;
5526
5527 return 0;
5528
5529 fail_2:
5530 for (i = 0; i < rx_done; i++) {
5531 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5532 wm_free_rx_buffer(sc, rxq);
5533 wm_free_rx_descs(sc, rxq);
5534 if (rxq->rxq_lock)
5535 mutex_obj_free(rxq->rxq_lock);
5536 }
5537 kmem_free(sc->sc_rxq,
5538 sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
5539 fail_1:
5540 for (i = 0; i < tx_done; i++) {
5541 struct wm_txqueue *txq = &sc->sc_txq[i];
5542 wm_free_tx_buffer(sc, txq);
5543 wm_free_tx_descs(sc, txq);
5544 if (txq->txq_lock)
5545 mutex_obj_free(txq->txq_lock);
5546 }
5547 kmem_free(sc->sc_txq,
5548 sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
5549 fail_0:
5550 return error;
5551 }
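
/*
 * Note: on failure the fail_1/fail_2 paths unwind only the tx_done
 * and rx_done queues that were completely set up.  The fully
 * allocated case is torn down by wm_free_txrx_queues().
 */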
5552
5553 /*
5554  * wm_free_txrx_queues:
5555  *	Free {tx,rx} descs and {tx,rx} buffers.
5556 */
5557 static void
5558 wm_free_txrx_queues(struct wm_softc *sc)
5559 {
5560 int i;
5561
5562 for (i = 0; i < sc->sc_nrxqueues; i++) {
5563 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5564 wm_free_rx_buffer(sc, rxq);
5565 wm_free_rx_descs(sc, rxq);
5566 if (rxq->rxq_lock)
5567 mutex_obj_free(rxq->rxq_lock);
5568 }
5569 kmem_free(sc->sc_rxq, sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
5570
5571 for (i = 0; i < sc->sc_ntxqueues; i++) {
5572 struct wm_txqueue *txq = &sc->sc_txq[i];
5573 wm_free_tx_buffer(sc, txq);
5574 wm_free_tx_descs(sc, txq);
5575 if (txq->txq_lock)
5576 mutex_obj_free(txq->txq_lock);
5577 }
5578 kmem_free(sc->sc_txq, sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
5579 }
5580
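/*
 * wm_init_tx_descs:
 *
 *	(Re)initialize the Tx descriptor ring to an empty state.
 */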
5581 static void
5582 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5583 {
5584
5585 KASSERT(WM_TX_LOCKED(txq));
5586
5587 /* Initialize the transmit descriptor ring. */
5588 memset(txq->txq_descs, 0, WM_TXDESCSIZE(txq));
5589 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5590 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5591 txq->txq_free = WM_NTXDESC(txq);
5592 txq->txq_next = 0;
5593 }
5594
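/*
 * wm_init_tx_regs:
 *
 *	Program the chip's Tx descriptor ring base, length, head/tail
 *	and interrupt delay registers for the given queue.
 */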
5595 static void
5596 wm_init_tx_regs(struct wm_softc *sc, struct wm_txqueue *txq)
5597 {
5598
5599 KASSERT(WM_TX_LOCKED(txq));
5600
5601 if (sc->sc_type < WM_T_82543) {
5602 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
5603 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
5604 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(txq));
5605 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
5606 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
5607 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
5608 } else {
5609 int qid = txq->txq_id;
5610
5611 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
5612 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
5613 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCSIZE(txq));
5614 CSR_WRITE(sc, WMREG_TDH(qid), 0);
5615
5616 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5617 /*
5618 * Don't write TDT before TCTL.EN is set.
5619 			 * See the documentation.
5620 */
5621 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
5622 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
5623 | TXDCTL_WTHRESH(0));
5624 else {
5625 			/* ITR / 4: ITR is in 256ns units, TIDV in 1024ns units */
5626 CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
5627 if (sc->sc_type >= WM_T_82540) {
5628 				/* Should be the same value as TIDV */
5629 CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
5630 }
5631
5632 CSR_WRITE(sc, WMREG_TDT(qid), 0);
5633 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
5634 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
5635 }
5636 }
5637 }
5638
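/*
 * wm_init_tx_buffer:
 *
 *	Reset the Tx job (software) descriptors to an all-free state.
 */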
5639 static void
5640 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5641 {
5642 int i;
5643
5644 KASSERT(WM_TX_LOCKED(txq));
5645
5646 /* Initialize the transmit job descriptors. */
5647 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
5648 txq->txq_soft[i].txs_mbuf = NULL;
5649 txq->txq_sfree = WM_TXQUEUELEN(txq);
5650 txq->txq_snext = 0;
5651 txq->txq_sdirty = 0;
5652 }
5653
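/*
 * wm_init_tx_queue:
 *
 *	Set up a Tx queue: register offsets, descriptor ring, chip
 *	registers and job descriptors.
 */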
5654 static void
5655 wm_init_tx_queue(struct wm_softc *sc, struct wm_txqueue *txq)
5656 {
5657
5658 KASSERT(WM_TX_LOCKED(txq));
5659
5660 /*
5661 * Set up some register offsets that are different between
5662 * the i82542 and the i82543 and later chips.
5663 */
5664 if (sc->sc_type < WM_T_82543) {
5665 txq->txq_tdt_reg = WMREG_OLD_TDT;
5666 } else {
5667 txq->txq_tdt_reg = WMREG_TDT(txq->txq_id);
5668 }
5669
5670 wm_init_tx_descs(sc, txq);
5671 wm_init_tx_regs(sc, txq);
5672 wm_init_tx_buffer(sc, txq);
5673 }
5674
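/*
 * wm_init_rx_regs:
 *
 *	Program the chip's Rx descriptor ring base, length, head/tail
 *	and interrupt delay registers for the given queue.
 */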
5675 static void
5676 wm_init_rx_regs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5677 {
5678
5679 KASSERT(WM_RX_LOCKED(rxq));
5680
5681 /*
5682 * Initialize the receive descriptor and receive job
5683 * descriptor rings.
5684 */
5685 if (sc->sc_type < WM_T_82543) {
5686 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
5687 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
5688 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
5689 sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
5690 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
5691 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
5692 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
5693
5694 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
5695 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
5696 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
5697 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
5698 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
5699 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
5700 } else {
5701 int qid = rxq->rxq_id;
5702
5703 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
5704 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
5705 CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
5706
5707 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5708 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
5709 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
5710 CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
5711 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
5712 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
5713 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
5714 | RXDCTL_WTHRESH(1));
5715 CSR_WRITE(sc, WMREG_RDH(qid), 0);
5716 CSR_WRITE(sc, WMREG_RDT(qid), 0);
5717 } else {
5718 CSR_WRITE(sc, WMREG_RDH(qid), 0);
5719 CSR_WRITE(sc, WMREG_RDT(qid), 0);
5720 			/* ITR / 4: ITR is in 256ns units, RDTR in 1024ns units */
5721 CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
5722 			/* MUST hold the same value as RDTR */
5723 CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
5724 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
5725 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
5726 }
5727 }
5728 }
5729
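/*
 * wm_init_rx_buffer:
 *
 *	Populate the Rx ring with mbufs (allocating as needed) and
 *	reset the Rx chain state.
 */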
5730 static int
5731 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5732 {
5733 struct wm_rxsoft *rxs;
5734 int error, i;
5735
5736 KASSERT(WM_RX_LOCKED(rxq));
5737
5738 for (i = 0; i < WM_NRXDESC; i++) {
5739 rxs = &rxq->rxq_soft[i];
5740 if (rxs->rxs_mbuf == NULL) {
5741 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
5742 log(LOG_ERR, "%s: unable to allocate or map "
5743 "rx buffer %d, error = %d\n",
5744 device_xname(sc->sc_dev), i, error);
5745 /*
5746 * XXX Should attempt to run with fewer receive
5747 * XXX buffers instead of just failing.
5748 */
5749 wm_rxdrain(rxq);
5750 return ENOMEM;
5751 }
5752 } else {
5753 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5754 wm_init_rxdesc(rxq, i);
5755 /*
5756 			 * For 82575 and newer devices, the Rx descriptors
5757 			 * must be initialized after RCTL.EN is set in
5758 			 * wm_set_filter().
5759 */
5760 }
5761 }
5762 rxq->rxq_ptr = 0;
5763 rxq->rxq_discard = 0;
5764 WM_RXCHAIN_RESET(rxq);
5765
5766 return 0;
5767 }
5768
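/*
 * wm_init_rx_queue:
 *
 *	Set up an Rx queue: register offsets, chip registers and
 *	receive buffers.
 */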
5769 static int
5770 wm_init_rx_queue(struct wm_softc *sc, struct wm_rxqueue *rxq)
5771 {
5772
5773 KASSERT(WM_RX_LOCKED(rxq));
5774
5775 /*
5776 * Set up some register offsets that are different between
5777 * the i82542 and the i82543 and later chips.
5778 */
5779 if (sc->sc_type < WM_T_82543) {
5780 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
5781 } else {
5782 rxq->rxq_rdt_reg = WMREG_RDT(rxq->rxq_id);
5783 }
5784
5785 wm_init_rx_regs(sc, rxq);
5786 return wm_init_rx_buffer(sc, rxq);
5787 }
5788
5789 /*
5790  * wm_init_txrx_queues:
5791  *	Initialize {tx,rx} descs and {tx,rx} buffers.
5792 */
5793 static int
5794 wm_init_txrx_queues(struct wm_softc *sc)
5795 {
5796 int i, error;
5797
5798 for (i = 0; i < sc->sc_ntxqueues; i++) {
5799 struct wm_txqueue *txq = &sc->sc_txq[i];
5800 WM_TX_LOCK(txq);
5801 wm_init_tx_queue(sc, txq);
5802 WM_TX_UNLOCK(txq);
5803 }
5804
5805 error = 0;
5806 for (i = 0; i < sc->sc_nrxqueues; i++) {
5807 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5808 WM_RX_LOCK(rxq);
5809 error = wm_init_rx_queue(sc, rxq);
5810 WM_RX_UNLOCK(rxq);
5811 if (error)
5812 break;
5813 }
5814
5815 return error;
5816 }
5817
5818 /*
5819 * wm_tx_offload:
5820 *
5821 * Set up TCP/IP checksumming parameters for the
5822 * specified packet.
5823 */
5824 static int
5825 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
5826 uint8_t *fieldsp)
5827 {
5828 struct wm_txqueue *txq = &sc->sc_txq[0];
5829 struct mbuf *m0 = txs->txs_mbuf;
5830 struct livengood_tcpip_ctxdesc *t;
5831 uint32_t ipcs, tucs, cmd, cmdlen, seg;
5832 uint32_t ipcse;
5833 struct ether_header *eh;
5834 int offset, iphl;
5835 uint8_t fields;
5836
5837 /*
5838 * XXX It would be nice if the mbuf pkthdr had offset
5839 * fields for the protocol headers.
5840 */
5841
5842 eh = mtod(m0, struct ether_header *);
5843 switch (htons(eh->ether_type)) {
5844 case ETHERTYPE_IP:
5845 case ETHERTYPE_IPV6:
5846 offset = ETHER_HDR_LEN;
5847 break;
5848
5849 case ETHERTYPE_VLAN:
5850 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5851 break;
5852
5853 default:
5854 /*
5855 * Don't support this protocol or encapsulation.
5856 */
5857 *fieldsp = 0;
5858 *cmdp = 0;
5859 return 0;
5860 }
5861
5862 if ((m0->m_pkthdr.csum_flags &
5863 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
5864 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5865 } else {
5866 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5867 }
5868 ipcse = offset + iphl - 1;
5869
5870 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
5871 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
5872 seg = 0;
5873 fields = 0;
5874
5875 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5876 int hlen = offset + iphl;
5877 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5878
5879 if (__predict_false(m0->m_len <
5880 (hlen + sizeof(struct tcphdr)))) {
5881 /*
5882 * TCP/IP headers are not in the first mbuf; we need
5883 * to do this the slow and painful way. Let's just
5884 * hope this doesn't happen very often.
5885 */
5886 struct tcphdr th;
5887
5888 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5889
5890 m_copydata(m0, hlen, sizeof(th), &th);
5891 if (v4) {
5892 struct ip ip;
5893
5894 m_copydata(m0, offset, sizeof(ip), &ip);
5895 ip.ip_len = 0;
5896 m_copyback(m0,
5897 offset + offsetof(struct ip, ip_len),
5898 sizeof(ip.ip_len), &ip.ip_len);
5899 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5900 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5901 } else {
5902 struct ip6_hdr ip6;
5903
5904 m_copydata(m0, offset, sizeof(ip6), &ip6);
5905 ip6.ip6_plen = 0;
5906 m_copyback(m0,
5907 offset + offsetof(struct ip6_hdr, ip6_plen),
5908 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5909 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5910 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5911 }
5912 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5913 sizeof(th.th_sum), &th.th_sum);
5914
5915 hlen += th.th_off << 2;
5916 } else {
5917 /*
5918 * TCP/IP headers are in the first mbuf; we can do
5919 * this the easy way.
5920 */
5921 struct tcphdr *th;
5922
5923 if (v4) {
5924 struct ip *ip =
5925 (void *)(mtod(m0, char *) + offset);
5926 th = (void *)(mtod(m0, char *) + hlen);
5927
5928 ip->ip_len = 0;
5929 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5930 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5931 } else {
5932 struct ip6_hdr *ip6 =
5933 (void *)(mtod(m0, char *) + offset);
5934 th = (void *)(mtod(m0, char *) + hlen);
5935
5936 ip6->ip6_plen = 0;
5937 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5938 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5939 }
5940 hlen += th->th_off << 2;
5941 }
5942
5943 if (v4) {
5944 WM_EVCNT_INCR(&sc->sc_ev_txtso);
5945 cmdlen |= WTX_TCPIP_CMD_IP;
5946 } else {
5947 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5948 ipcse = 0;
5949 }
5950 cmd |= WTX_TCPIP_CMD_TSE;
5951 cmdlen |= WTX_TCPIP_CMD_TSE |
5952 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
5953 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
5954 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
5955 }
5956
5957 /*
5958 * NOTE: Even if we're not using the IP or TCP/UDP checksum
5959 * offload feature, if we load the context descriptor, we
5960 * MUST provide valid values for IPCSS and TUCSS fields.
5961 */
5962
5963 ipcs = WTX_TCPIP_IPCSS(offset) |
5964 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
5965 WTX_TCPIP_IPCSE(ipcse);
5966 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
5967 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
5968 fields |= WTX_IXSM;
5969 }
5970
5971 offset += iphl;
5972
5973 if (m0->m_pkthdr.csum_flags &
5974 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
5975 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5976 fields |= WTX_TXSM;
5977 tucs = WTX_TCPIP_TUCSS(offset) |
5978 WTX_TCPIP_TUCSO(offset +
5979 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
5980 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5981 } else if ((m0->m_pkthdr.csum_flags &
5982 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
5983 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5984 fields |= WTX_TXSM;
5985 tucs = WTX_TCPIP_TUCSS(offset) |
5986 WTX_TCPIP_TUCSO(offset +
5987 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
5988 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5989 } else {
5990 /* Just initialize it to a valid TCP context. */
5991 tucs = WTX_TCPIP_TUCSS(offset) |
5992 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
5993 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5994 }
5995
5996 /* Fill in the context descriptor. */
5997 t = (struct livengood_tcpip_ctxdesc *)
5998 &txq->txq_descs[txq->txq_next];
5999 t->tcpip_ipcs = htole32(ipcs);
6000 t->tcpip_tucs = htole32(tucs);
6001 t->tcpip_cmdlen = htole32(cmdlen);
6002 t->tcpip_seg = htole32(seg);
6003 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6004
6005 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6006 txs->txs_ndesc++;
6007
6008 *cmdp = cmd;
6009 *fieldsp = fields;
6010
6011 return 0;
6012 }
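
/*
 * Example (untagged IPv4 TCP with a 20-byte IP header): offset is
 * ETHER_HDR_LEN (14) and iphl is 20, so the context descriptor
 * carries IPCSS = 14, IPCSO = 14 + 10 = 24 (ip_sum),
 * IPCSE = 33 (the last IP header byte), TUCSS = 34 and
 * TUCSO = 34 + 16 = 50 (th_sum).
 */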
6013
6014 /*
6015 * wm_start: [ifnet interface function]
6016 *
6017 * Start packet transmission on the interface.
6018 */
6019 static void
6020 wm_start(struct ifnet *ifp)
6021 {
6022 struct wm_softc *sc = ifp->if_softc;
6023 struct wm_txqueue *txq = &sc->sc_txq[0];
6024
6025 WM_TX_LOCK(txq);
6026 if (!sc->sc_stopping)
6027 wm_start_locked(ifp);
6028 WM_TX_UNLOCK(txq);
6029 }
6030
6031 static void
6032 wm_start_locked(struct ifnet *ifp)
6033 {
6034 struct wm_softc *sc = ifp->if_softc;
6035 struct wm_txqueue *txq = &sc->sc_txq[0];
6036 struct mbuf *m0;
6037 struct m_tag *mtag;
6038 struct wm_txsoft *txs;
6039 bus_dmamap_t dmamap;
6040 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
6041 bus_addr_t curaddr;
6042 bus_size_t seglen, curlen;
6043 uint32_t cksumcmd;
6044 uint8_t cksumfields;
6045
6046 KASSERT(WM_TX_LOCKED(txq));
6047
6048 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
6049 return;
6050
6051 /* Remember the previous number of free descriptors. */
6052 ofree = txq->txq_free;
6053
6054 /*
6055 * Loop through the send queue, setting up transmit descriptors
6056 * until we drain the queue, or use up all available transmit
6057 * descriptors.
6058 */
6059 for (;;) {
6060 m0 = NULL;
6061
6062 /* Get a work queue entry. */
6063 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6064 wm_txeof(sc);
6065 if (txq->txq_sfree == 0) {
6066 DPRINTF(WM_DEBUG_TX,
6067 ("%s: TX: no free job descriptors\n",
6068 device_xname(sc->sc_dev)));
6069 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
6070 break;
6071 }
6072 }
6073
6074 /* Grab a packet off the queue. */
6075 IFQ_DEQUEUE(&ifp->if_snd, m0);
6076 if (m0 == NULL)
6077 break;
6078
6079 DPRINTF(WM_DEBUG_TX,
6080 ("%s: TX: have packet to transmit: %p\n",
6081 device_xname(sc->sc_dev), m0));
6082
6083 txs = &txq->txq_soft[txq->txq_snext];
6084 dmamap = txs->txs_dmamap;
6085
6086 use_tso = (m0->m_pkthdr.csum_flags &
6087 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
6088
6089 /*
6090 * So says the Linux driver:
6091 * The controller does a simple calculation to make sure
6092 * there is enough room in the FIFO before initiating the
6093 * DMA for each buffer. The calc is:
6094 * 4 = ceil(buffer len / MSS)
6095 * To make sure we don't overrun the FIFO, adjust the max
6096 * buffer len if the MSS drops.
6097 */
6098 dmamap->dm_maxsegsz =
6099 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
6100 ? m0->m_pkthdr.segsz << 2
6101 : WTX_MAX_LEN;
6102
6103 /*
6104 * Load the DMA map. If this fails, the packet either
6105 * didn't fit in the allotted number of segments, or we
6106 * were short on resources. For the too-many-segments
6107 * case, we simply report an error and drop the packet,
6108 * since we can't sanely copy a jumbo packet to a single
6109 * buffer.
6110 */
6111 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6112 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
6113 if (error) {
6114 if (error == EFBIG) {
6115 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6116 log(LOG_ERR, "%s: Tx packet consumes too many "
6117 "DMA segments, dropping...\n",
6118 device_xname(sc->sc_dev));
6119 wm_dump_mbuf_chain(sc, m0);
6120 m_freem(m0);
6121 continue;
6122 }
6123 /* Short on resources, just stop for now. */
6124 DPRINTF(WM_DEBUG_TX,
6125 ("%s: TX: dmamap load failed: %d\n",
6126 device_xname(sc->sc_dev), error));
6127 break;
6128 }
6129
6130 segs_needed = dmamap->dm_nsegs;
6131 if (use_tso) {
6132 /* For sentinel descriptor; see below. */
6133 segs_needed++;
6134 }
6135
6136 /*
6137 * Ensure we have enough descriptors free to describe
6138 * the packet. Note, we always reserve one descriptor
6139 * at the end of the ring due to the semantics of the
6140 * TDT register, plus one more in the event we need
6141 * to load offload context.
6142 */
6143 if (segs_needed > txq->txq_free - 2) {
6144 /*
6145 * Not enough free descriptors to transmit this
6146 * packet. We haven't committed anything yet,
6147 * so just unload the DMA map, put the packet
6148 			 * back on the queue, and punt.  Notify the upper
6149 * layer that there are no more slots left.
6150 */
6151 DPRINTF(WM_DEBUG_TX,
6152 ("%s: TX: need %d (%d) descriptors, have %d\n",
6153 device_xname(sc->sc_dev), dmamap->dm_nsegs,
6154 segs_needed, txq->txq_free - 1));
6155 ifp->if_flags |= IFF_OACTIVE;
6156 bus_dmamap_unload(sc->sc_dmat, dmamap);
6157 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
6158 break;
6159 }
6160
6161 /*
6162 * Check for 82547 Tx FIFO bug. We need to do this
6163 * once we know we can transmit the packet, since we
6164 * do some internal FIFO space accounting here.
6165 */
6166 if (sc->sc_type == WM_T_82547 &&
6167 wm_82547_txfifo_bugchk(sc, m0)) {
6168 DPRINTF(WM_DEBUG_TX,
6169 ("%s: TX: 82547 Tx FIFO bug detected\n",
6170 device_xname(sc->sc_dev)));
6171 ifp->if_flags |= IFF_OACTIVE;
6172 bus_dmamap_unload(sc->sc_dmat, dmamap);
6173 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
6174 break;
6175 }
6176
6177 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6178
6179 DPRINTF(WM_DEBUG_TX,
6180 ("%s: TX: packet has %d (%d) DMA segments\n",
6181 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6182
6183 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
6184
6185 /*
6186 * Store a pointer to the packet so that we can free it
6187 * later.
6188 *
6189 * Initially, we consider the number of descriptors the
6190 * packet uses the number of DMA segments. This may be
6191 * incremented by 1 if we do checksum offload (a descriptor
6192 * is used to set the checksum context).
6193 */
6194 txs->txs_mbuf = m0;
6195 txs->txs_firstdesc = txq->txq_next;
6196 txs->txs_ndesc = segs_needed;
6197
6198 /* Set up offload parameters for this packet. */
6199 if (m0->m_pkthdr.csum_flags &
6200 (M_CSUM_TSOv4|M_CSUM_TSOv6|
6201 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
6202 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
6203 if (wm_tx_offload(sc, txs, &cksumcmd,
6204 &cksumfields) != 0) {
6205 /* Error message already displayed. */
6206 bus_dmamap_unload(sc->sc_dmat, dmamap);
6207 continue;
6208 }
6209 } else {
6210 cksumcmd = 0;
6211 cksumfields = 0;
6212 }
6213
6214 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
6215
6216 /* Sync the DMA map. */
6217 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6218 BUS_DMASYNC_PREWRITE);
6219
6220 /* Initialize the transmit descriptor. */
6221 for (nexttx = txq->txq_next, seg = 0;
6222 seg < dmamap->dm_nsegs; seg++) {
6223 for (seglen = dmamap->dm_segs[seg].ds_len,
6224 curaddr = dmamap->dm_segs[seg].ds_addr;
6225 seglen != 0;
6226 curaddr += curlen, seglen -= curlen,
6227 nexttx = WM_NEXTTX(txq, nexttx)) {
6228 curlen = seglen;
6229
6230 /*
6231 * So says the Linux driver:
6232 * Work around for premature descriptor
6233 * write-backs in TSO mode. Append a
6234 * 4-byte sentinel descriptor.
6235 */
6236 if (use_tso &&
6237 seg == dmamap->dm_nsegs - 1 &&
6238 curlen > 8)
6239 curlen -= 4;
6240
6241 wm_set_dma_addr(
6242 &txq->txq_descs[nexttx].wtx_addr,
6243 curaddr);
6244 txq->txq_descs[nexttx].wtx_cmdlen =
6245 htole32(cksumcmd | curlen);
6246 txq->txq_descs[nexttx].wtx_fields.wtxu_status =
6247 0;
6248 txq->txq_descs[nexttx].wtx_fields.wtxu_options =
6249 cksumfields;
6250 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6251 lasttx = nexttx;
6252
6253 DPRINTF(WM_DEBUG_TX,
6254 ("%s: TX: desc %d: low %#" PRIx64 ", "
6255 "len %#04zx\n",
6256 device_xname(sc->sc_dev), nexttx,
6257 (uint64_t)curaddr, curlen));
6258 }
6259 }
6260
6261 KASSERT(lasttx != -1);
6262
6263 /*
6264 * Set up the command byte on the last descriptor of
6265 * the packet. If we're in the interrupt delay window,
6266 * delay the interrupt.
6267 */
6268 txq->txq_descs[lasttx].wtx_cmdlen |=
6269 htole32(WTX_CMD_EOP | WTX_CMD_RS);
6270
6271 /*
6272 * If VLANs are enabled and the packet has a VLAN tag, set
6273 * up the descriptor to encapsulate the packet for us.
6274 *
6275 * This is only valid on the last descriptor of the packet.
6276 */
6277 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6278 txq->txq_descs[lasttx].wtx_cmdlen |=
6279 htole32(WTX_CMD_VLE);
6280 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
6281 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6282 }
6283
6284 txs->txs_lastdesc = lasttx;
6285
6286 DPRINTF(WM_DEBUG_TX,
6287 ("%s: TX: desc %d: cmdlen 0x%08x\n",
6288 device_xname(sc->sc_dev),
6289 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6290
6291 /* Sync the descriptors we're using. */
6292 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6293 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
6294
6295 /* Give the packet to the chip. */
6296 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6297
6298 DPRINTF(WM_DEBUG_TX,
6299 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6300
6301 DPRINTF(WM_DEBUG_TX,
6302 ("%s: TX: finished transmitting packet, job %d\n",
6303 device_xname(sc->sc_dev), txq->txq_snext));
6304
6305 /* Advance the tx pointer. */
6306 txq->txq_free -= txs->txs_ndesc;
6307 txq->txq_next = nexttx;
6308
6309 txq->txq_sfree--;
6310 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6311
6312 /* Pass the packet to any BPF listeners. */
6313 bpf_mtap(ifp, m0);
6314 }
6315
6316 if (m0 != NULL) {
6317 ifp->if_flags |= IFF_OACTIVE;
6318 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6319 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
6320 m_freem(m0);
6321 }
6322
6323 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6324 /* No more slots; notify upper layer. */
6325 ifp->if_flags |= IFF_OACTIVE;
6326 }
6327
6328 if (txq->txq_free != ofree) {
6329 /* Set a watchdog timer in case the chip flakes out. */
6330 ifp->if_timer = 5;
6331 }
6332 }
6333
6334 /*
6335 * wm_nq_tx_offload:
6336 *
6337 * Set up TCP/IP checksumming parameters for the
6338 * specified packet, for NEWQUEUE devices
6339 */
6340 static int
6341 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
6342 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
6343 {
6344 struct wm_txqueue *txq = &sc->sc_txq[0];
6345 struct mbuf *m0 = txs->txs_mbuf;
6346 struct m_tag *mtag;
6347 uint32_t vl_len, mssidx, cmdc;
6348 struct ether_header *eh;
6349 int offset, iphl;
6350
6351 /*
6352 * XXX It would be nice if the mbuf pkthdr had offset
6353 * fields for the protocol headers.
6354 */
6355 *cmdlenp = 0;
6356 *fieldsp = 0;
6357
6358 eh = mtod(m0, struct ether_header *);
6359 switch (htons(eh->ether_type)) {
6360 case ETHERTYPE_IP:
6361 case ETHERTYPE_IPV6:
6362 offset = ETHER_HDR_LEN;
6363 break;
6364
6365 case ETHERTYPE_VLAN:
6366 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6367 break;
6368
6369 default:
6370 /* Don't support this protocol or encapsulation. */
6371 *do_csum = false;
6372 return 0;
6373 }
6374 *do_csum = true;
6375 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
6376 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
6377
6378 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
6379 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
6380
6381 if ((m0->m_pkthdr.csum_flags &
6382 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
6383 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6384 } else {
6385 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6386 }
6387 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
6388 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
6389
6390 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6391 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
6392 << NQTXC_VLLEN_VLAN_SHIFT);
6393 *cmdlenp |= NQTX_CMD_VLE;
6394 }
6395
6396 mssidx = 0;
6397
6398 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6399 int hlen = offset + iphl;
6400 int tcp_hlen;
6401 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6402
6403 if (__predict_false(m0->m_len <
6404 (hlen + sizeof(struct tcphdr)))) {
6405 /*
6406 * TCP/IP headers are not in the first mbuf; we need
6407 * to do this the slow and painful way. Let's just
6408 * hope this doesn't happen very often.
6409 */
6410 struct tcphdr th;
6411
6412 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
6413
6414 m_copydata(m0, hlen, sizeof(th), &th);
6415 if (v4) {
6416 struct ip ip;
6417
6418 m_copydata(m0, offset, sizeof(ip), &ip);
6419 ip.ip_len = 0;
6420 m_copyback(m0,
6421 offset + offsetof(struct ip, ip_len),
6422 sizeof(ip.ip_len), &ip.ip_len);
6423 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6424 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6425 } else {
6426 struct ip6_hdr ip6;
6427
6428 m_copydata(m0, offset, sizeof(ip6), &ip6);
6429 ip6.ip6_plen = 0;
6430 m_copyback(m0,
6431 offset + offsetof(struct ip6_hdr, ip6_plen),
6432 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6433 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6434 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6435 }
6436 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6437 sizeof(th.th_sum), &th.th_sum);
6438
6439 tcp_hlen = th.th_off << 2;
6440 } else {
6441 /*
6442 * TCP/IP headers are in the first mbuf; we can do
6443 * this the easy way.
6444 */
6445 struct tcphdr *th;
6446
6447 if (v4) {
6448 struct ip *ip =
6449 (void *)(mtod(m0, char *) + offset);
6450 th = (void *)(mtod(m0, char *) + hlen);
6451
6452 ip->ip_len = 0;
6453 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6454 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6455 } else {
6456 struct ip6_hdr *ip6 =
6457 (void *)(mtod(m0, char *) + offset);
6458 th = (void *)(mtod(m0, char *) + hlen);
6459
6460 ip6->ip6_plen = 0;
6461 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6462 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6463 }
6464 tcp_hlen = th->th_off << 2;
6465 }
6466 hlen += tcp_hlen;
6467 *cmdlenp |= NQTX_CMD_TSE;
6468
6469 if (v4) {
6470 WM_EVCNT_INCR(&sc->sc_ev_txtso);
6471 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
6472 } else {
6473 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
6474 *fieldsp |= NQTXD_FIELDS_TUXSM;
6475 }
6476 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
6477 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6478 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
6479 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
6480 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
6481 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
6482 } else {
6483 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
6484 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6485 }
6486
6487 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
6488 *fieldsp |= NQTXD_FIELDS_IXSM;
6489 cmdc |= NQTXC_CMD_IP4;
6490 }
6491
6492 if (m0->m_pkthdr.csum_flags &
6493 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6494 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
6495 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6496 cmdc |= NQTXC_CMD_TCP;
6497 } else {
6498 cmdc |= NQTXC_CMD_UDP;
6499 }
6500 cmdc |= NQTXC_CMD_IP4;
6501 *fieldsp |= NQTXD_FIELDS_TUXSM;
6502 }
6503 if (m0->m_pkthdr.csum_flags &
6504 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6505 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
6506 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6507 cmdc |= NQTXC_CMD_TCP;
6508 } else {
6509 cmdc |= NQTXC_CMD_UDP;
6510 }
6511 cmdc |= NQTXC_CMD_IP6;
6512 *fieldsp |= NQTXD_FIELDS_TUXSM;
6513 }
6514
6515 /* Fill in the context descriptor. */
6516 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
6517 	    htole32(vl_len);
6518 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
6519 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
6520 	    htole32(cmdc);
6521 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
6522 htole32(mssidx);
6523 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6524 DPRINTF(WM_DEBUG_TX,
6525 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
6526 txq->txq_next, 0, vl_len));
6527 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
6528 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6529 txs->txs_ndesc++;
6530 return 0;
6531 }
6532
6533 /*
6534 * wm_nq_start: [ifnet interface function]
6535 *
6536 * Start packet transmission on the interface for NEWQUEUE devices
6537 */
6538 static void
6539 wm_nq_start(struct ifnet *ifp)
6540 {
6541 struct wm_softc *sc = ifp->if_softc;
6542 struct wm_txqueue *txq = &sc->sc_txq[0];
6543
6544 WM_TX_LOCK(txq);
6545 if (!sc->sc_stopping)
6546 wm_nq_start_locked(ifp);
6547 WM_TX_UNLOCK(txq);
6548 }
6549
6550 static void
6551 wm_nq_start_locked(struct ifnet *ifp)
6552 {
6553 struct wm_softc *sc = ifp->if_softc;
6554 struct wm_txqueue *txq = &sc->sc_txq[0];
6555 struct mbuf *m0;
6556 struct m_tag *mtag;
6557 struct wm_txsoft *txs;
6558 bus_dmamap_t dmamap;
6559 int error, nexttx, lasttx = -1, seg, segs_needed;
6560 bool do_csum, sent;
6561
6562 KASSERT(WM_TX_LOCKED(txq));
6563
6564 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
6565 return;
6566
6567 sent = false;
6568
6569 /*
6570 * Loop through the send queue, setting up transmit descriptors
6571 * until we drain the queue, or use up all available transmit
6572 * descriptors.
6573 */
6574 for (;;) {
6575 m0 = NULL;
6576
6577 /* Get a work queue entry. */
6578 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6579 wm_txeof(sc);
6580 if (txq->txq_sfree == 0) {
6581 DPRINTF(WM_DEBUG_TX,
6582 ("%s: TX: no free job descriptors\n",
6583 device_xname(sc->sc_dev)));
6584 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
6585 break;
6586 }
6587 }
6588
6589 /* Grab a packet off the queue. */
6590 IFQ_DEQUEUE(&ifp->if_snd, m0);
6591 if (m0 == NULL)
6592 break;
6593
6594 DPRINTF(WM_DEBUG_TX,
6595 ("%s: TX: have packet to transmit: %p\n",
6596 device_xname(sc->sc_dev), m0));
6597
6598 txs = &txq->txq_soft[txq->txq_snext];
6599 dmamap = txs->txs_dmamap;
6600
6601 /*
6602 * Load the DMA map. If this fails, the packet either
6603 * didn't fit in the allotted number of segments, or we
6604 * were short on resources. For the too-many-segments
6605 * case, we simply report an error and drop the packet,
6606 * since we can't sanely copy a jumbo packet to a single
6607 * buffer.
6608 */
6609 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6610 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
6611 if (error) {
6612 if (error == EFBIG) {
6613 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6614 log(LOG_ERR, "%s: Tx packet consumes too many "
6615 "DMA segments, dropping...\n",
6616 device_xname(sc->sc_dev));
6617 wm_dump_mbuf_chain(sc, m0);
6618 m_freem(m0);
6619 continue;
6620 }
6621 /* Short on resources, just stop for now. */
6622 DPRINTF(WM_DEBUG_TX,
6623 ("%s: TX: dmamap load failed: %d\n",
6624 device_xname(sc->sc_dev), error));
6625 break;
6626 }
6627
6628 segs_needed = dmamap->dm_nsegs;
6629
6630 /*
6631 * Ensure we have enough descriptors free to describe
6632 * the packet. Note, we always reserve one descriptor
6633 * at the end of the ring due to the semantics of the
6634 * TDT register, plus one more in the event we need
6635 * to load offload context.
6636 */
6637 if (segs_needed > txq->txq_free - 2) {
6638 /*
6639 * Not enough free descriptors to transmit this
6640 * packet. We haven't committed anything yet,
6641 * so just unload the DMA map, put the packet
6642 			 * back on the queue, and punt.  Notify the upper
6643 * layer that there are no more slots left.
6644 */
6645 DPRINTF(WM_DEBUG_TX,
6646 ("%s: TX: need %d (%d) descriptors, have %d\n",
6647 device_xname(sc->sc_dev), dmamap->dm_nsegs,
6648 segs_needed, txq->txq_free - 1));
6649 ifp->if_flags |= IFF_OACTIVE;
6650 bus_dmamap_unload(sc->sc_dmat, dmamap);
6651 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
6652 break;
6653 }
6654
6655 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6656
6657 DPRINTF(WM_DEBUG_TX,
6658 ("%s: TX: packet has %d (%d) DMA segments\n",
6659 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6660
6661 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
6662
6663 /*
6664 * Store a pointer to the packet so that we can free it
6665 * later.
6666 *
6667 * Initially, we consider the number of descriptors the
6668 * packet uses the number of DMA segments. This may be
6669 * incremented by 1 if we do checksum offload (a descriptor
6670 * is used to set the checksum context).
6671 */
6672 txs->txs_mbuf = m0;
6673 txs->txs_firstdesc = txq->txq_next;
6674 txs->txs_ndesc = segs_needed;
6675
6676 /* Set up offload parameters for this packet. */
6677 uint32_t cmdlen, fields, dcmdlen;
6678 if (m0->m_pkthdr.csum_flags &
6679 (M_CSUM_TSOv4|M_CSUM_TSOv6|
6680 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
6681 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
6682 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
6683 &do_csum) != 0) {
6684 /* Error message already displayed. */
6685 bus_dmamap_unload(sc->sc_dmat, dmamap);
6686 continue;
6687 }
6688 } else {
6689 do_csum = false;
6690 cmdlen = 0;
6691 fields = 0;
6692 }
6693
6694 /* Sync the DMA map. */
6695 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6696 BUS_DMASYNC_PREWRITE);
6697
6698 /* Initialize the first transmit descriptor. */
6699 nexttx = txq->txq_next;
6700 if (!do_csum) {
6701 /* setup a legacy descriptor */
6702 wm_set_dma_addr(
6703 &txq->txq_descs[nexttx].wtx_addr,
6704 dmamap->dm_segs[0].ds_addr);
6705 txq->txq_descs[nexttx].wtx_cmdlen =
6706 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
6707 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
6708 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
6709 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
6710 NULL) {
6711 txq->txq_descs[nexttx].wtx_cmdlen |=
6712 htole32(WTX_CMD_VLE);
6713 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
6714 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6715 } else {
6716 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6717 }
6718 dcmdlen = 0;
6719 } else {
6720 /* setup an advanced data descriptor */
6721 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6722 htole64(dmamap->dm_segs[0].ds_addr);
6723 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
6724 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
6725 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
6726 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
6727 htole32(fields);
6728 DPRINTF(WM_DEBUG_TX,
6729 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
6730 device_xname(sc->sc_dev), nexttx,
6731 (uint64_t)dmamap->dm_segs[0].ds_addr));
6732 DPRINTF(WM_DEBUG_TX,
6733 ("\t 0x%08x%08x\n", fields,
6734 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
6735 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
6736 }
6737
6738 lasttx = nexttx;
6739 nexttx = WM_NEXTTX(txq, nexttx);
6740 /*
6741 		 * Fill in the next descriptors.  Legacy or advanced format
6742 		 * is the same here.
6743 */
6744 for (seg = 1; seg < dmamap->dm_nsegs;
6745 seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
6746 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6747 htole64(dmamap->dm_segs[seg].ds_addr);
6748 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
6749 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
6750 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
6751 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
6752 lasttx = nexttx;
6753
6754 DPRINTF(WM_DEBUG_TX,
6755 ("%s: TX: desc %d: %#" PRIx64 ", "
6756 "len %#04zx\n",
6757 device_xname(sc->sc_dev), nexttx,
6758 (uint64_t)dmamap->dm_segs[seg].ds_addr,
6759 dmamap->dm_segs[seg].ds_len));
6760 }
6761
6762 KASSERT(lasttx != -1);
6763
6764 /*
6765 * Set up the command byte on the last descriptor of
6766 * the packet. If we're in the interrupt delay window,
6767 * delay the interrupt.
6768 */
6769 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
6770 (NQTX_CMD_EOP | NQTX_CMD_RS));
6771 txq->txq_descs[lasttx].wtx_cmdlen |=
6772 htole32(WTX_CMD_EOP | WTX_CMD_RS);
6773
6774 txs->txs_lastdesc = lasttx;
6775
6776 DPRINTF(WM_DEBUG_TX,
6777 ("%s: TX: desc %d: cmdlen 0x%08x\n",
6778 device_xname(sc->sc_dev),
6779 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6780
6781 /* Sync the descriptors we're using. */
6782 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6783 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
6784
6785 /* Give the packet to the chip. */
6786 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6787 sent = true;
6788
6789 DPRINTF(WM_DEBUG_TX,
6790 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6791
6792 DPRINTF(WM_DEBUG_TX,
6793 ("%s: TX: finished transmitting packet, job %d\n",
6794 device_xname(sc->sc_dev), txq->txq_snext));
6795
6796 /* Advance the tx pointer. */
6797 txq->txq_free -= txs->txs_ndesc;
6798 txq->txq_next = nexttx;
6799
6800 txq->txq_sfree--;
6801 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6802
6803 /* Pass the packet to any BPF listeners. */
6804 bpf_mtap(ifp, m0);
6805 }
6806
6807 if (m0 != NULL) {
6808 ifp->if_flags |= IFF_OACTIVE;
6809 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6810 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
6811 m_freem(m0);
6812 }
6813
6814 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6815 /* No more slots; notify upper layer. */
6816 ifp->if_flags |= IFF_OACTIVE;
6817 }
6818
6819 if (sent) {
6820 /* Set a watchdog timer in case the chip flakes out. */
6821 ifp->if_timer = 5;
6822 }
6823 }
6824
6825 /* Interrupt */
6826
6827 /*
6828 * wm_txeof:
6829 *
6830 * Helper; handle transmit interrupts.
6831 */
6832 static int
6833 wm_txeof(struct wm_softc *sc)
6834 {
6835 struct wm_txqueue *txq = &sc->sc_txq[0];
6836 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6837 struct wm_txsoft *txs;
6838 bool processed = false;
6839 int count = 0;
6840 int i;
6841 uint8_t status;
6842
6843 if (sc->sc_stopping)
6844 return 0;
6845
6846 ifp->if_flags &= ~IFF_OACTIVE;
6847
6848 /*
6849 * Go through the Tx list and free mbufs for those
6850 * frames which have been transmitted.
6851 */
6852 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
6853 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
6854 txs = &txq->txq_soft[i];
6855
6856 DPRINTF(WM_DEBUG_TX,
6857 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
6858
6859 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
6860 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6861
6862 status =
6863 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
6864 if ((status & WTX_ST_DD) == 0) {
6865 wm_cdtxsync(txq, txs->txs_lastdesc, 1,
6866 BUS_DMASYNC_PREREAD);
6867 break;
6868 }
6869
6870 processed = true;
6871 count++;
6872 DPRINTF(WM_DEBUG_TX,
6873 ("%s: TX: job %d done: descs %d..%d\n",
6874 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
6875 txs->txs_lastdesc));
6876
6877 /*
6878 * XXX We should probably be using the statistics
6879 * XXX registers, but I don't know if they exist
6880 * XXX on chips before the i82544.
6881 */
6882
6883 #ifdef WM_EVENT_COUNTERS
6884 if (status & WTX_ST_TU)
6885 WM_EVCNT_INCR(&sc->sc_ev_tu);
6886 #endif /* WM_EVENT_COUNTERS */
6887
6888 if (status & (WTX_ST_EC|WTX_ST_LC)) {
6889 ifp->if_oerrors++;
6890 if (status & WTX_ST_LC)
6891 log(LOG_WARNING, "%s: late collision\n",
6892 device_xname(sc->sc_dev));
6893 else if (status & WTX_ST_EC) {
6894 ifp->if_collisions += 16;
6895 log(LOG_WARNING, "%s: excessive collisions\n",
6896 device_xname(sc->sc_dev));
6897 }
6898 } else
6899 ifp->if_opackets++;
6900
6901 txq->txq_free += txs->txs_ndesc;
6902 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
6903 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6904 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
6905 m_freem(txs->txs_mbuf);
6906 txs->txs_mbuf = NULL;
6907 }
6908
6909 /* Update the dirty transmit buffer pointer. */
6910 txq->txq_sdirty = i;
6911 DPRINTF(WM_DEBUG_TX,
6912 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
6913
6914 if (count != 0)
6915 rnd_add_uint32(&sc->rnd_source, count);
6916
6917 /*
6918 * If there are no more pending transmissions, cancel the watchdog
6919 * timer.
6920 */
6921 if (txq->txq_sfree == WM_TXQUEUELEN(txq))
6922 ifp->if_timer = 0;
6923
6924 return processed;
6925 }
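
#if 0
/*
 * Minimal debug sketch (not compiled in): dump the Tx ring state
 * maintained by wm_start_locked()/wm_txeof().  It uses only txq
 * fields referenced in this file; the helper itself is illustrative
 * and not part of the driver.
 */
static void
wm_txq_dump_state(struct wm_softc *sc, struct wm_txqueue *txq)
{

	printf("%s: TX: free %d next %d sfree %d snext %d sdirty %d\n",
	    device_xname(sc->sc_dev), txq->txq_free, txq->txq_next,
	    txq->txq_sfree, txq->txq_snext, txq->txq_sdirty);
}
#endif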
6926
6927 /*
6928 * wm_rxeof:
6929 *
6930 * Helper; handle receive interrupts.
6931 */
6932 static void
6933 wm_rxeof(struct wm_rxqueue *rxq)
6934 {
6935 struct wm_softc *sc = rxq->rxq_sc;
6936 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6937 struct wm_rxsoft *rxs;
6938 struct mbuf *m;
6939 int i, len;
6940 int count = 0;
6941 uint8_t status, errors;
6942 uint16_t vlantag;
6943
6944 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
6945 rxs = &rxq->rxq_soft[i];
6946
6947 DPRINTF(WM_DEBUG_RX,
6948 ("%s: RX: checking descriptor %d\n",
6949 device_xname(sc->sc_dev), i));
6950
6951 wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6952
6953 status = rxq->rxq_descs[i].wrx_status;
6954 errors = rxq->rxq_descs[i].wrx_errors;
6955 len = le16toh(rxq->rxq_descs[i].wrx_len);
6956 vlantag = rxq->rxq_descs[i].wrx_special;
6957
6958 if ((status & WRX_ST_DD) == 0) {
6959 /* We have processed all of the receive descriptors. */
6960 wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
6961 break;
6962 }
6963
6964 count++;
6965 if (__predict_false(rxq->rxq_discard)) {
6966 DPRINTF(WM_DEBUG_RX,
6967 ("%s: RX: discarding contents of descriptor %d\n",
6968 device_xname(sc->sc_dev), i));
6969 wm_init_rxdesc(rxq, i);
6970 if (status & WRX_ST_EOP) {
6971 /* Reset our state. */
6972 DPRINTF(WM_DEBUG_RX,
6973 ("%s: RX: resetting rxdiscard -> 0\n",
6974 device_xname(sc->sc_dev)));
6975 rxq->rxq_discard = 0;
6976 }
6977 continue;
6978 }
6979
6980 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6981 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
6982
6983 m = rxs->rxs_mbuf;
6984
6985 /*
6986 * Add a new receive buffer to the ring, unless of
6987 * course the length is zero. Treat the latter as a
6988 * failed mapping.
6989 */
6990 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
6991 /*
6992 * Failed, throw away what we've done so
6993 * far, and discard the rest of the packet.
6994 */
6995 ifp->if_ierrors++;
6996 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6997 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
6998 wm_init_rxdesc(rxq, i);
6999 if ((status & WRX_ST_EOP) == 0)
7000 rxq->rxq_discard = 1;
7001 if (rxq->rxq_head != NULL)
7002 m_freem(rxq->rxq_head);
7003 WM_RXCHAIN_RESET(rxq);
7004 DPRINTF(WM_DEBUG_RX,
7005 ("%s: RX: Rx buffer allocation failed, "
7006 "dropping packet%s\n", device_xname(sc->sc_dev),
7007 rxq->rxq_discard ? " (discard)" : ""));
7008 continue;
7009 }
7010
7011 m->m_len = len;
7012 rxq->rxq_len += len;
7013 DPRINTF(WM_DEBUG_RX,
7014 ("%s: RX: buffer at %p len %d\n",
7015 device_xname(sc->sc_dev), m->m_data, len));
7016
7017 /* If this is not the end of the packet, keep looking. */
7018 if ((status & WRX_ST_EOP) == 0) {
7019 WM_RXCHAIN_LINK(rxq, m);
7020 DPRINTF(WM_DEBUG_RX,
7021 ("%s: RX: not yet EOP, rxlen -> %d\n",
7022 device_xname(sc->sc_dev), rxq->rxq_len));
7023 continue;
7024 }
7025
7026 /*
7027 		 * Okay, we have the entire packet now.  The chip is
7028 		 * configured to include the FCS except on I350, I354
7029 		 * and I21[01] (not all chips can be configured to
7030 		 * strip it), so we need to trim it.  We may also need
7031 		 * to adjust the length of the previous mbuf in the
7032 		 * chain if the current mbuf is too short.  Due to an
7033 		 * erratum, the RCTL_SECRC bit in the RCTL register is
7034 		 * always set on I350, so we don't trim the FCS there.
7035 */
7036 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
7037 && (sc->sc_type != WM_T_I210)
7038 && (sc->sc_type != WM_T_I211)) {
7039 if (m->m_len < ETHER_CRC_LEN) {
7040 rxq->rxq_tail->m_len
7041 -= (ETHER_CRC_LEN - m->m_len);
7042 m->m_len = 0;
7043 } else
7044 m->m_len -= ETHER_CRC_LEN;
7045 len = rxq->rxq_len - ETHER_CRC_LEN;
7046 } else
7047 len = rxq->rxq_len;
7048
7049 WM_RXCHAIN_LINK(rxq, m);
7050
7051 *rxq->rxq_tailp = NULL;
7052 m = rxq->rxq_head;
7053
7054 WM_RXCHAIN_RESET(rxq);
7055
7056 DPRINTF(WM_DEBUG_RX,
7057 ("%s: RX: have entire packet, len -> %d\n",
7058 device_xname(sc->sc_dev), len));
7059
7060 /* If an error occurred, update stats and drop the packet. */
7061 if (errors &
7062 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
7063 if (errors & WRX_ER_SE)
7064 log(LOG_WARNING, "%s: symbol error\n",
7065 device_xname(sc->sc_dev));
7066 else if (errors & WRX_ER_SEQ)
7067 log(LOG_WARNING, "%s: receive sequence error\n",
7068 device_xname(sc->sc_dev));
7069 else if (errors & WRX_ER_CE)
7070 log(LOG_WARNING, "%s: CRC error\n",
7071 device_xname(sc->sc_dev));
7072 m_freem(m);
7073 continue;
7074 }
7075
7076 /* No errors. Receive the packet. */
7077 m->m_pkthdr.rcvif = ifp;
7078 m->m_pkthdr.len = len;
7079
7080 /*
7081 * If VLANs are enabled, VLAN packets have been unwrapped
7082 * for us. Associate the tag with the packet.
7083 */
7084 		/* XXX Should check for I350 and I354 */
7085 if ((status & WRX_ST_VP) != 0) {
7086 VLAN_INPUT_TAG(ifp, m,
7087 le16toh(vlantag),
7088 continue);
7089 }
7090
7091 /* Set up checksum info for this packet. */
7092 if ((status & WRX_ST_IXSM) == 0) {
7093 if (status & WRX_ST_IPCS) {
7094 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
7095 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
7096 if (errors & WRX_ER_IPE)
7097 m->m_pkthdr.csum_flags |=
7098 M_CSUM_IPv4_BAD;
7099 }
7100 if (status & WRX_ST_TCPCS) {
7101 /*
7102 * Note: we don't know if this was TCP or UDP,
7103 * so we just set both bits, and expect the
7104 * upper layers to deal.
7105 */
7106 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
7107 m->m_pkthdr.csum_flags |=
7108 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7109 M_CSUM_TCPv6 | M_CSUM_UDPv6;
7110 if (errors & WRX_ER_TCPE)
7111 m->m_pkthdr.csum_flags |=
7112 M_CSUM_TCP_UDP_BAD;
7113 }
7114 }
7115
7116 ifp->if_ipackets++;
7117
7118 WM_RX_UNLOCK(rxq);
7119
7120 /* Pass this up to any BPF listeners. */
7121 bpf_mtap(ifp, m);
7122
7123 /* Pass it on. */
7124 (*ifp->if_input)(ifp, m);
7125
7126 WM_RX_LOCK(rxq);
7127
7128 if (sc->sc_stopping)
7129 break;
7130 }
7131
7132 /* Update the receive pointer. */
7133 rxq->rxq_ptr = i;
7134 if (count != 0)
7135 rnd_add_uint32(&sc->rnd_source, count);
7136
7137 DPRINTF(WM_DEBUG_RX,
7138 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
7139 }
7140
7141 /*
7142 * wm_linkintr_gmii:
7143 *
7144 * Helper; handle link interrupts for GMII.
7145 */
7146 static void
7147 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
7148 {
7149
7150 KASSERT(WM_CORE_LOCKED(sc));
7151
7152 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7153 __func__));
7154
7155 if (icr & ICR_LSC) {
7156 uint32_t status = CSR_READ(sc, WMREG_STATUS);
7157
7158 if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
7159 wm_gig_downshift_workaround_ich8lan(sc);
7160
7161 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
7162 device_xname(sc->sc_dev)));
7163 mii_pollstat(&sc->sc_mii);
7164 if (sc->sc_type == WM_T_82543) {
7165 int miistatus, active;
7166
7167 /*
7168 				 * With 82543, we need to force the MAC's
7169 				 * speed and duplex to match the PHY's
7170 				 * speed and duplex configuration.
7171 */
7172 miistatus = sc->sc_mii.mii_media_status;
7173
7174 if (miistatus & IFM_ACTIVE) {
7175 active = sc->sc_mii.mii_media_active;
7176 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7177 switch (IFM_SUBTYPE(active)) {
7178 case IFM_10_T:
7179 sc->sc_ctrl |= CTRL_SPEED_10;
7180 break;
7181 case IFM_100_TX:
7182 sc->sc_ctrl |= CTRL_SPEED_100;
7183 break;
7184 case IFM_1000_T:
7185 sc->sc_ctrl |= CTRL_SPEED_1000;
7186 break;
7187 default:
7188 /*
7189 				 * Fiber?
7190 				 * Should not enter here.
7191 */
7192 printf("unknown media (%x)\n",
7193 active);
7194 break;
7195 }
7196 if (active & IFM_FDX)
7197 sc->sc_ctrl |= CTRL_FD;
7198 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7199 }
7200 } else if ((sc->sc_type == WM_T_ICH8)
7201 && (sc->sc_phytype == WMPHY_IGP_3)) {
7202 wm_kmrn_lock_loss_workaround_ich8lan(sc);
7203 } else if (sc->sc_type == WM_T_PCH) {
7204 wm_k1_gig_workaround_hv(sc,
7205 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
7206 }
7207
7208 if ((sc->sc_phytype == WMPHY_82578)
7209 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
7210 == IFM_1000_T)) {
7211
7212 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
7213 delay(200*1000); /* XXX too big */
7214
7215 /* Link stall fix for link up */
7216 wm_gmii_hv_writereg(sc->sc_dev, 1,
7217 HV_MUX_DATA_CTRL,
7218 HV_MUX_DATA_CTRL_GEN_TO_MAC
7219 | HV_MUX_DATA_CTRL_FORCE_SPEED);
7220 wm_gmii_hv_writereg(sc->sc_dev, 1,
7221 HV_MUX_DATA_CTRL,
7222 HV_MUX_DATA_CTRL_GEN_TO_MAC);
7223 }
7224 }
7225 } else if (icr & ICR_RXSEQ) {
7226 DPRINTF(WM_DEBUG_LINK,
7227 		    ("%s: LINK: Receive sequence error\n",
7228 device_xname(sc->sc_dev)));
7229 }
7230 }
7231
7232 /*
7233 * wm_linkintr_tbi:
7234 *
7235 * Helper; handle link interrupts for TBI mode.
7236 */
7237 static void
7238 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
7239 {
7240 uint32_t status;
7241
7242 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7243 __func__));
7244
7245 status = CSR_READ(sc, WMREG_STATUS);
7246 if (icr & ICR_LSC) {
7247 if (status & STATUS_LU) {
7248 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
7249 device_xname(sc->sc_dev),
7250 (status & STATUS_FD) ? "FDX" : "HDX"));
7251 /*
7252 * NOTE: CTRL will update TFCE and RFCE automatically,
7253 * so we should update sc->sc_ctrl
7254 */
7255
7256 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7257 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7258 sc->sc_fcrtl &= ~FCRTL_XONE;
7259 if (status & STATUS_FD)
7260 sc->sc_tctl |=
7261 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7262 else
7263 sc->sc_tctl |=
7264 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7265 if (sc->sc_ctrl & CTRL_TFCE)
7266 sc->sc_fcrtl |= FCRTL_XONE;
7267 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7268 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7269 WMREG_OLD_FCRTL : WMREG_FCRTL,
7270 sc->sc_fcrtl);
7271 sc->sc_tbi_linkup = 1;
7272 } else {
7273 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
7274 device_xname(sc->sc_dev)));
7275 sc->sc_tbi_linkup = 0;
7276 }
7277 /* Update LED */
7278 wm_tbi_serdes_set_linkled(sc);
7279 } else if (icr & ICR_RXSEQ) {
7280 DPRINTF(WM_DEBUG_LINK,
7281 ("%s: LINK: Receive sequence error\n",
7282 device_xname(sc->sc_dev)));
7283 }
7284 }
7285
7286 /*
7287 * wm_linkintr_serdes:
7288 *
7289 * Helper; handle link interrupts for TBI mode.
7290  *	Helper; handle link interrupts for SERDES mode.
7291 static void
7292 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
7293 {
7294 struct mii_data *mii = &sc->sc_mii;
7295 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7296 uint32_t pcs_adv, pcs_lpab, reg;
7297
7298 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7299 __func__));
7300
7301 if (icr & ICR_LSC) {
7302 /* Check PCS */
7303 reg = CSR_READ(sc, WMREG_PCS_LSTS);
7304 if ((reg & PCS_LSTS_LINKOK) != 0) {
7305 mii->mii_media_status |= IFM_ACTIVE;
7306 sc->sc_tbi_linkup = 1;
7307 } else {
7308 mii->mii_media_status |= IFM_NONE;
7309 sc->sc_tbi_linkup = 0;
7310 wm_tbi_serdes_set_linkled(sc);
7311 return;
7312 }
7313 mii->mii_media_active |= IFM_1000_SX;
7314 if ((reg & PCS_LSTS_FDX) != 0)
7315 mii->mii_media_active |= IFM_FDX;
7316 else
7317 mii->mii_media_active |= IFM_HDX;
7318 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7319 /* Check flow */
7320 reg = CSR_READ(sc, WMREG_PCS_LSTS);
7321 if ((reg & PCS_LSTS_AN_COMP) == 0) {
7322 DPRINTF(WM_DEBUG_LINK,
7323 ("XXX LINKOK but not ACOMP\n"));
7324 return;
7325 }
7326 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
7327 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
7328 DPRINTF(WM_DEBUG_LINK,
7329 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
7330 if ((pcs_adv & TXCW_SYM_PAUSE)
7331 && (pcs_lpab & TXCW_SYM_PAUSE)) {
7332 mii->mii_media_active |= IFM_FLOW
7333 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
7334 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
7335 && (pcs_adv & TXCW_ASYM_PAUSE)
7336 && (pcs_lpab & TXCW_SYM_PAUSE)
7337 && (pcs_lpab & TXCW_ASYM_PAUSE))
7338 mii->mii_media_active |= IFM_FLOW
7339 | IFM_ETH_TXPAUSE;
7340 else if ((pcs_adv & TXCW_SYM_PAUSE)
7341 && (pcs_adv & TXCW_ASYM_PAUSE)
7342 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
7343 && (pcs_lpab & TXCW_ASYM_PAUSE))
7344 mii->mii_media_active |= IFM_FLOW
7345 | IFM_ETH_RXPAUSE;
7346 }
7347 /* Update LED */
7348 wm_tbi_serdes_set_linkled(sc);
7349 } else {
7350 DPRINTF(WM_DEBUG_LINK,
7351 ("%s: LINK: Receive sequence error\n",
7352 device_xname(sc->sc_dev)));
7353 }
7354 }
7355
7356 /*
7357 * wm_linkintr:
7358 *
7359 * Helper; handle link interrupts.
7360 */
7361 static void
7362 wm_linkintr(struct wm_softc *sc, uint32_t icr)
7363 {
7364
7365 KASSERT(WM_CORE_LOCKED(sc));
7366
7367 if (sc->sc_flags & WM_F_HAS_MII)
7368 wm_linkintr_gmii(sc, icr);
7369 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7370 && (sc->sc_type >= WM_T_82575))
7371 wm_linkintr_serdes(sc, icr);
7372 else
7373 wm_linkintr_tbi(sc, icr);
7374 }
7375
7376 /*
7377 * wm_intr_legacy:
7378 *
7379 * Interrupt service routine for INTx and MSI.
7380 */
7381 static int
7382 wm_intr_legacy(void *arg)
7383 {
7384 struct wm_softc *sc = arg;
7385 struct wm_txqueue *txq = &sc->sc_txq[0];
7386 struct wm_rxqueue *rxq = &sc->sc_rxq[0];
7387 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7388 uint32_t icr, rndval = 0;
7389 int handled = 0;
7390
7391 DPRINTF(WM_DEBUG_TX,
7392 ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
7393 while (1 /* CONSTCOND */) {
7394 icr = CSR_READ(sc, WMREG_ICR);
7395 if ((icr & sc->sc_icr) == 0)
7396 break;
7397 if (rndval == 0)
7398 rndval = icr;
7399
7400 WM_RX_LOCK(rxq);
7401
7402 if (sc->sc_stopping) {
7403 WM_RX_UNLOCK(rxq);
7404 break;
7405 }
7406
7407 handled = 1;
7408
7409 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7410 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
7411 DPRINTF(WM_DEBUG_RX,
7412 ("%s: RX: got Rx intr 0x%08x\n",
7413 device_xname(sc->sc_dev),
7414 icr & (ICR_RXDMT0|ICR_RXT0)));
7415 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
7416 }
7417 #endif
7418 wm_rxeof(rxq);
7419
7420 WM_RX_UNLOCK(rxq);
7421 WM_TX_LOCK(txq);
7422
7423 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7424 if (icr & ICR_TXDW) {
7425 DPRINTF(WM_DEBUG_TX,
7426 ("%s: TX: got TXDW interrupt\n",
7427 device_xname(sc->sc_dev)));
7428 WM_EVCNT_INCR(&sc->sc_ev_txdw);
7429 }
7430 #endif
7431 wm_txeof(sc);
7432
7433 WM_TX_UNLOCK(txq);
7434 WM_CORE_LOCK(sc);
7435
7436 if (icr & (ICR_LSC|ICR_RXSEQ)) {
7437 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7438 wm_linkintr(sc, icr);
7439 }
7440
7441 WM_CORE_UNLOCK(sc);
7442
7443 if (icr & ICR_RXO) {
7444 #if defined(WM_DEBUG)
7445 log(LOG_WARNING, "%s: Receive overrun\n",
7446 device_xname(sc->sc_dev));
7447 #endif /* defined(WM_DEBUG) */
7448 }
7449 }
7450
7451 rnd_add_uint32(&sc->rnd_source, rndval);
7452
7453 if (handled) {
7454 /* Try to get more packets going. */
7455 ifp->if_start(ifp);
7456 }
7457
7458 return handled;
7459 }
7460
7461 /*
7462 * wm_txintr_msix:
7463 *
7464 * Interrupt service routine for TX complete interrupt for MSI-X.
7465 */
7466 static int
7467 wm_txintr_msix(void *arg)
7468 {
7469 struct wm_txqueue *txq = arg;
7470 struct wm_softc *sc = txq->txq_sc;
7471 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7472 int handled = 0;
7473
7474 DPRINTF(WM_DEBUG_TX,
7475 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
7476
7477 if (sc->sc_type == WM_T_82574)
7478 CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(txq->txq_id)); /* 82574 only */
7479 else if (sc->sc_type == WM_T_82575)
7480 CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(txq->txq_id));
7481 else
7482 CSR_WRITE(sc, WMREG_EIMC, 1 << txq->txq_intr_idx);
7483
7484 WM_TX_LOCK(txq);
7485
7486 if (sc->sc_stopping)
7487 goto out;
7488
7489 WM_EVCNT_INCR(&sc->sc_ev_txdw);
7490 handled = wm_txeof(sc);
7491
7492 out:
7493 WM_TX_UNLOCK(txq);
7494
7495 if (sc->sc_type == WM_T_82574)
7496 CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(txq->txq_id)); /* 82574 only */
7497 else if (sc->sc_type == WM_T_82575)
7498 CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(txq->txq_id));
7499 else
7500 CSR_WRITE(sc, WMREG_EIMS, 1 << txq->txq_intr_idx);
7501
7502 if (handled) {
7503 /* Try to get more packets going. */
7504 ifp->if_start(ifp);
7505 }
7506
7507 return handled;
7508 }
7509
7510 /*
7511 * wm_rxintr_msix:
7512 *
7513 * Interrupt service routine for RX interrupt for MSI-X.
7514 */
7515 static int
7516 wm_rxintr_msix(void *arg)
7517 {
7518 struct wm_rxqueue *rxq = arg;
7519 struct wm_softc *sc = rxq->rxq_sc;
7520
7521 DPRINTF(WM_DEBUG_RX,
7522 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
7523
7524 if (sc->sc_type == WM_T_82574)
7525 CSR_WRITE(sc, WMREG_IMC, ICR_RXQ(rxq->rxq_id)); /* 82574 only */
7526 else if (sc->sc_type == WM_T_82575)
7527 CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE(rxq->rxq_id));
7528 else
7529 CSR_WRITE(sc, WMREG_EIMC, 1 << rxq->rxq_intr_idx);
7530
7531 WM_RX_LOCK(rxq);
7532
7533 if (sc->sc_stopping)
7534 goto out;
7535
7536 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
7537 wm_rxeof(rxq);
7538
7539 out:
7540 WM_RX_UNLOCK(rxq);
7541
7542 if (sc->sc_type == WM_T_82574)
7543 CSR_WRITE(sc, WMREG_IMS, ICR_RXQ(rxq->rxq_id));
7544 else if (sc->sc_type == WM_T_82575)
7545 CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE(rxq->rxq_id));
7546 else
7547 CSR_WRITE(sc, WMREG_EIMS, 1 << rxq->rxq_intr_idx);
7548
7549 return 1;
7550 }
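
/*
 * The Tx and Rx MSI-X handlers above share one shape: mask this queue's
 * vector, service the ring under the queue lock, then unmask so the next
 * completion raises a fresh interrupt.  A condensed, illustrative sketch
 * (the EIMC/EIMS form shown is the post-82575 case):
 */
#if 0
static int
wm_queue_intr_sketch(struct wm_softc *sc, int intr_idx)
{

	CSR_WRITE(sc, WMREG_EIMC, 1 << intr_idx);	/* mask the vector */
	/* ... take the queue lock and process descriptors ... */
	CSR_WRITE(sc, WMREG_EIMS, 1 << intr_idx);	/* unmask it again */
	return 1;
}
#endif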
7551
7552 /*
7553 * wm_linkintr_msix:
7554 *
7555 * Interrupt service routine for link status change for MSI-X.
7556 */
7557 static int
7558 wm_linkintr_msix(void *arg)
7559 {
7560 struct wm_softc *sc = arg;
7561 uint32_t reg;
7562
7563 DPRINTF(WM_DEBUG_LINK,
7564 ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
7565
7566 reg = CSR_READ(sc, WMREG_ICR);
7567 WM_CORE_LOCK(sc);
7568 if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
7569 goto out;
7570
7571 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7572 wm_linkintr(sc, ICR_LSC);
7573
7574 out:
7575 WM_CORE_UNLOCK(sc);
7576
7577 if (sc->sc_type == WM_T_82574)
7578 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); /* 82574 only */
7579 else if (sc->sc_type == WM_T_82575)
7580 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
7581 else
7582 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
7583
7584 return 1;
7585 }
7586
7587 /*
7588 * Media related.
7589 * GMII, SGMII, TBI (and SERDES)
7590 */
7591
7592 /* Common */
7593
7594 /*
7595 * wm_tbi_serdes_set_linkled:
7596 *
7597 * Update the link LED on TBI and SERDES devices.
7598 */
7599 static void
7600 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
7601 {
7602
7603 if (sc->sc_tbi_linkup)
7604 sc->sc_ctrl |= CTRL_SWDPIN(0);
7605 else
7606 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7607
7608 /* 82540 or newer devices are active low */
7609 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7610
7611 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7612 }
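
/*
 * Illustrative restatement of the XOR trick above: XOR-ing with the pin
 * mask inverts the bit, so one unconditional expression handles both
 * active-high (pre-82540) and active-low (82540 and newer) LED wirings.
 * "linkup" and "active_low" are hypothetical locals.
 */
#if 0
	uint32_t led;

	led = linkup ? CTRL_SWDPIN(0) : 0;
	led ^= active_low ? CTRL_SWDPIN(0) : 0;	/* invert if active-low */
#endif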
7613
7614 /* GMII related */
7615
7616 /*
7617 * wm_gmii_reset:
7618 *
7619 * Reset the PHY.
7620 */
7621 static void
7622 wm_gmii_reset(struct wm_softc *sc)
7623 {
7624 uint32_t reg;
7625 int rv;
7626
7627 /* get phy semaphore */
7628 switch (sc->sc_type) {
7629 case WM_T_82571:
7630 case WM_T_82572:
7631 case WM_T_82573:
7632 case WM_T_82574:
7633 case WM_T_82583:
7634 /* XXX should get sw semaphore, too */
7635 rv = wm_get_swsm_semaphore(sc);
7636 break;
7637 case WM_T_82575:
7638 case WM_T_82576:
7639 case WM_T_82580:
7640 case WM_T_I350:
7641 case WM_T_I354:
7642 case WM_T_I210:
7643 case WM_T_I211:
7644 case WM_T_80003:
7645 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7646 break;
7647 case WM_T_ICH8:
7648 case WM_T_ICH9:
7649 case WM_T_ICH10:
7650 case WM_T_PCH:
7651 case WM_T_PCH2:
7652 case WM_T_PCH_LPT:
7653 rv = wm_get_swfwhw_semaphore(sc);
7654 break;
7655 default:
7656 		/* nothing to do */
7657 rv = 0;
7658 break;
7659 }
7660 if (rv != 0) {
7661 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7662 __func__);
7663 return;
7664 }
7665
7666 switch (sc->sc_type) {
7667 case WM_T_82542_2_0:
7668 case WM_T_82542_2_1:
7669 /* null */
7670 break;
7671 case WM_T_82543:
7672 /*
7673 * With 82543, we need to force speed and duplex on the MAC
7674 * equal to what the PHY speed and duplex configuration is.
7675 * In addition, we need to perform a hardware reset on the PHY
7676 * to take it out of reset.
7677 */
7678 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
7679 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7680
7681 /* The PHY reset pin is active-low. */
7682 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7683 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
7684 CTRL_EXT_SWDPIN(4));
7685 reg |= CTRL_EXT_SWDPIO(4);
7686
7687 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7688 CSR_WRITE_FLUSH(sc);
7689 delay(10*1000);
7690
7691 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
7692 CSR_WRITE_FLUSH(sc);
7693 delay(150);
7694 #if 0
7695 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
7696 #endif
7697 delay(20*1000); /* XXX extra delay to get PHY ID? */
7698 break;
7699 case WM_T_82544: /* reset 10000us */
7700 case WM_T_82540:
7701 case WM_T_82545:
7702 case WM_T_82545_3:
7703 case WM_T_82546:
7704 case WM_T_82546_3:
7705 case WM_T_82541:
7706 case WM_T_82541_2:
7707 case WM_T_82547:
7708 case WM_T_82547_2:
7709 case WM_T_82571: /* reset 100us */
7710 case WM_T_82572:
7711 case WM_T_82573:
7712 case WM_T_82574:
7713 case WM_T_82575:
7714 case WM_T_82576:
7715 case WM_T_82580:
7716 case WM_T_I350:
7717 case WM_T_I354:
7718 case WM_T_I210:
7719 case WM_T_I211:
7720 case WM_T_82583:
7721 case WM_T_80003:
7722 /* generic reset */
7723 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7724 CSR_WRITE_FLUSH(sc);
7725 delay(20000);
7726 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7727 CSR_WRITE_FLUSH(sc);
7728 delay(20000);
7729
7730 if ((sc->sc_type == WM_T_82541)
7731 || (sc->sc_type == WM_T_82541_2)
7732 || (sc->sc_type == WM_T_82547)
7733 || (sc->sc_type == WM_T_82547_2)) {
7734 			/* Workarounds for IGP PHYs are done in igp_reset() */
7735 /* XXX add code to set LED after phy reset */
7736 }
7737 break;
7738 case WM_T_ICH8:
7739 case WM_T_ICH9:
7740 case WM_T_ICH10:
7741 case WM_T_PCH:
7742 case WM_T_PCH2:
7743 case WM_T_PCH_LPT:
7744 /* generic reset */
7745 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7746 CSR_WRITE_FLUSH(sc);
7747 delay(100);
7748 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7749 CSR_WRITE_FLUSH(sc);
7750 delay(150);
7751 break;
7752 default:
7753 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
7754 __func__);
7755 break;
7756 }
7757
7758 /* release PHY semaphore */
7759 switch (sc->sc_type) {
7760 case WM_T_82571:
7761 case WM_T_82572:
7762 case WM_T_82573:
7763 case WM_T_82574:
7764 case WM_T_82583:
7765 /* XXX should put sw semaphore, too */
7766 wm_put_swsm_semaphore(sc);
7767 break;
7768 case WM_T_82575:
7769 case WM_T_82576:
7770 case WM_T_82580:
7771 case WM_T_I350:
7772 case WM_T_I354:
7773 case WM_T_I210:
7774 case WM_T_I211:
7775 case WM_T_80003:
7776 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7777 break;
7778 case WM_T_ICH8:
7779 case WM_T_ICH9:
7780 case WM_T_ICH10:
7781 case WM_T_PCH:
7782 case WM_T_PCH2:
7783 case WM_T_PCH_LPT:
7784 wm_put_swfwhw_semaphore(sc);
7785 break;
7786 default:
7787 		/* nothing to do */
7788 		break;
7790 }
7791
7792 /* get_cfg_done */
7793 wm_get_cfg_done(sc);
7794
7795 /* extra setup */
7796 switch (sc->sc_type) {
7797 case WM_T_82542_2_0:
7798 case WM_T_82542_2_1:
7799 case WM_T_82543:
7800 case WM_T_82544:
7801 case WM_T_82540:
7802 case WM_T_82545:
7803 case WM_T_82545_3:
7804 case WM_T_82546:
7805 case WM_T_82546_3:
7806 case WM_T_82541_2:
7807 case WM_T_82547_2:
7808 case WM_T_82571:
7809 case WM_T_82572:
7810 case WM_T_82573:
7811 case WM_T_82575:
7812 case WM_T_82576:
7813 case WM_T_82580:
7814 case WM_T_I350:
7815 case WM_T_I354:
7816 case WM_T_I210:
7817 case WM_T_I211:
7818 case WM_T_80003:
7819 /* null */
7820 break;
7821 case WM_T_82574:
7822 case WM_T_82583:
7823 wm_lplu_d0_disable(sc);
7824 break;
7825 case WM_T_82541:
7826 case WM_T_82547:
7827 		/* XXX Actively configure the LED after PHY reset */
7828 break;
7829 case WM_T_ICH8:
7830 case WM_T_ICH9:
7831 case WM_T_ICH10:
7832 case WM_T_PCH:
7833 case WM_T_PCH2:
7834 case WM_T_PCH_LPT:
7835 		/* Allow time for h/w to get to a quiescent state after reset */
7836 delay(10*1000);
7837
7838 if (sc->sc_type == WM_T_PCH)
7839 wm_hv_phy_workaround_ich8lan(sc);
7840
7841 if (sc->sc_type == WM_T_PCH2)
7842 wm_lv_phy_workaround_ich8lan(sc);
7843
7844 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
7845 /*
7846 * dummy read to clear the phy wakeup bit after lcd
7847 * reset
7848 */
7849 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
7850 }
7851
7852 /*
7853 		 * XXX Configure the LCD with the extended configuration region
7854 * in NVM
7855 */
7856
7857 /* Disable D0 LPLU. */
7858 if (sc->sc_type >= WM_T_PCH) /* PCH* */
7859 wm_lplu_d0_disable_pch(sc);
7860 else
7861 wm_lplu_d0_disable(sc); /* ICH* */
7862 break;
7863 default:
7864 panic("%s: unknown type\n", __func__);
7865 break;
7866 }
7867 }
7868
7869 /*
7870 * wm_get_phy_id_82575:
7871 *
7872 * Return PHY ID. Return -1 if it failed.
7873 */
7874 static int
7875 wm_get_phy_id_82575(struct wm_softc *sc)
7876 {
7877 uint32_t reg;
7878 int phyid = -1;
7879
7880 /* XXX */
7881 if ((sc->sc_flags & WM_F_SGMII) == 0)
7882 return -1;
7883
7884 if (wm_sgmii_uses_mdio(sc)) {
7885 switch (sc->sc_type) {
7886 case WM_T_82575:
7887 case WM_T_82576:
7888 reg = CSR_READ(sc, WMREG_MDIC);
7889 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
7890 break;
7891 case WM_T_82580:
7892 case WM_T_I350:
7893 case WM_T_I354:
7894 case WM_T_I210:
7895 case WM_T_I211:
7896 reg = CSR_READ(sc, WMREG_MDICNFG);
7897 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
7898 break;
7899 default:
7900 return -1;
7901 }
7902 }
7903
7904 return phyid;
7905 }
7906
7908 /*
7909 * wm_gmii_mediainit:
7910 *
7911 * Initialize media for use on 1000BASE-T devices.
7912 */
7913 static void
7914 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
7915 {
7916 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7917 struct mii_data *mii = &sc->sc_mii;
7918 uint32_t reg;
7919
7920 /* We have GMII. */
7921 sc->sc_flags |= WM_F_HAS_MII;
7922
7923 if (sc->sc_type == WM_T_80003)
7924 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7925 else
7926 sc->sc_tipg = TIPG_1000T_DFLT;
7927
7928 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
7929 if ((sc->sc_type == WM_T_82580)
7930 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
7931 || (sc->sc_type == WM_T_I211)) {
7932 reg = CSR_READ(sc, WMREG_PHPM);
7933 reg &= ~PHPM_GO_LINK_D;
7934 CSR_WRITE(sc, WMREG_PHPM, reg);
7935 }
7936
7937 /*
7938 * Let the chip set speed/duplex on its own based on
7939 * signals from the PHY.
7940 * XXXbouyer - I'm not sure this is right for the 80003,
7941 * the em driver only sets CTRL_SLU here - but it seems to work.
7942 */
7943 sc->sc_ctrl |= CTRL_SLU;
7944 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7945
7946 /* Initialize our media structures and probe the GMII. */
7947 mii->mii_ifp = ifp;
7948
7949 /*
7950 * Determine the PHY access method.
7951 *
7952 * For SGMII, use SGMII specific method.
7953 *
7954 * For some devices, we can determine the PHY access method
7955 * from sc_type.
7956 *
7957 * For ICH and PCH variants, it's difficult to determine the PHY
7958 * access method by sc_type, so use the PCI product ID for some
7959 * devices.
7960 * For other ICH8 variants, try to use igp's method. If the PHY
7961 * can't detect, then use bm's method.
7962 */
7963 switch (prodid) {
7964 case PCI_PRODUCT_INTEL_PCH_M_LM:
7965 case PCI_PRODUCT_INTEL_PCH_M_LC:
7966 /* 82577 */
7967 sc->sc_phytype = WMPHY_82577;
7968 break;
7969 case PCI_PRODUCT_INTEL_PCH_D_DM:
7970 case PCI_PRODUCT_INTEL_PCH_D_DC:
7971 /* 82578 */
7972 sc->sc_phytype = WMPHY_82578;
7973 break;
7974 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
7975 case PCI_PRODUCT_INTEL_PCH2_LV_V:
7976 /* 82579 */
7977 sc->sc_phytype = WMPHY_82579;
7978 break;
7979 case PCI_PRODUCT_INTEL_82801I_BM:
7980 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
7981 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
7982 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
7983 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
7984 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
7985 /* 82567 */
7986 sc->sc_phytype = WMPHY_BM;
7987 mii->mii_readreg = wm_gmii_bm_readreg;
7988 mii->mii_writereg = wm_gmii_bm_writereg;
7989 break;
7990 default:
7991 if (((sc->sc_flags & WM_F_SGMII) != 0)
7992 && !wm_sgmii_uses_mdio(sc)){
7993 /* SGMII */
7994 mii->mii_readreg = wm_sgmii_readreg;
7995 mii->mii_writereg = wm_sgmii_writereg;
7996 } else if (sc->sc_type >= WM_T_80003) {
7997 /* 80003 */
7998 mii->mii_readreg = wm_gmii_i80003_readreg;
7999 mii->mii_writereg = wm_gmii_i80003_writereg;
8000 } else if (sc->sc_type >= WM_T_I210) {
8001 /* I210 and I211 */
8002 mii->mii_readreg = wm_gmii_gs40g_readreg;
8003 mii->mii_writereg = wm_gmii_gs40g_writereg;
8004 } else if (sc->sc_type >= WM_T_82580) {
8005 /* 82580, I350 and I354 */
8006 sc->sc_phytype = WMPHY_82580;
8007 mii->mii_readreg = wm_gmii_82580_readreg;
8008 mii->mii_writereg = wm_gmii_82580_writereg;
8009 } else if (sc->sc_type >= WM_T_82544) {
8010 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
8011 mii->mii_readreg = wm_gmii_i82544_readreg;
8012 mii->mii_writereg = wm_gmii_i82544_writereg;
8013 } else {
8014 mii->mii_readreg = wm_gmii_i82543_readreg;
8015 mii->mii_writereg = wm_gmii_i82543_writereg;
8016 }
8017 break;
8018 }
8019 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
8020 /* All PCH* use _hv_ */
8021 mii->mii_readreg = wm_gmii_hv_readreg;
8022 mii->mii_writereg = wm_gmii_hv_writereg;
8023 }
8024 mii->mii_statchg = wm_gmii_statchg;
8025
8026 wm_gmii_reset(sc);
8027
8028 sc->sc_ethercom.ec_mii = &sc->sc_mii;
8029 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
8030 wm_gmii_mediastatus);
8031
8032 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
8033 || (sc->sc_type == WM_T_82580)
8034 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
8035 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
8036 if ((sc->sc_flags & WM_F_SGMII) == 0) {
8037 /* Attach only one port */
8038 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
8039 MII_OFFSET_ANY, MIIF_DOPAUSE);
8040 } else {
8041 int i, id;
8042 uint32_t ctrl_ext;
8043
8044 id = wm_get_phy_id_82575(sc);
8045 if (id != -1) {
8046 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
8047 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
8048 }
8049 if ((id == -1)
8050 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
8051 /* Power on sgmii phy if it is disabled */
8052 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8053 CSR_WRITE(sc, WMREG_CTRL_EXT,
8054 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
8055 CSR_WRITE_FLUSH(sc);
8056 delay(300*1000); /* XXX too long */
8057
8058 				/* Try PHY addresses from 1 to 7 */
8059 for (i = 1; i < 8; i++)
8060 mii_attach(sc->sc_dev, &sc->sc_mii,
8061 0xffffffff, i, MII_OFFSET_ANY,
8062 MIIF_DOPAUSE);
8063
8064 /* restore previous sfp cage power state */
8065 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8066 }
8067 }
8068 } else {
8069 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8070 MII_OFFSET_ANY, MIIF_DOPAUSE);
8071 }
8072
8073 /*
8074 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
8075 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
8076 */
8077 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
8078 (LIST_FIRST(&mii->mii_phys) == NULL)) {
8079 wm_set_mdio_slow_mode_hv(sc);
8080 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8081 MII_OFFSET_ANY, MIIF_DOPAUSE);
8082 }
8083
8084 /*
8085 * (For ICH8 variants)
8086 * If PHY detection failed, use BM's r/w function and retry.
8087 */
8088 if (LIST_FIRST(&mii->mii_phys) == NULL) {
8089 /* if failed, retry with *_bm_* */
8090 mii->mii_readreg = wm_gmii_bm_readreg;
8091 mii->mii_writereg = wm_gmii_bm_writereg;
8092
8093 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8094 MII_OFFSET_ANY, MIIF_DOPAUSE);
8095 }
8096
8097 if (LIST_FIRST(&mii->mii_phys) == NULL) {
8098 		/* No PHY was found */
8099 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
8100 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
8101 sc->sc_phytype = WMPHY_NONE;
8102 } else {
8103 /*
8104 * PHY Found!
8105 * Check PHY type.
8106 */
8107 uint32_t model;
8108 struct mii_softc *child;
8109
8110 child = LIST_FIRST(&mii->mii_phys);
8111 model = child->mii_mpd_model;
8112 if (model == MII_MODEL_yyINTEL_I82566)
8113 sc->sc_phytype = WMPHY_IGP_3;
8114
8115 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
8116 }
8117 }
8118
8119 /*
8120 * wm_gmii_mediachange: [ifmedia interface function]
8121 *
8122 * Set hardware to newly-selected media on a 1000BASE-T device.
8123 */
8124 static int
8125 wm_gmii_mediachange(struct ifnet *ifp)
8126 {
8127 struct wm_softc *sc = ifp->if_softc;
8128 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8129 int rc;
8130
8131 if ((ifp->if_flags & IFF_UP) == 0)
8132 return 0;
8133
8134 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
8135 sc->sc_ctrl |= CTRL_SLU;
8136 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8137 || (sc->sc_type > WM_T_82543)) {
8138 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
8139 } else {
8140 sc->sc_ctrl &= ~CTRL_ASDE;
8141 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
8142 if (ife->ifm_media & IFM_FDX)
8143 sc->sc_ctrl |= CTRL_FD;
8144 switch (IFM_SUBTYPE(ife->ifm_media)) {
8145 case IFM_10_T:
8146 sc->sc_ctrl |= CTRL_SPEED_10;
8147 break;
8148 case IFM_100_TX:
8149 sc->sc_ctrl |= CTRL_SPEED_100;
8150 break;
8151 case IFM_1000_T:
8152 sc->sc_ctrl |= CTRL_SPEED_1000;
8153 break;
8154 default:
8155 panic("wm_gmii_mediachange: bad media 0x%x",
8156 ife->ifm_media);
8157 }
8158 }
8159 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8160 if (sc->sc_type <= WM_T_82543)
8161 wm_gmii_reset(sc);
8162
8163 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
8164 return 0;
8165 return rc;
8166 }
8167
8168 /*
8169 * wm_gmii_mediastatus: [ifmedia interface function]
8170 *
8171 * Get the current interface media status on a 1000BASE-T device.
8172 */
8173 static void
8174 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8175 {
8176 struct wm_softc *sc = ifp->if_softc;
8177
8178 ether_mediastatus(ifp, ifmr);
8179 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8180 | sc->sc_flowflags;
8181 }
8182
8183 #define MDI_IO CTRL_SWDPIN(2)
8184 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
8185 #define MDI_CLK CTRL_SWDPIN(3)
8186
8187 static void
8188 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
8189 {
8190 uint32_t i, v;
8191
8192 v = CSR_READ(sc, WMREG_CTRL);
8193 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8194 v |= MDI_DIR | CTRL_SWDPIO(3);
8195
8196 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
8197 if (data & i)
8198 v |= MDI_IO;
8199 else
8200 v &= ~MDI_IO;
8201 CSR_WRITE(sc, WMREG_CTRL, v);
8202 CSR_WRITE_FLUSH(sc);
8203 delay(10);
8204 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8205 CSR_WRITE_FLUSH(sc);
8206 delay(10);
8207 CSR_WRITE(sc, WMREG_CTRL, v);
8208 CSR_WRITE_FLUSH(sc);
8209 delay(10);
8210 }
8211 }
8212
8213 static uint32_t
8214 wm_i82543_mii_recvbits(struct wm_softc *sc)
8215 {
8216 uint32_t v, i, data = 0;
8217
8218 v = CSR_READ(sc, WMREG_CTRL);
8219 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8220 v |= CTRL_SWDPIO(3);
8221
8222 CSR_WRITE(sc, WMREG_CTRL, v);
8223 CSR_WRITE_FLUSH(sc);
8224 delay(10);
8225 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8226 CSR_WRITE_FLUSH(sc);
8227 delay(10);
8228 CSR_WRITE(sc, WMREG_CTRL, v);
8229 CSR_WRITE_FLUSH(sc);
8230 delay(10);
8231
8232 for (i = 0; i < 16; i++) {
8233 data <<= 1;
8234 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8235 CSR_WRITE_FLUSH(sc);
8236 delay(10);
8237 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
8238 data |= 1;
8239 CSR_WRITE(sc, WMREG_CTRL, v);
8240 CSR_WRITE_FLUSH(sc);
8241 delay(10);
8242 }
8243
8244 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8245 CSR_WRITE_FLUSH(sc);
8246 delay(10);
8247 CSR_WRITE(sc, WMREG_CTRL, v);
8248 CSR_WRITE_FLUSH(sc);
8249 delay(10);
8250
8251 return data;
8252 }
8253
8254 #undef MDI_IO
8255 #undef MDI_DIR
8256 #undef MDI_CLK
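
/*
 * For reference, a sketch of the IEEE 802.3 clause 22 read frame that the
 * bit-bang helpers above shift out: a 32-bit preamble of ones, then start
 * (01), read opcode (10), a 5-bit PHY address and a 5-bit register
 * address, after which the bus turns around and 16 data bits are clocked
 * back in.  "sc", "phy" and "reg" stand in for the accessors' arguments;
 * the shift counts mirror wm_gmii_i82543_readreg() below.
 */
#if 0
	uint32_t frame;
	uint16_t data;

	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);	/* preamble */
	frame = (MII_COMMAND_START << 12) | (MII_COMMAND_READ << 10) |
	    (phy << 5) | reg;				/* 14 bits total */
	wm_i82543_mii_sendbits(sc, frame, 14);
	data = wm_i82543_mii_recvbits(sc) & 0xffff;
#endif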
8257
8258 /*
8259 * wm_gmii_i82543_readreg: [mii interface function]
8260 *
8261 * Read a PHY register on the GMII (i82543 version).
8262 */
8263 static int
8264 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
8265 {
8266 struct wm_softc *sc = device_private(self);
8267 int rv;
8268
8269 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8270 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
8271 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
8272 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
8273
8274 DPRINTF(WM_DEBUG_GMII,
8275 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
8276 device_xname(sc->sc_dev), phy, reg, rv));
8277
8278 return rv;
8279 }
8280
8281 /*
8282 * wm_gmii_i82543_writereg: [mii interface function]
8283 *
8284 * Write a PHY register on the GMII (i82543 version).
8285 */
8286 static void
8287 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
8288 {
8289 struct wm_softc *sc = device_private(self);
8290
8291 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8292 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
8293 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
8294 (MII_COMMAND_START << 30), 32);
8295 }
8296
8297 /*
8298 * wm_gmii_i82544_readreg: [mii interface function]
8299 *
8300 * Read a PHY register on the GMII.
8301 */
8302 static int
8303 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
8304 {
8305 struct wm_softc *sc = device_private(self);
8306 uint32_t mdic = 0;
8307 int i, rv;
8308
8309 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
8310 MDIC_REGADD(reg));
8311
8312 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8313 mdic = CSR_READ(sc, WMREG_MDIC);
8314 if (mdic & MDIC_READY)
8315 break;
8316 delay(50);
8317 }
8318
8319 if ((mdic & MDIC_READY) == 0) {
8320 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
8321 device_xname(sc->sc_dev), phy, reg);
8322 rv = 0;
8323 } else if (mdic & MDIC_E) {
8324 #if 0 /* This is normal if no PHY is present. */
8325 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
8326 device_xname(sc->sc_dev), phy, reg);
8327 #endif
8328 rv = 0;
8329 } else {
8330 rv = MDIC_DATA(mdic);
8331 if (rv == 0xffff)
8332 rv = 0;
8333 }
8334
8335 return rv;
8336 }
8337
8338 /*
8339 * wm_gmii_i82544_writereg: [mii interface function]
8340 *
8341 * Write a PHY register on the GMII.
8342 */
8343 static void
8344 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
8345 {
8346 struct wm_softc *sc = device_private(self);
8347 uint32_t mdic = 0;
8348 int i;
8349
8350 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
8351 MDIC_REGADD(reg) | MDIC_DATA(val));
8352
8353 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8354 mdic = CSR_READ(sc, WMREG_MDIC);
8355 if (mdic & MDIC_READY)
8356 break;
8357 delay(50);
8358 }
8359
8360 if ((mdic & MDIC_READY) == 0)
8361 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
8362 device_xname(sc->sc_dev), phy, reg);
8363 else if (mdic & MDIC_E)
8364 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
8365 device_xname(sc->sc_dev), phy, reg);
8366 }
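
/*
 * Both MDIC accessors above use the same poll-until-ready idiom.  A
 * generic sketch of that idiom, with an illustrative name; the real
 * callers also distinguish the MDIC_E error bit from a timeout.
 */
#if 0
static uint32_t
wm_mdic_wait_ready_sketch(struct wm_softc *sc)
{
	uint32_t mdic = 0;
	int i;

	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(50);
	}
	return mdic;	/* caller checks MDIC_READY and MDIC_E */
}
#endif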
8367
8368 /*
8369 * wm_gmii_i80003_readreg: [mii interface function]
8370 *
8371 * Read a PHY register on the Kumeran bus.
8372 * This could be handled by the PHY layer if we didn't have to lock the
8373 * resource ...
8374 */
8375 static int
8376 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
8377 {
8378 struct wm_softc *sc = device_private(self);
8379 int sem;
8380 int rv;
8381
8382 if (phy != 1) /* only one PHY on kumeran bus */
8383 return 0;
8384
8385 sem = swfwphysem[sc->sc_funcid];
8386 if (wm_get_swfw_semaphore(sc, sem)) {
8387 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8388 __func__);
8389 return 0;
8390 }
8391
8392 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8393 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8394 reg >> GG82563_PAGE_SHIFT);
8395 } else {
8396 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8397 reg >> GG82563_PAGE_SHIFT);
8398 }
8399 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
8400 delay(200);
8401 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8402 delay(200);
8403
8404 wm_put_swfw_semaphore(sc, sem);
8405 return rv;
8406 }
8407
8408 /*
8409 * wm_gmii_i80003_writereg: [mii interface function]
8410 *
8411 * Write a PHY register on the Kumeran bus.
8412 * This could be handled by the PHY layer if we didn't have to lock the
8413 * resource ...
8414 */
8415 static void
8416 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
8417 {
8418 struct wm_softc *sc = device_private(self);
8419 int sem;
8420
8421 if (phy != 1) /* only one PHY on kumeran bus */
8422 return;
8423
8424 sem = swfwphysem[sc->sc_funcid];
8425 if (wm_get_swfw_semaphore(sc, sem)) {
8426 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8427 __func__);
8428 return;
8429 }
8430
8431 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8432 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8433 reg >> GG82563_PAGE_SHIFT);
8434 } else {
8435 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8436 reg >> GG82563_PAGE_SHIFT);
8437 }
8438 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
8439 delay(200);
8440 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8441 delay(200);
8442
8443 wm_put_swfw_semaphore(sc, sem);
8444 }
8445
8446 /*
8447 * wm_gmii_bm_readreg: [mii interface function]
8448 *
8449 * Read a PHY register on the BM PHY (e.g. 82567).
8450 * This could be handled by the PHY layer if we didn't have to lock the
8451 * resource ...
8452 */
8453 static int
8454 wm_gmii_bm_readreg(device_t self, int phy, int reg)
8455 {
8456 struct wm_softc *sc = device_private(self);
8457 int sem;
8458 int rv;
8459
8460 sem = swfwphysem[sc->sc_funcid];
8461 if (wm_get_swfw_semaphore(sc, sem)) {
8462 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8463 __func__);
8464 return 0;
8465 }
8466
8467 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8468 if (phy == 1)
8469 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
8470 reg);
8471 else
8472 wm_gmii_i82544_writereg(self, phy,
8473 GG82563_PHY_PAGE_SELECT,
8474 reg >> GG82563_PAGE_SHIFT);
8475 }
8476
8477 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8478 wm_put_swfw_semaphore(sc, sem);
8479 return rv;
8480 }
8481
8482 /*
8483 * wm_gmii_bm_writereg: [mii interface function]
8484 *
8485 * Write a PHY register on the BM PHY (e.g. 82567).
8486 * This could be handled by the PHY layer if we didn't have to lock the
8487 * resource ...
8488 */
8489 static void
8490 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
8491 {
8492 struct wm_softc *sc = device_private(self);
8493 int sem;
8494
8495 sem = swfwphysem[sc->sc_funcid];
8496 if (wm_get_swfw_semaphore(sc, sem)) {
8497 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8498 __func__);
8499 return;
8500 }
8501
8502 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8503 if (phy == 1)
8504 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
8505 reg);
8506 else
8507 wm_gmii_i82544_writereg(self, phy,
8508 GG82563_PHY_PAGE_SELECT,
8509 reg >> GG82563_PAGE_SHIFT);
8510 }
8511
8512 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8513 wm_put_swfw_semaphore(sc, sem);
8514 }
8515
8516 static void
8517 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
8518 {
8519 struct wm_softc *sc = device_private(self);
8520 uint16_t regnum = BM_PHY_REG_NUM(offset);
8521 uint16_t wuce;
8522
8523 /* XXX Gig must be disabled for MDIO accesses to page 800 */
8524 if (sc->sc_type == WM_T_PCH) {
8525 		/* XXX The e1000 driver does nothing here... why? */
8526 }
8527
8528 /* Set page 769 */
8529 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8530 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8531
8532 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
8533
8534 wuce &= ~BM_WUC_HOST_WU_BIT;
8535 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
8536 wuce | BM_WUC_ENABLE_BIT);
8537
8538 /* Select page 800 */
8539 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8540 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
8541
8542 /* Write page 800 */
8543 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
8544
8545 if (rd)
8546 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
8547 else
8548 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
8549
8550 /* Set page 769 */
8551 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8552 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8553
8554 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
8555 }
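
/*
 * Illustrative use of the helper above: a read of the BM_WUC wakeup
 * register goes through the whole page-769 enable / page-800 access /
 * page-769 restore sequence that the helper encapsulates.
 */
#if 0
	int16_t wuc;

	wm_access_phy_wakeup_reg_bm(sc->sc_dev, BM_WUC, &wuc, 1); /* read */
#endif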
8556
8557 /*
8558 * wm_gmii_hv_readreg: [mii interface function]
8559 *
8560 * Read a PHY register on the HV PHY (82577 and later PCH variants).
8561 * This could be handled by the PHY layer if we didn't have to lock the
8562 * resource ...
8563 */
8564 static int
8565 wm_gmii_hv_readreg(device_t self, int phy, int reg)
8566 {
8567 struct wm_softc *sc = device_private(self);
8568 uint16_t page = BM_PHY_REG_PAGE(reg);
8569 uint16_t regnum = BM_PHY_REG_NUM(reg);
8570 uint16_t val;
8571 int rv;
8572
8573 if (wm_get_swfwhw_semaphore(sc)) {
8574 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8575 __func__);
8576 return 0;
8577 }
8578
8579 /* XXX Workaround failure in MDIO access while cable is disconnected */
8580 if (sc->sc_phytype == WMPHY_82577) {
8581 /* XXX must write */
8582 }
8583
8584 /* Page 800 works differently than the rest so it has its own func */
8585 if (page == BM_WUC_PAGE) {
8586 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);
8587 		return val;
8588 	}
8589
8590 /*
8591 	 * Pages lower than 768 work differently than the rest, so they would
8592 	 * need their own function (not implemented here)
8593 */
8594 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8595 		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);
8596 		return 0;
8597 	}
8598
8599 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8600 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8601 page << BME1000_PAGE_SHIFT);
8602 }
8603
8604 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
8605 wm_put_swfwhw_semaphore(sc);
8606 return rv;
8607 }
8608
8609 /*
8610 * wm_gmii_hv_writereg: [mii interface function]
8611 *
8612 * Write a PHY register on the HV PHY (82577 and later PCH variants).
8613 * This could be handled by the PHY layer if we didn't have to lock the
8614 * resource ...
8615 */
8616 static void
8617 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
8618 {
8619 struct wm_softc *sc = device_private(self);
8620 uint16_t page = BM_PHY_REG_PAGE(reg);
8621 uint16_t regnum = BM_PHY_REG_NUM(reg);
8622
8623 if (wm_get_swfwhw_semaphore(sc)) {
8624 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8625 __func__);
8626 return;
8627 }
8628
8629 /* XXX Workaround failure in MDIO access while cable is disconnected */
8630
8631 /* Page 800 works differently than the rest so it has its own func */
8632 if (page == BM_WUC_PAGE) {
8633 uint16_t tmp;
8634
8635 tmp = val;
8636 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);
8637 		return;
8638 	}
8639
8640 /*
8641 	 * Pages lower than 768 work differently than the rest, so they would
8642 	 * need their own function (not implemented here)
8643 */
8644 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8645 		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);
8646 		return;
8647 	}
8648
8649 /*
8650 * XXX Workaround MDIO accesses being disabled after entering IEEE
8651 * Power Down (whenever bit 11 of the PHY control register is set)
8652 */
8653
8654 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8655 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8656 page << BME1000_PAGE_SHIFT);
8657 }
8658
8659 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
8660 wm_put_swfwhw_semaphore(sc);
8661 }
8662
8663 /*
8664 * wm_gmii_82580_readreg: [mii interface function]
8665 *
8666 * Read a PHY register on the 82580 and I350.
8667 * This could be handled by the PHY layer if we didn't have to lock the
8668 * resource ...
8669 */
8670 static int
8671 wm_gmii_82580_readreg(device_t self, int phy, int reg)
8672 {
8673 struct wm_softc *sc = device_private(self);
8674 int sem;
8675 int rv;
8676
8677 sem = swfwphysem[sc->sc_funcid];
8678 if (wm_get_swfw_semaphore(sc, sem)) {
8679 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8680 __func__);
8681 return 0;
8682 }
8683
8684 rv = wm_gmii_i82544_readreg(self, phy, reg);
8685
8686 wm_put_swfw_semaphore(sc, sem);
8687 return rv;
8688 }
8689
8690 /*
8691 * wm_gmii_82580_writereg: [mii interface function]
8692 *
8693 * Write a PHY register on the 82580 and I350.
8694 * This could be handled by the PHY layer if we didn't have to lock the
8695 * resource ...
8696 */
8697 static void
8698 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
8699 {
8700 struct wm_softc *sc = device_private(self);
8701 int sem;
8702
8703 sem = swfwphysem[sc->sc_funcid];
8704 if (wm_get_swfw_semaphore(sc, sem)) {
8705 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8706 __func__);
8707 return;
8708 }
8709
8710 wm_gmii_i82544_writereg(self, phy, reg, val);
8711
8712 wm_put_swfw_semaphore(sc, sem);
8713 }
8714
8715 /*
8716 * wm_gmii_gs40g_readreg: [mii interface function]
8717 *
8718 * Read a PHY register on the I210 and I211.
8719 * This could be handled by the PHY layer if we didn't have to lock the
8720 * resource ...
8721 */
8722 static int
8723 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
8724 {
8725 struct wm_softc *sc = device_private(self);
8726 int sem;
8727 int page, offset;
8728 int rv;
8729
8730 /* Acquire semaphore */
8731 sem = swfwphysem[sc->sc_funcid];
8732 if (wm_get_swfw_semaphore(sc, sem)) {
8733 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8734 __func__);
8735 return 0;
8736 }
8737
8738 /* Page select */
8739 page = reg >> GS40G_PAGE_SHIFT;
8740 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8741
8742 /* Read reg */
8743 offset = reg & GS40G_OFFSET_MASK;
8744 rv = wm_gmii_i82544_readreg(self, phy, offset);
8745
8746 wm_put_swfw_semaphore(sc, sem);
8747 return rv;
8748 }
8749
8750 /*
8751 * wm_gmii_gs40g_writereg: [mii interface function]
8752 *
8753 * Write a PHY register on the I210 and I211.
8754 * This could be handled by the PHY layer if we didn't have to lock the
8755 * resource ...
8756 */
8757 static void
8758 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
8759 {
8760 struct wm_softc *sc = device_private(self);
8761 int sem;
8762 int page, offset;
8763
8764 /* Acquire semaphore */
8765 sem = swfwphysem[sc->sc_funcid];
8766 if (wm_get_swfw_semaphore(sc, sem)) {
8767 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8768 __func__);
8769 return;
8770 }
8771
8772 /* Page select */
8773 page = reg >> GS40G_PAGE_SHIFT;
8774 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8775
8776 /* Write reg */
8777 offset = reg & GS40G_OFFSET_MASK;
8778 wm_gmii_i82544_writereg(self, phy, offset, val);
8779
8780 /* Release semaphore */
8781 wm_put_swfw_semaphore(sc, sem);
8782 }
8783
8784 /*
8785 * wm_gmii_statchg: [mii interface function]
8786 *
8787 * Callback from MII layer when media changes.
8788 */
8789 static void
8790 wm_gmii_statchg(struct ifnet *ifp)
8791 {
8792 struct wm_softc *sc = ifp->if_softc;
8793 struct mii_data *mii = &sc->sc_mii;
8794
8795 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
8796 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8797 sc->sc_fcrtl &= ~FCRTL_XONE;
8798
8799 /*
8800 * Get flow control negotiation result.
8801 */
8802 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
8803 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
8804 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
8805 mii->mii_media_active &= ~IFM_ETH_FMASK;
8806 }
8807
8808 if (sc->sc_flowflags & IFM_FLOW) {
8809 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
8810 sc->sc_ctrl |= CTRL_TFCE;
8811 sc->sc_fcrtl |= FCRTL_XONE;
8812 }
8813 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
8814 sc->sc_ctrl |= CTRL_RFCE;
8815 }
8816
8817 if (sc->sc_mii.mii_media_active & IFM_FDX) {
8818 DPRINTF(WM_DEBUG_LINK,
8819 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
8820 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8821 } else {
8822 DPRINTF(WM_DEBUG_LINK,
8823 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
8824 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8825 }
8826
8827 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8828 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8829 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
8830 : WMREG_FCRTL, sc->sc_fcrtl);
8831 if (sc->sc_type == WM_T_80003) {
8832 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
8833 case IFM_1000_T:
8834 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8835 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
8836 sc->sc_tipg = TIPG_1000T_80003_DFLT;
8837 break;
8838 default:
8839 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8840 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
8841 sc->sc_tipg = TIPG_10_100_80003_DFLT;
8842 break;
8843 }
8844 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
8845 }
8846 }
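
/*
 * Condensed restatement of the flow-control mapping applied above, as a
 * standalone sketch: the negotiated pause bits translate into the MAC's
 * transmit/receive flow-control enables.  "flowflags" and "ctrl" are
 * hypothetical locals; the real function also sets FCRTL_XONE for the
 * transmit-pause case.
 */
#if 0
	uint32_t ctrl = sc->sc_ctrl & ~(CTRL_TFCE | CTRL_RFCE);

	if (flowflags & IFM_ETH_TXPAUSE)
		ctrl |= CTRL_TFCE;	/* we may send PAUSE frames */
	if (flowflags & IFM_ETH_RXPAUSE)
		ctrl |= CTRL_RFCE;	/* we honor received PAUSE frames */
	CSR_WRITE(sc, WMREG_CTRL, ctrl);
#endif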
8847
8848 /*
8849 * wm_kmrn_readreg:
8850 *
8851 * Read a Kumeran register
8852 */
8853 static int
8854 wm_kmrn_readreg(struct wm_softc *sc, int reg)
8855 {
8856 int rv;
8857
8858 if (sc->sc_flags & WM_F_LOCK_SWFW) {
8859 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8860 aprint_error_dev(sc->sc_dev,
8861 "%s: failed to get semaphore\n", __func__);
8862 return 0;
8863 }
8864 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8865 if (wm_get_swfwhw_semaphore(sc)) {
8866 aprint_error_dev(sc->sc_dev,
8867 "%s: failed to get semaphore\n", __func__);
8868 return 0;
8869 }
8870 }
8871
8872 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8873 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8874 KUMCTRLSTA_REN);
8875 CSR_WRITE_FLUSH(sc);
8876 delay(2);
8877
8878 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
8879
8880 if (sc->sc_flags & WM_F_LOCK_SWFW)
8881 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8882 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8883 wm_put_swfwhw_semaphore(sc);
8884
8885 return rv;
8886 }
8887
8888 /*
8889 * wm_kmrn_writereg:
8890 *
8891 * Write a Kumeran register
8892 */
8893 static void
8894 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
8895 {
8896
8897 if (sc->sc_flags & WM_F_LOCK_SWFW) {
8898 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8899 aprint_error_dev(sc->sc_dev,
8900 "%s: failed to get semaphore\n", __func__);
8901 return;
8902 }
8903 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8904 if (wm_get_swfwhw_semaphore(sc)) {
8905 aprint_error_dev(sc->sc_dev,
8906 "%s: failed to get semaphore\n", __func__);
8907 return;
8908 }
8909 }
8910
8911 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8912 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8913 (val & KUMCTRLSTA_MASK));
8914
8915 if (sc->sc_flags & WM_F_LOCK_SWFW)
8916 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8917 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8918 wm_put_swfwhw_semaphore(sc);
8919 }
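
/*
 * Sketch of the KUMCTRLSTA cycle the two helpers above perform: the
 * register offset sits in a shifted field, the low 16 bits carry data,
 * and KUMCTRLSTA_REN turns the cycle into a read.  The HD_CTRL offset is
 * used here purely as an example.
 */
#if 0
	uint16_t val;

	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
	    ((KUMCTRLSTA_OFFSET_HD_CTRL << KUMCTRLSTA_OFFSET_SHIFT) &
		KUMCTRLSTA_OFFSET) | KUMCTRLSTA_REN);
	CSR_WRITE_FLUSH(sc);
	delay(2);
	val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
#endif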
8920
8921 /* SGMII related */
8922
8923 /*
8924 * wm_sgmii_uses_mdio
8925 *
8926 * Check whether the transaction is to the internal PHY or the external
8927 * MDIO interface. Return true if it's MDIO.
8928 */
8929 static bool
8930 wm_sgmii_uses_mdio(struct wm_softc *sc)
8931 {
8932 uint32_t reg;
8933 bool ismdio = false;
8934
8935 switch (sc->sc_type) {
8936 case WM_T_82575:
8937 case WM_T_82576:
8938 reg = CSR_READ(sc, WMREG_MDIC);
8939 ismdio = ((reg & MDIC_DEST) != 0);
8940 break;
8941 case WM_T_82580:
8942 case WM_T_I350:
8943 case WM_T_I354:
8944 case WM_T_I210:
8945 case WM_T_I211:
8946 reg = CSR_READ(sc, WMREG_MDICNFG);
8947 ismdio = ((reg & MDICNFG_DEST) != 0);
8948 break;
8949 default:
8950 break;
8951 }
8952
8953 return ismdio;
8954 }
8955
8956 /*
8957 * wm_sgmii_readreg: [mii interface function]
8958 *
8959 * Read a PHY register on the SGMII
8960 * This could be handled by the PHY layer if we didn't have to lock the
8961 * resource ...
8962 */
8963 static int
8964 wm_sgmii_readreg(device_t self, int phy, int reg)
8965 {
8966 struct wm_softc *sc = device_private(self);
8967 uint32_t i2ccmd;
8968 int i, rv;
8969
8970 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
8971 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8972 __func__);
8973 return 0;
8974 }
8975
8976 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
8977 | (phy << I2CCMD_PHY_ADDR_SHIFT)
8978 | I2CCMD_OPCODE_READ;
8979 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8980
8981 /* Poll the ready bit */
8982 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8983 delay(50);
8984 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8985 if (i2ccmd & I2CCMD_READY)
8986 break;
8987 }
8988 if ((i2ccmd & I2CCMD_READY) == 0)
8989 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
8990 if ((i2ccmd & I2CCMD_ERROR) != 0)
8991 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
8992
8993 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
8994
8995 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
8996 return rv;
8997 }
8998
8999 /*
9000 * wm_sgmii_writereg: [mii interface function]
9001 *
9002 * Write a PHY register on the SGMII.
9003 * This could be handled by the PHY layer if we didn't have to lock the
9004 * resource ...
9005 */
9006 static void
9007 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
9008 {
9009 struct wm_softc *sc = device_private(self);
9010 uint32_t i2ccmd;
9011 int i;
9012 int val_swapped;
9013
9014 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
9015 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9016 __func__);
9017 return;
9018 }
9019 /* Swap the data bytes for the I2C interface */
9020 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
9021 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9022 | (phy << I2CCMD_PHY_ADDR_SHIFT)
9023 | I2CCMD_OPCODE_WRITE | val_swapped;
9024 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9025
9026 /* Poll the ready bit */
9027 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9028 delay(50);
9029 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9030 if (i2ccmd & I2CCMD_READY)
9031 break;
9032 }
9033 if ((i2ccmd & I2CCMD_READY) == 0)
9034 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
9035 if ((i2ccmd & I2CCMD_ERROR) != 0)
9036 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9037
9038 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
9039 }
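
/*
 * The two SGMII accessors above both byte-swap the 16-bit payload because
 * the I2CCMD data field is most-significant-byte first while the host
 * value is not.  The swap in isolation (illustrative helper name):
 */
#if 0
static inline uint16_t
wm_i2c_bswap16_sketch(uint16_t v)
{

	return ((v >> 8) & 0x00ff) | ((v << 8) & 0xff00);
}
#endif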
9040
9041 /* TBI related */
9042
9043 /*
9044 * wm_tbi_mediainit:
9045 *
9046 * Initialize media for use on 1000BASE-X devices.
9047 */
9048 static void
9049 wm_tbi_mediainit(struct wm_softc *sc)
9050 {
9051 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9052 const char *sep = "";
9053
9054 if (sc->sc_type < WM_T_82543)
9055 sc->sc_tipg = TIPG_WM_DFLT;
9056 else
9057 sc->sc_tipg = TIPG_LG_DFLT;
9058
9059 sc->sc_tbi_serdes_anegticks = 5;
9060
9061 /* Initialize our media structures */
9062 sc->sc_mii.mii_ifp = ifp;
9063 sc->sc_ethercom.ec_mii = &sc->sc_mii;
9064
9065 if ((sc->sc_type >= WM_T_82575)
9066 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
9067 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9068 wm_serdes_mediachange, wm_serdes_mediastatus);
9069 else
9070 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9071 wm_tbi_mediachange, wm_tbi_mediastatus);
9072
9073 /*
9074 * SWD Pins:
9075 *
9076 * 0 = Link LED (output)
9077 * 1 = Loss Of Signal (input)
9078 */
9079 sc->sc_ctrl |= CTRL_SWDPIO(0);
9080
9081 /* XXX Perhaps this is only for TBI */
9082 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9083 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
9084
9085 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9086 sc->sc_ctrl &= ~CTRL_LRST;
9087
9088 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9089
9090 #define ADD(ss, mm, dd) \
9091 do { \
9092 aprint_normal("%s%s", sep, ss); \
9093 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
9094 sep = ", "; \
9095 } while (/*CONSTCOND*/0)
9096
9097 aprint_normal_dev(sc->sc_dev, "");
9098
9099 /* Only 82545 is LX */
9100 if (sc->sc_type == WM_T_82545) {
9101 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
9102 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
9103 } else {
9104 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
9105 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
9106 }
9107 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
9108 aprint_normal("\n");
9109
9110 #undef ADD
9111
9112 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
9113 }
9114
9115 /*
9116 * wm_tbi_mediachange: [ifmedia interface function]
9117 *
9118 * Set hardware to newly-selected media on a 1000BASE-X device.
9119 */
9120 static int
9121 wm_tbi_mediachange(struct ifnet *ifp)
9122 {
9123 struct wm_softc *sc = ifp->if_softc;
9124 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9125 uint32_t status;
9126 int i;
9127
9128 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9129 /* XXX need some work for >= 82571 and < 82575 */
9130 if (sc->sc_type < WM_T_82575)
9131 return 0;
9132 }
9133
9134 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9135 || (sc->sc_type >= WM_T_82575))
9136 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9137
9138 sc->sc_ctrl &= ~CTRL_LRST;
9139 sc->sc_txcw = TXCW_ANE;
9140 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9141 sc->sc_txcw |= TXCW_FD | TXCW_HD;
9142 else if (ife->ifm_media & IFM_FDX)
9143 sc->sc_txcw |= TXCW_FD;
9144 else
9145 sc->sc_txcw |= TXCW_HD;
9146
9147 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
9148 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
9149
9150 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
9151 device_xname(sc->sc_dev), sc->sc_txcw));
9152 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9153 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9154 CSR_WRITE_FLUSH(sc);
9155 delay(1000);
9156
9157 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
9158 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
9159
9160 /*
9161 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
9162 	 * if the optics detect a signal, 0 if they don't.
9163 */
9164 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
9165 /* Have signal; wait for the link to come up. */
9166 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
9167 delay(10000);
9168 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
9169 break;
9170 }
9171
9172 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
9173 device_xname(sc->sc_dev),i));
9174
9175 status = CSR_READ(sc, WMREG_STATUS);
9176 DPRINTF(WM_DEBUG_LINK,
9177 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
9178 device_xname(sc->sc_dev),status, STATUS_LU));
9179 if (status & STATUS_LU) {
9180 /* Link is up. */
9181 DPRINTF(WM_DEBUG_LINK,
9182 ("%s: LINK: set media -> link up %s\n",
9183 device_xname(sc->sc_dev),
9184 (status & STATUS_FD) ? "FDX" : "HDX"));
9185
9186 /*
9187 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
9188 			 * automatically, so re-read it to keep sc->sc_ctrl in sync
9189 */
9190 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9191 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9192 sc->sc_fcrtl &= ~FCRTL_XONE;
9193 if (status & STATUS_FD)
9194 sc->sc_tctl |=
9195 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9196 else
9197 sc->sc_tctl |=
9198 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9199 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
9200 sc->sc_fcrtl |= FCRTL_XONE;
9201 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9202 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9203 WMREG_OLD_FCRTL : WMREG_FCRTL,
9204 sc->sc_fcrtl);
9205 sc->sc_tbi_linkup = 1;
9206 } else {
9207 if (i == WM_LINKUP_TIMEOUT)
9208 wm_check_for_link(sc);
9209 /* Link is down. */
9210 DPRINTF(WM_DEBUG_LINK,
9211 ("%s: LINK: set media -> link down\n",
9212 device_xname(sc->sc_dev)));
9213 sc->sc_tbi_linkup = 0;
9214 }
9215 } else {
9216 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
9217 device_xname(sc->sc_dev)));
9218 sc->sc_tbi_linkup = 0;
9219 }
9220
9221 wm_tbi_serdes_set_linkled(sc);
9222
9223 return 0;
9224 }
9225
9226 /*
9227 * wm_tbi_mediastatus: [ifmedia interface function]
9228 *
9229 * Get the current interface media status on a 1000BASE-X device.
9230 */
9231 static void
9232 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9233 {
9234 struct wm_softc *sc = ifp->if_softc;
9235 uint32_t ctrl, status;
9236
9237 ifmr->ifm_status = IFM_AVALID;
9238 ifmr->ifm_active = IFM_ETHER;
9239
9240 status = CSR_READ(sc, WMREG_STATUS);
9241 if ((status & STATUS_LU) == 0) {
9242 ifmr->ifm_active |= IFM_NONE;
9243 return;
9244 }
9245
9246 ifmr->ifm_status |= IFM_ACTIVE;
9247 /* Only 82545 is LX */
9248 if (sc->sc_type == WM_T_82545)
9249 ifmr->ifm_active |= IFM_1000_LX;
9250 else
9251 ifmr->ifm_active |= IFM_1000_SX;
9252 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
9253 ifmr->ifm_active |= IFM_FDX;
9254 else
9255 ifmr->ifm_active |= IFM_HDX;
9256 ctrl = CSR_READ(sc, WMREG_CTRL);
9257 if (ctrl & CTRL_RFCE)
9258 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
9259 if (ctrl & CTRL_TFCE)
9260 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
9261 }
9262
9263 /* XXX TBI only */
9264 static int
9265 wm_check_for_link(struct wm_softc *sc)
9266 {
9267 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9268 uint32_t rxcw;
9269 uint32_t ctrl;
9270 uint32_t status;
9271 uint32_t sig;
9272
9273 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9274 /* XXX need some work for >= 82571 */
9275 if (sc->sc_type >= WM_T_82571) {
9276 sc->sc_tbi_linkup = 1;
9277 return 0;
9278 }
9279 }
9280
9281 rxcw = CSR_READ(sc, WMREG_RXCW);
9282 ctrl = CSR_READ(sc, WMREG_CTRL);
9283 status = CSR_READ(sc, WMREG_STATUS);
9284
9285 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
9286
9287 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
9288 device_xname(sc->sc_dev), __func__,
9289 ((ctrl & CTRL_SWDPIN(1)) == sig),
9290 ((status & STATUS_LU) != 0),
9291 ((rxcw & RXCW_C) != 0)
9292 ));
9293
9294 /*
9295 * SWDPIN LU RXCW
9296 * 0 0 0
9297 * 0 0 1 (should not happen)
9298 * 0 1 0 (should not happen)
9299 * 0 1 1 (should not happen)
9300 * 1 0 0 Disable autonego and force linkup
9301 * 1 0 1 got /C/ but not linkup yet
9302 * 1 1 0 (linkup)
9303 * 1 1 1 If IFM_AUTO, back to autonego
9304 *
9305 */
9306 if (((ctrl & CTRL_SWDPIN(1)) == sig)
9307 && ((status & STATUS_LU) == 0)
9308 && ((rxcw & RXCW_C) == 0)) {
9309 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
9310 __func__));
9311 sc->sc_tbi_linkup = 0;
9312 /* Disable auto-negotiation in the TXCW register */
9313 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
9314
9315 /*
9316 * Force link-up and also force full-duplex.
9317 *
9318 		 * NOTE: the hardware has already updated TFCE and RFCE in
9319 		 * CTRL, so update sc->sc_ctrl to match
9320 */
9321 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
9322 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9323 } else if (((status & STATUS_LU) != 0)
9324 && ((rxcw & RXCW_C) != 0)
9325 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
9326 sc->sc_tbi_linkup = 1;
9327 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
9328 __func__));
9329 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9330 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
9331 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
9332 && ((rxcw & RXCW_C) != 0)) {
9333 DPRINTF(WM_DEBUG_LINK, ("/C/"));
9334 } else {
9335 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
9336 status));
9337 }
9338
9339 return 0;
9340 }
9341
9342 /*
9343 * wm_tbi_tick:
9344 *
9345 * Check the link on TBI devices.
9346 * This function acts as mii_tick().
9347 */
9348 static void
9349 wm_tbi_tick(struct wm_softc *sc)
9350 {
9351 struct mii_data *mii = &sc->sc_mii;
9352 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9353 uint32_t status;
9354
9355 KASSERT(WM_CORE_LOCKED(sc));
9356
9357 status = CSR_READ(sc, WMREG_STATUS);
9358
9359 /* XXX is this needed? */
9360 (void)CSR_READ(sc, WMREG_RXCW);
9361 (void)CSR_READ(sc, WMREG_CTRL);
9362
9363 /* set link status */
9364 if ((status & STATUS_LU) == 0) {
9365 DPRINTF(WM_DEBUG_LINK,
9366 ("%s: LINK: checklink -> down\n",
9367 device_xname(sc->sc_dev)));
9368 sc->sc_tbi_linkup = 0;
9369 } else if (sc->sc_tbi_linkup == 0) {
9370 DPRINTF(WM_DEBUG_LINK,
9371 ("%s: LINK: checklink -> up %s\n",
9372 device_xname(sc->sc_dev),
9373 (status & STATUS_FD) ? "FDX" : "HDX"));
9374 sc->sc_tbi_linkup = 1;
9375 sc->sc_tbi_serdes_ticks = 0;
9376 }
9377
9378 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
9379 goto setled;
9380
9381 if ((status & STATUS_LU) == 0) {
9382 sc->sc_tbi_linkup = 0;
9383 /* If the timer expired, retry autonegotiation */
9384 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9385 && (++sc->sc_tbi_serdes_ticks
9386 >= sc->sc_tbi_serdes_anegticks)) {
9387 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9388 sc->sc_tbi_serdes_ticks = 0;
9389 /*
9390 * Reset the link, and let autonegotiation do
9391 * its thing
9392 */
9393 sc->sc_ctrl |= CTRL_LRST;
9394 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9395 CSR_WRITE_FLUSH(sc);
9396 delay(1000);
9397 sc->sc_ctrl &= ~CTRL_LRST;
9398 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9399 CSR_WRITE_FLUSH(sc);
9400 delay(1000);
9401 CSR_WRITE(sc, WMREG_TXCW,
9402 sc->sc_txcw & ~TXCW_ANE);
9403 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9404 }
9405 }
9406
9407 setled:
9408 wm_tbi_serdes_set_linkled(sc);
9409 }
9410
9411 /* SERDES related */
9412 static void
9413 wm_serdes_power_up_link_82575(struct wm_softc *sc)
9414 {
9415 uint32_t reg;
9416
9417 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9418 && ((sc->sc_flags & WM_F_SGMII) == 0))
9419 return;
9420
9421 reg = CSR_READ(sc, WMREG_PCS_CFG);
9422 reg |= PCS_CFG_PCS_EN;
9423 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
9424
9425 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9426 reg &= ~CTRL_EXT_SWDPIN(3);
9427 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9428 CSR_WRITE_FLUSH(sc);
9429 }
9430
9431 static int
9432 wm_serdes_mediachange(struct ifnet *ifp)
9433 {
9434 struct wm_softc *sc = ifp->if_softc;
9435 bool pcs_autoneg = true; /* XXX */
9436 uint32_t ctrl_ext, pcs_lctl, reg;
9437
9438 /* XXX Currently, this function is not called on 8257[12] */
9439 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9440 || (sc->sc_type >= WM_T_82575))
9441 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9442
9443 wm_serdes_power_up_link_82575(sc);
9444
9445 sc->sc_ctrl |= CTRL_SLU;
9446
9447 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
9448 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
9449
9450 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9451 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
9452 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
9453 case CTRL_EXT_LINK_MODE_SGMII:
9454 pcs_autoneg = true;
9455 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
9456 break;
9457 case CTRL_EXT_LINK_MODE_1000KX:
9458 pcs_autoneg = false;
9459 /* FALLTHROUGH */
9460 default:
9461 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)){
9462 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
9463 pcs_autoneg = false;
9464 }
9465 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
9466 | CTRL_FRCFDX;
9467 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
9468 }
9469 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9470
9471 if (pcs_autoneg) {
9472 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
9473 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
9474
9475 reg = CSR_READ(sc, WMREG_PCS_ANADV);
9476 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
9477 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
9478 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
9479 } else
9480 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
9481
9482 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
9483
9485 return 0;
9486 }
9487
9488 static void
9489 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9490 {
9491 struct wm_softc *sc = ifp->if_softc;
9492 struct mii_data *mii = &sc->sc_mii;
9493 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9494 uint32_t pcs_adv, pcs_lpab, reg;
9495
9496 ifmr->ifm_status = IFM_AVALID;
9497 ifmr->ifm_active = IFM_ETHER;
9498
9499 /* Check PCS */
9500 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9501 if ((reg & PCS_LSTS_LINKOK) == 0) {
9502 ifmr->ifm_active |= IFM_NONE;
9503 sc->sc_tbi_linkup = 0;
9504 goto setled;
9505 }
9506
9507 sc->sc_tbi_linkup = 1;
9508 ifmr->ifm_status |= IFM_ACTIVE;
9509 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
9510 if ((reg & PCS_LSTS_FDX) != 0)
9511 ifmr->ifm_active |= IFM_FDX;
9512 else
9513 ifmr->ifm_active |= IFM_HDX;
9514 mii->mii_media_active &= ~IFM_ETH_FMASK;
9515 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
9516 /* Check flow */
9517 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9518 if ((reg & PCS_LSTS_AN_COMP) == 0) {
9519 printf("XXX LINKOK but not ACOMP\n");
9520 goto setled;
9521 }
9522 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
9523 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
9524 printf("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab);
9525 if ((pcs_adv & TXCW_SYM_PAUSE)
9526 && (pcs_lpab & TXCW_SYM_PAUSE)) {
9527 mii->mii_media_active |= IFM_FLOW
9528 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
9529 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
9530 && (pcs_adv & TXCW_ASYM_PAUSE)
9531 && (pcs_lpab & TXCW_SYM_PAUSE)
9532 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9533 mii->mii_media_active |= IFM_FLOW
9534 | IFM_ETH_TXPAUSE;
9535 } else if ((pcs_adv & TXCW_SYM_PAUSE)
9536 && (pcs_adv & TXCW_ASYM_PAUSE)
9537 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
9538 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9539 mii->mii_media_active |= IFM_FLOW
9540 | IFM_ETH_RXPAUSE;
9541 } else {
9542 }
9543 }
9544 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
9545 | (mii->mii_media_active & IFM_ETH_FMASK);
9546 setled:
9547 wm_tbi_serdes_set_linkled(sc);
9548 }
9549
9550 /*
9551 * wm_serdes_tick:
9552 *
9553 * Check the link on serdes devices.
9554 */
9555 static void
9556 wm_serdes_tick(struct wm_softc *sc)
9557 {
9558 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9559 struct mii_data *mii = &sc->sc_mii;
9560 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9561 uint32_t reg;
9562
9563 KASSERT(WM_CORE_LOCKED(sc));
9564
9565 mii->mii_media_status = IFM_AVALID;
9566 mii->mii_media_active = IFM_ETHER;
9567
9568 /* Check PCS */
9569 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9570 if ((reg & PCS_LSTS_LINKOK) != 0) {
9571 mii->mii_media_status |= IFM_ACTIVE;
9572 sc->sc_tbi_linkup = 1;
9573 sc->sc_tbi_serdes_ticks = 0;
9574 mii->mii_media_active |= IFM_1000_SX; /* XXX */
9575 if ((reg & PCS_LSTS_FDX) != 0)
9576 mii->mii_media_active |= IFM_FDX;
9577 else
9578 mii->mii_media_active |= IFM_HDX;
9579 } else {
9580 mii->mii_media_status |= IFM_NONE;
9581 sc->sc_tbi_linkup = 0;
9582 /* If the timer expired, retry autonegotiation */
9583 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9584 && (++sc->sc_tbi_serdes_ticks
9585 >= sc->sc_tbi_serdes_anegticks)) {
9586 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9587 sc->sc_tbi_serdes_ticks = 0;
9588 /* XXX */
9589 wm_serdes_mediachange(ifp);
9590 }
9591 }
9592
9593 wm_tbi_serdes_set_linkled(sc);
9594 }
9595
9596 /* SFP related */
9597
9598 static int
9599 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
9600 {
9601 uint32_t i2ccmd;
9602 int i;
9603
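	/*
	 * Issue a single-byte read on the SFP module's I2C bus through
	 * the I2CCMD register, then poll the READY bit for completion.
	 */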
9604 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
9605 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9606
9607 /* Poll the ready bit */
9608 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9609 delay(50);
9610 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9611 if (i2ccmd & I2CCMD_READY)
9612 break;
9613 }
9614 if ((i2ccmd & I2CCMD_READY) == 0)
9615 return -1;
9616 if ((i2ccmd & I2CCMD_ERROR) != 0)
9617 return -1;
9618
9619 *data = i2ccmd & 0x00ff;
9620
9621 return 0;
9622 }
9623
9624 static uint32_t
9625 wm_sfp_get_media_type(struct wm_softc *sc)
9626 {
9627 uint32_t ctrl_ext;
9628 uint8_t val = 0;
9629 int timeout = 3;
9630 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
9631 int rv = -1;
9632
9633 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9634 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
9635 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
9636 CSR_WRITE_FLUSH(sc);
9637
9638 /* Read SFP module data */
9639 while (timeout) {
9640 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
9641 if (rv == 0)
9642 break;
9643 delay(100*1000); /* XXX too big */
9644 timeout--;
9645 }
9646 if (rv != 0)
9647 goto out;
9648 switch (val) {
9649 case SFF_SFP_ID_SFF:
9650 aprint_normal_dev(sc->sc_dev,
9651 "Module/Connector soldered to board\n");
9652 break;
9653 case SFF_SFP_ID_SFP:
9654 aprint_normal_dev(sc->sc_dev, "SFP\n");
9655 break;
9656 case SFF_SFP_ID_UNKNOWN:
9657 goto out;
9658 default:
9659 break;
9660 }
9661
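	/*
	 * The Ethernet compliance codes in the module's ID EEPROM (per
	 * SFF-8472) tell us which media the module supports; 1000BASE-T
	 * and 100BASE-FX modules are driven through SGMII here.
	 */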
9662 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
9663 if (rv != 0) {
9664 goto out;
9665 }
9666
9667 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
9668 mediatype = WM_MEDIATYPE_SERDES;
9669 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
9670 		sc->sc_flags |= WM_F_SGMII;
9671 		mediatype = WM_MEDIATYPE_COPPER;
9672 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
9673 sc->sc_flags |= WM_F_SGMII;
9674 mediatype = WM_MEDIATYPE_SERDES;
9675 }
9676
9677 out:
9678 /* Restore I2C interface setting */
9679 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9680
9681 return mediatype;
9682 }
9683 /*
9684 * NVM related.
9685 * Microwire, SPI (w/wo EERD) and Flash.
9686 */
9687
9688 /* Both spi and uwire */
9689
9690 /*
9691 * wm_eeprom_sendbits:
9692 *
9693 * Send a series of bits to the EEPROM.
9694 */
9695 static void
9696 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
9697 {
9698 uint32_t reg;
9699 int x;
9700
9701 reg = CSR_READ(sc, WMREG_EECD);
9702
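	/* Clock the bits out MSB first: present each bit on DI, then pulse SK. */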
9703 for (x = nbits; x > 0; x--) {
9704 if (bits & (1U << (x - 1)))
9705 reg |= EECD_DI;
9706 else
9707 reg &= ~EECD_DI;
9708 CSR_WRITE(sc, WMREG_EECD, reg);
9709 CSR_WRITE_FLUSH(sc);
9710 delay(2);
9711 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9712 CSR_WRITE_FLUSH(sc);
9713 delay(2);
9714 CSR_WRITE(sc, WMREG_EECD, reg);
9715 CSR_WRITE_FLUSH(sc);
9716 delay(2);
9717 }
9718 }
9719
9720 /*
9721 * wm_eeprom_recvbits:
9722 *
9723 * Receive a series of bits from the EEPROM.
9724 */
9725 static void
9726 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
9727 {
9728 uint32_t reg, val;
9729 int x;
9730
9731 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
9732
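	/* Clock the bits in MSB first: raise SK, sample DO, then lower SK. */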
9733 val = 0;
9734 for (x = nbits; x > 0; x--) {
9735 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9736 CSR_WRITE_FLUSH(sc);
9737 delay(2);
9738 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
9739 val |= (1U << (x - 1));
9740 CSR_WRITE(sc, WMREG_EECD, reg);
9741 CSR_WRITE_FLUSH(sc);
9742 delay(2);
9743 }
9744 *valp = val;
9745 }
9746
9747 /* Microwire */
9748
9749 /*
9750 * wm_nvm_read_uwire:
9751 *
9752 * Read a word from the EEPROM using the MicroWire protocol.
9753 */
9754 static int
9755 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9756 {
9757 uint32_t reg, val;
9758 int i;
9759
9760 for (i = 0; i < wordcnt; i++) {
9761 /* Clear SK and DI. */
9762 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
9763 CSR_WRITE(sc, WMREG_EECD, reg);
9764
9765 /*
9766 * XXX: workaround for a bug in qemu-0.12.x and prior
9767 * and Xen.
9768 *
9769 * We use this workaround only for 82540 because qemu's
9770 * e1000 act as 82540.
9771 */
9772 if (sc->sc_type == WM_T_82540) {
9773 reg |= EECD_SK;
9774 CSR_WRITE(sc, WMREG_EECD, reg);
9775 reg &= ~EECD_SK;
9776 CSR_WRITE(sc, WMREG_EECD, reg);
9777 CSR_WRITE_FLUSH(sc);
9778 delay(2);
9779 }
9780 /* XXX: end of workaround */
9781
9782 /* Set CHIP SELECT. */
9783 reg |= EECD_CS;
9784 CSR_WRITE(sc, WMREG_EECD, reg);
9785 CSR_WRITE_FLUSH(sc);
9786 delay(2);
9787
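		/*
		 * A Microwire READ transaction is a 3-bit opcode, the
		 * word address, then 16 data bits clocked back out.
		 */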
9788 /* Shift in the READ command. */
9789 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
9790
9791 /* Shift in address. */
9792 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
9793
9794 /* Shift out the data. */
9795 wm_eeprom_recvbits(sc, &val, 16);
9796 data[i] = val & 0xffff;
9797
9798 /* Clear CHIP SELECT. */
9799 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
9800 CSR_WRITE(sc, WMREG_EECD, reg);
9801 CSR_WRITE_FLUSH(sc);
9802 delay(2);
9803 }
9804
9805 return 0;
9806 }
9807
9808 /* SPI */
9809
9810 /*
9811 * Set SPI and FLASH related information from the EECD register.
9812 * For 82541 and 82547, the word size is taken from EEPROM.
9813 */
9814 static int
9815 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
9816 {
9817 int size;
9818 uint32_t reg;
9819 uint16_t data;
9820
9821 reg = CSR_READ(sc, WMREG_EECD);
9822 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
9823
9824 /* Read the size of NVM from EECD by default */
9825 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9826 switch (sc->sc_type) {
9827 case WM_T_82541:
9828 case WM_T_82541_2:
9829 case WM_T_82547:
9830 case WM_T_82547_2:
9831 /* Set dummy value to access EEPROM */
9832 sc->sc_nvm_wordsize = 64;
9833 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
9834 reg = data;
9835 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9836 if (size == 0)
9837 size = 6; /* 64 word size */
9838 else
9839 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
9840 break;
9841 case WM_T_80003:
9842 case WM_T_82571:
9843 case WM_T_82572:
9844 case WM_T_82573: /* SPI case */
9845 case WM_T_82574: /* SPI case */
9846 case WM_T_82583: /* SPI case */
9847 size += NVM_WORD_SIZE_BASE_SHIFT;
9848 if (size > 14)
9849 size = 14;
9850 break;
9851 case WM_T_82575:
9852 case WM_T_82576:
9853 case WM_T_82580:
9854 case WM_T_I350:
9855 case WM_T_I354:
9856 case WM_T_I210:
9857 case WM_T_I211:
9858 size += NVM_WORD_SIZE_BASE_SHIFT;
9859 if (size > 15)
9860 size = 15;
9861 break;
9862 default:
9863 aprint_error_dev(sc->sc_dev,
9864 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
9865 return -1;
9867 }
9868
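	/* The size field is an exponent: the NVM holds 2^size words. */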
9869 sc->sc_nvm_wordsize = 1 << size;
9870
9871 return 0;
9872 }
9873
9874 /*
9875 * wm_nvm_ready_spi:
9876 *
9877 * Wait for a SPI EEPROM to be ready for commands.
9878 */
9879 static int
9880 wm_nvm_ready_spi(struct wm_softc *sc)
9881 {
9882 uint32_t val;
9883 int usec;
9884
9885 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
9886 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
9887 wm_eeprom_recvbits(sc, &val, 8);
9888 if ((val & SPI_SR_RDY) == 0)
9889 break;
9890 }
9891 if (usec >= SPI_MAX_RETRIES) {
9892 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
9893 return 1;
9894 }
9895 return 0;
9896 }
9897
9898 /*
9899 * wm_nvm_read_spi:
9900 *
9901  *	Read a word from the EEPROM using the SPI protocol.
9902 */
9903 static int
9904 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9905 {
9906 uint32_t reg, val;
9907 int i;
9908 uint8_t opc;
9909
9910 /* Clear SK and CS. */
9911 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
9912 CSR_WRITE(sc, WMREG_EECD, reg);
9913 CSR_WRITE_FLUSH(sc);
9914 delay(2);
9915
9916 if (wm_nvm_ready_spi(sc))
9917 return 1;
9918
9919 /* Toggle CS to flush commands. */
9920 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
9921 CSR_WRITE_FLUSH(sc);
9922 delay(2);
9923 CSR_WRITE(sc, WMREG_EECD, reg);
9924 CSR_WRITE_FLUSH(sc);
9925 delay(2);
9926
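	/*
	 * Small SPI EEPROMs with 8-bit addressing carry address bit 8
	 * (A8) in the opcode; the address itself is a byte address, so
	 * the word offset is shifted left by one.
	 */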
9927 opc = SPI_OPC_READ;
9928 if (sc->sc_nvm_addrbits == 8 && word >= 128)
9929 opc |= SPI_OPC_A8;
9930
9931 wm_eeprom_sendbits(sc, opc, 8);
9932 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
9933
9934 for (i = 0; i < wordcnt; i++) {
9935 wm_eeprom_recvbits(sc, &val, 16);
9936 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
9937 }
9938
9939 /* Raise CS and clear SK. */
9940 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
9941 CSR_WRITE(sc, WMREG_EECD, reg);
9942 CSR_WRITE_FLUSH(sc);
9943 delay(2);
9944
9945 return 0;
9946 }
9947
9948 /* Using with EERD */
9949
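/*
 * The EERD register provides a simple read interface: write the word
 * address together with the START bit, poll until DONE is set, then
 * pull the data out of the upper bits of the same register.
 */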
9950 static int
9951 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
9952 {
9953 uint32_t attempts = 100000;
9954 uint32_t i, reg = 0;
9955 int32_t done = -1;
9956
9957 for (i = 0; i < attempts; i++) {
9958 reg = CSR_READ(sc, rw);
9959
9960 if (reg & EERD_DONE) {
9961 done = 0;
9962 break;
9963 }
9964 delay(5);
9965 }
9966
9967 return done;
9968 }
9969
9970 static int
9971 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
9972 uint16_t *data)
9973 {
9974 int i, eerd = 0;
9975 int error = 0;
9976
9977 for (i = 0; i < wordcnt; i++) {
9978 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
9979
9980 CSR_WRITE(sc, WMREG_EERD, eerd);
9981 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
9982 if (error != 0)
9983 break;
9984
9985 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
9986 }
9987
9988 return error;
9989 }
9990
9991 /* Flash */
9992
9993 static int
9994 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
9995 {
9996 uint32_t eecd;
9997 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
9998 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
9999 uint8_t sig_byte = 0;
10000
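	/*
	 * On ICH8/ICH9 the valid bank can be reported directly in EECD.
	 * Otherwise, each flash bank carries a signature word, and the
	 * bank whose signature matches ICH_NVM_SIG_VALUE is the
	 * currently valid one.
	 */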
10001 switch (sc->sc_type) {
10002 case WM_T_ICH8:
10003 case WM_T_ICH9:
10004 eecd = CSR_READ(sc, WMREG_EECD);
10005 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
10006 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
10007 return 0;
10008 }
10009 /* FALLTHROUGH */
10010 default:
10011 /* Default to 0 */
10012 *bank = 0;
10013
10014 /* Check bank 0 */
10015 wm_read_ich8_byte(sc, act_offset, &sig_byte);
10016 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10017 *bank = 0;
10018 return 0;
10019 }
10020
10021 /* Check bank 1 */
10022 wm_read_ich8_byte(sc, act_offset + bank1_offset,
10023 &sig_byte);
10024 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10025 *bank = 1;
10026 return 0;
10027 }
10028 }
10029
10030 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
10031 device_xname(sc->sc_dev)));
10032 return -1;
10033 }
10034
10035 /******************************************************************************
10036 * This function does initial flash setup so that a new read/write/erase cycle
10037 * can be started.
10038 *
10039 * sc - The pointer to the hw structure
10040 ****************************************************************************/
10041 static int32_t
10042 wm_ich8_cycle_init(struct wm_softc *sc)
10043 {
10044 uint16_t hsfsts;
10045 int32_t error = 1;
10046 int32_t i = 0;
10047
10048 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10049
10050 	/* Check the Flash Descriptor Valid bit in HW status */
10051 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
10052 return error;
10053 }
10054
10055 /* Clear FCERR in Hw status by writing 1 */
10056 /* Clear DAEL in Hw status by writing a 1 */
10057 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
10058
10059 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10060
10061 	/*
10062 	 * Either there should be a hardware SPI cycle-in-progress bit to
10063 	 * check against before starting a new cycle, or the FDONE bit
10064 	 * should read as 1 after hardware reset so that it can be used to
10065 	 * tell whether a cycle is in progress or has completed. There
10066 	 * should also be a software semaphore mechanism guarding FDONE or
10067 	 * the cycle-in-progress bit so that accesses to those bits by two
10068 	 * threads are serialized, or some way to keep two threads from
10069 	 * starting a cycle at the same time.
10070 	 */
10071
10072 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10073 /*
10074 * There is no cycle running at present, so we can start a
10075 * cycle
10076 */
10077
10078 /* Begin by setting Flash Cycle Done. */
10079 hsfsts |= HSFSTS_DONE;
10080 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10081 error = 0;
10082 } else {
10083 		/*
10084 		 * Otherwise poll for some time so the current cycle has a
10085 		 * chance to end before giving up.
10086 		 */
10087 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
10088 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10089 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10090 error = 0;
10091 break;
10092 }
10093 delay(1);
10094 }
10095 if (error == 0) {
10096 /*
10097 			 * The previous cycle finished before the timeout;
10098 			 * now set the Flash Cycle Done bit.
10099 */
10100 hsfsts |= HSFSTS_DONE;
10101 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10102 }
10103 }
10104 return error;
10105 }
10106
10107 /******************************************************************************
10108 * This function starts a flash cycle and waits for its completion
10109 *
10110 * sc - The pointer to the hw structure
10111 ****************************************************************************/
10112 static int32_t
10113 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
10114 {
10115 uint16_t hsflctl;
10116 uint16_t hsfsts;
10117 int32_t error = 1;
10118 uint32_t i = 0;
10119
10120 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
10121 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10122 hsflctl |= HSFCTL_GO;
10123 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10124
10125 /* Wait till FDONE bit is set to 1 */
10126 do {
10127 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10128 if (hsfsts & HSFSTS_DONE)
10129 break;
10130 delay(1);
10131 i++;
10132 } while (i < timeout);
10133 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
10134 error = 0;
10135
10136 return error;
10137 }
10138
10139 /******************************************************************************
10140 * Reads a byte or word from the NVM using the ICH8 flash access registers.
10141 *
10142 * sc - The pointer to the hw structure
10143 * index - The index of the byte or word to read.
10144 * size - Size of data to read, 1=byte 2=word
10145 * data - Pointer to the word to store the value read.
10146 *****************************************************************************/
10147 static int32_t
10148 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
10149 uint32_t size, uint16_t *data)
10150 {
10151 uint16_t hsfsts;
10152 uint16_t hsflctl;
10153 uint32_t flash_linear_address;
10154 uint32_t flash_data = 0;
10155 int32_t error = 1;
10156 int32_t count = 0;
10157
10158 	if (size < 1 || size > 2 || data == NULL ||
10159 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
10160 return error;
10161
10162 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
10163 sc->sc_ich8_flash_base;
10164
10165 do {
10166 delay(1);
10167 /* Steps */
10168 error = wm_ich8_cycle_init(sc);
10169 if (error)
10170 break;
10171
10172 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10173 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
10174 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
10175 & HSFCTL_BCOUNT_MASK;
10176 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
10177 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10178
10179 /*
10180 * Write the last 24 bits of index into Flash Linear address
10181 * field in Flash Address
10182 */
10183 /* TODO: TBD maybe check the index against the size of flash */
10184
10185 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
10186
10187 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
10188
10189 		/*
10190 		 * If FCERR is set to 1, clear it and retry the whole
10191 		 * sequence a few more times; otherwise read in (shift in)
10192 		 * the Flash Data0 register, least significant byte
10193 		 * first.
10194 		 */
10195 if (error == 0) {
10196 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
10197 if (size == 1)
10198 *data = (uint8_t)(flash_data & 0x000000FF);
10199 else if (size == 2)
10200 *data = (uint16_t)(flash_data & 0x0000FFFF);
10201 break;
10202 } else {
10203 /*
10204 * If we've gotten here, then things are probably
10205 * completely hosed, but if the error condition is
10206 * detected, it won't hurt to give it another try...
10207 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
10208 */
10209 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10210 if (hsfsts & HSFSTS_ERR) {
10211 /* Repeat for some time before giving up. */
10212 continue;
10213 } else if ((hsfsts & HSFSTS_DONE) == 0)
10214 break;
10215 }
10216 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
10217
10218 return error;
10219 }
10220
10221 /******************************************************************************
10222 * Reads a single byte from the NVM using the ICH8 flash access registers.
10223 *
10224 * sc - pointer to wm_hw structure
10225 * index - The index of the byte to read.
10226 * data - Pointer to a byte to store the value read.
10227 *****************************************************************************/
10228 static int32_t
10229 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
10230 {
10231 int32_t status;
10232 uint16_t word = 0;
10233
10234 status = wm_read_ich8_data(sc, index, 1, &word);
10235 if (status == 0)
10236 *data = (uint8_t)word;
10237 else
10238 *data = 0;
10239
10240 return status;
10241 }
10242
10243 /******************************************************************************
10244 * Reads a word from the NVM using the ICH8 flash access registers.
10245 *
10246 * sc - pointer to wm_hw structure
10247 * index - The starting byte index of the word to read.
10248 * data - Pointer to a word to store the value read.
10249 *****************************************************************************/
10250 static int32_t
10251 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
10252 {
10253 int32_t status;
10254
10255 status = wm_read_ich8_data(sc, index, 2, data);
10256 return status;
10257 }
10258
10259 /******************************************************************************
10260 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
10261 * register.
10262 *
10263 * sc - Struct containing variables accessed by shared code
10264 * offset - offset of word in the EEPROM to read
10265 * data - word read from the EEPROM
10266 * words - number of words to read
10267 *****************************************************************************/
10268 static int
10269 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
10270 {
10271 int32_t error = 0;
10272 uint32_t flash_bank = 0;
10273 uint32_t act_offset = 0;
10274 uint32_t bank_offset = 0;
10275 uint16_t word = 0;
10276 uint16_t i = 0;
10277
10278 /*
10279 * We need to know which is the valid flash bank. In the event
10280 * that we didn't allocate eeprom_shadow_ram, we may not be
10281 * managing flash_bank. So it cannot be trusted and needs
10282 * to be updated with each read.
10283 */
10284 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10285 if (error) {
10286 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10287 device_xname(sc->sc_dev)));
10288 flash_bank = 0;
10289 }
10290
10291 /*
10292 * Adjust offset appropriately if we're on bank 1 - adjust for word
10293 * size
10294 */
10295 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10296
10297 error = wm_get_swfwhw_semaphore(sc);
10298 if (error) {
10299 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10300 __func__);
10301 return error;
10302 }
10303
10304 for (i = 0; i < words; i++) {
10305 /* The NVM part needs a byte offset, hence * 2 */
10306 act_offset = bank_offset + ((offset + i) * 2);
10307 error = wm_read_ich8_word(sc, act_offset, &word);
10308 if (error) {
10309 aprint_error_dev(sc->sc_dev,
10310 "%s: failed to read NVM\n", __func__);
10311 break;
10312 }
10313 data[i] = word;
10314 }
10315
10316 wm_put_swfwhw_semaphore(sc);
10317 return error;
10318 }
10319
10320 /* iNVM */
10321
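/*
 * The i210/i211 integrated NVM (iNVM) is a small OTP array read through
 * the INVM_DATA registers. It is organized as typed records; word
 * autoload records carry a word address and the data for that word.
 */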
10322 static int
10323 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
10324 {
10325 int32_t rv = 0;
10326 uint32_t invm_dword;
10327 uint16_t i;
10328 uint8_t record_type, word_address;
10329
10330 for (i = 0; i < INVM_SIZE; i++) {
10331 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
10332 /* Get record type */
10333 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
10334 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
10335 break;
10336 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
10337 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
10338 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
10339 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
10340 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
10341 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
10342 if (word_address == address) {
10343 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
10344 rv = 0;
10345 break;
10346 }
10347 }
10348 }
10349
10350 return rv;
10351 }
10352
10353 static int
10354 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
10355 {
10356 int rv = 0;
10357 int i;
10358
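	/*
	 * Note: except for the MAC address words, the cases below read
	 * at "offset" and store to "*data", which assumes these offsets
	 * are only ever read one word at a time.
	 */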
10359 for (i = 0; i < words; i++) {
10360 switch (offset + i) {
10361 case NVM_OFF_MACADDR:
10362 case NVM_OFF_MACADDR1:
10363 case NVM_OFF_MACADDR2:
10364 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
10365 if (rv != 0) {
10366 data[i] = 0xffff;
10367 rv = -1;
10368 }
10369 break;
10370 case NVM_OFF_CFG2:
10371 rv = wm_nvm_read_word_invm(sc, offset, data);
10372 if (rv != 0) {
10373 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
10374 rv = 0;
10375 }
10376 break;
10377 case NVM_OFF_CFG4:
10378 rv = wm_nvm_read_word_invm(sc, offset, data);
10379 if (rv != 0) {
10380 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
10381 rv = 0;
10382 }
10383 break;
10384 case NVM_OFF_LED_1_CFG:
10385 rv = wm_nvm_read_word_invm(sc, offset, data);
10386 if (rv != 0) {
10387 *data = NVM_LED_1_CFG_DEFAULT_I211;
10388 rv = 0;
10389 }
10390 break;
10391 case NVM_OFF_LED_0_2_CFG:
10392 rv = wm_nvm_read_word_invm(sc, offset, data);
10393 if (rv != 0) {
10394 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
10395 rv = 0;
10396 }
10397 break;
10398 case NVM_OFF_ID_LED_SETTINGS:
10399 rv = wm_nvm_read_word_invm(sc, offset, data);
10400 if (rv != 0) {
10401 *data = ID_LED_RESERVED_FFFF;
10402 rv = 0;
10403 }
10404 break;
10405 default:
10406 DPRINTF(WM_DEBUG_NVM,
10407 ("NVM word 0x%02x is not mapped.\n", offset));
10408 *data = NVM_RESERVED_WORD;
10409 break;
10410 }
10411 }
10412
10413 return rv;
10414 }
10415
10416 /* Lock, detecting NVM type, validate checksum, version and read */
10417
10418 /*
10419 * wm_nvm_acquire:
10420 *
10421 * Perform the EEPROM handshake required on some chips.
10422 */
10423 static int
10424 wm_nvm_acquire(struct wm_softc *sc)
10425 {
10426 uint32_t reg;
10427 int x;
10428 int ret = 0;
10429
10430 	/* Flash-type NVM needs no handshake; always succeeds */
10431 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
10432 return 0;
10433
10434 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
10435 ret = wm_get_swfwhw_semaphore(sc);
10436 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
10437 /* This will also do wm_get_swsm_semaphore() if needed */
10438 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
10439 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
10440 ret = wm_get_swsm_semaphore(sc);
10441 }
10442
10443 if (ret) {
10444 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10445 __func__);
10446 return 1;
10447 }
10448
10449 if (sc->sc_flags & WM_F_LOCK_EECD) {
10450 reg = CSR_READ(sc, WMREG_EECD);
10451
10452 /* Request EEPROM access. */
10453 reg |= EECD_EE_REQ;
10454 CSR_WRITE(sc, WMREG_EECD, reg);
10455
10456 /* ..and wait for it to be granted. */
10457 for (x = 0; x < 1000; x++) {
10458 reg = CSR_READ(sc, WMREG_EECD);
10459 if (reg & EECD_EE_GNT)
10460 break;
10461 delay(5);
10462 }
10463 if ((reg & EECD_EE_GNT) == 0) {
10464 aprint_error_dev(sc->sc_dev,
10465 "could not acquire EEPROM GNT\n");
10466 reg &= ~EECD_EE_REQ;
10467 CSR_WRITE(sc, WMREG_EECD, reg);
10468 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10469 wm_put_swfwhw_semaphore(sc);
10470 if (sc->sc_flags & WM_F_LOCK_SWFW)
10471 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10472 else if (sc->sc_flags & WM_F_LOCK_SWSM)
10473 wm_put_swsm_semaphore(sc);
10474 return 1;
10475 }
10476 }
10477
10478 return 0;
10479 }
10480
10481 /*
10482 * wm_nvm_release:
10483 *
10484 * Release the EEPROM mutex.
10485 */
10486 static void
10487 wm_nvm_release(struct wm_softc *sc)
10488 {
10489 uint32_t reg;
10490
10491 	/* Flash-type NVM needs no handshake; nothing to release */
10492 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
10493 return;
10494
10495 if (sc->sc_flags & WM_F_LOCK_EECD) {
10496 reg = CSR_READ(sc, WMREG_EECD);
10497 reg &= ~EECD_EE_REQ;
10498 CSR_WRITE(sc, WMREG_EECD, reg);
10499 }
10500
10501 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10502 wm_put_swfwhw_semaphore(sc);
10503 if (sc->sc_flags & WM_F_LOCK_SWFW)
10504 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10505 else if (sc->sc_flags & WM_F_LOCK_SWSM)
10506 wm_put_swsm_semaphore(sc);
10507 }
10508
10509 static int
10510 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
10511 {
10512 uint32_t eecd = 0;
10513
10514 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
10515 || sc->sc_type == WM_T_82583) {
10516 eecd = CSR_READ(sc, WMREG_EECD);
10517
10518 /* Isolate bits 15 & 16 */
10519 eecd = ((eecd >> 15) & 0x03);
10520
10521 /* If both bits are set, device is Flash type */
10522 if (eecd == 0x03)
10523 return 0;
10524 }
10525 return 1;
10526 }
10527
10528 static int
10529 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
10530 {
10531 uint32_t eec;
10532
10533 eec = CSR_READ(sc, WMREG_EEC);
10534 if ((eec & EEC_FLASH_DETECTED) != 0)
10535 return 1;
10536
10537 return 0;
10538 }
10539
10540 /*
10541 * wm_nvm_validate_checksum
10542 *
10543  * The checksum is defined as the sum of the first 64 (16 bit) words,
10543  * which should equal NVM_CHECKSUM.
10544 */
10545 static int
10546 wm_nvm_validate_checksum(struct wm_softc *sc)
10547 {
10548 uint16_t checksum;
10549 uint16_t eeprom_data;
10550 #ifdef WM_DEBUG
10551 uint16_t csum_wordaddr, valid_checksum;
10552 #endif
10553 int i;
10554
10555 checksum = 0;
10556
10557 /* Don't check for I211 */
10558 if (sc->sc_type == WM_T_I211)
10559 return 0;
10560
10561 #ifdef WM_DEBUG
10562 if (sc->sc_type == WM_T_PCH_LPT) {
10563 csum_wordaddr = NVM_OFF_COMPAT;
10564 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
10565 } else {
10566 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
10567 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
10568 }
10569
10570 /* Dump EEPROM image for debug */
10571 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10572 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10573 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
10574 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
10575 if ((eeprom_data & valid_checksum) == 0) {
10576 DPRINTF(WM_DEBUG_NVM,
10577 ("%s: NVM need to be updated (%04x != %04x)\n",
10578 device_xname(sc->sc_dev), eeprom_data,
10579 valid_checksum));
10580 }
10581 }
10582
10583 if ((wm_debug & WM_DEBUG_NVM) != 0) {
10584 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
10585 for (i = 0; i < NVM_SIZE; i++) {
10586 if (wm_nvm_read(sc, i, 1, &eeprom_data))
10587 printf("XXXX ");
10588 else
10589 printf("%04hx ", eeprom_data);
10590 if (i % 8 == 7)
10591 printf("\n");
10592 }
10593 }
10594
10595 #endif /* WM_DEBUG */
10596
10597 for (i = 0; i < NVM_SIZE; i++) {
10598 if (wm_nvm_read(sc, i, 1, &eeprom_data))
10599 return 1;
10600 checksum += eeprom_data;
10601 }
10602
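	/*
	 * A mismatch is only reported (under WM_DEBUG) and is not
	 * treated as fatal, presumably because some boards ship with a
	 * checksum that does not validate.
	 */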
10603 if (checksum != (uint16_t) NVM_CHECKSUM) {
10604 #ifdef WM_DEBUG
10605 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
10606 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
10607 #endif
10608 }
10609
10610 return 0;
10611 }
10612
10613 static void
10614 wm_nvm_version_invm(struct wm_softc *sc)
10615 {
10616 uint32_t dword;
10617
10618 /*
10619 	 * Linux's code to decode the version is very strange, so we don't
10620 	 * follow that algorithm and just use word 61 as the document describes.
10621 * Perhaps it's not perfect though...
10622 *
10623 * Example:
10624 *
10625 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
10626 */
10627 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
10628 dword = __SHIFTOUT(dword, INVM_VER_1);
10629 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
10630 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
10631 }
10632
10633 static void
10634 wm_nvm_version(struct wm_softc *sc)
10635 {
10636 uint16_t major, minor, build, patch;
10637 uint16_t uid0, uid1;
10638 uint16_t nvm_data;
10639 uint16_t off;
10640 bool check_version = false;
10641 bool check_optionrom = false;
10642 bool have_build = false;
10643
10644 /*
10645 * Version format:
10646 *
10647 * XYYZ
10648 * X0YZ
10649 * X0YY
10650 *
10651 * Example:
10652 *
10653 * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10)
10654 * 82571 0x50a6 5.10.6?
10655 * 82572 0x506a 5.6.10?
10656 * 82572EI 0x5069 5.6.9?
10657 * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4)
10658 * 0x2013 2.1.3?
10659 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
10660 */
10661 wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
10662 switch (sc->sc_type) {
10663 case WM_T_82571:
10664 case WM_T_82572:
10665 case WM_T_82574:
10666 case WM_T_82583:
10667 check_version = true;
10668 check_optionrom = true;
10669 have_build = true;
10670 break;
10671 case WM_T_82575:
10672 case WM_T_82576:
10673 case WM_T_82580:
10674 if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
10675 check_version = true;
10676 break;
10677 case WM_T_I211:
10678 wm_nvm_version_invm(sc);
10679 goto printver;
10680 case WM_T_I210:
10681 if (!wm_nvm_get_flash_presence_i210(sc)) {
10682 wm_nvm_version_invm(sc);
10683 goto printver;
10684 }
10685 /* FALLTHROUGH */
10686 case WM_T_I350:
10687 case WM_T_I354:
10688 check_version = true;
10689 check_optionrom = true;
10690 break;
10691 default:
10692 return;
10693 }
10694 if (check_version) {
10695 wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
10696 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
10697 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
10698 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
10699 build = nvm_data & NVM_BUILD_MASK;
10700 have_build = true;
10701 } else
10702 minor = nvm_data & 0x00ff;
10703
10704 		/* Convert the BCD-encoded minor to decimal */
10705 minor = (minor / 16) * 10 + (minor % 16);
10706 sc->sc_nvm_ver_major = major;
10707 sc->sc_nvm_ver_minor = minor;
10708
10709 printver:
10710 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
10711 sc->sc_nvm_ver_minor);
10712 if (have_build) {
10713 sc->sc_nvm_ver_build = build;
10714 aprint_verbose(".%d", build);
10715 }
10716 }
10717 if (check_optionrom) {
10718 wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
10719 /* Option ROM Version */
10720 if ((off != 0x0000) && (off != 0xffff)) {
10721 off += NVM_COMBO_VER_OFF;
10722 wm_nvm_read(sc, off + 1, 1, &uid1);
10723 wm_nvm_read(sc, off, 1, &uid0);
10724 if ((uid0 != 0) && (uid0 != 0xffff)
10725 && (uid1 != 0) && (uid1 != 0xffff)) {
10726 /* 16bits */
10727 major = uid0 >> 8;
10728 build = (uid0 << 8) | (uid1 >> 8);
10729 patch = uid1 & 0x00ff;
10730 aprint_verbose(", option ROM Version %d.%d.%d",
10731 major, build, patch);
10732 }
10733 }
10734 }
10735
10736 wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
10737 aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
10738 }
10739
10740 /*
10741 * wm_nvm_read:
10742 *
10743 * Read data from the serial EEPROM.
10744 */
10745 static int
10746 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10747 {
10748 int rv;
10749
10750 if (sc->sc_flags & WM_F_EEPROM_INVALID)
10751 return 1;
10752
10753 if (wm_nvm_acquire(sc))
10754 return 1;
10755
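	/*
	 * Dispatch on the NVM access method: ICH/PCH-family flash,
	 * iNVM, the EERD register interface, SPI, and finally Microwire
	 * as the fallback.
	 */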
10756 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10757 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10758 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
10759 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
10760 else if (sc->sc_flags & WM_F_EEPROM_INVM)
10761 rv = wm_nvm_read_invm(sc, word, wordcnt, data);
10762 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
10763 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
10764 else if (sc->sc_flags & WM_F_EEPROM_SPI)
10765 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
10766 else
10767 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
10768
10769 wm_nvm_release(sc);
10770 return rv;
10771 }
10772
10773 /*
10774 * Hardware semaphores.
10775  * Very complex...
10776 */
10777
10778 static int
10779 wm_get_swsm_semaphore(struct wm_softc *sc)
10780 {
10781 int32_t timeout;
10782 uint32_t swsm;
10783
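	/*
	 * This is a two-stage handshake: first wait for the software
	 * semaphore bit (SMBI) to be clear, then claim the software/
	 * firmware semaphore by setting SWESMBI and reading it back.
	 */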
10784 if (sc->sc_flags & WM_F_LOCK_SWSM) {
10785 /* Get the SW semaphore. */
10786 timeout = sc->sc_nvm_wordsize + 1;
10787 while (timeout) {
10788 swsm = CSR_READ(sc, WMREG_SWSM);
10789
10790 if ((swsm & SWSM_SMBI) == 0)
10791 break;
10792
10793 delay(50);
10794 timeout--;
10795 }
10796
10797 if (timeout == 0) {
10798 aprint_error_dev(sc->sc_dev,
10799 "could not acquire SWSM SMBI\n");
10800 return 1;
10801 }
10802 }
10803
10804 /* Get the FW semaphore. */
10805 timeout = sc->sc_nvm_wordsize + 1;
10806 while (timeout) {
10807 swsm = CSR_READ(sc, WMREG_SWSM);
10808 swsm |= SWSM_SWESMBI;
10809 CSR_WRITE(sc, WMREG_SWSM, swsm);
10810 /* If we managed to set the bit we got the semaphore. */
10811 swsm = CSR_READ(sc, WMREG_SWSM);
10812 if (swsm & SWSM_SWESMBI)
10813 break;
10814
10815 delay(50);
10816 timeout--;
10817 }
10818
10819 if (timeout == 0) {
10820 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
10821 /* Release semaphores */
10822 wm_put_swsm_semaphore(sc);
10823 return 1;
10824 }
10825 return 0;
10826 }
10827
10828 static void
10829 wm_put_swsm_semaphore(struct wm_softc *sc)
10830 {
10831 uint32_t swsm;
10832
10833 swsm = CSR_READ(sc, WMREG_SWSM);
10834 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
10835 CSR_WRITE(sc, WMREG_SWSM, swsm);
10836 }
10837
10838 static int
10839 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
10840 {
10841 uint32_t swfw_sync;
10842 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
10843 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
10844 	int timeout;
10845
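	/*
	 * Each shared resource has a software and a firmware ownership
	 * bit in SW_FW_SYNC; the resource is free only when both halves
	 * of the mask are clear, and we claim it by setting the
	 * software bit while holding the SWSM semaphore.
	 */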
10846 for (timeout = 0; timeout < 200; timeout++) {
10847 if (sc->sc_flags & WM_F_LOCK_SWSM) {
10848 if (wm_get_swsm_semaphore(sc)) {
10849 aprint_error_dev(sc->sc_dev,
10850 "%s: failed to get semaphore\n",
10851 __func__);
10852 return 1;
10853 }
10854 }
10855 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
10856 if ((swfw_sync & (swmask | fwmask)) == 0) {
10857 swfw_sync |= swmask;
10858 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
10859 if (sc->sc_flags & WM_F_LOCK_SWSM)
10860 wm_put_swsm_semaphore(sc);
10861 return 0;
10862 }
10863 if (sc->sc_flags & WM_F_LOCK_SWSM)
10864 wm_put_swsm_semaphore(sc);
10865 delay(5000);
10866 }
10867 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
10868 device_xname(sc->sc_dev), mask, swfw_sync);
10869 return 1;
10870 }
10871
10872 static void
10873 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
10874 {
10875 uint32_t swfw_sync;
10876
10877 if (sc->sc_flags & WM_F_LOCK_SWSM) {
10878 while (wm_get_swsm_semaphore(sc) != 0)
10879 continue;
10880 }
10881 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
10882 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
10883 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
10884 if (sc->sc_flags & WM_F_LOCK_SWSM)
10885 wm_put_swsm_semaphore(sc);
10886 }
10887
10888 static int
10889 wm_get_swfwhw_semaphore(struct wm_softc *sc)
10890 {
10891 uint32_t ext_ctrl;
10892 	int timeout;
10893
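	/*
	 * Ownership is claimed by setting the MDIO software ownership
	 * bit in EXTCNFCTR and reading it back: the write only sticks
	 * if the hardware granted us the semaphore.
	 */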
10894 for (timeout = 0; timeout < 200; timeout++) {
10895 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10896 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
10897 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10898
10899 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10900 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
10901 return 0;
10902 delay(5000);
10903 }
10904 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
10905 device_xname(sc->sc_dev), ext_ctrl);
10906 return 1;
10907 }
10908
10909 static void
10910 wm_put_swfwhw_semaphore(struct wm_softc *sc)
10911 {
10912 uint32_t ext_ctrl;
10913 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10914 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10915 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10916 }
10917
10918 static int
10919 wm_get_hw_semaphore_82573(struct wm_softc *sc)
10920 {
10921 int i = 0;
10922 uint32_t reg;
10923
10924 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10925 do {
10926 CSR_WRITE(sc, WMREG_EXTCNFCTR,
10927 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
10928 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10929 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
10930 break;
10931 delay(2*1000);
10932 i++;
10933 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
10934
10935 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
10936 wm_put_hw_semaphore_82573(sc);
10937 log(LOG_ERR, "%s: Driver can't access the PHY\n",
10938 device_xname(sc->sc_dev));
10939 return -1;
10940 }
10941
10942 return 0;
10943 }
10944
10945 static void
10946 wm_put_hw_semaphore_82573(struct wm_softc *sc)
10947 {
10948 uint32_t reg;
10949
10950 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10951 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10952 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
10953 }
10954
10955 /*
10956 * Management mode and power management related subroutines.
10957 * BMC, AMT, suspend/resume and EEE.
10958 */
10959
10960 #ifdef WM_WOL
10961 static int
10962 wm_check_mng_mode(struct wm_softc *sc)
10963 {
10964 int rv;
10965
10966 switch (sc->sc_type) {
10967 case WM_T_ICH8:
10968 case WM_T_ICH9:
10969 case WM_T_ICH10:
10970 case WM_T_PCH:
10971 case WM_T_PCH2:
10972 case WM_T_PCH_LPT:
10973 rv = wm_check_mng_mode_ich8lan(sc);
10974 break;
10975 case WM_T_82574:
10976 case WM_T_82583:
10977 rv = wm_check_mng_mode_82574(sc);
10978 break;
10979 case WM_T_82571:
10980 case WM_T_82572:
10981 case WM_T_82573:
10982 case WM_T_80003:
10983 rv = wm_check_mng_mode_generic(sc);
10984 break;
10985 default:
10986 		/* nothing to do */
10987 rv = 0;
10988 break;
10989 }
10990
10991 return rv;
10992 }
10993
10994 static int
10995 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
10996 {
10997 uint32_t fwsm;
10998
10999 fwsm = CSR_READ(sc, WMREG_FWSM);
11000
11001 if (((fwsm & FWSM_FW_VALID) != 0)
11002 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11003 return 1;
11004
11005 return 0;
11006 }
11007
11008 static int
11009 wm_check_mng_mode_82574(struct wm_softc *sc)
11010 {
11011 uint16_t data;
11012
11013 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11014
11015 if ((data & NVM_CFG2_MNGM_MASK) != 0)
11016 return 1;
11017
11018 return 0;
11019 }
11020
11021 static int
11022 wm_check_mng_mode_generic(struct wm_softc *sc)
11023 {
11024 uint32_t fwsm;
11025
11026 fwsm = CSR_READ(sc, WMREG_FWSM);
11027
11028 if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
11029 return 1;
11030
11031 return 0;
11032 }
11033 #endif /* WM_WOL */
11034
11035 static int
11036 wm_enable_mng_pass_thru(struct wm_softc *sc)
11037 {
11038 uint32_t manc, fwsm, factps;
11039
11040 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
11041 return 0;
11042
11043 manc = CSR_READ(sc, WMREG_MANC);
11044
11045 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
11046 device_xname(sc->sc_dev), manc));
11047 if ((manc & MANC_RECV_TCO_EN) == 0)
11048 return 0;
11049
11050 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
11051 fwsm = CSR_READ(sc, WMREG_FWSM);
11052 factps = CSR_READ(sc, WMREG_FACTPS);
11053 if (((factps & FACTPS_MNGCG) == 0)
11054 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11055 return 1;
11056 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
11057 uint16_t data;
11058
11059 factps = CSR_READ(sc, WMREG_FACTPS);
11060 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11061 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
11062 device_xname(sc->sc_dev), factps, data));
11063 if (((factps & FACTPS_MNGCG) == 0)
11064 && ((data & NVM_CFG2_MNGM_MASK)
11065 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
11066 return 1;
11067 } else if (((manc & MANC_SMBUS_EN) != 0)
11068 && ((manc & MANC_ASF_EN) == 0))
11069 return 1;
11070
11071 return 0;
11072 }
11073
11074 static bool
11075 wm_phy_resetisblocked(struct wm_softc *sc)
11076 {
11077 bool blocked = false;
11078 uint32_t reg;
11079 int i = 0;
11080
11081 switch (sc->sc_type) {
11082 case WM_T_ICH8:
11083 case WM_T_ICH9:
11084 case WM_T_ICH10:
11085 case WM_T_PCH:
11086 case WM_T_PCH2:
11087 case WM_T_PCH_LPT:
11088 do {
11089 reg = CSR_READ(sc, WMREG_FWSM);
11090 if ((reg & FWSM_RSPCIPHY) == 0) {
11091 blocked = true;
11092 delay(10*1000);
11093 continue;
11094 }
11095 blocked = false;
11096 } while (blocked && (i++ < 10));
11097 return blocked;
11099 case WM_T_82571:
11100 case WM_T_82572:
11101 case WM_T_82573:
11102 case WM_T_82574:
11103 case WM_T_82583:
11104 case WM_T_80003:
11105 reg = CSR_READ(sc, WMREG_MANC);
11106 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
11107 return true;
11108 else
11109 return false;
11111 default:
11112 /* no problem */
11113 break;
11114 }
11115
11116 return false;
11117 }
11118
11119 static void
11120 wm_get_hw_control(struct wm_softc *sc)
11121 {
11122 uint32_t reg;
11123
11124 switch (sc->sc_type) {
11125 case WM_T_82573:
11126 reg = CSR_READ(sc, WMREG_SWSM);
11127 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
11128 break;
11129 case WM_T_82571:
11130 case WM_T_82572:
11131 case WM_T_82574:
11132 case WM_T_82583:
11133 case WM_T_80003:
11134 case WM_T_ICH8:
11135 case WM_T_ICH9:
11136 case WM_T_ICH10:
11137 case WM_T_PCH:
11138 case WM_T_PCH2:
11139 case WM_T_PCH_LPT:
11140 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11141 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
11142 break;
11143 default:
11144 break;
11145 }
11146 }
11147
11148 static void
11149 wm_release_hw_control(struct wm_softc *sc)
11150 {
11151 uint32_t reg;
11152
11153 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
11154 return;
11155
11156 if (sc->sc_type == WM_T_82573) {
11157 reg = CSR_READ(sc, WMREG_SWSM);
11158 reg &= ~SWSM_DRV_LOAD;
11159 		CSR_WRITE(sc, WMREG_SWSM, reg);
11160 } else {
11161 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11162 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
11163 }
11164 }
11165
11166 static void
11167 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
11168 {
11169 uint32_t reg;
11170
11171 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11172
11173 if (on != 0)
11174 reg |= EXTCNFCTR_GATE_PHY_CFG;
11175 else
11176 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
11177
11178 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11179 }
11180
11181 static void
11182 wm_smbustopci(struct wm_softc *sc)
11183 {
11184 uint32_t fwsm;
11185
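	/*
	 * If no firmware is managing the PHY and a PHY reset is not
	 * blocked, pulse LANPHYPC (override with value 0) to force the
	 * PHY from SMBus mode back to PCIe mode.
	 */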
11186 fwsm = CSR_READ(sc, WMREG_FWSM);
11187 if (((fwsm & FWSM_FW_VALID) == 0)
11188 	    && (wm_phy_resetisblocked(sc) == false)) {
11189 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
11190 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
11191 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11192 CSR_WRITE_FLUSH(sc);
11193 delay(10);
11194 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
11195 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11196 CSR_WRITE_FLUSH(sc);
11197 delay(50*1000);
11198
11199 /*
11200 * Gate automatic PHY configuration by hardware on non-managed
11201 * 82579
11202 */
11203 if (sc->sc_type == WM_T_PCH2)
11204 wm_gate_hw_phy_config_ich8lan(sc, 1);
11205 }
11206 }
11207
11208 static void
11209 wm_init_manageability(struct wm_softc *sc)
11210 {
11211
11212 if (sc->sc_flags & WM_F_HAS_MANAGE) {
11213 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
11214 uint32_t manc = CSR_READ(sc, WMREG_MANC);
11215
11216 /* Disable hardware interception of ARP */
11217 manc &= ~MANC_ARP_EN;
11218
11219 /* Enable receiving management packets to the host */
11220 if (sc->sc_type >= WM_T_82571) {
11221 manc |= MANC_EN_MNG2HOST;
11222 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
11223 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
11224 }
11225
11226 CSR_WRITE(sc, WMREG_MANC, manc);
11227 }
11228 }
11229
11230 static void
11231 wm_release_manageability(struct wm_softc *sc)
11232 {
11233
11234 if (sc->sc_flags & WM_F_HAS_MANAGE) {
11235 uint32_t manc = CSR_READ(sc, WMREG_MANC);
11236
11237 manc |= MANC_ARP_EN;
11238 if (sc->sc_type >= WM_T_82571)
11239 manc &= ~MANC_EN_MNG2HOST;
11240
11241 CSR_WRITE(sc, WMREG_MANC, manc);
11242 }
11243 }
11244
11245 static void
11246 wm_get_wakeup(struct wm_softc *sc)
11247 {
11248
11249 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
11250 switch (sc->sc_type) {
11251 case WM_T_82573:
11252 case WM_T_82583:
11253 sc->sc_flags |= WM_F_HAS_AMT;
11254 /* FALLTHROUGH */
11255 case WM_T_80003:
11256 case WM_T_82541:
11257 case WM_T_82547:
11258 case WM_T_82571:
11259 case WM_T_82572:
11260 case WM_T_82574:
11261 case WM_T_82575:
11262 case WM_T_82576:
11263 case WM_T_82580:
11264 case WM_T_I350:
11265 case WM_T_I354:
11266 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
11267 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
11268 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11269 break;
11270 case WM_T_ICH8:
11271 case WM_T_ICH9:
11272 case WM_T_ICH10:
11273 case WM_T_PCH:
11274 case WM_T_PCH2:
11275 case WM_T_PCH_LPT:
11276 sc->sc_flags |= WM_F_HAS_AMT;
11277 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11278 break;
11279 default:
11280 break;
11281 }
11282
11283 /* 1: HAS_MANAGE */
11284 if (wm_enable_mng_pass_thru(sc) != 0)
11285 sc->sc_flags |= WM_F_HAS_MANAGE;
11286
11287 #ifdef WM_DEBUG
11288 printf("\n");
11289 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
11290 printf("HAS_AMT,");
11291 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
11292 printf("ARC_SUBSYS_VALID,");
11293 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
11294 printf("ASF_FIRMWARE_PRES,");
11295 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
11296 printf("HAS_MANAGE,");
11297 printf("\n");
11298 #endif
11299 /*
11300 	 * Note that the WOL flags are set after the EEPROM-related reset
11301 	 * code has run.
11302 */
11303 }
11304
11305 #ifdef WM_WOL
11306 /* WOL in the newer chipset interfaces (pchlan) */
11307 static void
11308 wm_enable_phy_wakeup(struct wm_softc *sc)
11309 {
11310 #if 0
11311 uint16_t preg;
11312
11313 /* Copy MAC RARs to PHY RARs */
11314
11315 /* Copy MAC MTA to PHY MTA */
11316
11317 /* Configure PHY Rx Control register */
11318
11319 /* Enable PHY wakeup in MAC register */
11320
11321 /* Configure and enable PHY wakeup in PHY registers */
11322
11323 /* Activate PHY wakeup */
11324
11325 /* XXX */
11326 #endif
11327 }
11328
11329 /* Power down workaround on D3 */
11330 static void
11331 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
11332 {
11333 uint32_t reg;
11334 int i;
11335
11336 for (i = 0; i < 2; i++) {
11337 /* Disable link */
11338 reg = CSR_READ(sc, WMREG_PHY_CTRL);
11339 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
11340 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11341
11342 /*
11343 * Call gig speed drop workaround on Gig disable before
11344 * accessing any PHY registers
11345 */
11346 if (sc->sc_type == WM_T_ICH8)
11347 wm_gig_downshift_workaround_ich8lan(sc);
11348
11349 /* Write VR power-down enable */
11350 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
11351 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
11352 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
11353 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
11354
11355 /* Read it back and test */
11356 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
11357 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
11358 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
11359 break;
11360
11361 /* Issue PHY reset and repeat at most one more time */
11362 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
11363 }
11364 }
11365
11366 static void
11367 wm_enable_wakeup(struct wm_softc *sc)
11368 {
11369 uint32_t reg, pmreg;
11370 pcireg_t pmode;
11371
11372 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
11373 &pmreg, NULL) == 0)
11374 return;
11375
11376 /* Advertise the wakeup capability */
11377 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
11378 | CTRL_SWDPIN(3));
11379 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
11380
11381 /* ICH workaround */
11382 switch (sc->sc_type) {
11383 case WM_T_ICH8:
11384 case WM_T_ICH9:
11385 case WM_T_ICH10:
11386 case WM_T_PCH:
11387 case WM_T_PCH2:
11388 case WM_T_PCH_LPT:
11389 /* Disable gig during WOL */
11390 reg = CSR_READ(sc, WMREG_PHY_CTRL);
11391 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
11392 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11393 if (sc->sc_type == WM_T_PCH)
11394 wm_gmii_reset(sc);
11395
11396 /* Power down workaround */
11397 if (sc->sc_phytype == WMPHY_82577) {
11398 struct mii_softc *child;
11399
11400 /* Assume that the PHY is copper */
11401 child = LIST_FIRST(&sc->sc_mii.mii_phys);
11402 if (child->mii_mpd_rev <= 2)
11403 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
11404 (768 << 5) | 25, 0x0444); /* magic num */
11405 }
11406 break;
11407 default:
11408 break;
11409 }
11410
11411 /* Keep the laser running on fiber adapters */
11412 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
11413 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
11414 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11415 reg |= CTRL_EXT_SWDPIN(3);
11416 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11417 }
11418
11419 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
11420 #if 0 /* for the multicast packet */
11421 reg |= WUFC_MC;
11422 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
11423 #endif
11424
11425 if (sc->sc_type == WM_T_PCH) {
11426 wm_enable_phy_wakeup(sc);
11427 } else {
11428 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
11429 CSR_WRITE(sc, WMREG_WUFC, reg);
11430 }
11431
11432 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11433 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11434 || (sc->sc_type == WM_T_PCH2))
11435 && (sc->sc_phytype == WMPHY_IGP_3))
11436 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
11437
11438 /* Request PME */
11439 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
11440 #if 0
11441 /* Disable WOL */
11442 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
11443 #else
11444 /* For WOL */
11445 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
11446 #endif
11447 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
11448 }
11449 #endif /* WM_WOL */
11450
11451 /* LPLU */
11452
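/*
 * LPLU (Low Power Link Up) trades link speed for power. It is disabled
 * in D0 so that the device negotiates full speed while active.
 */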
11453 static void
11454 wm_lplu_d0_disable(struct wm_softc *sc)
11455 {
11456 uint32_t reg;
11457
11458 reg = CSR_READ(sc, WMREG_PHY_CTRL);
11459 reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
11460 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11461 }
11462
11463 static void
11464 wm_lplu_d0_disable_pch(struct wm_softc *sc)
11465 {
11466 uint32_t reg;
11467
11468 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
11469 reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
11470 reg |= HV_OEM_BITS_ANEGNOW;
11471 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
11472 }
11473
11474 /* EEE */
11475
11476 static void
11477 wm_set_eee_i350(struct wm_softc *sc)
11478 {
11479 uint32_t ipcnfg, eeer;
11480
11481 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
11482 eeer = CSR_READ(sc, WMREG_EEER);
11483
11484 if ((sc->sc_flags & WM_F_EEE) != 0) {
11485 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
11486 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
11487 | EEER_LPI_FC);
11488 } else {
11489 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
11490 ipcnfg &= ~IPCNFG_10BASE_TE;
11491 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
11492 | EEER_LPI_FC);
11493 }
11494
11495 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
11496 CSR_WRITE(sc, WMREG_EEER, eeer);
11497 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
11498 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
11499 }
11500
11501 /*
11502 * Workarounds (mainly PHY related).
11503 * Basically, PHY's workarounds are in the PHY drivers.
11504 */
11505
11506 /* Work-around for 82566 Kumeran PCS lock loss */
11507 static void
11508 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
11509 {
11510 #if 0
11511 int miistatus, active, i;
11512 int reg;
11513
11514 miistatus = sc->sc_mii.mii_media_status;
11515
11516 /* If the link is not up, do nothing */
11517 if ((miistatus & IFM_ACTIVE) == 0)
11518 return;
11519
11520 active = sc->sc_mii.mii_media_active;
11521
11522 /* Nothing to do if the link is other than 1Gbps */
11523 if (IFM_SUBTYPE(active) != IFM_1000_T)
11524 return;
11525
11526 for (i = 0; i < 10; i++) {
11527 /* read twice */
11528 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
11529 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
11530 if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
11531 goto out; /* GOOD! */
11532
11533 /* Reset the PHY */
11534 wm_gmii_reset(sc);
11535 delay(5*1000);
11536 }
11537
11538 /* Disable GigE link negotiation */
11539 reg = CSR_READ(sc, WMREG_PHY_CTRL);
11540 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
11541 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11542
11543 /*
11544 * Call gig speed drop workaround on Gig disable before accessing
11545 * any PHY registers.
11546 */
11547 wm_gig_downshift_workaround_ich8lan(sc);
11548
11549 out:
11550 return;
11551 #endif
11552 }
11553
11554 /* WOL from S5 stops working */
11555 static void
11556 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
11557 {
11558 uint16_t kmrn_reg;
11559
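	/*
	 * Toggling the Kumeran near-end loopback bit works around an
	 * IGP3 issue where gigabit WOL from the S5 state stops working.
	 */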
11560 /* Only for igp3 */
11561 if (sc->sc_phytype == WMPHY_IGP_3) {
11562 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
11563 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
11564 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
11565 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
11566 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
11567 }
11568 }
11569
/*
 * Workaround for PCH PHYs.
 * XXX Should this be moved to a new PHY driver?
 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{
	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/* (PCH rev. 2) && (82577 && (phy rev 2 or 3)) */

	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		/* PCH rev. < 3 */
		if (sc->sc_rev < 3) {
			/* XXX 6 bit shift? Why? Is it page2? */
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
			    0x66c0);
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
			    0xffff);
		}

		/* XXX phy rev. < 2 */
	}

	/* Select page 0 */

	/* XXX acquire semaphore */
	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	/* XXX release semaphore */

	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there is
	 * link, so that K1 is disabled when the link runs at 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}

static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	wm_set_mdio_slow_mode_hv(sc);
}

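/*
 * Disable K1 (a Kumeran power saving state) whenever the link is up;
 * otherwise apply the default from the NVM.  The IGP3_KMRN_DIAG
 * writes are link stall fixes.
 */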
static void
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	/* XXX acquire semaphore */

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);

	/* XXX release semaphore */
}

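/* Put the PHY's MDIO interface into slow mode via HV_KMRN_MODE_CTRL */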
static void
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	uint32_t reg;

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

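/*
 * Update the K1 enable bit in the Kumeran K1 config register, then
 * briefly force the MAC speed setting (CTRL_FRCSPD plus the speed
 * bypass bit) so that the change takes effect, and finally restore
 * the original CTRL/CTRL_EXT values.
 */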
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);
}

/* Special case - the 82575 needs a manual init after reset ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

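/*
 * Re-apply the external/combined MDIO settings from the NVM to
 * MDICNFG after a reset (82580, SGMII mode only).
 */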
static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
static void
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	int phyval;
	bool wa_done = false;
	int i;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return;
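	/*
	 * Retry the reset sequence until the PHY PLL frequency register
	 * no longer reads back as unconfigured.
	 */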
	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			break; /* OK */
		}

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

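		/* Bounce the function through D3hot and back to D0 */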
		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
}
