/*	$NetBSD: if_wm.c,v 1.331 2015/06/06 17:36:50 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- EEE (Energy Efficient Ethernet)
 *	- MSI/MSI-X
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.331 2015/06/06 17:36:50 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */
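
/*
 * Because both ring sizes are powers of two, the index macros above
 * wrap with a simple mask rather than a modulo or a branch.  For
 * example, with WM_NTXDESC(sc) == 4096 on an >= 82544 chip,
 * WM_NEXTTX(sc, 4095) == (4096 & 4095) == 0.
 */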

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
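
/*
 * The same mask trick handles both directions here: WM_NEXTRX(255)
 * wraps to 0 and WM_PREVRX(0) wraps to 255, since (-1 & 255) == 255.
 */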

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t	swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define WM_TX_LOCK(_sc)		if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define WM_RX_LOCK(_sc)		if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
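
/*
 * Note that the lock macros tolerate a NULL lock pointer and collapse
 * to unlocked operation in that case.  WM_BOTH_LOCK() always acquires
 * Tx before Rx and WM_BOTH_UNLOCK() releases in the reverse order, so
 * any two contexts taking the pair through these macros use a
 * consistent lock order and cannot deadlock against each other.
 */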

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
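
/*
 * Usage sketch: the Rx path appends each newly completed buffer with
 * WM_RXCHAIN_LINK() and, once a complete packet has been assembled
 * and handed up the stack, calls WM_RXCHAIN_RESET() to start the next
 * chain.  Since sc_rxtailp always points at the m_next slot to fill,
 * appending is O(1) regardless of how long the chain has grown.
 */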

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
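
/*
 * CSR_WRITE_FLUSH() works by reading back a harmless register (STATUS
 * here): the read cannot complete until previously posted writes have
 * reached the device, which is the usual way to flush posted PCI
 * writes before, e.g., a delay that the hardware requires.
 */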

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
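
/*
 * Worked example: with WM_NTXDESC(sc) == 256, WM_CDTXSYNC(sc, 252, 8,
 * ops) first syncs descriptors 252..255 (the tail of the ring) and
 * then descriptors 0..3, because the requested range wraps past the
 * end of the descriptor array; a non-wrapping range needs only the
 * single bus_dmamap_sync() at the bottom.
 */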

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
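
/*
 * The arithmetic behind the tweak: the Ethernet header is 14 bytes,
 * so starting the frame at offset 2 makes the header end at offset 16
 * and leaves the IP header 4-byte aligned.  The cost is that the
 * usable buffer shrinks to (2K - 2) bytes, hence the restriction
 * described above for frames larger than the standard MTU.
 */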

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
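
/*
 * As the two functions above show, indirect I/O access goes through
 * an address/data window pair in I/O space: the CSR offset is written
 * at I/O offset 0 and the value is then read or written at I/O
 * offset 4.  This is only used to work around chip bugs, never for
 * normal operation (see the BAR probing in wm_attach()).
 */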

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}
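
/*
 * The polling loop bounds the wait at SCTL_CTL_POLL_TIMEOUT
 * iterations of 5us each.  If SCTL_CTL_READY never appears within
 * that budget we only warn rather than fail, since the write itself
 * has already been issued to the device.
 */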

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
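
/*
 * sizeof(bus_addr_t) is a compile-time constant, so the branch above
 * (like the ones in WM_CDTXADDR_HI()/WM_CDRXADDR_HI()) is folded
 * away: platforms with a 64-bit bus_addr_t store the high word of
 * the DMA address, 32-bit platforms simply zero it.
 */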

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, nvmword;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
			 * That's not a problem, because those newer chips
			 * don't have the bug this mapping works around.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
#ifdef WM_MPSAFE
	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
#endif
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1561 /*
1562 * CSA (Communication Streaming Architecture) is about as fast
1563 		 * as a 32-bit, 66MHz PCI bus.
1564 */
1565 sc->sc_flags |= WM_F_CSA;
1566 sc->sc_bus_speed = 66;
1567 aprint_verbose_dev(sc->sc_dev,
1568 "Communication Streaming Architecture\n");
1569 if (sc->sc_type == WM_T_82547) {
1570 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1571 callout_setfunc(&sc->sc_txfifo_ch,
1572 wm_82547_txfifo_stall, sc);
1573 aprint_verbose_dev(sc->sc_dev,
1574 "using 82547 Tx FIFO stall work-around\n");
1575 }
1576 } else if (sc->sc_type >= WM_T_82571) {
1577 sc->sc_flags |= WM_F_PCIE;
1578 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1579 && (sc->sc_type != WM_T_ICH10)
1580 && (sc->sc_type != WM_T_PCH)
1581 && (sc->sc_type != WM_T_PCH2)
1582 && (sc->sc_type != WM_T_PCH_LPT)) {
1583 /* ICH* and PCH* have no PCIe capability registers */
1584 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1585 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1586 NULL) == 0)
1587 aprint_error_dev(sc->sc_dev,
1588 "unable to find PCIe capability\n");
1589 }
1590 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1591 } else {
1592 reg = CSR_READ(sc, WMREG_STATUS);
1593 if (reg & STATUS_BUS64)
1594 sc->sc_flags |= WM_F_BUS64;
1595 if ((reg & STATUS_PCIX_MODE) != 0) {
1596 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1597
1598 sc->sc_flags |= WM_F_PCIX;
1599 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1600 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1601 aprint_error_dev(sc->sc_dev,
1602 "unable to find PCIX capability\n");
1603 else if (sc->sc_type != WM_T_82545_3 &&
1604 sc->sc_type != WM_T_82546_3) {
1605 /*
1606 * Work around a problem caused by the BIOS
1607 * setting the max memory read byte count
1608 * incorrectly.
1609 */
1610 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1611 sc->sc_pcixe_capoff + PCIX_CMD);
1612 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1613 sc->sc_pcixe_capoff + PCIX_STATUS);
1614
1615 bytecnt =
1616 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1617 PCIX_CMD_BYTECNT_SHIFT;
1618 maxb =
1619 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1620 PCIX_STATUS_MAXB_SHIFT;
1621 if (bytecnt > maxb) {
1622 aprint_verbose_dev(sc->sc_dev,
1623 "resetting PCI-X MMRBC: %d -> %d\n",
1624 512 << bytecnt, 512 << maxb);
1625 pcix_cmd = (pcix_cmd &
1626 ~PCIX_CMD_BYTECNT_MASK) |
1627 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1628 pci_conf_write(pa->pa_pc, pa->pa_tag,
1629 sc->sc_pcixe_capoff + PCIX_CMD,
1630 pcix_cmd);
1631 }
1632 }
1633 }
1634 /*
1635 * The quad port adapter is special; it has a PCIX-PCIX
1636 * bridge on the board, and can run the secondary bus at
1637 * a higher speed.
1638 */
1639 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1640 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1641 : 66;
1642 } else if (sc->sc_flags & WM_F_PCIX) {
1643 switch (reg & STATUS_PCIXSPD_MASK) {
1644 case STATUS_PCIXSPD_50_66:
1645 sc->sc_bus_speed = 66;
1646 break;
1647 case STATUS_PCIXSPD_66_100:
1648 sc->sc_bus_speed = 100;
1649 break;
1650 case STATUS_PCIXSPD_100_133:
1651 sc->sc_bus_speed = 133;
1652 break;
1653 default:
1654 aprint_error_dev(sc->sc_dev,
1655 "unknown PCIXSPD %d; assuming 66MHz\n",
1656 reg & STATUS_PCIXSPD_MASK);
1657 sc->sc_bus_speed = 66;
1658 break;
1659 }
1660 } else
1661 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1662 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1663 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1664 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1665 }
1666
1667 /*
1668 * Allocate the control data structures, and create and load the
1669 * DMA map for it.
1670 *
1671 * NOTE: All Tx descriptors must be in the same 4G segment of
1672 * memory. So must Rx descriptors. We simplify by allocating
1673 * both sets within the same 4G segment.
1674 */
1675 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1676 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1677 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1678 sizeof(struct wm_control_data_82542) :
1679 sizeof(struct wm_control_data_82544);
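	/*
	 * The 4G constraint noted above is enforced by the boundary
	 * argument to bus_dmamem_alloc() below: no returned segment
	 * may cross a 4G address boundary.
	 */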
1680 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1681 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1682 &sc->sc_cd_rseg, 0)) != 0) {
1683 aprint_error_dev(sc->sc_dev,
1684 "unable to allocate control data, error = %d\n",
1685 error);
1686 goto fail_0;
1687 }
1688
1689 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1690 sc->sc_cd_rseg, sc->sc_cd_size,
1691 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1692 aprint_error_dev(sc->sc_dev,
1693 "unable to map control data, error = %d\n", error);
1694 goto fail_1;
1695 }
1696
1697 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1698 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1699 aprint_error_dev(sc->sc_dev,
1700 "unable to create control data DMA map, error = %d\n",
1701 error);
1702 goto fail_2;
1703 }
1704
1705 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1706 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1707 aprint_error_dev(sc->sc_dev,
1708 "unable to load control data DMA map, error = %d\n",
1709 error);
1710 goto fail_3;
1711 }
1712
1713 /* Create the transmit buffer DMA maps. */
1714 WM_TXQUEUELEN(sc) =
1715 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1716 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1717 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1718 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1719 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1720 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1721 aprint_error_dev(sc->sc_dev,
1722 "unable to create Tx DMA map %d, error = %d\n",
1723 i, error);
1724 goto fail_4;
1725 }
1726 }
1727
1728 /* Create the receive buffer DMA maps. */
1729 for (i = 0; i < WM_NRXDESC; i++) {
1730 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1731 MCLBYTES, 0, 0,
1732 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1733 aprint_error_dev(sc->sc_dev,
1734 "unable to create Rx DMA map %d error = %d\n",
1735 i, error);
1736 goto fail_5;
1737 }
1738 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1739 }
1740
1741 /* clear interesting stat counters */
1742 CSR_READ(sc, WMREG_COLC);
1743 CSR_READ(sc, WMREG_RXERRC);
1744
1745 /* get PHY control from SMBus to PCIe */
1746 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1747 || (sc->sc_type == WM_T_PCH_LPT))
1748 wm_smbustopci(sc);
1749
1750 /* Reset the chip to a known state. */
1751 wm_reset(sc);
1752
1753 /* Get some information about the EEPROM. */
1754 switch (sc->sc_type) {
1755 case WM_T_82542_2_0:
1756 case WM_T_82542_2_1:
1757 case WM_T_82543:
1758 case WM_T_82544:
1759 /* Microwire */
1760 sc->sc_nvm_wordsize = 64;
1761 sc->sc_nvm_addrbits = 6;
1762 break;
1763 case WM_T_82540:
1764 case WM_T_82545:
1765 case WM_T_82545_3:
1766 case WM_T_82546:
1767 case WM_T_82546_3:
1768 /* Microwire */
1769 reg = CSR_READ(sc, WMREG_EECD);
1770 if (reg & EECD_EE_SIZE) {
1771 sc->sc_nvm_wordsize = 256;
1772 sc->sc_nvm_addrbits = 8;
1773 } else {
1774 sc->sc_nvm_wordsize = 64;
1775 sc->sc_nvm_addrbits = 6;
1776 }
1777 sc->sc_flags |= WM_F_LOCK_EECD;
1778 break;
1779 case WM_T_82541:
1780 case WM_T_82541_2:
1781 case WM_T_82547:
1782 case WM_T_82547_2:
1783 sc->sc_flags |= WM_F_LOCK_EECD;
1784 reg = CSR_READ(sc, WMREG_EECD);
1785 if (reg & EECD_EE_TYPE) {
1786 /* SPI */
1787 sc->sc_flags |= WM_F_EEPROM_SPI;
1788 wm_nvm_set_addrbits_size_eecd(sc);
1789 } else {
1790 /* Microwire */
1791 if ((reg & EECD_EE_ABITS) != 0) {
1792 sc->sc_nvm_wordsize = 256;
1793 sc->sc_nvm_addrbits = 8;
1794 } else {
1795 sc->sc_nvm_wordsize = 64;
1796 sc->sc_nvm_addrbits = 6;
1797 }
1798 }
1799 break;
1800 case WM_T_82571:
1801 case WM_T_82572:
1802 /* SPI */
1803 sc->sc_flags |= WM_F_EEPROM_SPI;
1804 wm_nvm_set_addrbits_size_eecd(sc);
1805 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1806 break;
1807 case WM_T_82573:
1808 sc->sc_flags |= WM_F_LOCK_SWSM;
1809 /* FALLTHROUGH */
1810 case WM_T_82574:
1811 case WM_T_82583:
1812 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1813 sc->sc_flags |= WM_F_EEPROM_FLASH;
1814 sc->sc_nvm_wordsize = 2048;
1815 } else {
1816 /* SPI */
1817 sc->sc_flags |= WM_F_EEPROM_SPI;
1818 wm_nvm_set_addrbits_size_eecd(sc);
1819 }
1820 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1821 break;
1822 case WM_T_82575:
1823 case WM_T_82576:
1824 case WM_T_82580:
1825 case WM_T_I350:
1826 case WM_T_I354:
1827 case WM_T_80003:
1828 /* SPI */
1829 sc->sc_flags |= WM_F_EEPROM_SPI;
1830 wm_nvm_set_addrbits_size_eecd(sc);
1831 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1832 | WM_F_LOCK_SWSM;
1833 break;
1834 case WM_T_ICH8:
1835 case WM_T_ICH9:
1836 case WM_T_ICH10:
1837 case WM_T_PCH:
1838 case WM_T_PCH2:
1839 case WM_T_PCH_LPT:
1840 /* FLASH */
1841 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1842 sc->sc_nvm_wordsize = 2048;
1843 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1844 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1845 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1846 aprint_error_dev(sc->sc_dev,
1847 "can't map FLASH registers\n");
1848 goto fail_5;
1849 }
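		/*
		 * GFPREG describes the flash base and limit in sector
		 * units.  The area holds two NVM banks, so the final
		 * division converts the per-bank size to 16-bit words.
		 */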
1850 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1851 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1852 ICH_FLASH_SECTOR_SIZE;
1853 sc->sc_ich8_flash_bank_size =
1854 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1855 sc->sc_ich8_flash_bank_size -=
1856 (reg & ICH_GFPREG_BASE_MASK);
1857 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1858 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1859 break;
1860 case WM_T_I210:
1861 case WM_T_I211:
1862 if (wm_nvm_get_flash_presence_i210(sc)) {
1863 wm_nvm_set_addrbits_size_eecd(sc);
1864 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1865 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1866 } else {
1867 sc->sc_nvm_wordsize = INVM_SIZE;
1868 sc->sc_flags |= WM_F_EEPROM_INVM;
1869 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1870 }
1871 break;
1872 default:
1873 break;
1874 }
1875
1876 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1877 switch (sc->sc_type) {
1878 case WM_T_82571:
1879 case WM_T_82572:
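		/*
		 * If the SWSM2 lock was not already held, we are the
		 * first to get here, and it falls to us to clear SMBI.
		 */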
1880 reg = CSR_READ(sc, WMREG_SWSM2);
1881 if ((reg & SWSM2_LOCK) == 0) {
1882 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1883 force_clear_smbi = true;
1884 } else
1885 force_clear_smbi = false;
1886 break;
1887 case WM_T_82573:
1888 case WM_T_82574:
1889 case WM_T_82583:
1890 force_clear_smbi = true;
1891 break;
1892 default:
1893 force_clear_smbi = false;
1894 break;
1895 }
1896 if (force_clear_smbi) {
1897 reg = CSR_READ(sc, WMREG_SWSM);
1898 if ((reg & SWSM_SMBI) != 0)
1899 aprint_error_dev(sc->sc_dev,
1900 "Please update the Bootagent\n");
1901 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1902 }
1903
1904 /*
1905 	 * Defer printing the EEPROM type until after verifying the checksum.
1906 * This allows the EEPROM type to be printed correctly in the case
1907 * that no EEPROM is attached.
1908 */
1909 /*
1910 * Validate the EEPROM checksum. If the checksum fails, flag
1911 * this for later, so we can fail future reads from the EEPROM.
1912 */
1913 if (wm_nvm_validate_checksum(sc)) {
1914 /*
1915 		 * Read it a second time, because some PCI-e parts fail
1916 		 * the first check due to the link being in a sleep state.
1917 */
1918 if (wm_nvm_validate_checksum(sc))
1919 sc->sc_flags |= WM_F_EEPROM_INVALID;
1920 }
1921
1922 /* Set device properties (macflags) */
1923 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1924
1925 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1926 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
1927 else {
1928 aprint_verbose_dev(sc->sc_dev, "%u words ",
1929 sc->sc_nvm_wordsize);
1930 if (sc->sc_flags & WM_F_EEPROM_INVM)
1931 aprint_verbose("iNVM");
1932 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
1933 aprint_verbose("FLASH(HW)");
1934 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
1935 aprint_verbose("FLASH");
1936 else {
1937 if (sc->sc_flags & WM_F_EEPROM_SPI)
1938 eetype = "SPI";
1939 else
1940 eetype = "MicroWire";
1941 aprint_verbose("(%d address bits) %s EEPROM",
1942 sc->sc_nvm_addrbits, eetype);
1943 }
1944 }
1945 wm_nvm_version(sc);
1946 aprint_verbose("\n");
1947
1948 /* Check for I21[01] PLL workaround */
1949 if (sc->sc_type == WM_T_I210)
1950 sc->sc_flags |= WM_F_PLL_WA_I210;
1951 if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
1952 /* NVM image release 3.25 has a workaround */
1953 if ((sc->sc_nvm_ver_major > 3)
1954 || ((sc->sc_nvm_ver_major == 3)
1955 && (sc->sc_nvm_ver_minor >= 25)))
1956 			sc->sc_flags &= ~WM_F_PLL_WA_I210;
1957 else {
1958 aprint_verbose_dev(sc->sc_dev,
1959 "ROM image version %d.%d is older than 3.25\n",
1960 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
1961 sc->sc_flags |= WM_F_PLL_WA_I210;
1962 }
1963 }
1964 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
1965 wm_pll_workaround_i210(sc);
1966
1967 switch (sc->sc_type) {
1968 case WM_T_82571:
1969 case WM_T_82572:
1970 case WM_T_82573:
1971 case WM_T_82574:
1972 case WM_T_82583:
1973 case WM_T_80003:
1974 case WM_T_ICH8:
1975 case WM_T_ICH9:
1976 case WM_T_ICH10:
1977 case WM_T_PCH:
1978 case WM_T_PCH2:
1979 case WM_T_PCH_LPT:
1980 if (wm_check_mng_mode(sc) != 0)
1981 wm_get_hw_control(sc);
1982 break;
1983 default:
1984 break;
1985 }
1986 wm_get_wakeup(sc);
1987 /*
1988 * Read the Ethernet address from the EEPROM, if not first found
1989 * in device properties.
1990 */
1991 ea = prop_dictionary_get(dict, "mac-address");
1992 if (ea != NULL) {
1993 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1994 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1995 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1996 } else {
1997 if (wm_read_mac_addr(sc, enaddr) != 0) {
1998 aprint_error_dev(sc->sc_dev,
1999 "unable to read Ethernet address\n");
2000 goto fail_5;
2001 }
2002 }
2003
2004 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2005 ether_sprintf(enaddr));
2006
2007 /*
2008 * Read the config info from the EEPROM, and set up various
2009 * bits in the control registers based on their contents.
2010 */
2011 pn = prop_dictionary_get(dict, "i82543-cfg1");
2012 if (pn != NULL) {
2013 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2014 cfg1 = (uint16_t) prop_number_integer_value(pn);
2015 } else {
2016 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2017 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2018 goto fail_5;
2019 }
2020 }
2021
2022 pn = prop_dictionary_get(dict, "i82543-cfg2");
2023 if (pn != NULL) {
2024 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2025 cfg2 = (uint16_t) prop_number_integer_value(pn);
2026 } else {
2027 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2028 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2029 goto fail_5;
2030 }
2031 }
2032
2033 /* check for WM_F_WOL */
2034 switch (sc->sc_type) {
2035 case WM_T_82542_2_0:
2036 case WM_T_82542_2_1:
2037 case WM_T_82543:
2038 /* dummy? */
2039 eeprom_data = 0;
2040 apme_mask = NVM_CFG3_APME;
2041 break;
2042 case WM_T_82544:
2043 apme_mask = NVM_CFG2_82544_APM_EN;
2044 eeprom_data = cfg2;
2045 break;
2046 case WM_T_82546:
2047 case WM_T_82546_3:
2048 case WM_T_82571:
2049 case WM_T_82572:
2050 case WM_T_82573:
2051 case WM_T_82574:
2052 case WM_T_82583:
2053 case WM_T_80003:
2054 default:
2055 apme_mask = NVM_CFG3_APME;
2056 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2057 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2058 break;
2059 case WM_T_82575:
2060 case WM_T_82576:
2061 case WM_T_82580:
2062 case WM_T_I350:
2063 case WM_T_I354: /* XXX ok? */
2064 case WM_T_ICH8:
2065 case WM_T_ICH9:
2066 case WM_T_ICH10:
2067 case WM_T_PCH:
2068 case WM_T_PCH2:
2069 case WM_T_PCH_LPT:
2070 /* XXX The funcid should be checked on some devices */
2071 apme_mask = WUC_APME;
2072 eeprom_data = CSR_READ(sc, WMREG_WUC);
2073 break;
2074 }
2075
2076 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2077 if ((eeprom_data & apme_mask) != 0)
2078 sc->sc_flags |= WM_F_WOL;
2079 #ifdef WM_DEBUG
2080 if ((sc->sc_flags & WM_F_WOL) != 0)
2081 printf("WOL\n");
2082 #endif
2083
2084 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2085 /* Check NVM for autonegotiation */
2086 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2087 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2088 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2089 }
2090 }
2091
2092 /*
2093 * XXX need special handling for some multiple port cards
2094 	 * to disable a particular port.
2095 */
2096
2097 if (sc->sc_type >= WM_T_82544) {
2098 pn = prop_dictionary_get(dict, "i82543-swdpin");
2099 if (pn != NULL) {
2100 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2101 swdpin = (uint16_t) prop_number_integer_value(pn);
2102 } else {
2103 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2104 aprint_error_dev(sc->sc_dev,
2105 "unable to read SWDPIN\n");
2106 goto fail_5;
2107 }
2108 }
2109 }
2110
2111 if (cfg1 & NVM_CFG1_ILOS)
2112 sc->sc_ctrl |= CTRL_ILOS;
2113
2114 /*
2115 * XXX
2116 	 * This code isn't correct, because pins 2 and 3 are located
2117 	 * at different positions on newer chips. Check all the datasheets.
2118 	 *
2119 	 * Until this is resolved, only handle chips up to the 82580.
2120 */
2121 if (sc->sc_type <= WM_T_82580) {
2122 if (sc->sc_type >= WM_T_82544) {
2123 sc->sc_ctrl |=
2124 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2125 CTRL_SWDPIO_SHIFT;
2126 sc->sc_ctrl |=
2127 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2128 CTRL_SWDPINS_SHIFT;
2129 } else {
2130 sc->sc_ctrl |=
2131 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2132 CTRL_SWDPIO_SHIFT;
2133 }
2134 }
2135
2136 /* XXX For other than 82580? */
2137 if (sc->sc_type == WM_T_82580) {
2138 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2139 printf("CFG3 = %08x\n", (uint32_t)nvmword);
2140 if (nvmword & __BIT(13)) {
2141 printf("SET ILOS\n");
2142 sc->sc_ctrl |= CTRL_ILOS;
2143 }
2144 }
2145
2146 #if 0
2147 if (sc->sc_type >= WM_T_82544) {
2148 if (cfg1 & NVM_CFG1_IPS0)
2149 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2150 if (cfg1 & NVM_CFG1_IPS1)
2151 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2152 sc->sc_ctrl_ext |=
2153 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2154 CTRL_EXT_SWDPIO_SHIFT;
2155 sc->sc_ctrl_ext |=
2156 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2157 CTRL_EXT_SWDPINS_SHIFT;
2158 } else {
2159 sc->sc_ctrl_ext |=
2160 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2161 CTRL_EXT_SWDPIO_SHIFT;
2162 }
2163 #endif
2164
2165 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2166 #if 0
2167 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2168 #endif
2169
2170 /*
2171 * Set up some register offsets that are different between
2172 * the i82542 and the i82543 and later chips.
2173 */
2174 if (sc->sc_type < WM_T_82543) {
2175 sc->sc_rdt_reg = WMREG_OLD_RDT0;
2176 sc->sc_tdt_reg = WMREG_OLD_TDT;
2177 } else {
2178 sc->sc_rdt_reg = WMREG_RDT;
2179 sc->sc_tdt_reg = WMREG_TDT;
2180 }
2181
2182 if (sc->sc_type == WM_T_PCH) {
2183 uint16_t val;
2184
2185 /* Save the NVM K1 bit setting */
2186 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2187
2188 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2189 sc->sc_nvm_k1_enabled = 1;
2190 else
2191 sc->sc_nvm_k1_enabled = 0;
2192 }
2193
2194 /*
2195 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
2196 * media structures accordingly.
2197 */
2198 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2199 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2200 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2201 || sc->sc_type == WM_T_82573
2202 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2203 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2204 wm_gmii_mediainit(sc, wmp->wmp_product);
2205 } else if (sc->sc_type < WM_T_82543 ||
2206 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2207 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2208 aprint_error_dev(sc->sc_dev,
2209 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2210 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2211 }
2212 wm_tbi_mediainit(sc);
2213 } else {
2214 switch (sc->sc_type) {
2215 case WM_T_82575:
2216 case WM_T_82576:
2217 case WM_T_82580:
2218 case WM_T_I350:
2219 case WM_T_I354:
2220 case WM_T_I210:
2221 case WM_T_I211:
2222 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2223 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2224 switch (link_mode) {
2225 case CTRL_EXT_LINK_MODE_1000KX:
2226 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2227 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2228 break;
2229 case CTRL_EXT_LINK_MODE_SGMII:
2230 if (wm_sgmii_uses_mdio(sc)) {
2231 aprint_verbose_dev(sc->sc_dev,
2232 "SGMII(MDIO)\n");
2233 sc->sc_flags |= WM_F_SGMII;
2234 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2235 break;
2236 }
2237 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2238 /*FALLTHROUGH*/
2239 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2240 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2241 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2242 if (link_mode
2243 == CTRL_EXT_LINK_MODE_SGMII) {
2244 sc->sc_mediatype
2245 = WM_MEDIATYPE_COPPER;
2246 sc->sc_flags |= WM_F_SGMII;
2247 } else {
2248 sc->sc_mediatype
2249 = WM_MEDIATYPE_SERDES;
2250 aprint_verbose_dev(sc->sc_dev,
2251 "SERDES\n");
2252 }
2253 break;
2254 }
2255 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2256 aprint_verbose_dev(sc->sc_dev,
2257 "SERDES\n");
2258
2259 /* Change current link mode setting */
2260 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2261 switch (sc->sc_mediatype) {
2262 case WM_MEDIATYPE_COPPER:
2263 reg |= CTRL_EXT_LINK_MODE_SGMII;
2264 break;
2265 case WM_MEDIATYPE_SERDES:
2266 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2267 break;
2268 default:
2269 break;
2270 }
2271 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2272 break;
2273 case CTRL_EXT_LINK_MODE_GMII:
2274 default:
2275 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2276 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2277 break;
2278 }
2279
2280 			/* Enable the I2C interface only when SGMII is in use. */
2281 if ((sc->sc_flags & WM_F_SGMII) != 0)
2282 reg |= CTRL_EXT_I2C_ENA;
2283 else
2284 reg &= ~CTRL_EXT_I2C_ENA;
2285 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2286
2287 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2288 wm_gmii_mediainit(sc, wmp->wmp_product);
2289 else
2290 wm_tbi_mediainit(sc);
2291 break;
2292 default:
2293 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2294 aprint_error_dev(sc->sc_dev,
2295 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2296 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2297 wm_gmii_mediainit(sc, wmp->wmp_product);
2298 }
2299 }
2300
2301 ifp = &sc->sc_ethercom.ec_if;
2302 xname = device_xname(sc->sc_dev);
2303 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2304 ifp->if_softc = sc;
2305 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2306 ifp->if_ioctl = wm_ioctl;
2307 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2308 ifp->if_start = wm_nq_start;
2309 else
2310 ifp->if_start = wm_start;
2311 ifp->if_watchdog = wm_watchdog;
2312 ifp->if_init = wm_init;
2313 ifp->if_stop = wm_stop;
2314 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2315 IFQ_SET_READY(&ifp->if_snd);
2316
2317 /* Check for jumbo frame */
2318 switch (sc->sc_type) {
2319 case WM_T_82573:
2320 /* XXX limited to 9234 if ASPM is disabled */
2321 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2322 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2323 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2324 break;
2325 case WM_T_82571:
2326 case WM_T_82572:
2327 case WM_T_82574:
2328 case WM_T_82575:
2329 case WM_T_82576:
2330 case WM_T_82580:
2331 case WM_T_I350:
2332 	case WM_T_I354: /* XXX ok? */
2333 case WM_T_I210:
2334 case WM_T_I211:
2335 case WM_T_80003:
2336 case WM_T_ICH9:
2337 case WM_T_ICH10:
2338 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2339 case WM_T_PCH_LPT:
2340 /* XXX limited to 9234 */
2341 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2342 break;
2343 case WM_T_PCH:
2344 /* XXX limited to 4096 */
2345 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2346 break;
2347 case WM_T_82542_2_0:
2348 case WM_T_82542_2_1:
2349 case WM_T_82583:
2350 case WM_T_ICH8:
2351 /* No support for jumbo frame */
2352 break;
2353 default:
2354 /* ETHER_MAX_LEN_JUMBO */
2355 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2356 break;
2357 }
2358
2359 	/* If we're an i82543 or greater, we can support VLANs. */
2360 if (sc->sc_type >= WM_T_82543)
2361 sc->sc_ethercom.ec_capabilities |=
2362 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2363
2364 /*
2365 	 * We can perform TCPv4 and UDPv4 checksums in hardware. Only
2366 	 * on i82543 and later.
2367 */
2368 if (sc->sc_type >= WM_T_82543) {
2369 ifp->if_capabilities |=
2370 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2371 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2372 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2373 IFCAP_CSUM_TCPv6_Tx |
2374 IFCAP_CSUM_UDPv6_Tx;
2375 }
2376
2377 /*
2378 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2379 *
2380 * 82541GI (8086:1076) ... no
2381 * 82572EI (8086:10b9) ... yes
2382 */
2383 if (sc->sc_type >= WM_T_82571) {
2384 ifp->if_capabilities |=
2385 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2386 }
2387
2388 /*
2389 	 * If we're an i82544 or greater (except i82547), we can do
2390 * TCP segmentation offload.
2391 */
2392 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2393 ifp->if_capabilities |= IFCAP_TSOv4;
2394 }
2395
2396 if (sc->sc_type >= WM_T_82571) {
2397 ifp->if_capabilities |= IFCAP_TSOv6;
2398 }
2399
2400 #ifdef WM_MPSAFE
2401 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2402 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2403 #else
2404 sc->sc_tx_lock = NULL;
2405 sc->sc_rx_lock = NULL;
2406 #endif
2407
2408 /* Attach the interface. */
2409 if_attach(ifp);
2410 ether_ifattach(ifp, enaddr);
2411 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2412 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2413 RND_FLAG_DEFAULT);
2414
2415 #ifdef WM_EVENT_COUNTERS
2416 /* Attach event counters. */
2417 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2418 NULL, xname, "txsstall");
2419 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2420 NULL, xname, "txdstall");
2421 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2422 NULL, xname, "txfifo_stall");
2423 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2424 NULL, xname, "txdw");
2425 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2426 NULL, xname, "txqe");
2427 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2428 NULL, xname, "rxintr");
2429 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2430 NULL, xname, "linkintr");
2431
2432 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2433 NULL, xname, "rxipsum");
2434 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2435 NULL, xname, "rxtusum");
2436 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2437 NULL, xname, "txipsum");
2438 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2439 NULL, xname, "txtusum");
2440 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2441 NULL, xname, "txtusum6");
2442
2443 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2444 NULL, xname, "txtso");
2445 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2446 NULL, xname, "txtso6");
2447 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2448 NULL, xname, "txtsopain");
2449
2450 for (i = 0; i < WM_NTXSEGS; i++) {
2451 snprintf(wm_txseg_evcnt_names[i],
2452 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2453 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2454 NULL, xname, wm_txseg_evcnt_names[i]);
2455 }
2456
2457 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2458 NULL, xname, "txdrop");
2459
2460 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2461 NULL, xname, "tu");
2462
2463 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2464 NULL, xname, "tx_xoff");
2465 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2466 NULL, xname, "tx_xon");
2467 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2468 NULL, xname, "rx_xoff");
2469 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2470 NULL, xname, "rx_xon");
2471 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2472 NULL, xname, "rx_macctl");
2473 #endif /* WM_EVENT_COUNTERS */
2474
2475 if (pmf_device_register(self, wm_suspend, wm_resume))
2476 pmf_class_network_register(self, ifp);
2477 else
2478 aprint_error_dev(self, "couldn't establish power handler\n");
2479
2480 sc->sc_flags |= WM_F_ATTACHED;
2481 return;
2482
2483 /*
2484 * Free any resources we've allocated during the failed attach
2485 * attempt. Do this in reverse order and fall through.
2486 */
2487 fail_5:
2488 for (i = 0; i < WM_NRXDESC; i++) {
2489 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2490 bus_dmamap_destroy(sc->sc_dmat,
2491 sc->sc_rxsoft[i].rxs_dmamap);
2492 }
2493 fail_4:
2494 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2495 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2496 bus_dmamap_destroy(sc->sc_dmat,
2497 sc->sc_txsoft[i].txs_dmamap);
2498 }
2499 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2500 fail_3:
2501 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2502 fail_2:
2503 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2504 sc->sc_cd_size);
2505 fail_1:
2506 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2507 fail_0:
2508 return;
2509 }
2510
2511 /* The detach function (ca_detach) */
2512 static int
2513 wm_detach(device_t self, int flags __unused)
2514 {
2515 struct wm_softc *sc = device_private(self);
2516 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2517 int i;
2518 #ifndef WM_MPSAFE
2519 int s;
2520 #endif
2521
2522 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2523 return 0;
2524
2525 #ifndef WM_MPSAFE
2526 s = splnet();
2527 #endif
2528 /* Stop the interface. Callouts are stopped in it. */
2529 wm_stop(ifp, 1);
2530
2531 #ifndef WM_MPSAFE
2532 splx(s);
2533 #endif
2534
2535 pmf_device_deregister(self);
2536
2537 /* Tell the firmware about the release */
2538 WM_BOTH_LOCK(sc);
2539 wm_release_manageability(sc);
2540 wm_release_hw_control(sc);
2541 WM_BOTH_UNLOCK(sc);
2542
2543 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2544
2545 /* Delete all remaining media. */
2546 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2547
2548 ether_ifdetach(ifp);
2549 if_detach(ifp);
2550
2551
2552 /* Unload RX dmamaps and free mbufs */
2553 WM_RX_LOCK(sc);
2554 wm_rxdrain(sc);
2555 WM_RX_UNLOCK(sc);
2556 /* Must unlock here */
2557
2558 /* Free dmamap. It's the same as the end of the wm_attach() function */
2559 for (i = 0; i < WM_NRXDESC; i++) {
2560 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2561 bus_dmamap_destroy(sc->sc_dmat,
2562 sc->sc_rxsoft[i].rxs_dmamap);
2563 }
2564 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2565 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2566 bus_dmamap_destroy(sc->sc_dmat,
2567 sc->sc_txsoft[i].txs_dmamap);
2568 }
2569 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2570 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2571 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2572 sc->sc_cd_size);
2573 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2574
2575 /* Disestablish the interrupt handler */
2576 if (sc->sc_ih != NULL) {
2577 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2578 sc->sc_ih = NULL;
2579 }
2580
2581 /* Unmap the registers */
2582 if (sc->sc_ss) {
2583 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2584 sc->sc_ss = 0;
2585 }
2586
2587 if (sc->sc_ios) {
2588 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2589 sc->sc_ios = 0;
2590 }
2591
2592 if (sc->sc_tx_lock)
2593 mutex_obj_free(sc->sc_tx_lock);
2594 if (sc->sc_rx_lock)
2595 mutex_obj_free(sc->sc_rx_lock);
2596
2597 return 0;
2598 }
2599
2600 static bool
2601 wm_suspend(device_t self, const pmf_qual_t *qual)
2602 {
2603 struct wm_softc *sc = device_private(self);
2604
2605 wm_release_manageability(sc);
2606 wm_release_hw_control(sc);
2607 #ifdef WM_WOL
2608 wm_enable_wakeup(sc);
2609 #endif
2610
2611 return true;
2612 }
2613
2614 static bool
2615 wm_resume(device_t self, const pmf_qual_t *qual)
2616 {
2617 struct wm_softc *sc = device_private(self);
2618
2619 wm_init_manageability(sc);
2620
2621 return true;
2622 }
2623
2624 /*
2625 * wm_watchdog: [ifnet interface function]
2626 *
2627 * Watchdog timer handler.
2628 */
2629 static void
2630 wm_watchdog(struct ifnet *ifp)
2631 {
2632 struct wm_softc *sc = ifp->if_softc;
2633
2634 /*
2635 * Since we're using delayed interrupts, sweep up
2636 * before we report an error.
2637 */
2638 WM_TX_LOCK(sc);
2639 wm_txintr(sc);
2640 WM_TX_UNLOCK(sc);
2641
2642 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2643 #ifdef WM_DEBUG
2644 int i, j;
2645 struct wm_txsoft *txs;
2646 #endif
2647 log(LOG_ERR,
2648 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2649 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2650 sc->sc_txnext);
2651 ifp->if_oerrors++;
2652 #ifdef WM_DEBUG
2653 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2654 i = WM_NEXTTXS(sc, i)) {
2655 txs = &sc->sc_txsoft[i];
2656 printf("txs %d tx %d -> %d\n",
2657 i, txs->txs_firstdesc, txs->txs_lastdesc);
2658 for (j = txs->txs_firstdesc; ;
2659 j = WM_NEXTTX(sc, j)) {
2660 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2661 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2662 printf("\t %#08x%08x\n",
2663 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2664 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2665 if (j == txs->txs_lastdesc)
2666 break;
2667 }
2668 }
2669 #endif
2670 /* Reset the interface. */
2671 (void) wm_init(ifp);
2672 }
2673
2674 /* Try to get more packets going. */
2675 ifp->if_start(ifp);
2676 }
2677
2678 /*
2679 * wm_tick:
2680 *
2681 * One second timer, used to check link status, sweep up
2682 * completed transmit jobs, etc.
2683 */
2684 static void
2685 wm_tick(void *arg)
2686 {
2687 struct wm_softc *sc = arg;
2688 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2689 #ifndef WM_MPSAFE
2690 int s;
2691
2692 s = splnet();
2693 #endif
2694
2695 WM_TX_LOCK(sc);
2696
2697 if (sc->sc_stopping)
2698 goto out;
2699
2700 if (sc->sc_type >= WM_T_82542_2_1) {
2701 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2702 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2703 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2704 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2705 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2706 }
2707
2708 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2709 	ifp->if_ierrors += 0ULL /* ensure quad_t */
2710 + CSR_READ(sc, WMREG_CRCERRS)
2711 + CSR_READ(sc, WMREG_ALGNERRC)
2712 + CSR_READ(sc, WMREG_SYMERRC)
2713 + CSR_READ(sc, WMREG_RXERRC)
2714 + CSR_READ(sc, WMREG_SEC)
2715 + CSR_READ(sc, WMREG_CEXTERR)
2716 + CSR_READ(sc, WMREG_RLEC);
2717 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2718
2719 if (sc->sc_flags & WM_F_HAS_MII)
2720 mii_tick(&sc->sc_mii);
2721 else if ((sc->sc_type >= WM_T_82575)
2722 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2723 wm_serdes_tick(sc);
2724 else
2725 wm_tbi_tick(sc);
2726
2727 out:
2728 WM_TX_UNLOCK(sc);
2729 #ifndef WM_MPSAFE
2730 splx(s);
2731 #endif
2732
2733 if (!sc->sc_stopping)
2734 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2735 }
2736
2737 static int
2738 wm_ifflags_cb(struct ethercom *ec)
2739 {
2740 struct ifnet *ifp = &ec->ec_if;
2741 struct wm_softc *sc = ifp->if_softc;
2742 int change = ifp->if_flags ^ sc->sc_if_flags;
2743 int rc = 0;
2744
2745 WM_BOTH_LOCK(sc);
2746
2747 if (change != 0)
2748 sc->sc_if_flags = ifp->if_flags;
2749
2750 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2751 rc = ENETRESET;
2752 goto out;
2753 }
2754
2755 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2756 wm_set_filter(sc);
2757
2758 wm_set_vlan(sc);
2759
2760 out:
2761 WM_BOTH_UNLOCK(sc);
2762
2763 return rc;
2764 }
2765
2766 /*
2767 * wm_ioctl: [ifnet interface function]
2768 *
2769 * Handle control requests from the operator.
2770 */
2771 static int
2772 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2773 {
2774 struct wm_softc *sc = ifp->if_softc;
2775 struct ifreq *ifr = (struct ifreq *) data;
2776 struct ifaddr *ifa = (struct ifaddr *)data;
2777 struct sockaddr_dl *sdl;
2778 int s, error;
2779
2780 #ifndef WM_MPSAFE
2781 s = splnet();
2782 #endif
2783 switch (cmd) {
2784 case SIOCSIFMEDIA:
2785 case SIOCGIFMEDIA:
2786 WM_BOTH_LOCK(sc);
2787 /* Flow control requires full-duplex mode. */
2788 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2789 (ifr->ifr_media & IFM_FDX) == 0)
2790 ifr->ifr_media &= ~IFM_ETH_FMASK;
2791 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2792 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2793 /* We can do both TXPAUSE and RXPAUSE. */
2794 ifr->ifr_media |=
2795 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2796 }
2797 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2798 }
2799 WM_BOTH_UNLOCK(sc);
2800 #ifdef WM_MPSAFE
2801 s = splnet();
2802 #endif
2803 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2804 #ifdef WM_MPSAFE
2805 splx(s);
2806 #endif
2807 break;
2808 case SIOCINITIFADDR:
2809 WM_BOTH_LOCK(sc);
2810 if (ifa->ifa_addr->sa_family == AF_LINK) {
2811 sdl = satosdl(ifp->if_dl->ifa_addr);
2812 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2813 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2814 /* unicast address is first multicast entry */
2815 wm_set_filter(sc);
2816 error = 0;
2817 WM_BOTH_UNLOCK(sc);
2818 break;
2819 }
2820 WM_BOTH_UNLOCK(sc);
2821 /*FALLTHROUGH*/
2822 default:
2823 #ifdef WM_MPSAFE
2824 s = splnet();
2825 #endif
2826 /* It may call wm_start, so unlock here */
2827 error = ether_ioctl(ifp, cmd, data);
2828 #ifdef WM_MPSAFE
2829 splx(s);
2830 #endif
2831 if (error != ENETRESET)
2832 break;
2833
2834 error = 0;
2835
2836 if (cmd == SIOCSIFCAP) {
2837 error = (*ifp->if_init)(ifp);
2838 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2839 ;
2840 else if (ifp->if_flags & IFF_RUNNING) {
2841 /*
2842 * Multicast list has changed; set the hardware filter
2843 * accordingly.
2844 */
2845 WM_BOTH_LOCK(sc);
2846 wm_set_filter(sc);
2847 WM_BOTH_UNLOCK(sc);
2848 }
2849 break;
2850 }
2851
2852 /* Try to get more packets going. */
2853 ifp->if_start(ifp);
2854
2855 #ifndef WM_MPSAFE
2856 splx(s);
2857 #endif
2858 return error;
2859 }
2860
2861 /* MAC address related */
2862
2863 /*
2864  * Get the offset of the MAC address and return it.
2865  * If an error occurred, use offset 0.
2866 */
2867 static uint16_t
2868 wm_check_alt_mac_addr(struct wm_softc *sc)
2869 {
2870 uint16_t myea[ETHER_ADDR_LEN / 2];
2871 uint16_t offset = NVM_OFF_MACADDR;
2872
2873 /* Try to read alternative MAC address pointer */
2874 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2875 return 0;
2876
2877 	/* Check whether the pointer is valid. */
2878 if ((offset == 0x0000) || (offset == 0xffff))
2879 return 0;
2880
2881 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
2882 /*
2883 	 * Check whether the alternative MAC address is valid.
2884 	 * Some cards have a non-0xffff pointer but don't actually
2885 	 * use an alternative MAC address.
2886 	 *
2887 	 * Check that the broadcast bit is not set.
2888 */
2889 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2890 		if ((myea[0] & 0x01) == 0)
2891 return offset; /* Found */
2892
2893 /* Not found */
2894 return 0;
2895 }
2896
2897 static int
2898 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2899 {
2900 uint16_t myea[ETHER_ADDR_LEN / 2];
2901 uint16_t offset = NVM_OFF_MACADDR;
2902 int do_invert = 0;
2903
2904 switch (sc->sc_type) {
2905 case WM_T_82580:
2906 case WM_T_I350:
2907 case WM_T_I354:
2908 /* EEPROM Top Level Partitioning */
2909 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
2910 break;
2911 case WM_T_82571:
2912 case WM_T_82575:
2913 case WM_T_82576:
2914 case WM_T_80003:
2915 case WM_T_I210:
2916 case WM_T_I211:
2917 offset = wm_check_alt_mac_addr(sc);
2918 if (offset == 0)
2919 if ((sc->sc_funcid & 0x01) == 1)
2920 do_invert = 1;
2921 break;
2922 default:
2923 if ((sc->sc_funcid & 0x01) == 1)
2924 do_invert = 1;
2925 break;
2926 }
2927
2928 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2929 myea) != 0)
2930 goto bad;
2931
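	/*
	 * The NVM stores the Ethernet address as three little-endian
	 * 16-bit words; unpack it into bytes.
	 */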
2932 enaddr[0] = myea[0] & 0xff;
2933 enaddr[1] = myea[0] >> 8;
2934 enaddr[2] = myea[1] & 0xff;
2935 enaddr[3] = myea[1] >> 8;
2936 enaddr[4] = myea[2] & 0xff;
2937 enaddr[5] = myea[2] >> 8;
2938
2939 /*
2940 * Toggle the LSB of the MAC address on the second port
2941 * of some dual port cards.
2942 */
2943 if (do_invert != 0)
2944 enaddr[5] ^= 1;
2945
2946 return 0;
2947
2948 bad:
2949 return -1;
2950 }
2951
2952 /*
2953 * wm_set_ral:
2954 *
2955  *	Set an entry in the receive address list.
2956 */
2957 static void
2958 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2959 {
2960 uint32_t ral_lo, ral_hi;
2961
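	/*
	 * The address is split across a low register (bytes 0-3) and
	 * a high register (bytes 4-5); RAL_AV marks the entry valid.
	 * A NULL enaddr clears the slot.
	 */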
2962 if (enaddr != NULL) {
2963 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2964 (enaddr[3] << 24);
2965 ral_hi = enaddr[4] | (enaddr[5] << 8);
2966 ral_hi |= RAL_AV;
2967 } else {
2968 ral_lo = 0;
2969 ral_hi = 0;
2970 }
2971
2972 if (sc->sc_type >= WM_T_82544) {
2973 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2974 ral_lo);
2975 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2976 ral_hi);
2977 } else {
2978 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2979 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2980 }
2981 }
2982
2983 /*
2984 * wm_mchash:
2985 *
2986 * Compute the hash of the multicast address for the 4096-bit
2987 * multicast filter.
2988 */
2989 static uint32_t
2990 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2991 {
2992 static const int lo_shift[4] = { 4, 3, 2, 0 };
2993 static const int hi_shift[4] = { 4, 5, 6, 8 };
2994 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2995 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
2996 uint32_t hash;
2997
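	/*
	 * sc_mchash_type selects which address bits form the hash:
	 * the ICH/PCH variants use a 10-bit hash (1024-bit filter),
	 * everything else a 12-bit hash (4096-bit filter).
	 */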
2998 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2999 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3000 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3001 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3002 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3003 return (hash & 0x3ff);
3004 }
3005 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3006 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3007
3008 return (hash & 0xfff);
3009 }
3010
3011 /*
3012 * wm_set_filter:
3013 *
3014 * Set up the receive filter.
3015 */
3016 static void
3017 wm_set_filter(struct wm_softc *sc)
3018 {
3019 struct ethercom *ec = &sc->sc_ethercom;
3020 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3021 struct ether_multi *enm;
3022 struct ether_multistep step;
3023 bus_addr_t mta_reg;
3024 uint32_t hash, reg, bit;
3025 int i, size;
3026
3027 if (sc->sc_type >= WM_T_82544)
3028 mta_reg = WMREG_CORDOVA_MTA;
3029 else
3030 mta_reg = WMREG_MTA;
3031
3032 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3033
3034 if (ifp->if_flags & IFF_BROADCAST)
3035 sc->sc_rctl |= RCTL_BAM;
3036 if (ifp->if_flags & IFF_PROMISC) {
3037 sc->sc_rctl |= RCTL_UPE;
3038 goto allmulti;
3039 }
3040
3041 /*
3042 * Set the station address in the first RAL slot, and
3043 * clear the remaining slots.
3044 */
3045 if (sc->sc_type == WM_T_ICH8)
3046 		size = WM_RAL_TABSIZE_ICH8 - 1;
3047 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3048 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3049 || (sc->sc_type == WM_T_PCH_LPT))
3050 size = WM_RAL_TABSIZE_ICH8;
3051 else if (sc->sc_type == WM_T_82575)
3052 size = WM_RAL_TABSIZE_82575;
3053 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3054 size = WM_RAL_TABSIZE_82576;
3055 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3056 size = WM_RAL_TABSIZE_I350;
3057 else
3058 size = WM_RAL_TABSIZE;
3059 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3060 for (i = 1; i < size; i++)
3061 wm_set_ral(sc, NULL, i);
3062
3063 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3064 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3065 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
3066 size = WM_ICH8_MC_TABSIZE;
3067 else
3068 size = WM_MC_TABSIZE;
3069 /* Clear out the multicast table. */
3070 for (i = 0; i < size; i++)
3071 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3072
3073 ETHER_FIRST_MULTI(step, ec, enm);
3074 while (enm != NULL) {
3075 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3076 /*
3077 * We must listen to a range of multicast addresses.
3078 * For now, just accept all multicasts, rather than
3079 * trying to set only those filter bits needed to match
3080 * the range. (At this time, the only use of address
3081 * ranges is for IP multicast routing, for which the
3082 * range is big enough to require all bits set.)
3083 */
3084 goto allmulti;
3085 }
3086
3087 hash = wm_mchash(sc, enm->enm_addrlo);
3088
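		/*
		 * The hash selects one bit in the multicast table:
		 * the upper bits choose one of the 32-bit MTA
		 * registers, the low 5 bits the bit within it.
		 */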
3089 reg = (hash >> 5);
3090 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3091 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3092 || (sc->sc_type == WM_T_PCH2)
3093 || (sc->sc_type == WM_T_PCH_LPT))
3094 reg &= 0x1f;
3095 else
3096 reg &= 0x7f;
3097 bit = hash & 0x1f;
3098
3099 hash = CSR_READ(sc, mta_reg + (reg << 2));
3100 hash |= 1U << bit;
3101
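		/*
		 * On the 82544, writing an MTA register may disturb
		 * the adjacent one; as in other e1000-derived drivers,
		 * read back the previous register and rewrite it after
		 * updating an odd-numbered one.
		 */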
3102 /* XXX Hardware bug?? */
3103 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3104 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3105 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3106 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3107 } else
3108 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3109
3110 ETHER_NEXT_MULTI(step, enm);
3111 }
3112
3113 ifp->if_flags &= ~IFF_ALLMULTI;
3114 goto setit;
3115
3116 allmulti:
3117 ifp->if_flags |= IFF_ALLMULTI;
3118 sc->sc_rctl |= RCTL_MPE;
3119
3120 setit:
3121 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3122 }
3123
3124 /* Reset and init related */
3125
3126 static void
3127 wm_set_vlan(struct wm_softc *sc)
3128 {
3129 /* Deal with VLAN enables. */
3130 if (VLAN_ATTACHED(&sc->sc_ethercom))
3131 sc->sc_ctrl |= CTRL_VME;
3132 else
3133 sc->sc_ctrl &= ~CTRL_VME;
3134
3135 /* Write the control registers. */
3136 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3137 }
3138
3139 static void
3140 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3141 {
3142 uint32_t gcr;
3143 pcireg_t ctrl2;
3144
3145 gcr = CSR_READ(sc, WMREG_GCR);
3146
3147 /* Only take action if timeout value is defaulted to 0 */
3148 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3149 goto out;
3150
3151 if ((gcr & GCR_CAP_VER2) == 0) {
3152 gcr |= GCR_CMPL_TMOUT_10MS;
3153 goto out;
3154 }
3155
3156 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3157 sc->sc_pcixe_capoff + PCIE_DCSR2);
3158 ctrl2 |= WM_PCIE_DCSR2_16MS;
3159 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3160 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3161
3162 out:
3163 /* Disable completion timeout resend */
3164 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3165
3166 CSR_WRITE(sc, WMREG_GCR, gcr);
3167 }
3168
3169 void
3170 wm_get_auto_rd_done(struct wm_softc *sc)
3171 {
3172 int i;
3173
3174 /* wait for eeprom to reload */
3175 switch (sc->sc_type) {
3176 case WM_T_82571:
3177 case WM_T_82572:
3178 case WM_T_82573:
3179 case WM_T_82574:
3180 case WM_T_82583:
3181 case WM_T_82575:
3182 case WM_T_82576:
3183 case WM_T_82580:
3184 case WM_T_I350:
3185 case WM_T_I354:
3186 case WM_T_I210:
3187 case WM_T_I211:
3188 case WM_T_80003:
3189 case WM_T_ICH8:
3190 case WM_T_ICH9:
3191 for (i = 0; i < 10; i++) {
3192 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3193 break;
3194 delay(1000);
3195 }
3196 if (i == 10) {
3197 log(LOG_ERR, "%s: auto read from eeprom failed to "
3198 "complete\n", device_xname(sc->sc_dev));
3199 }
3200 break;
3201 default:
3202 break;
3203 }
3204 }
3205
3206 void
3207 wm_lan_init_done(struct wm_softc *sc)
3208 {
3209 uint32_t reg = 0;
3210 int i;
3211
3212 /* wait for eeprom to reload */
3213 switch (sc->sc_type) {
3214 case WM_T_ICH10:
3215 case WM_T_PCH:
3216 case WM_T_PCH2:
3217 case WM_T_PCH_LPT:
3218 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3219 reg = CSR_READ(sc, WMREG_STATUS);
3220 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3221 break;
3222 delay(100);
3223 }
3224 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3225 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3226 "complete\n", device_xname(sc->sc_dev), __func__);
3227 }
3228 break;
3229 default:
3230 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3231 __func__);
3232 break;
3233 }
3234
3235 reg &= ~STATUS_LAN_INIT_DONE;
3236 CSR_WRITE(sc, WMREG_STATUS, reg);
3237 }
3238
3239 void
3240 wm_get_cfg_done(struct wm_softc *sc)
3241 {
3242 int mask;
3243 uint32_t reg;
3244 int i;
3245
3246 /* wait for eeprom to reload */
3247 switch (sc->sc_type) {
3248 case WM_T_82542_2_0:
3249 case WM_T_82542_2_1:
3250 /* null */
3251 break;
3252 case WM_T_82543:
3253 case WM_T_82544:
3254 case WM_T_82540:
3255 case WM_T_82545:
3256 case WM_T_82545_3:
3257 case WM_T_82546:
3258 case WM_T_82546_3:
3259 case WM_T_82541:
3260 case WM_T_82541_2:
3261 case WM_T_82547:
3262 case WM_T_82547_2:
3263 case WM_T_82573:
3264 case WM_T_82574:
3265 case WM_T_82583:
3266 /* generic */
3267 delay(10*1000);
3268 break;
3269 case WM_T_80003:
3270 case WM_T_82571:
3271 case WM_T_82572:
3272 case WM_T_82575:
3273 case WM_T_82576:
3274 case WM_T_82580:
3275 case WM_T_I350:
3276 case WM_T_I354:
3277 case WM_T_I210:
3278 case WM_T_I211:
3279 if (sc->sc_type == WM_T_82571) {
3280 /* Only 82571 shares port 0 */
3281 mask = EEMNGCTL_CFGDONE_0;
3282 } else
3283 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3284 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3285 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3286 break;
3287 delay(1000);
3288 }
3289 if (i >= WM_PHY_CFG_TIMEOUT) {
3290 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3291 device_xname(sc->sc_dev), __func__));
3292 }
3293 break;
3294 case WM_T_ICH8:
3295 case WM_T_ICH9:
3296 case WM_T_ICH10:
3297 case WM_T_PCH:
3298 case WM_T_PCH2:
3299 case WM_T_PCH_LPT:
3300 delay(10*1000);
3301 if (sc->sc_type >= WM_T_ICH10)
3302 wm_lan_init_done(sc);
3303 else
3304 wm_get_auto_rd_done(sc);
3305
3306 reg = CSR_READ(sc, WMREG_STATUS);
3307 if ((reg & STATUS_PHYRA) != 0)
3308 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3309 break;
3310 default:
3311 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3312 __func__);
3313 break;
3314 }
3315 }
3316
3317 /* Init hardware bits */
3318 void
3319 wm_initialize_hardware_bits(struct wm_softc *sc)
3320 {
3321 uint32_t tarc0, tarc1, reg;
3322
3323 /* For 82571 variant, 80003 and ICHs */
3324 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3325 || (sc->sc_type >= WM_T_80003)) {
3326
3327 /* Transmit Descriptor Control 0 */
3328 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3329 reg |= TXDCTL_COUNT_DESC;
3330 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3331
3332 /* Transmit Descriptor Control 1 */
3333 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3334 reg |= TXDCTL_COUNT_DESC;
3335 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3336
3337 /* TARC0 */
3338 tarc0 = CSR_READ(sc, WMREG_TARC0);
3339 switch (sc->sc_type) {
3340 case WM_T_82571:
3341 case WM_T_82572:
3342 case WM_T_82573:
3343 case WM_T_82574:
3344 case WM_T_82583:
3345 case WM_T_80003:
3346 /* Clear bits 30..27 */
3347 tarc0 &= ~__BITS(30, 27);
3348 break;
3349 default:
3350 break;
3351 }
3352
3353 switch (sc->sc_type) {
3354 case WM_T_82571:
3355 case WM_T_82572:
3356 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3357
3358 tarc1 = CSR_READ(sc, WMREG_TARC1);
3359 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3360 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3361 /* 8257[12] Errata No.7 */
3362 			tarc1 |= __BIT(22);	/* TARC1 bit 22 */
3363
3364 /* TARC1 bit 28 */
3365 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3366 tarc1 &= ~__BIT(28);
3367 else
3368 tarc1 |= __BIT(28);
3369 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3370
3371 /*
3372 * 8257[12] Errata No.13
3373 			 * Disable Dynamic Clock Gating.
3374 */
3375 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3376 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3377 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3378 break;
3379 case WM_T_82573:
3380 case WM_T_82574:
3381 case WM_T_82583:
3382 if ((sc->sc_type == WM_T_82574)
3383 || (sc->sc_type == WM_T_82583))
3384 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3385
3386 /* Extended Device Control */
3387 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3388 reg &= ~__BIT(23); /* Clear bit 23 */
3389 reg |= __BIT(22); /* Set bit 22 */
3390 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3391
3392 /* Device Control */
3393 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3394 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3395
3396 /* PCIe Control Register */
3397 if ((sc->sc_type == WM_T_82574)
3398 || (sc->sc_type == WM_T_82583)) {
3399 /*
3400 * Document says this bit must be set for
3401 * proper operation.
3402 */
3403 reg = CSR_READ(sc, WMREG_GCR);
3404 reg |= __BIT(22);
3405 CSR_WRITE(sc, WMREG_GCR, reg);
3406
3407 /*
3408 				 * Apply a workaround for a hardware erratum
3409 				 * documented in the errata docs. It fixes an
3410 				 * issue where some error-prone or unreliable
3411 				 * PCIe completions occur, particularly with
3412 				 * ASPM enabled. Without the fix, the issue
3413 				 * can cause Tx timeouts.
3414 */
3415 reg = CSR_READ(sc, WMREG_GCR2);
3416 reg |= __BIT(0);
3417 CSR_WRITE(sc, WMREG_GCR2, reg);
3418 }
3419 break;
3420 case WM_T_80003:
3421 /* TARC0 */
3422 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3423 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3424 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3425
3426 /* TARC1 bit 28 */
3427 tarc1 = CSR_READ(sc, WMREG_TARC1);
3428 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3429 tarc1 &= ~__BIT(28);
3430 else
3431 tarc1 |= __BIT(28);
3432 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3433 break;
3434 case WM_T_ICH8:
3435 case WM_T_ICH9:
3436 case WM_T_ICH10:
3437 case WM_T_PCH:
3438 case WM_T_PCH2:
3439 case WM_T_PCH_LPT:
3440 /* TARC 0 */
3441 if (sc->sc_type == WM_T_ICH8) {
3442 /* Set TARC0 bits 29 and 28 */
3443 tarc0 |= __BITS(29, 28);
3444 }
3445 /* Set TARC0 bits 23,24,26,27 */
3446 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3447
3448 /* CTRL_EXT */
3449 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3450 reg |= __BIT(22); /* Set bit 22 */
3451 /*
3452 * Enable PHY low-power state when MAC is at D3
3453 * w/o WoL
3454 */
3455 if (sc->sc_type >= WM_T_PCH)
3456 reg |= CTRL_EXT_PHYPDEN;
3457 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3458
3459 /* TARC1 */
3460 tarc1 = CSR_READ(sc, WMREG_TARC1);
3461 /* bit 28 */
3462 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3463 tarc1 &= ~__BIT(28);
3464 else
3465 tarc1 |= __BIT(28);
3466 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3467 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3468
3469 /* Device Status */
3470 if (sc->sc_type == WM_T_ICH8) {
3471 reg = CSR_READ(sc, WMREG_STATUS);
3472 reg &= ~__BIT(31);
3473 CSR_WRITE(sc, WMREG_STATUS, reg);
3474
3475 }
3476
3477 /*
3478 * Work-around descriptor data corruption issue during
3479 * NFS v2 UDP traffic, just disable the NFS filtering
3480 * capability.
3481 */
3482 reg = CSR_READ(sc, WMREG_RFCTL);
3483 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3484 CSR_WRITE(sc, WMREG_RFCTL, reg);
3485 break;
3486 default:
3487 break;
3488 }
3489 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3490
3491 /*
3492 * 8257[12] Errata No.52 and some others.
3493 * Avoid RSS Hash Value bug.
3494 */
3495 switch (sc->sc_type) {
3496 case WM_T_82571:
3497 case WM_T_82572:
3498 case WM_T_82573:
3499 case WM_T_80003:
3500 case WM_T_ICH8:
3501 reg = CSR_READ(sc, WMREG_RFCTL);
3502 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
3503 CSR_WRITE(sc, WMREG_RFCTL, reg);
3504 break;
3505 default:
3506 break;
3507 }
3508 }
3509 }
3510
3511 static uint32_t
3512 wm_rxpbs_adjust_82580(uint32_t val)
3513 {
3514 uint32_t rv = 0;
3515
3516 if (val < __arraycount(wm_82580_rxpbs_table))
3517 rv = wm_82580_rxpbs_table[val];
3518
3519 return rv;
3520 }
3521
3522 /*
3523 * wm_reset:
3524 *
3525  *	Reset the chip to a known state.
3526 */
3527 static void
3528 wm_reset(struct wm_softc *sc)
3529 {
3530 int phy_reset = 0;
3531 int error = 0;
3532 uint32_t reg, mask;
3533
3534 /*
3535 * Allocate on-chip memory according to the MTU size.
3536 * The Packet Buffer Allocation register must be written
3537 * before the chip is reset.
3538 */
3539 switch (sc->sc_type) {
3540 case WM_T_82547:
3541 case WM_T_82547_2:
3542 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3543 PBA_22K : PBA_30K;
3544 sc->sc_txfifo_head = 0;
3545 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3546 sc->sc_txfifo_size =
3547 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3548 sc->sc_txfifo_stall = 0;
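		/*
		 * For example, with a normal MTU (<= 8192) sc_pba is
		 * PBA_30K, so the Tx FIFO gets the remaining
		 * PBA_40K - PBA_30K = 10K of on-chip memory (assuming
		 * the PBA_* constants name sizes in K units).
		 */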
3549 break;
3550 case WM_T_82571:
3551 case WM_T_82572:
3552 	case WM_T_82575: /* XXX need special handling for jumbo frames */
3553 case WM_T_80003:
3554 sc->sc_pba = PBA_32K;
3555 break;
3556 case WM_T_82573:
3557 sc->sc_pba = PBA_12K;
3558 break;
3559 case WM_T_82574:
3560 case WM_T_82583:
3561 sc->sc_pba = PBA_20K;
3562 break;
3563 case WM_T_82576:
3564 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3565 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3566 break;
3567 case WM_T_82580:
3568 case WM_T_I350:
3569 case WM_T_I354:
3570 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3571 break;
3572 case WM_T_I210:
3573 case WM_T_I211:
3574 sc->sc_pba = PBA_34K;
3575 break;
3576 case WM_T_ICH8:
3577 /* Workaround for a bit corruption issue in FIFO memory */
3578 sc->sc_pba = PBA_8K;
3579 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3580 break;
3581 case WM_T_ICH9:
3582 case WM_T_ICH10:
3583 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3584 PBA_14K : PBA_10K;
3585 break;
3586 case WM_T_PCH:
3587 case WM_T_PCH2:
3588 case WM_T_PCH_LPT:
3589 sc->sc_pba = PBA_26K;
3590 break;
3591 default:
3592 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3593 PBA_40K : PBA_48K;
3594 break;
3595 }
3596 /*
3597 	 * Only old or non-multiqueue devices have the PBA register.
3598 * XXX Need special handling for 82575.
3599 */
3600 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3601 || (sc->sc_type == WM_T_82575))
3602 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3603
3604 /* Prevent the PCI-E bus from sticking */
3605 if (sc->sc_flags & WM_F_PCIE) {
3606 int timeout = 800;
3607
3608 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3609 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3610
3611 while (timeout--) {
3612 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3613 == 0)
3614 break;
3615 delay(100);
3616 }
3617 }
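	/*
	 * The loop above waits at most 800 * 100us = 80ms for pending
	 * GIO master requests to quiesce before carrying on regardless.
	 */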
3618
3619 /* Set the completion timeout for interface */
3620 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3621 || (sc->sc_type == WM_T_82580)
3622 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3623 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3624 wm_set_pcie_completion_timeout(sc);
3625
3626 /* Clear interrupt */
3627 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3628
3629 /* Stop the transmit and receive processes. */
3630 CSR_WRITE(sc, WMREG_RCTL, 0);
3631 sc->sc_rctl &= ~RCTL_EN;
3632 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3633 CSR_WRITE_FLUSH(sc);
3634
3635 /* XXX set_tbi_sbp_82543() */
3636
3637 delay(10*1000);
3638
3639 /* Must acquire the MDIO ownership before MAC reset */
3640 switch (sc->sc_type) {
3641 case WM_T_82573:
3642 case WM_T_82574:
3643 case WM_T_82583:
3644 error = wm_get_hw_semaphore_82573(sc);
3645 break;
3646 default:
3647 break;
3648 }
3649
3650 /*
3651 * 82541 Errata 29? & 82547 Errata 28?
3652 	 * See also the description of the PHY_RST bit in the CTRL register
3653 * in 8254x_GBe_SDM.pdf.
3654 */
3655 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3656 CSR_WRITE(sc, WMREG_CTRL,
3657 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3658 CSR_WRITE_FLUSH(sc);
3659 delay(5000);
3660 }
3661
3662 switch (sc->sc_type) {
3663 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3664 case WM_T_82541:
3665 case WM_T_82541_2:
3666 case WM_T_82547:
3667 case WM_T_82547_2:
3668 /*
3669 * On some chipsets, a reset through a memory-mapped write
3670 * cycle can cause the chip to reset before completing the
3671 		 * write cycle. This causes a major headache that can be
3672 * avoided by issuing the reset via indirect register writes
3673 * through I/O space.
3674 *
3675 * So, if we successfully mapped the I/O BAR at attach time,
3676 * use that. Otherwise, try our luck with a memory-mapped
3677 * reset.
3678 */
3679 if (sc->sc_flags & WM_F_IOH_VALID)
3680 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3681 else
3682 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3683 break;
3684 case WM_T_82545_3:
3685 case WM_T_82546_3:
3686 /* Use the shadow control register on these chips. */
3687 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3688 break;
3689 case WM_T_80003:
3690 mask = swfwphysem[sc->sc_funcid];
3691 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3692 wm_get_swfw_semaphore(sc, mask);
3693 CSR_WRITE(sc, WMREG_CTRL, reg);
3694 wm_put_swfw_semaphore(sc, mask);
3695 break;
3696 case WM_T_ICH8:
3697 case WM_T_ICH9:
3698 case WM_T_ICH10:
3699 case WM_T_PCH:
3700 case WM_T_PCH2:
3701 case WM_T_PCH_LPT:
3702 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3703 if (wm_check_reset_block(sc) == 0) {
3704 /*
3705 * Gate automatic PHY configuration by hardware on
3706 * non-managed 82579
3707 */
3708 if ((sc->sc_type == WM_T_PCH2)
3709 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3710 != 0))
3711 wm_gate_hw_phy_config_ich8lan(sc, 1);
3712 
3714 reg |= CTRL_PHY_RESET;
3715 phy_reset = 1;
3716 }
3717 wm_get_swfwhw_semaphore(sc);
3718 CSR_WRITE(sc, WMREG_CTRL, reg);
3719 		/* Don't insert a completion barrier during reset */
3720 delay(20*1000);
3721 wm_put_swfwhw_semaphore(sc);
3722 break;
3723 case WM_T_82580:
3724 case WM_T_I350:
3725 case WM_T_I354:
3726 case WM_T_I210:
3727 case WM_T_I211:
3728 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3729 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3730 CSR_WRITE_FLUSH(sc);
3731 delay(5000);
3732 break;
3733 case WM_T_82542_2_0:
3734 case WM_T_82542_2_1:
3735 case WM_T_82543:
3736 case WM_T_82540:
3737 case WM_T_82545:
3738 case WM_T_82546:
3739 case WM_T_82571:
3740 case WM_T_82572:
3741 case WM_T_82573:
3742 case WM_T_82574:
3743 case WM_T_82575:
3744 case WM_T_82576:
3745 case WM_T_82583:
3746 default:
3747 /* Everything else can safely use the documented method. */
3748 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3749 break;
3750 }
3751
3752 /* Must release the MDIO ownership after MAC reset */
3753 switch (sc->sc_type) {
3754 case WM_T_82573:
3755 case WM_T_82574:
3756 case WM_T_82583:
3757 if (error == 0)
3758 wm_put_hw_semaphore_82573(sc);
3759 break;
3760 default:
3761 break;
3762 }
3763
3764 if (phy_reset != 0)
3765 wm_get_cfg_done(sc);
3766
3767 /* reload EEPROM */
3768 switch (sc->sc_type) {
3769 case WM_T_82542_2_0:
3770 case WM_T_82542_2_1:
3771 case WM_T_82543:
3772 case WM_T_82544:
3773 delay(10);
3774 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3775 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3776 CSR_WRITE_FLUSH(sc);
3777 delay(2000);
3778 break;
3779 case WM_T_82540:
3780 case WM_T_82545:
3781 case WM_T_82545_3:
3782 case WM_T_82546:
3783 case WM_T_82546_3:
3784 delay(5*1000);
3785 /* XXX Disable HW ARPs on ASF enabled adapters */
3786 break;
3787 case WM_T_82541:
3788 case WM_T_82541_2:
3789 case WM_T_82547:
3790 case WM_T_82547_2:
3791 delay(20000);
3792 /* XXX Disable HW ARPs on ASF enabled adapters */
3793 break;
3794 case WM_T_82571:
3795 case WM_T_82572:
3796 case WM_T_82573:
3797 case WM_T_82574:
3798 case WM_T_82583:
3799 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3800 delay(10);
3801 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3802 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3803 CSR_WRITE_FLUSH(sc);
3804 }
3805 /* check EECD_EE_AUTORD */
3806 wm_get_auto_rd_done(sc);
3807 /*
3808 		 * PHY configuration from the NVM starts only after EECD_AUTO_RD
3809 * is set.
3810 */
3811 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3812 || (sc->sc_type == WM_T_82583))
3813 delay(25*1000);
3814 break;
3815 case WM_T_82575:
3816 case WM_T_82576:
3817 case WM_T_82580:
3818 case WM_T_I350:
3819 case WM_T_I354:
3820 case WM_T_I210:
3821 case WM_T_I211:
3822 case WM_T_80003:
3823 /* check EECD_EE_AUTORD */
3824 wm_get_auto_rd_done(sc);
3825 break;
3826 case WM_T_ICH8:
3827 case WM_T_ICH9:
3828 case WM_T_ICH10:
3829 case WM_T_PCH:
3830 case WM_T_PCH2:
3831 case WM_T_PCH_LPT:
3832 break;
3833 default:
3834 panic("%s: unknown type\n", __func__);
3835 }
3836
3837 /* Check whether EEPROM is present or not */
3838 switch (sc->sc_type) {
3839 case WM_T_82575:
3840 case WM_T_82576:
3841 case WM_T_82580:
3842 case WM_T_I350:
3843 case WM_T_I354:
3844 case WM_T_ICH8:
3845 case WM_T_ICH9:
3846 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3847 /* Not found */
3848 sc->sc_flags |= WM_F_EEPROM_INVALID;
3849 if (sc->sc_type == WM_T_82575)
3850 wm_reset_init_script_82575(sc);
3851 }
3852 break;
3853 default:
3854 break;
3855 }
3856
3857 if ((sc->sc_type == WM_T_82580)
3858 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3859 /* clear global device reset status bit */
3860 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3861 }
3862
3863 /* Clear any pending interrupt events. */
3864 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3865 reg = CSR_READ(sc, WMREG_ICR);
3866
3867 /* reload sc_ctrl */
3868 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3869
3870 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
3871 wm_set_eee_i350(sc);
3872
3873 /* dummy read from WUC */
3874 if (sc->sc_type == WM_T_PCH)
3875 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3876 /*
3877 * For PCH, this write will make sure that any noise will be detected
3878 * as a CRC error and be dropped rather than show up as a bad packet
3879 * to the DMA engine
3880 */
3881 if (sc->sc_type == WM_T_PCH)
3882 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3883
3884 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3885 CSR_WRITE(sc, WMREG_WUC, 0);
3886
3887 wm_reset_mdicnfg_82580(sc);
3888 }
3889
3890 /*
3891 * wm_add_rxbuf:
3892 *
3893  *	Add a receive buffer to the indicated descriptor.
3894 */
3895 static int
3896 wm_add_rxbuf(struct wm_softc *sc, int idx)
3897 {
3898 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3899 struct mbuf *m;
3900 int error;
3901
3902 KASSERT(WM_RX_LOCKED(sc));
3903
3904 MGETHDR(m, M_DONTWAIT, MT_DATA);
3905 if (m == NULL)
3906 return ENOBUFS;
3907
3908 MCLGET(m, M_DONTWAIT);
3909 if ((m->m_flags & M_EXT) == 0) {
3910 m_freem(m);
3911 return ENOBUFS;
3912 }
3913
3914 if (rxs->rxs_mbuf != NULL)
3915 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3916
3917 rxs->rxs_mbuf = m;
3918
3919 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3920 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3921 BUS_DMA_READ|BUS_DMA_NOWAIT);
3922 if (error) {
3923 /* XXX XXX XXX */
3924 aprint_error_dev(sc->sc_dev,
3925 "unable to load rx DMA map %d, error = %d\n",
3926 idx, error);
3927 panic("wm_add_rxbuf");
3928 }
3929
3930 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3931 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3932
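	/*
	 * On NEWQUEUE (82575 and later) chips the descriptor may be
	 * handed to the hardware only once RCTL.EN has been set; older
	 * chips can take it immediately.
	 */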
3933 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3934 if ((sc->sc_rctl & RCTL_EN) != 0)
3935 WM_INIT_RXDESC(sc, idx);
3936 } else
3937 WM_INIT_RXDESC(sc, idx);
3938
3939 return 0;
3940 }
3941
3942 /*
3943 * wm_rxdrain:
3944 *
3945 * Drain the receive queue.
3946 */
3947 static void
3948 wm_rxdrain(struct wm_softc *sc)
3949 {
3950 struct wm_rxsoft *rxs;
3951 int i;
3952
3953 KASSERT(WM_RX_LOCKED(sc));
3954
3955 for (i = 0; i < WM_NRXDESC; i++) {
3956 rxs = &sc->sc_rxsoft[i];
3957 if (rxs->rxs_mbuf != NULL) {
3958 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3959 m_freem(rxs->rxs_mbuf);
3960 rxs->rxs_mbuf = NULL;
3961 }
3962 }
3963 }
3964
3965 /*
3966 * wm_init: [ifnet interface function]
3967 *
3968 * Initialize the interface.
3969 */
3970 static int
3971 wm_init(struct ifnet *ifp)
3972 {
3973 struct wm_softc *sc = ifp->if_softc;
3974 int ret;
3975
3976 WM_BOTH_LOCK(sc);
3977 ret = wm_init_locked(ifp);
3978 WM_BOTH_UNLOCK(sc);
3979
3980 return ret;
3981 }
3982
3983 static int
3984 wm_init_locked(struct ifnet *ifp)
3985 {
3986 struct wm_softc *sc = ifp->if_softc;
3987 struct wm_rxsoft *rxs;
3988 int i, j, trynum, error = 0;
3989 uint32_t reg;
3990
3991 KASSERT(WM_BOTH_LOCKED(sc));
3992 /*
3993 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3994 	 * On such platforms there is a small but measurable benefit to
3995 	 * avoiding the adjustment that aligns the headers, for a normal
3996 	 * MTU. One possibility is that the DMA itself is
3997 * slightly more efficient if the front of the entire packet (instead
3998 * of the front of the headers) is aligned.
3999 *
4000 * Note we must always set align_tweak to 0 if we are using
4001 * jumbo frames.
4002 */
4003 #ifdef __NO_STRICT_ALIGNMENT
4004 sc->sc_align_tweak = 0;
4005 #else
4006 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4007 sc->sc_align_tweak = 0;
4008 else
4009 sc->sc_align_tweak = 2;
4010 #endif /* __NO_STRICT_ALIGNMENT */
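	/*
	 * Example: with MCLBYTES = 2048 and a 1500-byte MTU,
	 * 1500 + ETHER_HDR_LEN + ETHER_CRC_LEN = 1518 <= 2046, so
	 * align_tweak is 2; shifting the buffer by two bytes makes the
	 * IP header, which follows the 14-byte Ethernet header, 32-bit
	 * aligned on strict-alignment platforms.
	 */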
4011
4012 /* Cancel any pending I/O. */
4013 wm_stop_locked(ifp, 0);
4014
4015 /* update statistics before reset */
4016 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4017 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4018
4019 /* Reset the chip to a known state. */
4020 wm_reset(sc);
4021
4022 switch (sc->sc_type) {
4023 case WM_T_82571:
4024 case WM_T_82572:
4025 case WM_T_82573:
4026 case WM_T_82574:
4027 case WM_T_82583:
4028 case WM_T_80003:
4029 case WM_T_ICH8:
4030 case WM_T_ICH9:
4031 case WM_T_ICH10:
4032 case WM_T_PCH:
4033 case WM_T_PCH2:
4034 case WM_T_PCH_LPT:
4035 if (wm_check_mng_mode(sc) != 0)
4036 wm_get_hw_control(sc);
4037 break;
4038 default:
4039 break;
4040 }
4041
4042 /* Init hardware bits */
4043 wm_initialize_hardware_bits(sc);
4044
4045 /* Reset the PHY. */
4046 if (sc->sc_flags & WM_F_HAS_MII)
4047 wm_gmii_reset(sc);
4048
4049 /* Calculate (E)ITR value */
4050 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4051 sc->sc_itr = 450; /* For EITR */
4052 } else if (sc->sc_type >= WM_T_82543) {
4053 /*
4054 * Set up the interrupt throttling register (units of 256ns)
4055 * Note that a footnote in Intel's documentation says this
4056 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4057 		 * or 10Mbit mode. Empirically, the same appears to be true
4058 		 * for the 1024ns units of the other
4059 * interrupt-related timer registers -- so, really, we ought
4060 * to divide this value by 4 when the link speed is low.
4061 *
4062 * XXX implement this division at link speed change!
4063 */
4064
4065 /*
4066 * For N interrupts/sec, set this value to:
4067 * 1000000000 / (N * 256). Note that we set the
4068 * absolute and packet timer values to this value
4069 * divided by 4 to get "simple timer" behavior.
4070 */
4071
4072 sc->sc_itr = 1500; /* 2604 ints/sec */
4073 }
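	/*
	 * Worked example of the formula above: sc_itr = 1500 caps the
	 * rate at 1000000000 / (1500 * 256) ~= 2604 interrupts/sec,
	 * matching the comment on the assignment.
	 */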
4074
4075 /* Initialize the transmit descriptor ring. */
4076 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4077 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4078 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4079 sc->sc_txfree = WM_NTXDESC(sc);
4080 sc->sc_txnext = 0;
4081
4082 if (sc->sc_type < WM_T_82543) {
4083 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4084 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4085 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4086 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4087 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4088 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4089 } else {
4090 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4091 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4092 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4093 CSR_WRITE(sc, WMREG_TDH, 0);
4094
4095 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4096 /*
4097 * Don't write TDT before TCTL.EN is set.
4098 			 * See the documentation.
4099 */
4100 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_QUEUE_ENABLE
4101 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4102 | TXDCTL_WTHRESH(0));
4103 else {
4104 /* ITR / 4 */
4105 CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
4106 if (sc->sc_type >= WM_T_82540) {
4107 /* should be same */
4108 CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
4109 }
4110
4111 CSR_WRITE(sc, WMREG_TDT, 0);
4112 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) |
4113 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4114 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4115 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4116 }
4117 }
4118
4119 /* Initialize the transmit job descriptors. */
4120 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4121 sc->sc_txsoft[i].txs_mbuf = NULL;
4122 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4123 sc->sc_txsnext = 0;
4124 sc->sc_txsdirty = 0;
4125
4126 /*
4127 * Initialize the receive descriptor and receive job
4128 * descriptor rings.
4129 */
4130 if (sc->sc_type < WM_T_82543) {
4131 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4132 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4133 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4134 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4135 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4136 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4137
4138 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4139 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4140 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4141 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4142 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4143 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4144 } else {
4145 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4146 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4147 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4148
4149 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4150 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4151 				panic("%s: MCLBYTES %d unsupported for i82575 or higher\n", __func__, MCLBYTES);
4152 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4153 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4154 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4155 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4156 | RXDCTL_WTHRESH(1));
4157 } else {
4158 CSR_WRITE(sc, WMREG_RDH, 0);
4159 CSR_WRITE(sc, WMREG_RDT, 0);
4160 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4161 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
4162 }
4163 }
4164 for (i = 0; i < WM_NRXDESC; i++) {
4165 rxs = &sc->sc_rxsoft[i];
4166 if (rxs->rxs_mbuf == NULL) {
4167 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4168 log(LOG_ERR, "%s: unable to allocate or map "
4169 "rx buffer %d, error = %d\n",
4170 device_xname(sc->sc_dev), i, error);
4171 /*
4172 * XXX Should attempt to run with fewer receive
4173 * XXX buffers instead of just failing.
4174 */
4175 wm_rxdrain(sc);
4176 goto out;
4177 }
4178 } else {
4179 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4180 WM_INIT_RXDESC(sc, i);
4181 /*
4182 			 * For 82575 and newer devices, the RX descriptors
4183 			 * must be initialized after RCTL.EN is set in
4184 			 * wm_set_filter().
4185 */
4186 }
4187 }
4188 sc->sc_rxptr = 0;
4189 sc->sc_rxdiscard = 0;
4190 WM_RXCHAIN_RESET(sc);
4191
4192 /*
4193 * Clear out the VLAN table -- we don't use it (yet).
4194 */
4195 CSR_WRITE(sc, WMREG_VET, 0);
4196 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4197 trynum = 10; /* Due to hw errata */
4198 else
4199 trynum = 1;
4200 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4201 for (j = 0; j < trynum; j++)
4202 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4203
4204 /*
4205 * Set up flow-control parameters.
4206 *
4207 * XXX Values could probably stand some tuning.
4208 */
4209 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4210 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4211 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4212 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4213 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4214 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4215 }
4216
4217 sc->sc_fcrtl = FCRTL_DFLT;
4218 if (sc->sc_type < WM_T_82543) {
4219 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4220 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4221 } else {
4222 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4223 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4224 }
4225
4226 if (sc->sc_type == WM_T_80003)
4227 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4228 else
4229 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4230
4231 /* Writes the control register. */
4232 wm_set_vlan(sc);
4233
4234 if (sc->sc_flags & WM_F_HAS_MII) {
4235 int val;
4236
4237 switch (sc->sc_type) {
4238 case WM_T_80003:
4239 case WM_T_ICH8:
4240 case WM_T_ICH9:
4241 case WM_T_ICH10:
4242 case WM_T_PCH:
4243 case WM_T_PCH2:
4244 case WM_T_PCH_LPT:
4245 /*
4246 * Set the mac to wait the maximum time between each
4247 * iteration and increase the max iterations when
4248 * polling the phy; this fixes erroneous timeouts at
4249 * 10Mbps.
4250 */
4251 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4252 0xFFFF);
4253 val = wm_kmrn_readreg(sc,
4254 KUMCTRLSTA_OFFSET_INB_PARAM);
4255 val |= 0x3F;
4256 wm_kmrn_writereg(sc,
4257 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4258 break;
4259 default:
4260 break;
4261 }
4262
4263 if (sc->sc_type == WM_T_80003) {
4264 val = CSR_READ(sc, WMREG_CTRL_EXT);
4265 val &= ~CTRL_EXT_LINK_MODE_MASK;
4266 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4267
4268 			/* Bypass the RX and TX FIFOs */
4269 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4270 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4271 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4272 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4273 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4274 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4275 }
4276 }
4277 #if 0
4278 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4279 #endif
4280
4281 /* Set up checksum offload parameters. */
4282 reg = CSR_READ(sc, WMREG_RXCSUM);
4283 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4284 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4285 reg |= RXCSUM_IPOFL;
4286 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4287 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4288 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4289 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4290 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4291
4292 /* Set up the interrupt registers. */
4293 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4294 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4295 ICR_RXO | ICR_RXT0;
4296 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4297
4298 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4299 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4300 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4301 reg = CSR_READ(sc, WMREG_KABGTXD);
4302 reg |= KABGTXD_BGSQLBIAS;
4303 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4304 }
4305
4306 /* Set up the inter-packet gap. */
4307 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4308
4309 if (sc->sc_type >= WM_T_82543) {
4310 /*
4311 		 * XXX 82574 has both ITR and EITR. Set EITR when we use
4312 * the multi queue function with MSI-X.
4313 */
4314 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4315 CSR_WRITE(sc, WMREG_EITR(0), sc->sc_itr);
4316 else
4317 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4318 }
4319
4320 /* Set the VLAN ethernetype. */
4321 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4322
4323 /*
4324 * Set up the transmit control register; we start out with
4325 	 * a collision distance suitable for FDX, but update it when
4326 * we resolve the media type.
4327 */
4328 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4329 | TCTL_CT(TX_COLLISION_THRESHOLD)
4330 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4331 if (sc->sc_type >= WM_T_82571)
4332 sc->sc_tctl |= TCTL_MULR;
4333 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4334
4335 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4336 		/* Write TDT after TCTL.EN is set. See the documentation. */
4337 CSR_WRITE(sc, WMREG_TDT, 0);
4338 }
4339
4340 if (sc->sc_type == WM_T_80003) {
4341 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4342 reg &= ~TCTL_EXT_GCEX_MASK;
4343 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4344 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4345 }
4346
4347 /* Set the media. */
4348 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4349 goto out;
4350
4351 /* Configure for OS presence */
4352 wm_init_manageability(sc);
4353
4354 /*
4355 * Set up the receive control register; we actually program
4356 * the register when we set the receive filter. Use multicast
4357 * address offset type 0.
4358 *
4359 * Only the i82544 has the ability to strip the incoming
4360 * CRC, so we don't enable that feature.
4361 */
4362 sc->sc_mchash_type = 0;
4363 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4364 | RCTL_MO(sc->sc_mchash_type);
4365
4366 /*
4367 * The I350 has a bug where it always strips the CRC whether
4368 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
4369 */
4370 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4371 || (sc->sc_type == WM_T_I210))
4372 sc->sc_rctl |= RCTL_SECRC;
4373
4374 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4375 && (ifp->if_mtu > ETHERMTU)) {
4376 sc->sc_rctl |= RCTL_LPE;
4377 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4378 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4379 }
4380
4381 if (MCLBYTES == 2048) {
4382 sc->sc_rctl |= RCTL_2k;
4383 } else {
4384 if (sc->sc_type >= WM_T_82543) {
4385 switch (MCLBYTES) {
4386 case 4096:
4387 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4388 break;
4389 case 8192:
4390 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4391 break;
4392 case 16384:
4393 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4394 break;
4395 default:
4396 panic("wm_init: MCLBYTES %d unsupported",
4397 MCLBYTES);
4398 break;
4399 }
4400 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4401 }
4402
4403 /* Set the receive filter. */
4404 wm_set_filter(sc);
4405
4406 /* Enable ECC */
4407 switch (sc->sc_type) {
4408 case WM_T_82571:
4409 reg = CSR_READ(sc, WMREG_PBA_ECC);
4410 reg |= PBA_ECC_CORR_EN;
4411 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4412 break;
4413 case WM_T_PCH_LPT:
4414 reg = CSR_READ(sc, WMREG_PBECCSTS);
4415 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4416 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4417
4418 reg = CSR_READ(sc, WMREG_CTRL);
4419 reg |= CTRL_MEHE;
4420 CSR_WRITE(sc, WMREG_CTRL, reg);
4421 break;
4422 default:
4423 break;
4424 }
4425
4426 	/* On 82575 and later, set RDT only if RX is enabled */
4427 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4428 for (i = 0; i < WM_NRXDESC; i++)
4429 WM_INIT_RXDESC(sc, i);
4430
4431 sc->sc_stopping = false;
4432
4433 /* Start the one second link check clock. */
4434 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4435
4436 /* ...all done! */
4437 ifp->if_flags |= IFF_RUNNING;
4438 ifp->if_flags &= ~IFF_OACTIVE;
4439
4440 out:
4441 sc->sc_if_flags = ifp->if_flags;
4442 if (error)
4443 log(LOG_ERR, "%s: interface not running\n",
4444 device_xname(sc->sc_dev));
4445 return error;
4446 }
4447
4448 /*
4449 * wm_stop: [ifnet interface function]
4450 *
4451 * Stop transmission on the interface.
4452 */
4453 static void
4454 wm_stop(struct ifnet *ifp, int disable)
4455 {
4456 struct wm_softc *sc = ifp->if_softc;
4457
4458 WM_BOTH_LOCK(sc);
4459 wm_stop_locked(ifp, disable);
4460 WM_BOTH_UNLOCK(sc);
4461 }
4462
4463 static void
4464 wm_stop_locked(struct ifnet *ifp, int disable)
4465 {
4466 struct wm_softc *sc = ifp->if_softc;
4467 struct wm_txsoft *txs;
4468 int i;
4469
4470 KASSERT(WM_BOTH_LOCKED(sc));
4471
4472 sc->sc_stopping = true;
4473
4474 /* Stop the one second clock. */
4475 callout_stop(&sc->sc_tick_ch);
4476
4477 /* Stop the 82547 Tx FIFO stall check timer. */
4478 if (sc->sc_type == WM_T_82547)
4479 callout_stop(&sc->sc_txfifo_ch);
4480
4481 if (sc->sc_flags & WM_F_HAS_MII) {
4482 /* Down the MII. */
4483 mii_down(&sc->sc_mii);
4484 } else {
4485 #if 0
4486 /* Should we clear PHY's status properly? */
4487 wm_reset(sc);
4488 #endif
4489 }
4490
4491 /* Stop the transmit and receive processes. */
4492 CSR_WRITE(sc, WMREG_TCTL, 0);
4493 CSR_WRITE(sc, WMREG_RCTL, 0);
4494 sc->sc_rctl &= ~RCTL_EN;
4495
4496 /*
4497 * Clear the interrupt mask to ensure the device cannot assert its
4498 * interrupt line.
4499 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4500 * any currently pending or shared interrupt.
4501 */
4502 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4503 sc->sc_icr = 0;
4504
4505 /* Release any queued transmit buffers. */
4506 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4507 txs = &sc->sc_txsoft[i];
4508 if (txs->txs_mbuf != NULL) {
4509 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4510 m_freem(txs->txs_mbuf);
4511 txs->txs_mbuf = NULL;
4512 }
4513 }
4514
4515 /* Mark the interface as down and cancel the watchdog timer. */
4516 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4517 ifp->if_timer = 0;
4518
4519 if (disable)
4520 wm_rxdrain(sc);
4521
4522 #if 0 /* notyet */
4523 if (sc->sc_type >= WM_T_82544)
4524 CSR_WRITE(sc, WMREG_WUC, 0);
4525 #endif
4526 }
4527
4528 /*
4529 * wm_tx_offload:
4530 *
4531 * Set up TCP/IP checksumming parameters for the
4532 * specified packet.
4533 */
4534 static int
4535 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4536 uint8_t *fieldsp)
4537 {
4538 struct mbuf *m0 = txs->txs_mbuf;
4539 struct livengood_tcpip_ctxdesc *t;
4540 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4541 uint32_t ipcse;
4542 struct ether_header *eh;
4543 int offset, iphl;
4544 uint8_t fields;
4545
4546 /*
4547 * XXX It would be nice if the mbuf pkthdr had offset
4548 * fields for the protocol headers.
4549 */
4550
4551 eh = mtod(m0, struct ether_header *);
4552 switch (htons(eh->ether_type)) {
4553 case ETHERTYPE_IP:
4554 case ETHERTYPE_IPV6:
4555 offset = ETHER_HDR_LEN;
4556 break;
4557
4558 case ETHERTYPE_VLAN:
4559 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4560 break;
4561
4562 default:
4563 /*
4564 * Don't support this protocol or encapsulation.
4565 */
4566 *fieldsp = 0;
4567 *cmdp = 0;
4568 return 0;
4569 }
4570
4571 if ((m0->m_pkthdr.csum_flags &
4572 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4573 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4574 } else {
4575 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4576 }
4577 ipcse = offset + iphl - 1;
4578
4579 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4580 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4581 seg = 0;
4582 fields = 0;
4583
4584 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4585 int hlen = offset + iphl;
4586 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4587
4588 if (__predict_false(m0->m_len <
4589 (hlen + sizeof(struct tcphdr)))) {
4590 /*
4591 * TCP/IP headers are not in the first mbuf; we need
4592 * to do this the slow and painful way. Let's just
4593 * hope this doesn't happen very often.
4594 */
4595 struct tcphdr th;
4596
4597 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4598
4599 m_copydata(m0, hlen, sizeof(th), &th);
4600 if (v4) {
4601 struct ip ip;
4602
4603 m_copydata(m0, offset, sizeof(ip), &ip);
4604 ip.ip_len = 0;
4605 m_copyback(m0,
4606 offset + offsetof(struct ip, ip_len),
4607 sizeof(ip.ip_len), &ip.ip_len);
4608 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4609 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4610 } else {
4611 struct ip6_hdr ip6;
4612
4613 m_copydata(m0, offset, sizeof(ip6), &ip6);
4614 ip6.ip6_plen = 0;
4615 m_copyback(m0,
4616 offset + offsetof(struct ip6_hdr, ip6_plen),
4617 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4618 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4619 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4620 }
4621 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4622 sizeof(th.th_sum), &th.th_sum);
4623
4624 hlen += th.th_off << 2;
4625 } else {
4626 /*
4627 * TCP/IP headers are in the first mbuf; we can do
4628 * this the easy way.
4629 */
4630 struct tcphdr *th;
4631
4632 if (v4) {
4633 struct ip *ip =
4634 (void *)(mtod(m0, char *) + offset);
4635 th = (void *)(mtod(m0, char *) + hlen);
4636
4637 ip->ip_len = 0;
4638 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4639 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4640 } else {
4641 struct ip6_hdr *ip6 =
4642 (void *)(mtod(m0, char *) + offset);
4643 th = (void *)(mtod(m0, char *) + hlen);
4644
4645 ip6->ip6_plen = 0;
4646 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4647 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4648 }
4649 hlen += th->th_off << 2;
4650 }
4651
4652 if (v4) {
4653 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4654 cmdlen |= WTX_TCPIP_CMD_IP;
4655 } else {
4656 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4657 ipcse = 0;
4658 }
4659 cmd |= WTX_TCPIP_CMD_TSE;
4660 cmdlen |= WTX_TCPIP_CMD_TSE |
4661 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4662 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4663 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4664 }
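	/*
	 * For illustration: a typical IPv4 TCP segment with no options
	 * ends up with hlen = 14 (Ethernet) + 20 (IP) + 20 (TCP) = 54
	 * bytes here, and m0->m_pkthdr.segsz carries the MSS (e.g. 1460
	 * for a 1500-byte MTU).
	 */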
4665
4666 /*
4667 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4668 * offload feature, if we load the context descriptor, we
4669 * MUST provide valid values for IPCSS and TUCSS fields.
4670 */
4671
4672 ipcs = WTX_TCPIP_IPCSS(offset) |
4673 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4674 WTX_TCPIP_IPCSE(ipcse);
4675 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
4676 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
4677 fields |= WTX_IXSM;
4678 }
4679
4680 offset += iphl;
4681
4682 if (m0->m_pkthdr.csum_flags &
4683 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
4684 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4685 fields |= WTX_TXSM;
4686 tucs = WTX_TCPIP_TUCSS(offset) |
4687 WTX_TCPIP_TUCSO(offset +
4688 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
4689 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4690 } else if ((m0->m_pkthdr.csum_flags &
4691 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
4692 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4693 fields |= WTX_TXSM;
4694 tucs = WTX_TCPIP_TUCSS(offset) |
4695 WTX_TCPIP_TUCSO(offset +
4696 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
4697 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4698 } else {
4699 /* Just initialize it to a valid TCP context. */
4700 tucs = WTX_TCPIP_TUCSS(offset) |
4701 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
4702 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4703 }
4704
4705 /* Fill in the context descriptor. */
4706 t = (struct livengood_tcpip_ctxdesc *)
4707 &sc->sc_txdescs[sc->sc_txnext];
4708 t->tcpip_ipcs = htole32(ipcs);
4709 t->tcpip_tucs = htole32(tucs);
4710 t->tcpip_cmdlen = htole32(cmdlen);
4711 t->tcpip_seg = htole32(seg);
4712 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4713
4714 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4715 txs->txs_ndesc++;
4716
4717 *cmdp = cmd;
4718 *fieldsp = fields;
4719
4720 return 0;
4721 }
4722
4723 static void
4724 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
4725 {
4726 struct mbuf *m;
4727 int i;
4728
4729 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
4730 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
4731 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
4732 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
4733 m->m_data, m->m_len, m->m_flags);
4734 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
4735 i, i == 1 ? "" : "s");
4736 }
4737
4738 /*
4739 * wm_82547_txfifo_stall:
4740 *
4741 * Callout used to wait for the 82547 Tx FIFO to drain,
4742 * reset the FIFO pointers, and restart packet transmission.
4743 */
4744 static void
4745 wm_82547_txfifo_stall(void *arg)
4746 {
4747 struct wm_softc *sc = arg;
4748 #ifndef WM_MPSAFE
4749 int s;
4750
4751 s = splnet();
4752 #endif
4753 WM_TX_LOCK(sc);
4754
4755 if (sc->sc_stopping)
4756 goto out;
4757
4758 if (sc->sc_txfifo_stall) {
4759 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
4760 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
4761 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
4762 /*
4763 * Packets have drained. Stop transmitter, reset
4764 * FIFO pointers, restart transmitter, and kick
4765 * the packet queue.
4766 */
4767 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
4768 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
4769 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
4770 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
4771 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
4772 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
4773 CSR_WRITE(sc, WMREG_TCTL, tctl);
4774 CSR_WRITE_FLUSH(sc);
4775
4776 sc->sc_txfifo_head = 0;
4777 sc->sc_txfifo_stall = 0;
4778 wm_start_locked(&sc->sc_ethercom.ec_if);
4779 } else {
4780 /*
4781 * Still waiting for packets to drain; try again in
4782 * another tick.
4783 */
4784 callout_schedule(&sc->sc_txfifo_ch, 1);
4785 }
4786 }
4787
4788 out:
4789 WM_TX_UNLOCK(sc);
4790 #ifndef WM_MPSAFE
4791 splx(s);
4792 #endif
4793 }
4794
4795 /*
4796 * wm_82547_txfifo_bugchk:
4797 *
4798 * Check for bug condition in the 82547 Tx FIFO. We need to
4799 * prevent enqueueing a packet that would wrap around the end
4800  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
4801 *
4802 * We do this by checking the amount of space before the end
4803 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
4804 * the Tx FIFO, wait for all remaining packets to drain, reset
4805 * the internal FIFO pointers to the beginning, and restart
4806 * transmission on the interface.
4807 */
4808 #define WM_FIFO_HDR 0x10
4809 #define WM_82547_PAD_LEN 0x3e0
4810 static int
4811 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
4812 {
4813 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
4814 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
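	/*
	 * E.g. a 64-byte packet is accounted as roundup(64 + 16, 16) = 80
	 * bytes of FIFO space once the 16-byte (WM_FIFO_HDR) FIFO header
	 * is included.
	 */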
4815
4816 /* Just return if already stalled. */
4817 if (sc->sc_txfifo_stall)
4818 return 1;
4819
4820 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4821 /* Stall only occurs in half-duplex mode. */
4822 goto send_packet;
4823 }
4824
4825 if (len >= WM_82547_PAD_LEN + space) {
4826 sc->sc_txfifo_stall = 1;
4827 callout_schedule(&sc->sc_txfifo_ch, 1);
4828 return 1;
4829 }
4830
4831 send_packet:
4832 sc->sc_txfifo_head += len;
4833 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
4834 sc->sc_txfifo_head -= sc->sc_txfifo_size;
4835
4836 return 0;
4837 }
4838
4839 /*
4840 * wm_start: [ifnet interface function]
4841 *
4842 * Start packet transmission on the interface.
4843 */
4844 static void
4845 wm_start(struct ifnet *ifp)
4846 {
4847 struct wm_softc *sc = ifp->if_softc;
4848
4849 WM_TX_LOCK(sc);
4850 if (!sc->sc_stopping)
4851 wm_start_locked(ifp);
4852 WM_TX_UNLOCK(sc);
4853 }
4854
4855 static void
4856 wm_start_locked(struct ifnet *ifp)
4857 {
4858 struct wm_softc *sc = ifp->if_softc;
4859 struct mbuf *m0;
4860 struct m_tag *mtag;
4861 struct wm_txsoft *txs;
4862 bus_dmamap_t dmamap;
4863 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
4864 bus_addr_t curaddr;
4865 bus_size_t seglen, curlen;
4866 uint32_t cksumcmd;
4867 uint8_t cksumfields;
4868
4869 KASSERT(WM_TX_LOCKED(sc));
4870
4871 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4872 return;
4873
4874 /* Remember the previous number of free descriptors. */
4875 ofree = sc->sc_txfree;
4876
4877 /*
4878 * Loop through the send queue, setting up transmit descriptors
4879 * until we drain the queue, or use up all available transmit
4880 * descriptors.
4881 */
4882 for (;;) {
4883 m0 = NULL;
4884
4885 /* Get a work queue entry. */
4886 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
4887 wm_txintr(sc);
4888 if (sc->sc_txsfree == 0) {
4889 DPRINTF(WM_DEBUG_TX,
4890 ("%s: TX: no free job descriptors\n",
4891 device_xname(sc->sc_dev)));
4892 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
4893 break;
4894 }
4895 }
4896
4897 /* Grab a packet off the queue. */
4898 IFQ_DEQUEUE(&ifp->if_snd, m0);
4899 if (m0 == NULL)
4900 break;
4901
4902 DPRINTF(WM_DEBUG_TX,
4903 ("%s: TX: have packet to transmit: %p\n",
4904 device_xname(sc->sc_dev), m0));
4905
4906 txs = &sc->sc_txsoft[sc->sc_txsnext];
4907 dmamap = txs->txs_dmamap;
4908
4909 use_tso = (m0->m_pkthdr.csum_flags &
4910 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
4911
4912 /*
4913 * So says the Linux driver:
4914 * The controller does a simple calculation to make sure
4915 * there is enough room in the FIFO before initiating the
4916 * DMA for each buffer. The calc is:
4917 * 4 = ceil(buffer len / MSS)
4918 * To make sure we don't overrun the FIFO, adjust the max
4919 * buffer len if the MSS drops.
4920 */
4921 dmamap->dm_maxsegsz =
4922 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
4923 ? m0->m_pkthdr.segsz << 2
4924 : WTX_MAX_LEN;
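		/*
		 * E.g. with an MSS of 1460 the per-segment DMA limit
		 * becomes 1460 << 2 = 5840 bytes, assuming that is below
		 * WTX_MAX_LEN; otherwise WTX_MAX_LEN itself is used.
		 */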
4925
4926 /*
4927 * Load the DMA map. If this fails, the packet either
4928 * didn't fit in the allotted number of segments, or we
4929 * were short on resources. For the too-many-segments
4930 * case, we simply report an error and drop the packet,
4931 * since we can't sanely copy a jumbo packet to a single
4932 * buffer.
4933 */
4934 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
4935 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
4936 if (error) {
4937 if (error == EFBIG) {
4938 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4939 log(LOG_ERR, "%s: Tx packet consumes too many "
4940 "DMA segments, dropping...\n",
4941 device_xname(sc->sc_dev));
4942 wm_dump_mbuf_chain(sc, m0);
4943 m_freem(m0);
4944 continue;
4945 }
4946 /* Short on resources, just stop for now. */
4947 DPRINTF(WM_DEBUG_TX,
4948 ("%s: TX: dmamap load failed: %d\n",
4949 device_xname(sc->sc_dev), error));
4950 break;
4951 }
4952
4953 segs_needed = dmamap->dm_nsegs;
4954 if (use_tso) {
4955 /* For sentinel descriptor; see below. */
4956 segs_needed++;
4957 }
4958
4959 /*
4960 * Ensure we have enough descriptors free to describe
4961 * the packet. Note, we always reserve one descriptor
4962 * at the end of the ring due to the semantics of the
4963 * TDT register, plus one more in the event we need
4964 * to load offload context.
4965 */
4966 if (segs_needed > sc->sc_txfree - 2) {
4967 /*
4968 * Not enough free descriptors to transmit this
4969 * packet. We haven't committed anything yet,
4970 * so just unload the DMA map, put the packet
4971 			 * back on the queue, and punt. Notify the upper
4972 * layer that there are no more slots left.
4973 */
4974 DPRINTF(WM_DEBUG_TX,
4975 ("%s: TX: need %d (%d) descriptors, have %d\n",
4976 device_xname(sc->sc_dev), dmamap->dm_nsegs,
4977 segs_needed, sc->sc_txfree - 1));
4978 ifp->if_flags |= IFF_OACTIVE;
4979 bus_dmamap_unload(sc->sc_dmat, dmamap);
4980 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
4981 break;
4982 }
4983
4984 /*
4985 * Check for 82547 Tx FIFO bug. We need to do this
4986 * once we know we can transmit the packet, since we
4987 * do some internal FIFO space accounting here.
4988 */
4989 if (sc->sc_type == WM_T_82547 &&
4990 wm_82547_txfifo_bugchk(sc, m0)) {
4991 DPRINTF(WM_DEBUG_TX,
4992 ("%s: TX: 82547 Tx FIFO bug detected\n",
4993 device_xname(sc->sc_dev)));
4994 ifp->if_flags |= IFF_OACTIVE;
4995 bus_dmamap_unload(sc->sc_dmat, dmamap);
4996 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
4997 break;
4998 }
4999
5000 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5001
5002 DPRINTF(WM_DEBUG_TX,
5003 ("%s: TX: packet has %d (%d) DMA segments\n",
5004 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5005
5006 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5007
5008 /*
5009 * Store a pointer to the packet so that we can free it
5010 * later.
5011 *
5012 		 * Initially, we consider the number of descriptors the
5013 		 * packet uses to be the number of DMA segments. This may
5014 		 * be incremented by 1 if we do checksum offload (a descriptor
5015 * is used to set the checksum context).
5016 */
5017 txs->txs_mbuf = m0;
5018 txs->txs_firstdesc = sc->sc_txnext;
5019 txs->txs_ndesc = segs_needed;
5020
5021 /* Set up offload parameters for this packet. */
5022 if (m0->m_pkthdr.csum_flags &
5023 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5024 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5025 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5026 if (wm_tx_offload(sc, txs, &cksumcmd,
5027 &cksumfields) != 0) {
5028 /* Error message already displayed. */
5029 bus_dmamap_unload(sc->sc_dmat, dmamap);
5030 continue;
5031 }
5032 } else {
5033 cksumcmd = 0;
5034 cksumfields = 0;
5035 }
5036
5037 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
5038
5039 /* Sync the DMA map. */
5040 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5041 BUS_DMASYNC_PREWRITE);
5042
5043 /* Initialize the transmit descriptor. */
5044 for (nexttx = sc->sc_txnext, seg = 0;
5045 seg < dmamap->dm_nsegs; seg++) {
5046 for (seglen = dmamap->dm_segs[seg].ds_len,
5047 curaddr = dmamap->dm_segs[seg].ds_addr;
5048 seglen != 0;
5049 curaddr += curlen, seglen -= curlen,
5050 nexttx = WM_NEXTTX(sc, nexttx)) {
5051 curlen = seglen;
5052
5053 /*
5054 * So says the Linux driver:
5055 * Work around for premature descriptor
5056 * write-backs in TSO mode. Append a
5057 * 4-byte sentinel descriptor.
5058 */
5059 if (use_tso &&
5060 seg == dmamap->dm_nsegs - 1 &&
5061 curlen > 8)
5062 curlen -= 4;
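				/*
				 * Shortening the final segment here makes
				 * the loop take one extra pass that emits
				 * the trailing 4 bytes as a tiny separate
				 * descriptor -- the sentinel accounted for
				 * by segs_needed++ above.
				 */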
5063
5064 wm_set_dma_addr(
5065 &sc->sc_txdescs[nexttx].wtx_addr,
5066 curaddr);
5067 sc->sc_txdescs[nexttx].wtx_cmdlen =
5068 htole32(cksumcmd | curlen);
5069 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
5070 0;
5071 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
5072 cksumfields;
5073 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5074 lasttx = nexttx;
5075
5076 DPRINTF(WM_DEBUG_TX,
5077 ("%s: TX: desc %d: low %#" PRIx64 ", "
5078 "len %#04zx\n",
5079 device_xname(sc->sc_dev), nexttx,
5080 (uint64_t)curaddr, curlen));
5081 }
5082 }
5083
5084 KASSERT(lasttx != -1);
5085
5086 /*
5087 * Set up the command byte on the last descriptor of
5088 * the packet. If we're in the interrupt delay window,
5089 * delay the interrupt.
5090 */
5091 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5092 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5093
5094 /*
5095 * If VLANs are enabled and the packet has a VLAN tag, set
5096 * up the descriptor to encapsulate the packet for us.
5097 *
5098 * This is only valid on the last descriptor of the packet.
5099 */
5100 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5101 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5102 htole32(WTX_CMD_VLE);
5103 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
5104 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5105 }
5106
5107 txs->txs_lastdesc = lasttx;
5108
5109 DPRINTF(WM_DEBUG_TX,
5110 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5111 device_xname(sc->sc_dev),
5112 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5113
5114 /* Sync the descriptors we're using. */
5115 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5116 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5117
5118 /* Give the packet to the chip. */
5119 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5120
5121 DPRINTF(WM_DEBUG_TX,
5122 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5123
5124 DPRINTF(WM_DEBUG_TX,
5125 ("%s: TX: finished transmitting packet, job %d\n",
5126 device_xname(sc->sc_dev), sc->sc_txsnext));
5127
5128 /* Advance the tx pointer. */
5129 sc->sc_txfree -= txs->txs_ndesc;
5130 sc->sc_txnext = nexttx;
5131
5132 sc->sc_txsfree--;
5133 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5134
5135 /* Pass the packet to any BPF listeners. */
5136 bpf_mtap(ifp, m0);
5137 }
5138
5139 if (m0 != NULL) {
5140 ifp->if_flags |= IFF_OACTIVE;
5141 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5142 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5143 m_freem(m0);
5144 }
5145
5146 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5147 /* No more slots; notify upper layer. */
5148 ifp->if_flags |= IFF_OACTIVE;
5149 }
5150
5151 if (sc->sc_txfree != ofree) {
5152 /* Set a watchdog timer in case the chip flakes out. */
5153 ifp->if_timer = 5;
5154 }
5155 }
5156
5157 /*
5158 * wm_nq_tx_offload:
5159 *
5160 * Set up TCP/IP checksumming parameters for the
5161 * specified packet, for NEWQUEUE devices
5162 */
5163 static int
5164 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
5165 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
5166 {
5167 struct mbuf *m0 = txs->txs_mbuf;
5168 struct m_tag *mtag;
5169 uint32_t vl_len, mssidx, cmdc;
5170 struct ether_header *eh;
5171 int offset, iphl;
5172
5173 /*
5174 * XXX It would be nice if the mbuf pkthdr had offset
5175 * fields for the protocol headers.
5176 */
5177 *cmdlenp = 0;
5178 *fieldsp = 0;
5179
5180 eh = mtod(m0, struct ether_header *);
5181 switch (htons(eh->ether_type)) {
5182 case ETHERTYPE_IP:
5183 case ETHERTYPE_IPV6:
5184 offset = ETHER_HDR_LEN;
5185 break;
5186
5187 case ETHERTYPE_VLAN:
5188 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5189 break;
5190
5191 default:
5192 /* Don't support this protocol or encapsulation. */
5193 *do_csum = false;
5194 return 0;
5195 }
5196 *do_csum = true;
5197 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
5198 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
5199
5200 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
5201 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
5202
5203 if ((m0->m_pkthdr.csum_flags &
5204 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
5205 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5206 } else {
5207 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5208 }
5209 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
5210 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
5211
5212 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5213 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
5214 << NQTXC_VLLEN_VLAN_SHIFT);
5215 *cmdlenp |= NQTX_CMD_VLE;
5216 }
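	/*
	 * Illustrative layout: an untagged IPv4/TCP frame yields
	 * vl_len = (ETHER_HDR_LEN << NQTXC_VLLEN_MACLEN_SHIFT) |
	 * (20 << NQTXC_VLLEN_IPLEN_SHIFT), i.e. a 14-byte MAC header,
	 * a 20-byte IP header and a zero VLAN field.
	 */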
5217
5218 mssidx = 0;
5219
5220 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5221 int hlen = offset + iphl;
5222 int tcp_hlen;
5223 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5224
5225 if (__predict_false(m0->m_len <
5226 (hlen + sizeof(struct tcphdr)))) {
5227 /*
5228 * TCP/IP headers are not in the first mbuf; we need
5229 * to do this the slow and painful way. Let's just
5230 * hope this doesn't happen very often.
5231 */
5232 struct tcphdr th;
5233
5234 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5235
5236 m_copydata(m0, hlen, sizeof(th), &th);
5237 if (v4) {
5238 struct ip ip;
5239
5240 m_copydata(m0, offset, sizeof(ip), &ip);
5241 ip.ip_len = 0;
5242 m_copyback(m0,
5243 offset + offsetof(struct ip, ip_len),
5244 sizeof(ip.ip_len), &ip.ip_len);
5245 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5246 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5247 } else {
5248 struct ip6_hdr ip6;
5249
5250 m_copydata(m0, offset, sizeof(ip6), &ip6);
5251 ip6.ip6_plen = 0;
5252 m_copyback(m0,
5253 offset + offsetof(struct ip6_hdr, ip6_plen),
5254 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5255 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5256 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5257 }
5258 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5259 sizeof(th.th_sum), &th.th_sum);
5260
5261 tcp_hlen = th.th_off << 2;
5262 } else {
5263 /*
5264 * TCP/IP headers are in the first mbuf; we can do
5265 * this the easy way.
5266 */
5267 struct tcphdr *th;
5268
5269 if (v4) {
5270 struct ip *ip =
5271 (void *)(mtod(m0, char *) + offset);
5272 th = (void *)(mtod(m0, char *) + hlen);
5273
5274 ip->ip_len = 0;
5275 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5276 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5277 } else {
5278 struct ip6_hdr *ip6 =
5279 (void *)(mtod(m0, char *) + offset);
5280 th = (void *)(mtod(m0, char *) + hlen);
5281
5282 ip6->ip6_plen = 0;
5283 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5284 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5285 }
5286 tcp_hlen = th->th_off << 2;
5287 }
5288 hlen += tcp_hlen;
5289 *cmdlenp |= NQTX_CMD_TSE;
5290
5291 if (v4) {
5292 WM_EVCNT_INCR(&sc->sc_ev_txtso);
5293 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
5294 } else {
5295 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5296 *fieldsp |= NQTXD_FIELDS_TUXSM;
5297 }
5298 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
5299 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5300 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
5301 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
5302 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
5303 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
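		/*
		 * E.g. an MSS of 1460 with a 20-byte TCP header gives
		 * mssidx = (1460 << NQTXC_MSSIDX_MSS_SHIFT) |
		 *     (20 << NQTXC_MSSIDX_L4LEN_SHIFT).
		 */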
5304 } else {
5305 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
5306 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5307 }
5308
5309 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
5310 *fieldsp |= NQTXD_FIELDS_IXSM;
5311 cmdc |= NQTXC_CMD_IP4;
5312 }
5313
5314 if (m0->m_pkthdr.csum_flags &
5315 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5316 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5317 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5318 cmdc |= NQTXC_CMD_TCP;
5319 } else {
5320 cmdc |= NQTXC_CMD_UDP;
5321 }
5322 cmdc |= NQTXC_CMD_IP4;
5323 *fieldsp |= NQTXD_FIELDS_TUXSM;
5324 }
5325 if (m0->m_pkthdr.csum_flags &
5326 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5327 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5328 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5329 cmdc |= NQTXC_CMD_TCP;
5330 } else {
5331 cmdc |= NQTXC_CMD_UDP;
5332 }
5333 cmdc |= NQTXC_CMD_IP6;
5334 *fieldsp |= NQTXD_FIELDS_TUXSM;
5335 }
5336
5337 /* Fill in the context descriptor. */
5338 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5339 htole32(vl_len);
5340 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5341 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5342 htole32(cmdc);
5343 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5344 htole32(mssidx);
5345 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5346 DPRINTF(WM_DEBUG_TX,
5347 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5348 sc->sc_txnext, 0, vl_len));
5349 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5350 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5351 txs->txs_ndesc++;
5352 return 0;
5353 }
5354
5355 /*
5356 * wm_nq_start: [ifnet interface function]
5357 *
5358 * Start packet transmission on the interface for NEWQUEUE devices
5359 */
5360 static void
5361 wm_nq_start(struct ifnet *ifp)
5362 {
5363 struct wm_softc *sc = ifp->if_softc;
5364
5365 WM_TX_LOCK(sc);
5366 if (!sc->sc_stopping)
5367 wm_nq_start_locked(ifp);
5368 WM_TX_UNLOCK(sc);
5369 }
5370
5371 static void
5372 wm_nq_start_locked(struct ifnet *ifp)
5373 {
5374 struct wm_softc *sc = ifp->if_softc;
5375 struct mbuf *m0;
5376 struct m_tag *mtag;
5377 struct wm_txsoft *txs;
5378 bus_dmamap_t dmamap;
5379 int error, nexttx, lasttx = -1, seg, segs_needed;
5380 bool do_csum, sent;
5381
5382 KASSERT(WM_TX_LOCKED(sc));
5383
5384 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5385 return;
5386
5387 sent = false;
5388
5389 /*
5390 * Loop through the send queue, setting up transmit descriptors
5391 * until we drain the queue, or use up all available transmit
5392 * descriptors.
5393 */
5394 for (;;) {
5395 m0 = NULL;
5396
5397 /* Get a work queue entry. */
5398 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5399 wm_txintr(sc);
5400 if (sc->sc_txsfree == 0) {
5401 DPRINTF(WM_DEBUG_TX,
5402 ("%s: TX: no free job descriptors\n",
5403 device_xname(sc->sc_dev)));
5404 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5405 break;
5406 }
5407 }
5408
5409 /* Grab a packet off the queue. */
5410 IFQ_DEQUEUE(&ifp->if_snd, m0);
5411 if (m0 == NULL)
5412 break;
5413
5414 DPRINTF(WM_DEBUG_TX,
5415 ("%s: TX: have packet to transmit: %p\n",
5416 device_xname(sc->sc_dev), m0));
5417
5418 txs = &sc->sc_txsoft[sc->sc_txsnext];
5419 dmamap = txs->txs_dmamap;
5420
5421 /*
5422 * Load the DMA map. If this fails, the packet either
5423 * didn't fit in the allotted number of segments, or we
5424 * were short on resources. For the too-many-segments
5425 * case, we simply report an error and drop the packet,
5426 * since we can't sanely copy a jumbo packet to a single
5427 * buffer.
5428 */
5429 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5430 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5431 if (error) {
5432 if (error == EFBIG) {
5433 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5434 log(LOG_ERR, "%s: Tx packet consumes too many "
5435 "DMA segments, dropping...\n",
5436 device_xname(sc->sc_dev));
5437 wm_dump_mbuf_chain(sc, m0);
5438 m_freem(m0);
5439 continue;
5440 }
5441 /* Short on resources, just stop for now. */
5442 DPRINTF(WM_DEBUG_TX,
5443 ("%s: TX: dmamap load failed: %d\n",
5444 device_xname(sc->sc_dev), error));
5445 break;
5446 }
5447
5448 segs_needed = dmamap->dm_nsegs;
5449
5450 /*
5451 * Ensure we have enough descriptors free to describe
5452 * the packet. Note, we always reserve one descriptor
5453 * at the end of the ring due to the semantics of the
5454 * TDT register, plus one more in the event we need
5455 * to load offload context.
5456 */
5457 if (segs_needed > sc->sc_txfree - 2) {
5458 /*
5459 * Not enough free descriptors to transmit this
5460 * packet. We haven't committed anything yet,
5461 * so just unload the DMA map, put the packet
5462 			 * back on the queue, and punt. Notify the upper
5463 * layer that there are no more slots left.
5464 */
5465 DPRINTF(WM_DEBUG_TX,
5466 ("%s: TX: need %d (%d) descriptors, have %d\n",
5467 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5468 segs_needed, sc->sc_txfree - 1));
5469 ifp->if_flags |= IFF_OACTIVE;
5470 bus_dmamap_unload(sc->sc_dmat, dmamap);
5471 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5472 break;
5473 }
5474
5475 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5476
5477 DPRINTF(WM_DEBUG_TX,
5478 ("%s: TX: packet has %d (%d) DMA segments\n",
5479 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5480
5481 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5482
5483 /*
5484 * Store a pointer to the packet so that we can free it
5485 * later.
5486 *
5487 * Initially, we consider the number of descriptors the
5488 		 * packet uses to be the number of DMA segments. This may be
5489 * incremented by 1 if we do checksum offload (a descriptor
5490 * is used to set the checksum context).
5491 */
5492 txs->txs_mbuf = m0;
5493 txs->txs_firstdesc = sc->sc_txnext;
5494 txs->txs_ndesc = segs_needed;
5495
5496 /* Set up offload parameters for this packet. */
5497 uint32_t cmdlen, fields, dcmdlen;
5498 if (m0->m_pkthdr.csum_flags &
5499 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5500 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5501 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5502 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5503 &do_csum) != 0) {
5504 /* Error message already displayed. */
5505 bus_dmamap_unload(sc->sc_dmat, dmamap);
5506 continue;
5507 }
5508 } else {
5509 do_csum = false;
5510 cmdlen = 0;
5511 fields = 0;
5512 }
5513
5514 /* Sync the DMA map. */
5515 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5516 BUS_DMASYNC_PREWRITE);
5517
5518 /* Initialize the first transmit descriptor. */
5519 nexttx = sc->sc_txnext;
5520 if (!do_csum) {
5521 /* setup a legacy descriptor */
5522 wm_set_dma_addr(
5523 &sc->sc_txdescs[nexttx].wtx_addr,
5524 dmamap->dm_segs[0].ds_addr);
5525 sc->sc_txdescs[nexttx].wtx_cmdlen =
5526 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5527 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5528 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5529 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5530 NULL) {
5531 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5532 htole32(WTX_CMD_VLE);
5533 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5534 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5535 } else {
5536 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5537 }
5538 dcmdlen = 0;
5539 } else {
5540 /* setup an advanced data descriptor */
5541 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5542 htole64(dmamap->dm_segs[0].ds_addr);
5543 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5544 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5545 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
5546 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5547 htole32(fields);
5548 DPRINTF(WM_DEBUG_TX,
5549 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5550 device_xname(sc->sc_dev), nexttx,
5551 (uint64_t)dmamap->dm_segs[0].ds_addr));
5552 DPRINTF(WM_DEBUG_TX,
5553 ("\t 0x%08x%08x\n", fields,
5554 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5555 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5556 }
5557
5558 lasttx = nexttx;
5559 nexttx = WM_NEXTTX(sc, nexttx);
5560 /*
5561 		 * Fill in the next descriptors. The legacy and advanced
5562 		 * formats are the same from here on.
5563 */
5564 for (seg = 1; seg < dmamap->dm_nsegs;
5565 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5566 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5567 htole64(dmamap->dm_segs[seg].ds_addr);
5568 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5569 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5570 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5571 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5572 lasttx = nexttx;
5573
5574 DPRINTF(WM_DEBUG_TX,
5575 ("%s: TX: desc %d: %#" PRIx64 ", "
5576 "len %#04zx\n",
5577 device_xname(sc->sc_dev), nexttx,
5578 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5579 dmamap->dm_segs[seg].ds_len));
5580 }
5581
5582 KASSERT(lasttx != -1);
5583
5584 /*
5585 * Set up the command byte on the last descriptor of
5586 * the packet. If we're in the interrupt delay window,
5587 * delay the interrupt.
5588 */
5589 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5590 (NQTX_CMD_EOP | NQTX_CMD_RS));
5591 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5592 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5593
5594 txs->txs_lastdesc = lasttx;
5595
5596 DPRINTF(WM_DEBUG_TX,
5597 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5598 device_xname(sc->sc_dev),
5599 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5600
5601 /* Sync the descriptors we're using. */
5602 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5603 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5604
5605 /* Give the packet to the chip. */
5606 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5607 sent = true;
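		/*
		 * Note: TDT always points one slot past the last valid
		 * descriptor, so the write above hands descriptors
		 * [sc_txnext, nexttx) to the hardware; this is also why
		 * one ring slot is kept permanently unused (see the
		 * reservation above).
		 */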
5608
5609 DPRINTF(WM_DEBUG_TX,
5610 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5611
5612 DPRINTF(WM_DEBUG_TX,
5613 ("%s: TX: finished transmitting packet, job %d\n",
5614 device_xname(sc->sc_dev), sc->sc_txsnext));
5615
5616 /* Advance the tx pointer. */
5617 sc->sc_txfree -= txs->txs_ndesc;
5618 sc->sc_txnext = nexttx;
5619
5620 sc->sc_txsfree--;
5621 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5622
5623 /* Pass the packet to any BPF listeners. */
5624 bpf_mtap(ifp, m0);
5625 }
5626
5627 if (m0 != NULL) {
5628 ifp->if_flags |= IFF_OACTIVE;
5629 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5630 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5631 m_freem(m0);
5632 }
5633
5634 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5635 /* No more slots; notify upper layer. */
5636 ifp->if_flags |= IFF_OACTIVE;
5637 }
5638
5639 if (sent) {
5640 /* Set a watchdog timer in case the chip flakes out. */
5641 ifp->if_timer = 5;
5642 }
5643 }
5644
5645 /* Interrupt */
5646
5647 /*
5648 * wm_txintr:
5649 *
5650 * Helper; handle transmit interrupts.
5651 */
5652 static void
5653 wm_txintr(struct wm_softc *sc)
5654 {
5655 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5656 struct wm_txsoft *txs;
5657 uint8_t status;
5658 int i;
5659
5660 if (sc->sc_stopping)
5661 return;
5662
5663 ifp->if_flags &= ~IFF_OACTIVE;
5664
5665 /*
5666 * Go through the Tx list and free mbufs for those
5667 * frames which have been transmitted.
5668 */
5669 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5670 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
5671 txs = &sc->sc_txsoft[i];
5672
5673 DPRINTF(WM_DEBUG_TX,
5674 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
5675
5676 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
5677 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5678
5679 status =
5680 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
5681 if ((status & WTX_ST_DD) == 0) {
5682 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
5683 BUS_DMASYNC_PREREAD);
5684 break;
5685 }
5686
5687 DPRINTF(WM_DEBUG_TX,
5688 ("%s: TX: job %d done: descs %d..%d\n",
5689 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
5690 txs->txs_lastdesc));
5691
5692 /*
5693 * XXX We should probably be using the statistics
5694 * XXX registers, but I don't know if they exist
5695 * XXX on chips before the i82544.
5696 */
5697
5698 #ifdef WM_EVENT_COUNTERS
5699 if (status & WTX_ST_TU)
5700 WM_EVCNT_INCR(&sc->sc_ev_tu);
5701 #endif /* WM_EVENT_COUNTERS */
5702
5703 if (status & (WTX_ST_EC|WTX_ST_LC)) {
5704 ifp->if_oerrors++;
5705 if (status & WTX_ST_LC)
5706 log(LOG_WARNING, "%s: late collision\n",
5707 device_xname(sc->sc_dev));
5708 else if (status & WTX_ST_EC) {
5709 ifp->if_collisions += 16;
5710 log(LOG_WARNING, "%s: excessive collisions\n",
5711 device_xname(sc->sc_dev));
5712 }
5713 } else
5714 ifp->if_opackets++;
5715
5716 sc->sc_txfree += txs->txs_ndesc;
5717 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
5718 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5719 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5720 m_freem(txs->txs_mbuf);
5721 txs->txs_mbuf = NULL;
5722 }
5723
5724 /* Update the dirty transmit buffer pointer. */
5725 sc->sc_txsdirty = i;
5726 DPRINTF(WM_DEBUG_TX,
5727 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
5728
5729 /*
5730 * If there are no more pending transmissions, cancel the watchdog
5731 * timer.
5732 */
5733 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
5734 ifp->if_timer = 0;
5735 }
5736
5737 /*
5738 * wm_rxintr:
5739 *
5740 * Helper; handle receive interrupts.
5741 */
5742 static void
5743 wm_rxintr(struct wm_softc *sc)
5744 {
5745 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5746 struct wm_rxsoft *rxs;
5747 struct mbuf *m;
5748 int i, len;
5749 uint8_t status, errors;
5750 uint16_t vlantag;
5751
5752 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
5753 rxs = &sc->sc_rxsoft[i];
5754
5755 DPRINTF(WM_DEBUG_RX,
5756 ("%s: RX: checking descriptor %d\n",
5757 device_xname(sc->sc_dev), i));
5758
5759 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5760
5761 status = sc->sc_rxdescs[i].wrx_status;
5762 errors = sc->sc_rxdescs[i].wrx_errors;
5763 len = le16toh(sc->sc_rxdescs[i].wrx_len);
5764 vlantag = sc->sc_rxdescs[i].wrx_special;
5765
5766 if ((status & WRX_ST_DD) == 0) {
5767 /* We have processed all of the receive descriptors. */
5768 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
5769 break;
5770 }
5771
5772 if (__predict_false(sc->sc_rxdiscard)) {
5773 DPRINTF(WM_DEBUG_RX,
5774 ("%s: RX: discarding contents of descriptor %d\n",
5775 device_xname(sc->sc_dev), i));
5776 WM_INIT_RXDESC(sc, i);
5777 if (status & WRX_ST_EOP) {
5778 /* Reset our state. */
5779 DPRINTF(WM_DEBUG_RX,
5780 ("%s: RX: resetting rxdiscard -> 0\n",
5781 device_xname(sc->sc_dev)));
5782 sc->sc_rxdiscard = 0;
5783 }
5784 continue;
5785 }
5786
5787 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5788 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
5789
5790 m = rxs->rxs_mbuf;
5791
5792 /*
5793 * Add a new receive buffer to the ring, unless of
5794 * course the length is zero. Treat the latter as a
5795 * failed mapping.
5796 */
5797 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
5798 /*
5799 * Failed, throw away what we've done so
5800 * far, and discard the rest of the packet.
5801 */
5802 ifp->if_ierrors++;
5803 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5804 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5805 WM_INIT_RXDESC(sc, i);
5806 if ((status & WRX_ST_EOP) == 0)
5807 sc->sc_rxdiscard = 1;
5808 if (sc->sc_rxhead != NULL)
5809 m_freem(sc->sc_rxhead);
5810 WM_RXCHAIN_RESET(sc);
5811 DPRINTF(WM_DEBUG_RX,
5812 ("%s: RX: Rx buffer allocation failed, "
5813 "dropping packet%s\n", device_xname(sc->sc_dev),
5814 sc->sc_rxdiscard ? " (discard)" : ""));
5815 continue;
5816 }
5817
5818 m->m_len = len;
5819 sc->sc_rxlen += len;
5820 DPRINTF(WM_DEBUG_RX,
5821 ("%s: RX: buffer at %p len %d\n",
5822 device_xname(sc->sc_dev), m->m_data, len));
5823
5824 /* If this is not the end of the packet, keep looking. */
5825 if ((status & WRX_ST_EOP) == 0) {
5826 WM_RXCHAIN_LINK(sc, m);
5827 DPRINTF(WM_DEBUG_RX,
5828 ("%s: RX: not yet EOP, rxlen -> %d\n",
5829 device_xname(sc->sc_dev), sc->sc_rxlen));
5830 continue;
5831 }
5832
5833 /*
5834 		 * Okay, we have the entire packet now. The chip is
5835 		 * configured to include the FCS except on the I350,
5836 		 * I354 and I21[01] (not all chips can be configured
5837 		 * to strip it), so we need to trim it. We may also
5838 		 * need to adjust the length of the previous mbuf in
5839 		 * the chain if the current mbuf is too short.
5840 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
5841 		 * register is always set on the I350, so we don't trim there.
5842 */
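		/*
		 * Trim sketch, assuming a 2-byte final mbuf: its two
		 * bytes are FCS, and the other two FCS bytes sit at the
		 * end of the previous mbuf, so
		 *
		 *	sc_rxtail->m_len -= (ETHER_CRC_LEN - 2);
		 *	m->m_len = 0;
		 *
		 * leaves exactly the payload in the chain.
		 */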
5843 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
5844 && (sc->sc_type != WM_T_I210)
5845 && (sc->sc_type != WM_T_I211)) {
5846 if (m->m_len < ETHER_CRC_LEN) {
5847 sc->sc_rxtail->m_len
5848 -= (ETHER_CRC_LEN - m->m_len);
5849 m->m_len = 0;
5850 } else
5851 m->m_len -= ETHER_CRC_LEN;
5852 len = sc->sc_rxlen - ETHER_CRC_LEN;
5853 } else
5854 len = sc->sc_rxlen;
5855
5856 WM_RXCHAIN_LINK(sc, m);
5857
5858 *sc->sc_rxtailp = NULL;
5859 m = sc->sc_rxhead;
5860
5861 WM_RXCHAIN_RESET(sc);
5862
5863 DPRINTF(WM_DEBUG_RX,
5864 ("%s: RX: have entire packet, len -> %d\n",
5865 device_xname(sc->sc_dev), len));
5866
5867 /* If an error occurred, update stats and drop the packet. */
5868 if (errors &
5869 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
5870 if (errors & WRX_ER_SE)
5871 log(LOG_WARNING, "%s: symbol error\n",
5872 device_xname(sc->sc_dev));
5873 else if (errors & WRX_ER_SEQ)
5874 log(LOG_WARNING, "%s: receive sequence error\n",
5875 device_xname(sc->sc_dev));
5876 else if (errors & WRX_ER_CE)
5877 log(LOG_WARNING, "%s: CRC error\n",
5878 device_xname(sc->sc_dev));
5879 m_freem(m);
5880 continue;
5881 }
5882
5883 /* No errors. Receive the packet. */
5884 m->m_pkthdr.rcvif = ifp;
5885 m->m_pkthdr.len = len;
5886
5887 /*
5888 * If VLANs are enabled, VLAN packets have been unwrapped
5889 * for us. Associate the tag with the packet.
5890 */
5891 		/* XXX should check for i350 and i354 */
5892 if ((status & WRX_ST_VP) != 0) {
5893 VLAN_INPUT_TAG(ifp, m,
5894 le16toh(vlantag),
5895 continue);
5896 }
5897
5898 /* Set up checksum info for this packet. */
5899 if ((status & WRX_ST_IXSM) == 0) {
5900 if (status & WRX_ST_IPCS) {
5901 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
5902 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
5903 if (errors & WRX_ER_IPE)
5904 m->m_pkthdr.csum_flags |=
5905 M_CSUM_IPv4_BAD;
5906 }
5907 if (status & WRX_ST_TCPCS) {
5908 /*
5909 * Note: we don't know if this was TCP or UDP,
5910 * so we just set both bits, and expect the
5911 * upper layers to deal.
5912 */
5913 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
5914 m->m_pkthdr.csum_flags |=
5915 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
5916 M_CSUM_TCPv6 | M_CSUM_UDPv6;
5917 if (errors & WRX_ER_TCPE)
5918 m->m_pkthdr.csum_flags |=
5919 M_CSUM_TCP_UDP_BAD;
5920 }
5921 }
5922
5923 ifp->if_ipackets++;
5924
5925 WM_RX_UNLOCK(sc);
5926
5927 /* Pass this up to any BPF listeners. */
5928 bpf_mtap(ifp, m);
5929
5930 /* Pass it on. */
5931 (*ifp->if_input)(ifp, m);
5932
5933 WM_RX_LOCK(sc);
5934
5935 if (sc->sc_stopping)
5936 break;
5937 }
5938
5939 /* Update the receive pointer. */
5940 sc->sc_rxptr = i;
5941
5942 DPRINTF(WM_DEBUG_RX,
5943 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
5944 }
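
/*
 * A note on sc_rxdiscard above: when a replacement buffer can't be
 * allocated in the middle of a multi-descriptor packet, the driver sets
 * sc_rxdiscard and throws away every following descriptor until one
 * carrying WRX_ST_EOP ends the packet; the flag is then cleared and
 * normal reception resumes.
 */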
5945
5946 /*
5947 * wm_linkintr_gmii:
5948 *
5949 * Helper; handle link interrupts for GMII.
5950 */
5951 static void
5952 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
5953 {
5954
5955 KASSERT(WM_TX_LOCKED(sc));
5956
5957 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5958 __func__));
5959
5960 if (icr & ICR_LSC) {
5961 DPRINTF(WM_DEBUG_LINK,
5962 ("%s: LINK: LSC -> mii_pollstat\n",
5963 device_xname(sc->sc_dev)));
5964 mii_pollstat(&sc->sc_mii);
5965 if (sc->sc_type == WM_T_82543) {
5966 int miistatus, active;
5967
5968 /*
5969 * With 82543, we need to force speed and
5970 * duplex on the MAC equal to what the PHY
5971 * speed and duplex configuration is.
5972 */
5973 miistatus = sc->sc_mii.mii_media_status;
5974
5975 if (miistatus & IFM_ACTIVE) {
5976 active = sc->sc_mii.mii_media_active;
5977 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5978 switch (IFM_SUBTYPE(active)) {
5979 case IFM_10_T:
5980 sc->sc_ctrl |= CTRL_SPEED_10;
5981 break;
5982 case IFM_100_TX:
5983 sc->sc_ctrl |= CTRL_SPEED_100;
5984 break;
5985 case IFM_1000_T:
5986 sc->sc_ctrl |= CTRL_SPEED_1000;
5987 break;
5988 default:
5989 /*
5990 				 * Fiber?
5991 				 * Should not enter here.
5992 */
5993 printf("unknown media (%x)\n",
5994 active);
5995 break;
5996 }
5997 if (active & IFM_FDX)
5998 sc->sc_ctrl |= CTRL_FD;
5999 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6000 }
6001 } else if ((sc->sc_type == WM_T_ICH8)
6002 && (sc->sc_phytype == WMPHY_IGP_3)) {
6003 wm_kmrn_lock_loss_workaround_ich8lan(sc);
6004 } else if (sc->sc_type == WM_T_PCH) {
6005 wm_k1_gig_workaround_hv(sc,
6006 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
6007 }
6008
6009 if ((sc->sc_phytype == WMPHY_82578)
6010 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
6011 == IFM_1000_T)) {
6012
6013 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
6014 delay(200*1000); /* XXX too big */
6015
6016 /* Link stall fix for link up */
6017 wm_gmii_hv_writereg(sc->sc_dev, 1,
6018 HV_MUX_DATA_CTRL,
6019 HV_MUX_DATA_CTRL_GEN_TO_MAC
6020 | HV_MUX_DATA_CTRL_FORCE_SPEED);
6021 wm_gmii_hv_writereg(sc->sc_dev, 1,
6022 HV_MUX_DATA_CTRL,
6023 HV_MUX_DATA_CTRL_GEN_TO_MAC);
6024 }
6025 }
6026 } else if (icr & ICR_RXSEQ) {
6027 DPRINTF(WM_DEBUG_LINK,
6028 ("%s: LINK Receive sequence error\n",
6029 device_xname(sc->sc_dev)));
6030 }
6031 }
6032
6033 /*
6034 * wm_linkintr_tbi:
6035 *
6036 * Helper; handle link interrupts for TBI mode.
6037 */
6038 static void
6039 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
6040 {
6041 uint32_t status;
6042
6043 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6044 __func__));
6045
6046 status = CSR_READ(sc, WMREG_STATUS);
6047 if (icr & ICR_LSC) {
6048 if (status & STATUS_LU) {
6049 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
6050 device_xname(sc->sc_dev),
6051 (status & STATUS_FD) ? "FDX" : "HDX"));
6052 /*
6053 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
6054 			 * automatically, so re-read CTRL into sc->sc_ctrl.
6055 */
6056
6057 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
6058 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6059 sc->sc_fcrtl &= ~FCRTL_XONE;
6060 if (status & STATUS_FD)
6061 sc->sc_tctl |=
6062 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6063 else
6064 sc->sc_tctl |=
6065 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6066 if (sc->sc_ctrl & CTRL_TFCE)
6067 sc->sc_fcrtl |= FCRTL_XONE;
6068 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6069 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
6070 WMREG_OLD_FCRTL : WMREG_FCRTL,
6071 sc->sc_fcrtl);
6072 sc->sc_tbi_linkup = 1;
6073 } else {
6074 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
6075 device_xname(sc->sc_dev)));
6076 sc->sc_tbi_linkup = 0;
6077 }
6078 /* Update LED */
6079 wm_tbi_serdes_set_linkled(sc);
6080 } else if (icr & ICR_RXSEQ) {
6081 DPRINTF(WM_DEBUG_LINK,
6082 ("%s: LINK: Receive sequence error\n",
6083 device_xname(sc->sc_dev)));
6084 }
6085 }
6086
6087 /*
6088 * wm_linkintr_serdes:
6089 *
6090  *	Helper; handle link interrupts for SERDES mode.
6091 */
6092 static void
6093 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
6094 {
6095 struct mii_data *mii = &sc->sc_mii;
6096 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6097 uint32_t pcs_adv, pcs_lpab, reg;
6098
6099 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6100 __func__));
6101
6102 if (icr & ICR_LSC) {
6103 /* Check PCS */
6104 reg = CSR_READ(sc, WMREG_PCS_LSTS);
6105 if ((reg & PCS_LSTS_LINKOK) != 0) {
6106 mii->mii_media_status |= IFM_ACTIVE;
6107 sc->sc_tbi_linkup = 1;
6108 } else {
6109 mii->mii_media_status |= IFM_NONE;
6110 sc->sc_tbi_linkup = 0;
6111 wm_tbi_serdes_set_linkled(sc);
6112 return;
6113 }
6114 mii->mii_media_active |= IFM_1000_SX;
6115 if ((reg & PCS_LSTS_FDX) != 0)
6116 mii->mii_media_active |= IFM_FDX;
6117 else
6118 mii->mii_media_active |= IFM_HDX;
6119 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6120 /* Check flow */
6121 reg = CSR_READ(sc, WMREG_PCS_LSTS);
6122 if ((reg & PCS_LSTS_AN_COMP) == 0) {
6123 DPRINTF(WM_DEBUG_LINK,
6124 ("XXX LINKOK but not ACOMP\n"));
6125 return;
6126 }
6127 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
6128 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
6129 DPRINTF(WM_DEBUG_LINK,
6130 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
6131 if ((pcs_adv & TXCW_SYM_PAUSE)
6132 && (pcs_lpab & TXCW_SYM_PAUSE)) {
6133 mii->mii_media_active |= IFM_FLOW
6134 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
6135 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
6136 && (pcs_adv & TXCW_ASYM_PAUSE)
6137 && (pcs_lpab & TXCW_SYM_PAUSE)
6138 && (pcs_lpab & TXCW_ASYM_PAUSE))
6139 mii->mii_media_active |= IFM_FLOW
6140 | IFM_ETH_TXPAUSE;
6141 else if ((pcs_adv & TXCW_SYM_PAUSE)
6142 && (pcs_adv & TXCW_ASYM_PAUSE)
6143 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
6144 && (pcs_lpab & TXCW_ASYM_PAUSE))
6145 mii->mii_media_active |= IFM_FLOW
6146 | IFM_ETH_RXPAUSE;
6147 }
6148 /* Update LED */
6149 wm_tbi_serdes_set_linkled(sc);
6150 } else {
6151 DPRINTF(WM_DEBUG_LINK,
6152 ("%s: LINK: Receive sequence error\n",
6153 device_xname(sc->sc_dev)));
6154 }
6155 }
6156
6157 /*
6158 * wm_linkintr:
6159 *
6160 * Helper; handle link interrupts.
6161 */
6162 static void
6163 wm_linkintr(struct wm_softc *sc, uint32_t icr)
6164 {
6165
6166 if (sc->sc_flags & WM_F_HAS_MII)
6167 wm_linkintr_gmii(sc, icr);
6168 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
6169 && (sc->sc_type >= WM_T_82575))
6170 wm_linkintr_serdes(sc, icr);
6171 else
6172 wm_linkintr_tbi(sc, icr);
6173 }
6174
6175 /*
6176 * wm_intr:
6177 *
6178 * Interrupt service routine.
6179 */
6180 static int
6181 wm_intr(void *arg)
6182 {
6183 struct wm_softc *sc = arg;
6184 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6185 uint32_t icr;
6186 int handled = 0;
6187
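	/*
	 * Reading ICR is read-to-clear: each CSR_READ() below both
	 * fetches and acknowledges the pending causes, so the loop
	 * exits once nothing in sc_icr remains asserted.
	 */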
6188 while (1 /* CONSTCOND */) {
6189 icr = CSR_READ(sc, WMREG_ICR);
6190 if ((icr & sc->sc_icr) == 0)
6191 break;
6192 rnd_add_uint32(&sc->rnd_source, icr);
6193
6194 WM_RX_LOCK(sc);
6195
6196 if (sc->sc_stopping) {
6197 WM_RX_UNLOCK(sc);
6198 break;
6199 }
6200
6201 handled = 1;
6202
6203 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6204 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
6205 DPRINTF(WM_DEBUG_RX,
6206 ("%s: RX: got Rx intr 0x%08x\n",
6207 device_xname(sc->sc_dev),
6208 icr & (ICR_RXDMT0|ICR_RXT0)));
6209 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
6210 }
6211 #endif
6212 wm_rxintr(sc);
6213
6214 WM_RX_UNLOCK(sc);
6215 WM_TX_LOCK(sc);
6216
6217 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6218 if (icr & ICR_TXDW) {
6219 DPRINTF(WM_DEBUG_TX,
6220 ("%s: TX: got TXDW interrupt\n",
6221 device_xname(sc->sc_dev)));
6222 WM_EVCNT_INCR(&sc->sc_ev_txdw);
6223 }
6224 #endif
6225 wm_txintr(sc);
6226
6227 if (icr & (ICR_LSC|ICR_RXSEQ)) {
6228 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
6229 wm_linkintr(sc, icr);
6230 }
6231
6232 WM_TX_UNLOCK(sc);
6233
6234 if (icr & ICR_RXO) {
6235 #if defined(WM_DEBUG)
6236 log(LOG_WARNING, "%s: Receive overrun\n",
6237 device_xname(sc->sc_dev));
6238 #endif /* defined(WM_DEBUG) */
6239 }
6240 }
6241
6242 if (handled) {
6243 /* Try to get more packets going. */
6244 ifp->if_start(ifp);
6245 }
6246
6247 return handled;
6248 }
6249
6250 /*
6251 * Media related.
6252 * GMII, SGMII, TBI (and SERDES)
6253 */
6254
6255 /* Common */
6256
6257 /*
6258 * wm_tbi_serdes_set_linkled:
6259 *
6260 * Update the link LED on TBI and SERDES devices.
6261 */
6262 static void
6263 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
6264 {
6265
6266 if (sc->sc_tbi_linkup)
6267 sc->sc_ctrl |= CTRL_SWDPIN(0);
6268 else
6269 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
6270
6271 	/* 82540 and newer devices drive this pin active-low, so invert it */
6272 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
6273
6274 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6275 }
6276
6277 /* GMII related */
6278
6279 /*
6280 * wm_gmii_reset:
6281 *
6282 * Reset the PHY.
6283 */
6284 static void
6285 wm_gmii_reset(struct wm_softc *sc)
6286 {
6287 uint32_t reg;
6288 int rv;
6289
6290 /* get phy semaphore */
6291 switch (sc->sc_type) {
6292 case WM_T_82571:
6293 case WM_T_82572:
6294 case WM_T_82573:
6295 case WM_T_82574:
6296 case WM_T_82583:
6297 /* XXX should get sw semaphore, too */
6298 rv = wm_get_swsm_semaphore(sc);
6299 break;
6300 case WM_T_82575:
6301 case WM_T_82576:
6302 case WM_T_82580:
6303 case WM_T_I350:
6304 case WM_T_I354:
6305 case WM_T_I210:
6306 case WM_T_I211:
6307 case WM_T_80003:
6308 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6309 break;
6310 case WM_T_ICH8:
6311 case WM_T_ICH9:
6312 case WM_T_ICH10:
6313 case WM_T_PCH:
6314 case WM_T_PCH2:
6315 case WM_T_PCH_LPT:
6316 rv = wm_get_swfwhw_semaphore(sc);
6317 break;
6318 default:
6319 		/* Nothing to do */
6320 rv = 0;
6321 break;
6322 }
6323 if (rv != 0) {
6324 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6325 __func__);
6326 return;
6327 }
6328
6329 switch (sc->sc_type) {
6330 case WM_T_82542_2_0:
6331 case WM_T_82542_2_1:
6332 /* null */
6333 break;
6334 case WM_T_82543:
6335 /*
6336 * With 82543, we need to force speed and duplex on the MAC
6337 * equal to what the PHY speed and duplex configuration is.
6338 * In addition, we need to perform a hardware reset on the PHY
6339 * to take it out of reset.
6340 */
6341 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6342 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6343
6344 /* The PHY reset pin is active-low. */
6345 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6346 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6347 CTRL_EXT_SWDPIN(4));
6348 reg |= CTRL_EXT_SWDPIO(4);
6349
6350 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6351 CSR_WRITE_FLUSH(sc);
6352 delay(10*1000);
6353
6354 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6355 CSR_WRITE_FLUSH(sc);
6356 delay(150);
6357 #if 0
6358 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6359 #endif
6360 delay(20*1000); /* XXX extra delay to get PHY ID? */
6361 break;
6362 case WM_T_82544: /* reset 10000us */
6363 case WM_T_82540:
6364 case WM_T_82545:
6365 case WM_T_82545_3:
6366 case WM_T_82546:
6367 case WM_T_82546_3:
6368 case WM_T_82541:
6369 case WM_T_82541_2:
6370 case WM_T_82547:
6371 case WM_T_82547_2:
6372 case WM_T_82571: /* reset 100us */
6373 case WM_T_82572:
6374 case WM_T_82573:
6375 case WM_T_82574:
6376 case WM_T_82575:
6377 case WM_T_82576:
6378 case WM_T_82580:
6379 case WM_T_I350:
6380 case WM_T_I354:
6381 case WM_T_I210:
6382 case WM_T_I211:
6383 case WM_T_82583:
6384 case WM_T_80003:
6385 /* generic reset */
6386 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6387 CSR_WRITE_FLUSH(sc);
6388 delay(20000);
6389 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6390 CSR_WRITE_FLUSH(sc);
6391 delay(20000);
6392
6393 if ((sc->sc_type == WM_T_82541)
6394 || (sc->sc_type == WM_T_82541_2)
6395 || (sc->sc_type == WM_T_82547)
6396 || (sc->sc_type == WM_T_82547_2)) {
6397 			/* Workarounds for IGP are done in igp_reset() */
6398 /* XXX add code to set LED after phy reset */
6399 }
6400 break;
6401 case WM_T_ICH8:
6402 case WM_T_ICH9:
6403 case WM_T_ICH10:
6404 case WM_T_PCH:
6405 case WM_T_PCH2:
6406 case WM_T_PCH_LPT:
6407 /* generic reset */
6408 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6409 CSR_WRITE_FLUSH(sc);
6410 delay(100);
6411 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6412 CSR_WRITE_FLUSH(sc);
6413 delay(150);
6414 break;
6415 default:
6416 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6417 __func__);
6418 break;
6419 }
6420
6421 /* release PHY semaphore */
6422 switch (sc->sc_type) {
6423 case WM_T_82571:
6424 case WM_T_82572:
6425 case WM_T_82573:
6426 case WM_T_82574:
6427 case WM_T_82583:
6428 /* XXX should put sw semaphore, too */
6429 wm_put_swsm_semaphore(sc);
6430 break;
6431 case WM_T_82575:
6432 case WM_T_82576:
6433 case WM_T_82580:
6434 case WM_T_I350:
6435 case WM_T_I354:
6436 case WM_T_I210:
6437 case WM_T_I211:
6438 case WM_T_80003:
6439 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6440 break;
6441 case WM_T_ICH8:
6442 case WM_T_ICH9:
6443 case WM_T_ICH10:
6444 case WM_T_PCH:
6445 case WM_T_PCH2:
6446 case WM_T_PCH_LPT:
6447 wm_put_swfwhw_semaphore(sc);
6448 break;
6449 default:
6450 		/* Nothing to do */
6451 rv = 0;
6452 break;
6453 }
6454
6455 /* get_cfg_done */
6456 wm_get_cfg_done(sc);
6457
6458 /* extra setup */
6459 switch (sc->sc_type) {
6460 case WM_T_82542_2_0:
6461 case WM_T_82542_2_1:
6462 case WM_T_82543:
6463 case WM_T_82544:
6464 case WM_T_82540:
6465 case WM_T_82545:
6466 case WM_T_82545_3:
6467 case WM_T_82546:
6468 case WM_T_82546_3:
6469 case WM_T_82541_2:
6470 case WM_T_82547_2:
6471 case WM_T_82571:
6472 case WM_T_82572:
6473 case WM_T_82573:
6474 case WM_T_82574:
6475 case WM_T_82575:
6476 case WM_T_82576:
6477 case WM_T_82580:
6478 case WM_T_I350:
6479 case WM_T_I354:
6480 case WM_T_I210:
6481 case WM_T_I211:
6482 case WM_T_82583:
6483 case WM_T_80003:
6484 /* null */
6485 break;
6486 case WM_T_82541:
6487 case WM_T_82547:
6488 		/* XXX Actively configure the LED after PHY reset */
6489 break;
6490 case WM_T_ICH8:
6491 case WM_T_ICH9:
6492 case WM_T_ICH10:
6493 case WM_T_PCH:
6494 case WM_T_PCH2:
6495 case WM_T_PCH_LPT:
6496 		/* Allow time for h/w to get to a quiescent state after reset */
6497 delay(10*1000);
6498
6499 if (sc->sc_type == WM_T_PCH)
6500 wm_hv_phy_workaround_ich8lan(sc);
6501
6502 if (sc->sc_type == WM_T_PCH2)
6503 wm_lv_phy_workaround_ich8lan(sc);
6504
6505 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6506 /*
6507 			 * Dummy read to clear the PHY wakeup bit after LCD
6508 			 * reset
6509 */
6510 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6511 }
6512
6513 /*
6514 		 * XXX Configure the LCD with the extended configuration region
6515 * in NVM
6516 */
6517
6518 /* Configure the LCD with the OEM bits in NVM */
6519 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6520 || (sc->sc_type == WM_T_PCH_LPT)) {
6521 /*
6522 * Disable LPLU.
6523 * XXX It seems that 82567 has LPLU, too.
6524 */
6525 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6526 reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
6527 reg |= HV_OEM_BITS_ANEGNOW;
6528 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6529 }
6530 break;
6531 default:
6532 panic("%s: unknown type\n", __func__);
6533 break;
6534 }
6535 }
6536
6537 /*
6538 * wm_get_phy_id_82575:
6539 *
6540 * Return PHY ID. Return -1 if it failed.
6541 */
6542 static int
6543 wm_get_phy_id_82575(struct wm_softc *sc)
6544 {
6545 uint32_t reg;
6546 int phyid = -1;
6547
6548 /* XXX */
6549 if ((sc->sc_flags & WM_F_SGMII) == 0)
6550 return -1;
6551
6552 if (wm_sgmii_uses_mdio(sc)) {
6553 switch (sc->sc_type) {
6554 case WM_T_82575:
6555 case WM_T_82576:
6556 reg = CSR_READ(sc, WMREG_MDIC);
6557 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6558 break;
6559 case WM_T_82580:
6560 case WM_T_I350:
6561 case WM_T_I354:
6562 case WM_T_I210:
6563 case WM_T_I211:
6564 reg = CSR_READ(sc, WMREG_MDICNFG);
6565 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6566 break;
6567 default:
6568 return -1;
6569 }
6570 }
6571
6572 return phyid;
6573 }
6574
6575
6576 /*
6577 * wm_gmii_mediainit:
6578 *
6579 * Initialize media for use on 1000BASE-T devices.
6580 */
6581 static void
6582 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6583 {
6584 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6585 struct mii_data *mii = &sc->sc_mii;
6586 uint32_t reg;
6587
6588 /* We have GMII. */
6589 sc->sc_flags |= WM_F_HAS_MII;
6590
6591 if (sc->sc_type == WM_T_80003)
6592 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6593 else
6594 sc->sc_tipg = TIPG_1000T_DFLT;
6595
6596 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
6597 if ((sc->sc_type == WM_T_82580)
6598 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6599 || (sc->sc_type == WM_T_I211)) {
6600 reg = CSR_READ(sc, WMREG_PHPM);
6601 reg &= ~PHPM_GO_LINK_D;
6602 CSR_WRITE(sc, WMREG_PHPM, reg);
6603 }
6604
6605 /*
6606 * Let the chip set speed/duplex on its own based on
6607 * signals from the PHY.
6608 * XXXbouyer - I'm not sure this is right for the 80003,
6609 * the em driver only sets CTRL_SLU here - but it seems to work.
6610 */
6611 sc->sc_ctrl |= CTRL_SLU;
6612 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6613
6614 /* Initialize our media structures and probe the GMII. */
6615 mii->mii_ifp = ifp;
6616
6617 /*
6618 * Determine the PHY access method.
6619 *
6620 * For SGMII, use SGMII specific method.
6621 *
6622 * For some devices, we can determine the PHY access method
6623 * from sc_type.
6624 *
6625 * For ICH and PCH variants, it's difficult to determine the PHY
6626 * access method by sc_type, so use the PCI product ID for some
6627 * devices.
6628 	 * For other ICH8 variants, try igp's method first. If the PHY
6629 	 * can't be detected, fall back to bm's method.
6630 */
6631 switch (prodid) {
6632 case PCI_PRODUCT_INTEL_PCH_M_LM:
6633 case PCI_PRODUCT_INTEL_PCH_M_LC:
6634 /* 82577 */
6635 sc->sc_phytype = WMPHY_82577;
6636 break;
6637 case PCI_PRODUCT_INTEL_PCH_D_DM:
6638 case PCI_PRODUCT_INTEL_PCH_D_DC:
6639 /* 82578 */
6640 sc->sc_phytype = WMPHY_82578;
6641 break;
6642 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6643 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6644 /* 82579 */
6645 sc->sc_phytype = WMPHY_82579;
6646 break;
6647 case PCI_PRODUCT_INTEL_82801I_BM:
6648 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6649 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6650 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6651 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6652 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6653 /* 82567 */
6654 sc->sc_phytype = WMPHY_BM;
6655 mii->mii_readreg = wm_gmii_bm_readreg;
6656 mii->mii_writereg = wm_gmii_bm_writereg;
6657 break;
6658 default:
6659 if (((sc->sc_flags & WM_F_SGMII) != 0)
6660 && !wm_sgmii_uses_mdio(sc)){
6661 /* SGMII */
6662 mii->mii_readreg = wm_sgmii_readreg;
6663 mii->mii_writereg = wm_sgmii_writereg;
6664 } else if (sc->sc_type >= WM_T_80003) {
6665 /* 80003 */
6666 mii->mii_readreg = wm_gmii_i80003_readreg;
6667 mii->mii_writereg = wm_gmii_i80003_writereg;
6668 } else if (sc->sc_type >= WM_T_I210) {
6669 /* I210 and I211 */
6670 mii->mii_readreg = wm_gmii_gs40g_readreg;
6671 mii->mii_writereg = wm_gmii_gs40g_writereg;
6672 } else if (sc->sc_type >= WM_T_82580) {
6673 /* 82580, I350 and I354 */
6674 sc->sc_phytype = WMPHY_82580;
6675 mii->mii_readreg = wm_gmii_82580_readreg;
6676 mii->mii_writereg = wm_gmii_82580_writereg;
6677 } else if (sc->sc_type >= WM_T_82544) {
6678 			/* 82544, 8254[056], 8254[17], 8257[1234] and 82583 */
6679 mii->mii_readreg = wm_gmii_i82544_readreg;
6680 mii->mii_writereg = wm_gmii_i82544_writereg;
6681 } else {
6682 mii->mii_readreg = wm_gmii_i82543_readreg;
6683 mii->mii_writereg = wm_gmii_i82543_writereg;
6684 }
6685 break;
6686 }
6687 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
6688 /* All PCH* use _hv_ */
6689 mii->mii_readreg = wm_gmii_hv_readreg;
6690 mii->mii_writereg = wm_gmii_hv_writereg;
6691 }
6692 mii->mii_statchg = wm_gmii_statchg;
6693
6694 wm_gmii_reset(sc);
6695
6696 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6697 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6698 wm_gmii_mediastatus);
6699
6700 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6701 || (sc->sc_type == WM_T_82580)
6702 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6703 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6704 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6705 /* Attach only one port */
6706 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6707 MII_OFFSET_ANY, MIIF_DOPAUSE);
6708 } else {
6709 int i, id;
6710 uint32_t ctrl_ext;
6711
6712 id = wm_get_phy_id_82575(sc);
6713 if (id != -1) {
6714 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6715 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6716 }
6717 if ((id == -1)
6718 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6719 /* Power on sgmii phy if it is disabled */
6720 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6721 CSR_WRITE(sc, WMREG_CTRL_EXT,
6722 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6723 CSR_WRITE_FLUSH(sc);
6724 delay(300*1000); /* XXX too long */
6725
6726 				/* Try PHY addresses 1 through 7 */
6727 for (i = 1; i < 8; i++)
6728 mii_attach(sc->sc_dev, &sc->sc_mii,
6729 0xffffffff, i, MII_OFFSET_ANY,
6730 MIIF_DOPAUSE);
6731
6732 /* restore previous sfp cage power state */
6733 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6734 }
6735 }
6736 } else {
6737 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6738 MII_OFFSET_ANY, MIIF_DOPAUSE);
6739 }
6740
6741 /*
6742 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6743 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6744 */
6745 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6746 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6747 wm_set_mdio_slow_mode_hv(sc);
6748 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6749 MII_OFFSET_ANY, MIIF_DOPAUSE);
6750 }
6751
6752 /*
6753 * (For ICH8 variants)
6754 * If PHY detection failed, use BM's r/w function and retry.
6755 */
6756 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6757 /* if failed, retry with *_bm_* */
6758 mii->mii_readreg = wm_gmii_bm_readreg;
6759 mii->mii_writereg = wm_gmii_bm_writereg;
6760
6761 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6762 MII_OFFSET_ANY, MIIF_DOPAUSE);
6763 }
6764
6765 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6766 		/* No PHY was found */
6767 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6768 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6769 sc->sc_phytype = WMPHY_NONE;
6770 } else {
6771 /*
6772 * PHY Found!
6773 * Check PHY type.
6774 */
6775 uint32_t model;
6776 struct mii_softc *child;
6777
6778 child = LIST_FIRST(&mii->mii_phys);
6779 if (device_is_a(child->mii_dev, "igphy")) {
6780 struct igphy_softc *isc = (struct igphy_softc *)child;
6781
6782 model = isc->sc_mii.mii_mpd_model;
6783 if (model == MII_MODEL_yyINTEL_I82566)
6784 sc->sc_phytype = WMPHY_IGP_3;
6785 }
6786
6787 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6788 }
6789 }
6790
6791 /*
6792 * wm_gmii_mediachange: [ifmedia interface function]
6793 *
6794 * Set hardware to newly-selected media on a 1000BASE-T device.
6795 */
6796 static int
6797 wm_gmii_mediachange(struct ifnet *ifp)
6798 {
6799 struct wm_softc *sc = ifp->if_softc;
6800 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6801 int rc;
6802
6803 if ((ifp->if_flags & IFF_UP) == 0)
6804 return 0;
6805
6806 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6807 sc->sc_ctrl |= CTRL_SLU;
6808 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6809 || (sc->sc_type > WM_T_82543)) {
6810 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6811 } else {
6812 sc->sc_ctrl &= ~CTRL_ASDE;
6813 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6814 if (ife->ifm_media & IFM_FDX)
6815 sc->sc_ctrl |= CTRL_FD;
6816 switch (IFM_SUBTYPE(ife->ifm_media)) {
6817 case IFM_10_T:
6818 sc->sc_ctrl |= CTRL_SPEED_10;
6819 break;
6820 case IFM_100_TX:
6821 sc->sc_ctrl |= CTRL_SPEED_100;
6822 break;
6823 case IFM_1000_T:
6824 sc->sc_ctrl |= CTRL_SPEED_1000;
6825 break;
6826 default:
6827 panic("wm_gmii_mediachange: bad media 0x%x",
6828 ife->ifm_media);
6829 }
6830 }
6831 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6832 if (sc->sc_type <= WM_T_82543)
6833 wm_gmii_reset(sc);
6834
6835 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6836 return 0;
6837 return rc;
6838 }
6839
6840 /*
6841 * wm_gmii_mediastatus: [ifmedia interface function]
6842 *
6843 * Get the current interface media status on a 1000BASE-T device.
6844 */
6845 static void
6846 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6847 {
6848 struct wm_softc *sc = ifp->if_softc;
6849
6850 ether_mediastatus(ifp, ifmr);
6851 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6852 | sc->sc_flowflags;
6853 }
6854
6855 #define MDI_IO CTRL_SWDPIN(2)
6856 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6857 #define MDI_CLK CTRL_SWDPIN(3)
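
/*
 * These helpers bit-bang IEEE 802.3 clause 22 management frames on the
 * i82543's software-definable pins.  A read frame, as assembled by
 * wm_gmii_i82543_readreg() below, looks like this on the wire (a
 * sketch, MSB first):
 *
 *	1 x32		preamble
 *	01		start (MII_COMMAND_START)
 *	10		read opcode (MII_COMMAND_READ)
 *	ppppp		5-bit PHY address
 *	rrrrr		5-bit register address
 *	zz		turnaround; the PHY takes over the data line
 *	d x16		data bits, clocked in by wm_i82543_mii_recvbits()
 */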
6858
6859 static void
6860 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6861 {
6862 uint32_t i, v;
6863
6864 v = CSR_READ(sc, WMREG_CTRL);
6865 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6866 v |= MDI_DIR | CTRL_SWDPIO(3);
6867
6868 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6869 if (data & i)
6870 v |= MDI_IO;
6871 else
6872 v &= ~MDI_IO;
6873 CSR_WRITE(sc, WMREG_CTRL, v);
6874 CSR_WRITE_FLUSH(sc);
6875 delay(10);
6876 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6877 CSR_WRITE_FLUSH(sc);
6878 delay(10);
6879 CSR_WRITE(sc, WMREG_CTRL, v);
6880 CSR_WRITE_FLUSH(sc);
6881 delay(10);
6882 }
6883 }
6884
6885 static uint32_t
6886 wm_i82543_mii_recvbits(struct wm_softc *sc)
6887 {
6888 uint32_t v, i, data = 0;
6889
6890 v = CSR_READ(sc, WMREG_CTRL);
6891 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6892 v |= CTRL_SWDPIO(3);
6893
6894 CSR_WRITE(sc, WMREG_CTRL, v);
6895 CSR_WRITE_FLUSH(sc);
6896 delay(10);
6897 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6898 CSR_WRITE_FLUSH(sc);
6899 delay(10);
6900 CSR_WRITE(sc, WMREG_CTRL, v);
6901 CSR_WRITE_FLUSH(sc);
6902 delay(10);
6903
6904 for (i = 0; i < 16; i++) {
6905 data <<= 1;
6906 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6907 CSR_WRITE_FLUSH(sc);
6908 delay(10);
6909 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6910 data |= 1;
6911 CSR_WRITE(sc, WMREG_CTRL, v);
6912 CSR_WRITE_FLUSH(sc);
6913 delay(10);
6914 }
6915
6916 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6917 CSR_WRITE_FLUSH(sc);
6918 delay(10);
6919 CSR_WRITE(sc, WMREG_CTRL, v);
6920 CSR_WRITE_FLUSH(sc);
6921 delay(10);
6922
6923 return data;
6924 }
6925
6926 #undef MDI_IO
6927 #undef MDI_DIR
6928 #undef MDI_CLK
6929
6930 /*
6931 * wm_gmii_i82543_readreg: [mii interface function]
6932 *
6933 * Read a PHY register on the GMII (i82543 version).
6934 */
6935 static int
6936 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6937 {
6938 struct wm_softc *sc = device_private(self);
6939 int rv;
6940
6941 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6942 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
6943 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6944 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
6945
6946 DPRINTF(WM_DEBUG_GMII,
6947 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6948 device_xname(sc->sc_dev), phy, reg, rv));
6949
6950 return rv;
6951 }
6952
6953 /*
6954 * wm_gmii_i82543_writereg: [mii interface function]
6955 *
6956 * Write a PHY register on the GMII (i82543 version).
6957 */
6958 static void
6959 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6960 {
6961 struct wm_softc *sc = device_private(self);
6962
6963 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6964 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6965 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6966 (MII_COMMAND_START << 30), 32);
6967 }
6968
6969 /*
6970 * wm_gmii_i82544_readreg: [mii interface function]
6971 *
6972 * Read a PHY register on the GMII.
6973 */
6974 static int
6975 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6976 {
6977 struct wm_softc *sc = device_private(self);
6978 uint32_t mdic = 0;
6979 int i, rv;
6980
6981 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6982 MDIC_REGADD(reg));
6983
6984 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6985 mdic = CSR_READ(sc, WMREG_MDIC);
6986 if (mdic & MDIC_READY)
6987 break;
6988 delay(50);
6989 }
6990
6991 if ((mdic & MDIC_READY) == 0) {
6992 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6993 device_xname(sc->sc_dev), phy, reg);
6994 rv = 0;
6995 } else if (mdic & MDIC_E) {
6996 #if 0 /* This is normal if no PHY is present. */
6997 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6998 device_xname(sc->sc_dev), phy, reg);
6999 #endif
7000 rv = 0;
7001 } else {
7002 rv = MDIC_DATA(mdic);
7003 if (rv == 0xffff)
7004 rv = 0;
7005 }
7006
7007 return rv;
7008 }
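
/*
 * For reference, a sketch of the MDIC register as used above (field
 * placement per the usual e1000 layout, encoded by the MDIC_* macros):
 *
 *	MDIC_DATA	bits 15:0	data written, or data read back
 *	MDIC_REGADD	bits 20:16	PHY register address
 *	MDIC_PHYADD	bits 25:21	PHY address
 *	MDIC_OP_*			read or write opcode
 *	MDIC_READY			set by hardware on completion
 *	MDIC_E				error indication
 *
 * Both the read path above and the write path below poll MDIC_READY
 * for up to WM_GEN_POLL_TIMEOUT * 3 iterations of 50us each.
 */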
7009
7010 /*
7011 * wm_gmii_i82544_writereg: [mii interface function]
7012 *
7013 * Write a PHY register on the GMII.
7014 */
7015 static void
7016 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
7017 {
7018 struct wm_softc *sc = device_private(self);
7019 uint32_t mdic = 0;
7020 int i;
7021
7022 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
7023 MDIC_REGADD(reg) | MDIC_DATA(val));
7024
7025 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
7026 mdic = CSR_READ(sc, WMREG_MDIC);
7027 if (mdic & MDIC_READY)
7028 break;
7029 delay(50);
7030 }
7031
7032 if ((mdic & MDIC_READY) == 0)
7033 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
7034 device_xname(sc->sc_dev), phy, reg);
7035 else if (mdic & MDIC_E)
7036 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
7037 device_xname(sc->sc_dev), phy, reg);
7038 }
7039
7040 /*
7041 * wm_gmii_i80003_readreg: [mii interface function]
7042 *
7043  *	Read a PHY register on the Kumeran bus.
7044  * This could be handled by the PHY layer if we didn't have to lock the
7045  * resource ...
7046 */
7047 static int
7048 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
7049 {
7050 struct wm_softc *sc = device_private(self);
7051 int sem;
7052 int rv;
7053
7054 if (phy != 1) /* only one PHY on kumeran bus */
7055 return 0;
7056
7057 sem = swfwphysem[sc->sc_funcid];
7058 if (wm_get_swfw_semaphore(sc, sem)) {
7059 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7060 __func__);
7061 return 0;
7062 }
7063
7064 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7065 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7066 reg >> GG82563_PAGE_SHIFT);
7067 } else {
7068 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7069 reg >> GG82563_PAGE_SHIFT);
7070 }
7071 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
7072 delay(200);
7073 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7074 delay(200);
7075
7076 wm_put_swfw_semaphore(sc, sem);
7077 return rv;
7078 }
7079
7080 /*
7081 * wm_gmii_i80003_writereg: [mii interface function]
7082 *
7083  *	Write a PHY register on the Kumeran bus.
7084  * This could be handled by the PHY layer if we didn't have to lock the
7085  * resource ...
7086 */
7087 static void
7088 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
7089 {
7090 struct wm_softc *sc = device_private(self);
7091 int sem;
7092
7093 if (phy != 1) /* only one PHY on kumeran bus */
7094 return;
7095
7096 sem = swfwphysem[sc->sc_funcid];
7097 if (wm_get_swfw_semaphore(sc, sem)) {
7098 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7099 __func__);
7100 return;
7101 }
7102
7103 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7104 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7105 reg >> GG82563_PAGE_SHIFT);
7106 } else {
7107 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7108 reg >> GG82563_PAGE_SHIFT);
7109 }
7110 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
7111 delay(200);
7112 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7113 delay(200);
7114
7115 wm_put_swfw_semaphore(sc, sem);
7116 }
7117
7118 /*
7119 * wm_gmii_bm_readreg: [mii interface function]
7120 *
7121  *	Read a BM PHY (82567) register.
7122  * This could be handled by the PHY layer if we didn't have to lock the
7123  * resource ...
7124 */
7125 static int
7126 wm_gmii_bm_readreg(device_t self, int phy, int reg)
7127 {
7128 struct wm_softc *sc = device_private(self);
7129 int sem;
7130 int rv;
7131
7132 sem = swfwphysem[sc->sc_funcid];
7133 if (wm_get_swfw_semaphore(sc, sem)) {
7134 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7135 __func__);
7136 return 0;
7137 }
7138
7139 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7140 if (phy == 1)
7141 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7142 reg);
7143 else
7144 wm_gmii_i82544_writereg(self, phy,
7145 GG82563_PHY_PAGE_SELECT,
7146 reg >> GG82563_PAGE_SHIFT);
7147 }
7148
7149 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7150 wm_put_swfw_semaphore(sc, sem);
7151 return rv;
7152 }
7153
7154 /*
7155 * wm_gmii_bm_writereg: [mii interface function]
7156 *
7157  *	Write a BM PHY (82567) register.
7158  * This could be handled by the PHY layer if we didn't have to lock the
7159  * resource ...
7160 */
7161 static void
7162 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
7163 {
7164 struct wm_softc *sc = device_private(self);
7165 int sem;
7166
7167 sem = swfwphysem[sc->sc_funcid];
7168 if (wm_get_swfw_semaphore(sc, sem)) {
7169 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7170 __func__);
7171 return;
7172 }
7173
7174 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7175 if (phy == 1)
7176 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7177 reg);
7178 else
7179 wm_gmii_i82544_writereg(self, phy,
7180 GG82563_PHY_PAGE_SELECT,
7181 reg >> GG82563_PAGE_SHIFT);
7182 }
7183
7184 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7185 wm_put_swfw_semaphore(sc, sem);
7186 }
7187
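/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write a BM PHY wakeup register, which lives on
 *	page 800.  Page 800 is reached indirectly: enable wakeup-register
 *	access from page 769, point the address opcode register at the
 *	target register, move the data through the data opcode register,
 *	then restore the page 769 enable bits.
 */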
7188 static void
7189 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
7190 {
7191 struct wm_softc *sc = device_private(self);
7192 uint16_t regnum = BM_PHY_REG_NUM(offset);
7193 uint16_t wuce;
7194
7195 /* XXX Gig must be disabled for MDIO accesses to page 800 */
7196 if (sc->sc_type == WM_T_PCH) {
7197 		/* XXX The e1000 driver does nothing here... why? */
7198 }
7199
7200 /* Set page 769 */
7201 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7202 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7203
7204 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
7205
7206 wuce &= ~BM_WUC_HOST_WU_BIT;
7207 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
7208 wuce | BM_WUC_ENABLE_BIT);
7209
7210 /* Select page 800 */
7211 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7212 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
7213
7214 /* Write page 800 */
7215 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
7216
7217 if (rd)
7218 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7219 else
7220 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7221
7222 /* Set page 769 */
7223 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7224 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7225
7226 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7227 }
7228
7229 /*
7230 * wm_gmii_hv_readreg: [mii interface function]
7231 *
7232  *	Read a PHY register on the HV (PCH family) PHYs.
7233  * This could be handled by the PHY layer if we didn't have to lock the
7234  * resource ...
7235 */
7236 static int
7237 wm_gmii_hv_readreg(device_t self, int phy, int reg)
7238 {
7239 struct wm_softc *sc = device_private(self);
7240 uint16_t page = BM_PHY_REG_PAGE(reg);
7241 uint16_t regnum = BM_PHY_REG_NUM(reg);
7242 uint16_t val;
7243 int rv;
7244
7245 if (wm_get_swfwhw_semaphore(sc)) {
7246 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7247 __func__);
7248 return 0;
7249 }
7250
7251 	/* XXX Work around MDIO access failing while the cable is disconnected */
7252 if (sc->sc_phytype == WMPHY_82577) {
7253 /* XXX must write */
7254 }
7255
7256 	/* Page 800 works differently from the rest, so it has its own function */
7257 if (page == BM_WUC_PAGE) {
7258 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
7259 return val;
7260 }
7261
7262 /*
7263 	 * Pages below 768 work differently from the rest and are not
7264 	 * handled here yet
7265 */
7266 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7267 printf("gmii_hv_readreg!!!\n");
7268 return 0;
7269 }
7270
7271 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7272 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7273 page << BME1000_PAGE_SHIFT);
7274 }
7275
7276 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7277 wm_put_swfwhw_semaphore(sc);
7278 return rv;
7279 }
7280
7281 /*
7282 * wm_gmii_hv_writereg: [mii interface function]
7283 *
7284  *	Write a PHY register on the HV (PCH family) PHYs.
7285  * This could be handled by the PHY layer if we didn't have to lock the
7286  * resource ...
7287 */
7288 static void
7289 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7290 {
7291 struct wm_softc *sc = device_private(self);
7292 uint16_t page = BM_PHY_REG_PAGE(reg);
7293 uint16_t regnum = BM_PHY_REG_NUM(reg);
7294
7295 if (wm_get_swfwhw_semaphore(sc)) {
7296 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7297 __func__);
7298 return;
7299 }
7300
7301 	/* XXX Work around MDIO access failing while the cable is disconnected */
7302
7303 	/* Page 800 works differently from the rest, so it has its own function */
7304 if (page == BM_WUC_PAGE) {
7305 uint16_t tmp;
7306
7307 tmp = val;
7308 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
7309 return;
7310 }
7311
7312 /*
7313 	 * Pages below 768 work differently from the rest and are not
7314 	 * handled here yet
7315 */
7316 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7317 printf("gmii_hv_writereg!!!\n");
7318 return;
7319 }
7320
7321 /*
7322 * XXX Workaround MDIO accesses being disabled after entering IEEE
7323 * Power Down (whenever bit 11 of the PHY control register is set)
7324 */
7325
7326 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7327 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7328 page << BME1000_PAGE_SHIFT);
7329 }
7330
7331 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7332 wm_put_swfwhw_semaphore(sc);
7333 }
7334
7335 /*
7336 * wm_gmii_82580_readreg: [mii interface function]
7337 *
7338 * Read a PHY register on the 82580 and I350.
7339 * This could be handled by the PHY layer if we didn't have to lock the
7340  * resource ...
7341 */
7342 static int
7343 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7344 {
7345 struct wm_softc *sc = device_private(self);
7346 int sem;
7347 int rv;
7348
7349 sem = swfwphysem[sc->sc_funcid];
7350 if (wm_get_swfw_semaphore(sc, sem)) {
7351 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7352 __func__);
7353 return 0;
7354 }
7355
7356 rv = wm_gmii_i82544_readreg(self, phy, reg);
7357
7358 wm_put_swfw_semaphore(sc, sem);
7359 return rv;
7360 }
7361
7362 /*
7363 * wm_gmii_82580_writereg: [mii interface function]
7364 *
7365 * Write a PHY register on the 82580 and I350.
7366 * This could be handled by the PHY layer if we didn't have to lock the
7367  * resource ...
7368 */
7369 static void
7370 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7371 {
7372 struct wm_softc *sc = device_private(self);
7373 int sem;
7374
7375 sem = swfwphysem[sc->sc_funcid];
7376 if (wm_get_swfw_semaphore(sc, sem)) {
7377 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7378 __func__);
7379 return;
7380 }
7381
7382 wm_gmii_i82544_writereg(self, phy, reg, val);
7383
7384 wm_put_swfw_semaphore(sc, sem);
7385 }
7386
7387 /*
7388 * wm_gmii_gs40g_readreg: [mii interface function]
7389 *
7390  *	Read a PHY register on the I210 and I211.
7391  * This could be handled by the PHY layer if we didn't have to lock the
7392  * resource ...
7393 */
7394 static int
7395 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
7396 {
7397 struct wm_softc *sc = device_private(self);
7398 int sem;
7399 int page, offset;
7400 int rv;
7401
7402 /* Acquire semaphore */
7403 sem = swfwphysem[sc->sc_funcid];
7404 if (wm_get_swfw_semaphore(sc, sem)) {
7405 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7406 __func__);
7407 return 0;
7408 }
7409
7410 /* Page select */
7411 page = reg >> GS40G_PAGE_SHIFT;
7412 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
7413
7414 /* Read reg */
7415 offset = reg & GS40G_OFFSET_MASK;
7416 rv = wm_gmii_i82544_readreg(self, phy, offset);
7417
7418 wm_put_swfw_semaphore(sc, sem);
7419 return rv;
7420 }
7421
7422 /*
7423 * wm_gmii_gs40g_writereg: [mii interface function]
7424 *
7425 * Write a PHY register on the I210 and I211.
7426 * This could be handled by the PHY layer if we didn't have to lock the
7427  * resource ...
7428 */
7429 static void
7430 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
7431 {
7432 struct wm_softc *sc = device_private(self);
7433 int sem;
7434 int page, offset;
7435
7436 /* Acquire semaphore */
7437 sem = swfwphysem[sc->sc_funcid];
7438 if (wm_get_swfw_semaphore(sc, sem)) {
7439 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7440 __func__);
7441 return;
7442 }
7443
7444 /* Page select */
7445 page = reg >> GS40G_PAGE_SHIFT;
7446 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
7447
7448 /* Write reg */
7449 offset = reg & GS40G_OFFSET_MASK;
7450 wm_gmii_i82544_writereg(self, phy, offset, val);
7451
7452 /* Release semaphore */
7453 wm_put_swfw_semaphore(sc, sem);
7454 }
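
/*
 * In the GS40G scheme above, one "reg" argument packs page and offset
 * together; a sketch using this driver's macros, reaching page 0,
 * register MII_BMCR:
 *
 *	reg    = (0 << GS40G_PAGE_SHIFT) | MII_BMCR;
 *	page   = reg >> GS40G_PAGE_SHIFT;	(page-select write)
 *	offset = reg & GS40G_OFFSET_MASK;	(actual MDIC access)
 */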
7455
7456 /*
7457 * wm_gmii_statchg: [mii interface function]
7458 *
7459 * Callback from MII layer when media changes.
7460 */
7461 static void
7462 wm_gmii_statchg(struct ifnet *ifp)
7463 {
7464 struct wm_softc *sc = ifp->if_softc;
7465 struct mii_data *mii = &sc->sc_mii;
7466
7467 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7468 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7469 sc->sc_fcrtl &= ~FCRTL_XONE;
7470
7471 /*
7472 * Get flow control negotiation result.
7473 */
7474 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7475 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7476 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7477 mii->mii_media_active &= ~IFM_ETH_FMASK;
7478 }
7479
7480 if (sc->sc_flowflags & IFM_FLOW) {
7481 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7482 sc->sc_ctrl |= CTRL_TFCE;
7483 sc->sc_fcrtl |= FCRTL_XONE;
7484 }
7485 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7486 sc->sc_ctrl |= CTRL_RFCE;
7487 }
7488
7489 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7490 DPRINTF(WM_DEBUG_LINK,
7491 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7492 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7493 } else {
7494 DPRINTF(WM_DEBUG_LINK,
7495 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7496 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7497 }
7498
7499 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7500 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7501 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7502 : WMREG_FCRTL, sc->sc_fcrtl);
7503 if (sc->sc_type == WM_T_80003) {
7504 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7505 case IFM_1000_T:
7506 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7507 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7508 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7509 break;
7510 default:
7511 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7512 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7513 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7514 break;
7515 }
7516 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7517 }
7518 }
7519
7520 /*
7521 * wm_kmrn_readreg:
7522 *
7523 * Read a kumeran register
7524 */
7525 static int
7526 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7527 {
7528 int rv;
7529
7530 if (sc->sc_flags & WM_F_LOCK_SWFW) {
7531 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7532 aprint_error_dev(sc->sc_dev,
7533 "%s: failed to get semaphore\n", __func__);
7534 return 0;
7535 }
7536 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
7537 if (wm_get_swfwhw_semaphore(sc)) {
7538 aprint_error_dev(sc->sc_dev,
7539 "%s: failed to get semaphore\n", __func__);
7540 return 0;
7541 }
7542 }
7543
7544 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7545 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7546 KUMCTRLSTA_REN);
7547 CSR_WRITE_FLUSH(sc);
7548 delay(2);
7549
7550 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7551
7552 if (sc->sc_flags & WM_F_LOCK_SWFW)
7553 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7554 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
7555 wm_put_swfwhw_semaphore(sc);
7556
7557 return rv;
7558 }
7559
7560 /*
7561 * wm_kmrn_writereg:
7562 *
7563 * Write a kumeran register
7564 */
7565 static void
7566 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7567 {
7568
7569 if (sc->sc_flags & WM_F_LOCK_SWFW) {
7570 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7571 aprint_error_dev(sc->sc_dev,
7572 "%s: failed to get semaphore\n", __func__);
7573 return;
7574 }
7575 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
7576 if (wm_get_swfwhw_semaphore(sc)) {
7577 aprint_error_dev(sc->sc_dev,
7578 "%s: failed to get semaphore\n", __func__);
7579 return;
7580 }
7581 }
7582
7583 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7584 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7585 (val & KUMCTRLSTA_MASK));
7586
7587 if (sc->sc_flags & WM_F_LOCK_SWFW)
7588 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7589 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
7590 wm_put_swfwhw_semaphore(sc);
7591 }
7592
7593 /* SGMII related */
7594
7595 /*
7596 * wm_sgmii_uses_mdio
7597 *
7598 * Check whether the transaction is to the internal PHY or the external
7599 * MDIO interface. Return true if it's MDIO.
7600 */
7601 static bool
7602 wm_sgmii_uses_mdio(struct wm_softc *sc)
7603 {
7604 uint32_t reg;
7605 bool ismdio = false;
7606
7607 switch (sc->sc_type) {
7608 case WM_T_82575:
7609 case WM_T_82576:
7610 reg = CSR_READ(sc, WMREG_MDIC);
7611 ismdio = ((reg & MDIC_DEST) != 0);
7612 break;
7613 case WM_T_82580:
7614 case WM_T_I350:
7615 case WM_T_I354:
7616 case WM_T_I210:
7617 case WM_T_I211:
7618 reg = CSR_READ(sc, WMREG_MDICNFG);
7619 ismdio = ((reg & MDICNFG_DEST) != 0);
7620 break;
7621 default:
7622 break;
7623 }
7624
7625 return ismdio;
7626 }
7627
7628 /*
7629 * wm_sgmii_readreg: [mii interface function]
7630 *
7631 * Read a PHY register on the SGMII
7632 * This could be handled by the PHY layer if we didn't have to lock the
7633 * resource ...
7634 */
7635 static int
7636 wm_sgmii_readreg(device_t self, int phy, int reg)
7637 {
7638 struct wm_softc *sc = device_private(self);
7639 uint32_t i2ccmd;
7640 int i, rv;
7641
7642 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7643 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7644 __func__);
7645 return 0;
7646 }
7647
7648 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7649 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7650 | I2CCMD_OPCODE_READ;
7651 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7652
7653 /* Poll the ready bit */
7654 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7655 delay(50);
7656 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7657 if (i2ccmd & I2CCMD_READY)
7658 break;
7659 }
7660 if ((i2ccmd & I2CCMD_READY) == 0)
7661 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7662 if ((i2ccmd & I2CCMD_ERROR) != 0)
7663 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7664
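	/*
	 * The I2C interface returns the data most-significant byte first;
	 * swap it to host order, mirroring the swap done for writes in
	 * wm_sgmii_writereg() below.
	 */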
7665 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7666
7667 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7668 return rv;
7669 }
7670
7671 /*
7672 * wm_sgmii_writereg: [mii interface function]
7673 *
7674 * Write a PHY register on the SGMII.
7675 * This could be handled by the PHY layer if we didn't have to lock the
7676 * resource ...
7677 */
7678 static void
7679 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7680 {
7681 struct wm_softc *sc = device_private(self);
7682 uint32_t i2ccmd;
7683 int i;
7684 int val_swapped;
7685
7686 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7687 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7688 __func__);
7689 return;
7690 }
7691 /* Swap the data bytes for the I2C interface */
7692 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
7693 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7694 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7695 | I2CCMD_OPCODE_WRITE | val_swapped;
7696 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7697
7698 /* Poll the ready bit */
7699 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7700 delay(50);
7701 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7702 if (i2ccmd & I2CCMD_READY)
7703 break;
7704 }
7705 if ((i2ccmd & I2CCMD_READY) == 0)
7706 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7707 if ((i2ccmd & I2CCMD_ERROR) != 0)
7708 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7709
7710 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7711 }
7712
7713 /* TBI related */
7714
7715 /*
7716 * wm_tbi_mediainit:
7717 *
7718 * Initialize media for use on 1000BASE-X devices.
7719 */
7720 static void
7721 wm_tbi_mediainit(struct wm_softc *sc)
7722 {
7723 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7724 const char *sep = "";
7725
7726 if (sc->sc_type < WM_T_82543)
7727 sc->sc_tipg = TIPG_WM_DFLT;
7728 else
7729 sc->sc_tipg = TIPG_LG_DFLT;
7730
7731 sc->sc_tbi_serdes_anegticks = 5;
7732
7733 /* Initialize our media structures */
7734 sc->sc_mii.mii_ifp = ifp;
7735 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7736
7737 if ((sc->sc_type >= WM_T_82575)
7738 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
7739 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
7740 wm_serdes_mediachange, wm_serdes_mediastatus);
7741 else
7742 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
7743 wm_tbi_mediachange, wm_tbi_mediastatus);
7744
7745 /*
7746 * SWD Pins:
7747 *
7748 * 0 = Link LED (output)
7749 * 1 = Loss Of Signal (input)
7750 */
7751 sc->sc_ctrl |= CTRL_SWDPIO(0);
7752
7753 /* XXX Perhaps this is only for TBI */
7754 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
7755 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
7756
7757 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7758 sc->sc_ctrl &= ~CTRL_LRST;
7759
7760 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7761
7762 #define ADD(ss, mm, dd) \
7763 do { \
7764 aprint_normal("%s%s", sep, ss); \
7765 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
7766 sep = ", "; \
7767 } while (/*CONSTCOND*/0)
7768
7769 aprint_normal_dev(sc->sc_dev, "");
7770
7771 /* Only 82545 is LX */
7772 if (sc->sc_type == WM_T_82545) {
7773 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
7774 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
7775 } else {
7776 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
7777 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
7778 }
7779 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
7780 aprint_normal("\n");
7781
7782 #undef ADD
7783
7784 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
7785 }
7786
7787 /*
7788 * wm_tbi_mediachange: [ifmedia interface function]
7789 *
7790 * Set hardware to newly-selected media on a 1000BASE-X device.
7791 */
7792 static int
7793 wm_tbi_mediachange(struct ifnet *ifp)
7794 {
7795 struct wm_softc *sc = ifp->if_softc;
7796 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7797 uint32_t status;
7798 int i;
7799
7800 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
7801 /* XXX need some work for >= 82571 and < 82575 */
7802 if (sc->sc_type < WM_T_82575)
7803 return 0;
7804 }
7805
7806 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
7807 || (sc->sc_type >= WM_T_82575))
7808 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
7809
7810 sc->sc_ctrl &= ~CTRL_LRST;
7811 sc->sc_txcw = TXCW_ANE;
7812 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7813 sc->sc_txcw |= TXCW_FD | TXCW_HD;
7814 else if (ife->ifm_media & IFM_FDX)
7815 sc->sc_txcw |= TXCW_FD;
7816 else
7817 sc->sc_txcw |= TXCW_HD;
7818
7819 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
7820 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
7821
7822 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
7823 device_xname(sc->sc_dev), sc->sc_txcw));
7824 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7825 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7826 CSR_WRITE_FLUSH(sc);
7827 delay(1000);
7828
7829 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
7830 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
7831
7832 /*
7833 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set if the
7834 * optics detect a signal, 0 if they don't.
7835 */
7836 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
7837 /* Have signal; wait for the link to come up. */
7838 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
7839 delay(10000);
7840 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
7841 break;
7842 }
7843
7844 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
7845 device_xname(sc->sc_dev),i));
7846
7847 status = CSR_READ(sc, WMREG_STATUS);
7848 DPRINTF(WM_DEBUG_LINK,
7849 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
7850 device_xname(sc->sc_dev),status, STATUS_LU));
7851 if (status & STATUS_LU) {
7852 /* Link is up. */
7853 DPRINTF(WM_DEBUG_LINK,
7854 ("%s: LINK: set media -> link up %s\n",
7855 device_xname(sc->sc_dev),
7856 (status & STATUS_FD) ? "FDX" : "HDX"));
7857
7858 /*
7859 * NOTE: the hardware updates TFCE and RFCE in CTRL
7860 * automatically, so re-read it into sc->sc_ctrl.
7861 */
7862 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7863 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7864 sc->sc_fcrtl &= ~FCRTL_XONE;
7865 if (status & STATUS_FD)
7866 sc->sc_tctl |=
7867 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7868 else
7869 sc->sc_tctl |=
7870 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7871 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
7872 sc->sc_fcrtl |= FCRTL_XONE;
7873 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7874 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7875 WMREG_OLD_FCRTL : WMREG_FCRTL,
7876 sc->sc_fcrtl);
7877 sc->sc_tbi_linkup = 1;
7878 } else {
7879 if (i == WM_LINKUP_TIMEOUT)
7880 wm_check_for_link(sc);
7881 /* Link is down. */
7882 DPRINTF(WM_DEBUG_LINK,
7883 ("%s: LINK: set media -> link down\n",
7884 device_xname(sc->sc_dev)));
7885 sc->sc_tbi_linkup = 0;
7886 }
7887 } else {
7888 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
7889 device_xname(sc->sc_dev)));
7890 sc->sc_tbi_linkup = 0;
7891 }
7892
7893 wm_tbi_serdes_set_linkled(sc);
7894
7895 return 0;
7896 }
7897
7898 /*
7899 * wm_tbi_mediastatus: [ifmedia interface function]
7900 *
7901 * Get the current interface media status on a 1000BASE-X device.
7902 */
7903 static void
7904 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7905 {
7906 struct wm_softc *sc = ifp->if_softc;
7907 uint32_t ctrl, status;
7908
7909 ifmr->ifm_status = IFM_AVALID;
7910 ifmr->ifm_active = IFM_ETHER;
7911
7912 status = CSR_READ(sc, WMREG_STATUS);
7913 if ((status & STATUS_LU) == 0) {
7914 ifmr->ifm_active |= IFM_NONE;
7915 return;
7916 }
7917
7918 ifmr->ifm_status |= IFM_ACTIVE;
7919 /* Only 82545 is LX */
7920 if (sc->sc_type == WM_T_82545)
7921 ifmr->ifm_active |= IFM_1000_LX;
7922 else
7923 ifmr->ifm_active |= IFM_1000_SX;
7924 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
7925 ifmr->ifm_active |= IFM_FDX;
7926 else
7927 ifmr->ifm_active |= IFM_HDX;
7928 ctrl = CSR_READ(sc, WMREG_CTRL);
7929 if (ctrl & CTRL_RFCE)
7930 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
7931 if (ctrl & CTRL_TFCE)
7932 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
7933 }
7934
7935 /* XXX TBI only */
7936 static int
7937 wm_check_for_link(struct wm_softc *sc)
7938 {
7939 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7940 uint32_t rxcw;
7941 uint32_t ctrl;
7942 uint32_t status;
7943 uint32_t sig;
7944
7945 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
7946 /* XXX need some work for >= 82571 */
7947 if (sc->sc_type >= WM_T_82571) {
7948 sc->sc_tbi_linkup = 1;
7949 return 0;
7950 }
7951 }
7952
7953 rxcw = CSR_READ(sc, WMREG_RXCW);
7954 ctrl = CSR_READ(sc, WMREG_CTRL);
7955 status = CSR_READ(sc, WMREG_STATUS);
7956
7957 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7958
7959 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7960 device_xname(sc->sc_dev), __func__,
7961 ((ctrl & CTRL_SWDPIN(1)) == sig),
7962 ((status & STATUS_LU) != 0),
7963 ((rxcw & RXCW_C) != 0)
7964 ));
7965
7966 /*
7967 * SWDPIN LU RXCW
7968 * 0 0 0
7969 * 0 0 1 (should not happen)
7970 * 0 1 0 (should not happen)
7971 * 0 1 1 (should not happen)
7972 * 1 0 0 Disable autonegotiation and force link up
7973 * 1 0 1 got /C/ but no link yet
7974 * 1 1 0 (link up)
7975 * 1 1 1 If IFM_AUTO, restart autonegotiation
7976 *
7977 */
7978 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7979 && ((status & STATUS_LU) == 0)
7980 && ((rxcw & RXCW_C) == 0)) {
7981 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7982 __func__));
7983 sc->sc_tbi_linkup = 0;
7984 /* Disable auto-negotiation in the TXCW register */
7985 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7986
7987 /*
7988 * Force link-up and also force full-duplex.
7989 *
7990 * NOTE: the hardware updated TFCE and RFCE in CTRL automatically,
7991 * so update the cached sc->sc_ctrl from it.
7992 */
7993 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7994 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7995 } else if (((status & STATUS_LU) != 0)
7996 && ((rxcw & RXCW_C) != 0)
7997 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7998 sc->sc_tbi_linkup = 1;
7999 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
8000 __func__));
8001 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8002 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
8003 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
8004 && ((rxcw & RXCW_C) != 0)) {
8005 DPRINTF(WM_DEBUG_LINK, ("/C/"));
8006 } else {
8007 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
8008 status));
8009 }
8010
8011 return 0;
8012 }
8013
8014 /*
8015 * wm_tbi_tick:
8016 *
8017 * Check the link on TBI devices.
8018 * This function acts as mii_tick().
8019 */
8020 static void
8021 wm_tbi_tick(struct wm_softc *sc)
8022 {
8023 struct mii_data *mii = &sc->sc_mii;
8024 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
8025 uint32_t status;
8026
8027 KASSERT(WM_TX_LOCKED(sc));
8028
8029 status = CSR_READ(sc, WMREG_STATUS);
8030
8031 /* XXX is this needed? */
8032 (void)CSR_READ(sc, WMREG_RXCW);
8033 (void)CSR_READ(sc, WMREG_CTRL);
8034
8035 /* set link status */
8036 if ((status & STATUS_LU) == 0) {
8037 DPRINTF(WM_DEBUG_LINK,
8038 ("%s: LINK: checklink -> down\n",
8039 device_xname(sc->sc_dev)));
8040 sc->sc_tbi_linkup = 0;
8041 } else if (sc->sc_tbi_linkup == 0) {
8042 DPRINTF(WM_DEBUG_LINK,
8043 ("%s: LINK: checklink -> up %s\n",
8044 device_xname(sc->sc_dev),
8045 (status & STATUS_FD) ? "FDX" : "HDX"));
8046 sc->sc_tbi_linkup = 1;
8047 sc->sc_tbi_serdes_ticks = 0;
8048 }
8049
8050 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
8051 goto setled;
8052
8053 if ((status & STATUS_LU) == 0) {
8054 sc->sc_tbi_linkup = 0;
8055 /* If the timer expired, retry autonegotiation */
8056 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8057 && (++sc->sc_tbi_serdes_ticks
8058 >= sc->sc_tbi_serdes_anegticks)) {
8059 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
8060 sc->sc_tbi_serdes_ticks = 0;
8061 /*
8062 * Reset the link, and let autonegotiation do
8063 * its thing
8064 */
8065 sc->sc_ctrl |= CTRL_LRST;
8066 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8067 CSR_WRITE_FLUSH(sc);
8068 delay(1000);
8069 sc->sc_ctrl &= ~CTRL_LRST;
8070 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8071 CSR_WRITE_FLUSH(sc);
8072 delay(1000);
8073 CSR_WRITE(sc, WMREG_TXCW,
8074 sc->sc_txcw & ~TXCW_ANE);
8075 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8076 }
8077 }
8078
8079 setled:
8080 wm_tbi_serdes_set_linkled(sc);
8081 }
8082
8083 /* SERDES related */
8084 static void
8085 wm_serdes_power_up_link_82575(struct wm_softc *sc)
8086 {
8087 uint32_t reg;
8088
8089 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
8090 && ((sc->sc_flags & WM_F_SGMII) == 0))
8091 return;
8092
8093 reg = CSR_READ(sc, WMREG_PCS_CFG);
8094 reg |= PCS_CFG_PCS_EN;
8095 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
8096
8097 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8098 reg &= ~CTRL_EXT_SWDPIN(3);
8099 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8100 CSR_WRITE_FLUSH(sc);
8101 }
8102
8103 static int
8104 wm_serdes_mediachange(struct ifnet *ifp)
8105 {
8106 struct wm_softc *sc = ifp->if_softc;
8107 bool pcs_autoneg = true; /* XXX */
8108 uint32_t ctrl_ext, pcs_lctl, reg;
8109
8110 /* XXX Currently, this function is not called on 8257[12] */
8111 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
8112 || (sc->sc_type >= WM_T_82575))
8113 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
8114
8115 wm_serdes_power_up_link_82575(sc);
8116
8117 sc->sc_ctrl |= CTRL_SLU;
8118
8119 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
8120 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
8121
8122 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8123 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
8124 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
8125 case CTRL_EXT_LINK_MODE_SGMII:
8126 pcs_autoneg = true;
8127 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
8128 break;
8129 case CTRL_EXT_LINK_MODE_1000KX:
8130 pcs_autoneg = false;
8131 /* FALLTHROUGH */
8132 default:
8133 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)){
8134 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
8135 pcs_autoneg = false;
8136 }
8137 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
8138 | CTRL_FRCFDX;
8139 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
8140 }
8141 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8142
8143 if (pcs_autoneg) {
8144 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
8145 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
8146
8147 reg = CSR_READ(sc, WMREG_PCS_ANADV);
8148 		/* Always advertise both symmetric and asymmetric PAUSE */
8149 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
8150 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
8151 } else
8152 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
8153
8154 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
8155
8157 return 0;
8158 }
8159
8160 static void
8161 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8162 {
8163 struct wm_softc *sc = ifp->if_softc;
8164 struct mii_data *mii = &sc->sc_mii;
8165 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8166 uint32_t pcs_adv, pcs_lpab, reg;
8167
8168 ifmr->ifm_status = IFM_AVALID;
8169 ifmr->ifm_active = IFM_ETHER;
8170
8171 /* Check PCS */
8172 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8173 if ((reg & PCS_LSTS_LINKOK) == 0) {
8174 ifmr->ifm_active |= IFM_NONE;
8175 sc->sc_tbi_linkup = 0;
8176 goto setled;
8177 }
8178
8179 sc->sc_tbi_linkup = 1;
8180 ifmr->ifm_status |= IFM_ACTIVE;
8181 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
8182 if ((reg & PCS_LSTS_FDX) != 0)
8183 ifmr->ifm_active |= IFM_FDX;
8184 else
8185 ifmr->ifm_active |= IFM_HDX;
8186 mii->mii_media_active &= ~IFM_ETH_FMASK;
8187 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
8188 /* Check flow */
8189 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8190 if ((reg & PCS_LSTS_AN_COMP) == 0) {
8191 printf("XXX LINKOK but not ACOMP\n");
8192 goto setled;
8193 }
8194 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
8195 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
8196 printf("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab);
8197 if ((pcs_adv & TXCW_SYM_PAUSE)
8198 && (pcs_lpab & TXCW_SYM_PAUSE)) {
8199 mii->mii_media_active |= IFM_FLOW
8200 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
8201 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
8202 && (pcs_adv & TXCW_ASYM_PAUSE)
8203 && (pcs_lpab & TXCW_SYM_PAUSE)
8204 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
8205 mii->mii_media_active |= IFM_FLOW
8206 | IFM_ETH_TXPAUSE;
8207 } else if ((pcs_adv & TXCW_SYM_PAUSE)
8208 && (pcs_adv & TXCW_ASYM_PAUSE)
8209 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
8210 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
8211 mii->mii_media_active |= IFM_FLOW
8212 | IFM_ETH_RXPAUSE;
8213 } else {
8214 }
8215 }
8216 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8217 | (mii->mii_media_active & IFM_ETH_FMASK);
8218 setled:
8219 wm_tbi_serdes_set_linkled(sc);
8220 }
8221
8222 /*
8223 * wm_serdes_tick:
8224 *
8225 * Check the link on serdes devices.
8226 */
8227 static void
8228 wm_serdes_tick(struct wm_softc *sc)
8229 {
8230 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8231 struct mii_data *mii = &sc->sc_mii;
8232 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
8233 uint32_t reg;
8234
8235 KASSERT(WM_TX_LOCKED(sc));
8236
8237 mii->mii_media_status = IFM_AVALID;
8238 mii->mii_media_active = IFM_ETHER;
8239
8240 /* Check PCS */
8241 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8242 if ((reg & PCS_LSTS_LINKOK) != 0) {
8243 mii->mii_media_status |= IFM_ACTIVE;
8244 sc->sc_tbi_linkup = 1;
8245 sc->sc_tbi_serdes_ticks = 0;
8246 mii->mii_media_active |= IFM_1000_SX; /* XXX */
8247 if ((reg & PCS_LSTS_FDX) != 0)
8248 mii->mii_media_active |= IFM_FDX;
8249 else
8250 mii->mii_media_active |= IFM_HDX;
8251 } else {
8252 mii->mii_media_status |= IFM_NONE;
8253 sc->sc_tbi_linkup = 0;
8254 /* If the timer expired, retry autonegotiation */
8255 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8256 && (++sc->sc_tbi_serdes_ticks
8257 >= sc->sc_tbi_serdes_anegticks)) {
8258 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
8259 sc->sc_tbi_serdes_ticks = 0;
8260 /* XXX */
8261 wm_serdes_mediachange(ifp);
8262 }
8263 }
8264
8265 wm_tbi_serdes_set_linkled(sc);
8266 }
8267
8268 /* SFP related */
8269
8270 static int
8271 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
8272 {
8273 uint32_t i2ccmd;
8274 int i;
8275
8276 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
8277 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8278
8279 /* Poll the ready bit */
8280 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8281 delay(50);
8282 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8283 if (i2ccmd & I2CCMD_READY)
8284 break;
8285 }
8286 if ((i2ccmd & I2CCMD_READY) == 0)
8287 return -1;
8288 if ((i2ccmd & I2CCMD_ERROR) != 0)
8289 return -1;
8290
8291 *data = i2ccmd & 0x00ff;
8292
8293 return 0;
8294 }
8295
8296 static uint32_t
8297 wm_sfp_get_media_type(struct wm_softc *sc)
8298 {
8299 uint32_t ctrl_ext;
8300 uint8_t val = 0;
8301 int timeout = 3;
8302 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
8303 int rv = -1;
8304
8305 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8306 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
8307 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
8308 CSR_WRITE_FLUSH(sc);
8309
8310 /* Read SFP module data */
8311 while (timeout) {
8312 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
8313 if (rv == 0)
8314 break;
8315 delay(100*1000); /* XXX too big */
8316 timeout--;
8317 }
8318 if (rv != 0)
8319 goto out;
8320 switch (val) {
8321 case SFF_SFP_ID_SFF:
8322 aprint_normal_dev(sc->sc_dev,
8323 "Module/Connector soldered to board\n");
8324 break;
8325 case SFF_SFP_ID_SFP:
8326 aprint_normal_dev(sc->sc_dev, "SFP\n");
8327 break;
8328 case SFF_SFP_ID_UNKNOWN:
8329 goto out;
8330 default:
8331 break;
8332 }
8333
8334 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
8335 if (rv != 0) {
8336 goto out;
8337 }
8338
8339 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
8340 mediatype = WM_MEDIATYPE_SERDES;
8341 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
8342 sc->sc_flags |= WM_F_SGMII;
8343 mediatype = WM_MEDIATYPE_COPPER;
8344 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
8345 sc->sc_flags |= WM_F_SGMII;
8346 mediatype = WM_MEDIATYPE_SERDES;
8347 }
8348
8349 out:
8350 /* Restore I2C interface setting */
8351 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8352
8353 return mediatype;
8354 }

8355 /*
8356 * NVM related.
8357 * Microwire, SPI (w/wo EERD) and Flash.
8358 */
8359
8360 /* Both spi and uwire */
8361
8362 /*
8363 * wm_eeprom_sendbits:
8364 *
8365 * Send a series of bits to the EEPROM.
8366 */
8367 static void
8368 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
8369 {
8370 uint32_t reg;
8371 int x;
8372
8373 reg = CSR_READ(sc, WMREG_EECD);
8374
8375 for (x = nbits; x > 0; x--) {
8376 if (bits & (1U << (x - 1)))
8377 reg |= EECD_DI;
8378 else
8379 reg &= ~EECD_DI;
8380 CSR_WRITE(sc, WMREG_EECD, reg);
8381 CSR_WRITE_FLUSH(sc);
8382 delay(2);
8383 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
8384 CSR_WRITE_FLUSH(sc);
8385 delay(2);
8386 CSR_WRITE(sc, WMREG_EECD, reg);
8387 CSR_WRITE_FLUSH(sc);
8388 delay(2);
8389 }
8390 }
8391
8392 /*
8393 * wm_eeprom_recvbits:
8394 *
8395 * Receive a series of bits from the EEPROM.
8396 */
8397 static void
8398 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
8399 {
8400 uint32_t reg, val;
8401 int x;
8402
8403 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
8404
8405 val = 0;
8406 for (x = nbits; x > 0; x--) {
8407 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
8408 CSR_WRITE_FLUSH(sc);
8409 delay(2);
8410 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
8411 val |= (1U << (x - 1));
8412 CSR_WRITE(sc, WMREG_EECD, reg);
8413 CSR_WRITE_FLUSH(sc);
8414 delay(2);
8415 }
8416 *valp = val;
8417 }
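
/*
 * Usage sketch: a full Microwire READ transaction is composed from the
 * two helpers above, e.g. for word 5:
 *
 *	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
 *	wm_eeprom_sendbits(sc, 5, sc->sc_nvm_addrbits);
 *	wm_eeprom_recvbits(sc, &val, 16);
 *
 * This is exactly the sequence wm_nvm_read_uwire() below performs for
 * each word, bracketed by chip-select manipulation.
 */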
8418
8419 /* Microwire */
8420
8421 /*
8422 * wm_nvm_read_uwire:
8423 *
8424 * Read a word from the EEPROM using the MicroWire protocol.
8425 */
8426 static int
8427 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8428 {
8429 uint32_t reg, val;
8430 int i;
8431
8432 for (i = 0; i < wordcnt; i++) {
8433 /* Clear SK and DI. */
8434 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
8435 CSR_WRITE(sc, WMREG_EECD, reg);
8436
8437 /*
8438 * XXX: workaround for a bug in qemu-0.12.x and prior
8439 * and Xen.
8440 *
8441 		 * We use this workaround only for the 82540 because qemu's
8442 		 * e1000 acts as an 82540.
8443 */
8444 if (sc->sc_type == WM_T_82540) {
8445 reg |= EECD_SK;
8446 CSR_WRITE(sc, WMREG_EECD, reg);
8447 reg &= ~EECD_SK;
8448 CSR_WRITE(sc, WMREG_EECD, reg);
8449 CSR_WRITE_FLUSH(sc);
8450 delay(2);
8451 }
8452 /* XXX: end of workaround */
8453
8454 /* Set CHIP SELECT. */
8455 reg |= EECD_CS;
8456 CSR_WRITE(sc, WMREG_EECD, reg);
8457 CSR_WRITE_FLUSH(sc);
8458 delay(2);
8459
8460 /* Shift in the READ command. */
8461 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
8462
8463 /* Shift in address. */
8464 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
8465
8466 /* Shift out the data. */
8467 wm_eeprom_recvbits(sc, &val, 16);
8468 data[i] = val & 0xffff;
8469
8470 /* Clear CHIP SELECT. */
8471 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
8472 CSR_WRITE(sc, WMREG_EECD, reg);
8473 CSR_WRITE_FLUSH(sc);
8474 delay(2);
8475 }
8476
8477 return 0;
8478 }
8479
8480 /* SPI */
8481
8482 /*
8483 * Set SPI and FLASH related information from the EECD register.
8484 * For 82541 and 82547, the word size is taken from EEPROM.
8485 */
8486 static int
8487 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
8488 {
8489 int size;
8490 uint32_t reg;
8491 uint16_t data;
8492
8493 reg = CSR_READ(sc, WMREG_EECD);
8494 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
8495
8496 /* Read the size of NVM from EECD by default */
8497 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8498 switch (sc->sc_type) {
8499 case WM_T_82541:
8500 case WM_T_82541_2:
8501 case WM_T_82547:
8502 case WM_T_82547_2:
8503 /* Set dummy value to access EEPROM */
8504 sc->sc_nvm_wordsize = 64;
8505 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
8506 reg = data;
8507 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8508 if (size == 0)
8509 size = 6; /* 64 word size */
8510 else
8511 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
8512 break;
8513 case WM_T_80003:
8514 case WM_T_82571:
8515 case WM_T_82572:
8516 case WM_T_82573: /* SPI case */
8517 case WM_T_82574: /* SPI case */
8518 case WM_T_82583: /* SPI case */
8519 size += NVM_WORD_SIZE_BASE_SHIFT;
8520 if (size > 14)
8521 size = 14;
8522 break;
8523 case WM_T_82575:
8524 case WM_T_82576:
8525 case WM_T_82580:
8526 case WM_T_I350:
8527 case WM_T_I354:
8528 case WM_T_I210:
8529 case WM_T_I211:
8530 size += NVM_WORD_SIZE_BASE_SHIFT;
8531 if (size > 15)
8532 size = 15;
8533 break;
8534 default:
8535 aprint_error_dev(sc->sc_dev,
8536 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
8537 return -1;
8538 break;
8539 }
8540
8541 sc->sc_nvm_wordsize = 1 << size;
8542
8543 return 0;
8544 }
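
/*
 * Worked example of the computation above (assuming, hypothetically,
 * NVM_WORD_SIZE_BASE_SHIFT is 6): an 82571 whose EECD size field reads
 * 2 gets size = 2 + 6 = 8, so sc_nvm_wordsize = 1 << 8 = 256 words.
 */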
8545
8546 /*
8547 * wm_nvm_ready_spi:
8548 *
8549 * Wait for a SPI EEPROM to be ready for commands.
8550 */
8551 static int
8552 wm_nvm_ready_spi(struct wm_softc *sc)
8553 {
8554 uint32_t val;
8555 int usec;
8556
8557 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
8558 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
8559 wm_eeprom_recvbits(sc, &val, 8);
8560 if ((val & SPI_SR_RDY) == 0)
8561 break;
8562 }
8563 if (usec >= SPI_MAX_RETRIES) {
8564 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
8565 return 1;
8566 }
8567 return 0;
8568 }
8569
8570 /*
8571 * wm_nvm_read_spi:
8572 *
8573 * Read a word from the EEPROM using the SPI protocol.
8574 */
8575 static int
8576 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8577 {
8578 uint32_t reg, val;
8579 int i;
8580 uint8_t opc;
8581
8582 /* Clear SK and CS. */
8583 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
8584 CSR_WRITE(sc, WMREG_EECD, reg);
8585 CSR_WRITE_FLUSH(sc);
8586 delay(2);
8587
8588 if (wm_nvm_ready_spi(sc))
8589 return 1;
8590
8591 /* Toggle CS to flush commands. */
8592 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
8593 CSR_WRITE_FLUSH(sc);
8594 delay(2);
8595 CSR_WRITE(sc, WMREG_EECD, reg);
8596 CSR_WRITE_FLUSH(sc);
8597 delay(2);
8598
8599 opc = SPI_OPC_READ;
8600 if (sc->sc_nvm_addrbits == 8 && word >= 128)
8601 opc |= SPI_OPC_A8;
8602
8603 wm_eeprom_sendbits(sc, opc, 8);
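	/* NVM words are two bytes, so convert the word index to a byte address */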
8604 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
8605
8606 for (i = 0; i < wordcnt; i++) {
8607 wm_eeprom_recvbits(sc, &val, 16);
8608 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
8609 }
8610
8611 /* Raise CS and clear SK. */
8612 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
8613 CSR_WRITE(sc, WMREG_EECD, reg);
8614 CSR_WRITE_FLUSH(sc);
8615 delay(2);
8616
8617 return 0;
8618 }
8619
8620 /* Using with EERD */
8621
8622 static int
8623 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
8624 {
8625 uint32_t attempts = 100000;
8626 uint32_t i, reg = 0;
8627 int32_t done = -1;
8628
8629 for (i = 0; i < attempts; i++) {
8630 reg = CSR_READ(sc, rw);
8631
8632 if (reg & EERD_DONE) {
8633 done = 0;
8634 break;
8635 }
8636 delay(5);
8637 }
8638
8639 return done;
8640 }
8641
8642 static int
8643 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
8644 uint16_t *data)
8645 {
8646 int i, eerd = 0;
8647 int error = 0;
8648
8649 for (i = 0; i < wordcnt; i++) {
8650 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
8651
8652 CSR_WRITE(sc, WMREG_EERD, eerd);
8653 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
8654 if (error != 0)
8655 break;
8656
8657 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
8658 }
8659
8660 return error;
8661 }
8662
8663 /* Flash */
8664
8665 static int
8666 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
8667 {
8668 uint32_t eecd;
8669 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
8670 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
8671 uint8_t sig_byte = 0;
8672
8673 switch (sc->sc_type) {
8674 case WM_T_ICH8:
8675 case WM_T_ICH9:
8676 eecd = CSR_READ(sc, WMREG_EECD);
8677 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
8678 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
8679 return 0;
8680 }
8681 /* FALLTHROUGH */
8682 default:
8683 /* Default to 0 */
8684 *bank = 0;
8685
8686 /* Check bank 0 */
8687 wm_read_ich8_byte(sc, act_offset, &sig_byte);
8688 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8689 *bank = 0;
8690 return 0;
8691 }
8692
8693 /* Check bank 1 */
8694 wm_read_ich8_byte(sc, act_offset + bank1_offset,
8695 &sig_byte);
8696 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8697 *bank = 1;
8698 return 0;
8699 }
8700 }
8701
8702 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
8703 device_xname(sc->sc_dev)));
8704 return -1;
8705 }
8706
8707 /******************************************************************************
8708 * This function does initial flash setup so that a new read/write/erase cycle
8709 * can be started.
8710 *
8711 * sc - The pointer to the hw structure
8712 ****************************************************************************/
8713 static int32_t
8714 wm_ich8_cycle_init(struct wm_softc *sc)
8715 {
8716 uint16_t hsfsts;
8717 int32_t error = 1;
8718 int32_t i = 0;
8719
8720 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8721
8722 	/* Check the Flash Descriptor Valid bit in the HW status register */
8723 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
8724 return error;
8725 }
8726
8727 	/* Clear FCERR and DAEL in the HW status register by writing 1s */
8729 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
8730
8731 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8732
8733 	/*
8734 	 * Either we should have a hardware SPI cycle-in-progress bit to check
8735 	 * against in order to start a new cycle, or the FDONE bit should be
8736 	 * changed in the hardware so that it reads as 1 after a hardware
8737 	 * reset, which could then be used to tell whether a cycle is in
8738 	 * progress or has completed. We should also have a software semaphore
8739 	 * mechanism guarding FDONE or the cycle-in-progress bit so that two
8740 	 * threads' accesses to those bits are serialized and they don't
8741 	 * start a cycle at the same time.
8742 	 */
8743
8744 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8745 /*
8746 * There is no cycle running at present, so we can start a
8747 * cycle
8748 */
8749
8750 /* Begin by setting Flash Cycle Done. */
8751 hsfsts |= HSFSTS_DONE;
8752 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8753 error = 0;
8754 } else {
8755 /*
8756 * otherwise poll for sometime so the current cycle has a
8757 * chance to end before giving up.
8758 */
8759 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
8760 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8761 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8762 error = 0;
8763 break;
8764 }
8765 delay(1);
8766 }
8767 if (error == 0) {
8768 		/*
8769 		 * The previous cycle finished within the timeout;
8770 		 * now set the Flash Cycle Done.
8771 		 */
8772 hsfsts |= HSFSTS_DONE;
8773 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8774 }
8775 }
8776 return error;
8777 }
8778
8779 /******************************************************************************
8780 * This function starts a flash cycle and waits for its completion
8781 *
8782 * sc - The pointer to the hw structure
8783 ****************************************************************************/
8784 static int32_t
8785 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
8786 {
8787 uint16_t hsflctl;
8788 uint16_t hsfsts;
8789 int32_t error = 1;
8790 uint32_t i = 0;
8791
8792 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
8793 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8794 hsflctl |= HSFCTL_GO;
8795 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8796
8797 /* Wait till FDONE bit is set to 1 */
8798 do {
8799 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8800 if (hsfsts & HSFSTS_DONE)
8801 break;
8802 delay(1);
8803 i++;
8804 } while (i < timeout);
8805 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
8806 error = 0;
8807
8808 return error;
8809 }
8810
8811 /******************************************************************************
8812 * Reads a byte or word from the NVM using the ICH8 flash access registers.
8813 *
8814 * sc - The pointer to the hw structure
8815 * index - The index of the byte or word to read.
8816 * size - Size of data to read, 1=byte 2=word
8817 * data - Pointer to the word to store the value read.
8818 *****************************************************************************/
8819 static int32_t
8820 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
8821 uint32_t size, uint16_t *data)
8822 {
8823 uint16_t hsfsts;
8824 uint16_t hsflctl;
8825 uint32_t flash_linear_address;
8826 uint32_t flash_data = 0;
8827 int32_t error = 1;
8828 int32_t count = 0;
8829
8830 if (size < 1 || size > 2 || data == 0x0 ||
8831 index > ICH_FLASH_LINEAR_ADDR_MASK)
8832 return error;
8833
8834 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
8835 sc->sc_ich8_flash_base;
8836
8837 do {
8838 delay(1);
8839 /* Steps */
8840 error = wm_ich8_cycle_init(sc);
8841 if (error)
8842 break;
8843
8844 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8845 		/* A byte count field of 0/1 requests a 1- or 2-byte transfer. */
8846 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
8847 & HSFCTL_BCOUNT_MASK;
8848 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
8849 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8850
8851 /*
8852 * Write the last 24 bits of index into Flash Linear address
8853 * field in Flash Address
8854 */
8855 		/* TODO: maybe check the index against the size of the flash */
8856
8857 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
8858
8859 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
8860
8861 		/*
8862 		 * If FCERR is set, clear it and retry the whole sequence a
8863 		 * few more times; otherwise read the result out of Flash
8864 		 * Data0, which returns the data least significant byte
8865 		 * first.
8866 		 */
8867 if (error == 0) {
8868 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
8869 if (size == 1)
8870 *data = (uint8_t)(flash_data & 0x000000FF);
8871 else if (size == 2)
8872 *data = (uint16_t)(flash_data & 0x0000FFFF);
8873 break;
8874 } else {
8875 /*
8876 * If we've gotten here, then things are probably
8877 * completely hosed, but if the error condition is
8878 * detected, it won't hurt to give it another try...
8879 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
8880 */
8881 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8882 if (hsfsts & HSFSTS_ERR) {
8883 /* Repeat for some time before giving up. */
8884 continue;
8885 } else if ((hsfsts & HSFSTS_DONE) == 0)
8886 break;
8887 }
8888 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8889
8890 return error;
8891 }
8892
8893 /******************************************************************************
8894 * Reads a single byte from the NVM using the ICH8 flash access registers.
8895 *
8896 * sc - pointer to wm_hw structure
8897 * index - The index of the byte to read.
8898 * data - Pointer to a byte to store the value read.
8899 *****************************************************************************/
8900 static int32_t
8901 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
8902 {
8903 int32_t status;
8904 uint16_t word = 0;
8905
8906 status = wm_read_ich8_data(sc, index, 1, &word);
8907 if (status == 0)
8908 *data = (uint8_t)word;
8909 else
8910 *data = 0;
8911
8912 return status;
8913 }
8914
8915 /******************************************************************************
8916 * Reads a word from the NVM using the ICH8 flash access registers.
8917 *
8918 * sc - pointer to wm_hw structure
8919 * index - The starting byte index of the word to read.
8920 * data - Pointer to a word to store the value read.
8921 *****************************************************************************/
8922 static int32_t
8923 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8924 {
8925 int32_t status;
8926
8927 status = wm_read_ich8_data(sc, index, 2, data);
8928 return status;
8929 }
8930
8931 /******************************************************************************
8932 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
8933 * register.
8934 *
8935 * sc - Struct containing variables accessed by shared code
8936 * offset - offset of word in the EEPROM to read
8937 * data - word read from the EEPROM
8938 * words - number of words to read
8939 *****************************************************************************/
8940 static int
8941 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
8942 {
8943 int32_t error = 0;
8944 uint32_t flash_bank = 0;
8945 uint32_t act_offset = 0;
8946 uint32_t bank_offset = 0;
8947 uint16_t word = 0;
8948 uint16_t i = 0;
8949
8950 /*
8951 * We need to know which is the valid flash bank. In the event
8952 * that we didn't allocate eeprom_shadow_ram, we may not be
8953 * managing flash_bank. So it cannot be trusted and needs
8954 * to be updated with each read.
8955 */
8956 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
8957 if (error) {
8958 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
8959 device_xname(sc->sc_dev)));
8960 flash_bank = 0;
8961 }
8962
8963 /*
8964 * Adjust offset appropriately if we're on bank 1 - adjust for word
8965 * size
8966 */
8967 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
8968
8969 error = wm_get_swfwhw_semaphore(sc);
8970 if (error) {
8971 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8972 __func__);
8973 return error;
8974 }
8975
8976 for (i = 0; i < words; i++) {
8977 /* The NVM part needs a byte offset, hence * 2 */
8978 act_offset = bank_offset + ((offset + i) * 2);
8979 error = wm_read_ich8_word(sc, act_offset, &word);
8980 if (error) {
8981 aprint_error_dev(sc->sc_dev,
8982 "%s: failed to read NVM\n", __func__);
8983 break;
8984 }
8985 data[i] = word;
8986 }
8987
8988 wm_put_swfwhw_semaphore(sc);
8989 return error;
8990 }
8991
8992 /* iNVM */
8993
8994 static int
8995 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
8996 {
8997 int32_t rv = 0;
8998 uint32_t invm_dword;
8999 uint16_t i;
9000 uint8_t record_type, word_address;
9001
9002 for (i = 0; i < INVM_SIZE; i++) {
9003 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
9004 /* Get record type */
9005 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
9006 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
9007 break;
9008 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
9009 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
9010 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
9011 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
9012 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
9013 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
9014 if (word_address == address) {
9015 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
9016 rv = 0;
9017 break;
9018 }
9019 }
9020 }
9021
9022 return rv;
9023 }
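
/*
 * The loop above walks the iNVM's 32-bit records in order: CSR- and
 * RSA-key-autoload records are skipped by advancing the index past their
 * data, and the scan stops at the first uninitialized record or at a
 * word-autoload record whose address matches the one requested.
 */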
9024
9025 static int
9026 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
9027 {
9028 int rv = 0;
9029 int i;
9030
9031 for (i = 0; i < words; i++) {
9032 switch (offset + i) {
9033 case NVM_OFF_MACADDR:
9034 case NVM_OFF_MACADDR1:
9035 case NVM_OFF_MACADDR2:
9036 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
9037 if (rv != 0) {
9038 data[i] = 0xffff;
9039 rv = -1;
9040 }
9041 break;
9042 case NVM_OFF_CFG2:
9043 rv = wm_nvm_read_word_invm(sc, offset, data);
9044 if (rv != 0) {
9045 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
9046 rv = 0;
9047 }
9048 break;
9049 case NVM_OFF_CFG4:
9050 rv = wm_nvm_read_word_invm(sc, offset, data);
9051 if (rv != 0) {
9052 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
9053 rv = 0;
9054 }
9055 break;
9056 case NVM_OFF_LED_1_CFG:
9057 rv = wm_nvm_read_word_invm(sc, offset, data);
9058 if (rv != 0) {
9059 *data = NVM_LED_1_CFG_DEFAULT_I211;
9060 rv = 0;
9061 }
9062 break;
9063 case NVM_OFF_LED_0_2_CFG:
9064 rv = wm_nvm_read_word_invm(sc, offset, data);
9065 if (rv != 0) {
9066 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
9067 rv = 0;
9068 }
9069 break;
9070 case NVM_OFF_ID_LED_SETTINGS:
9071 rv = wm_nvm_read_word_invm(sc, offset, data);
9072 if (rv != 0) {
9073 *data = ID_LED_RESERVED_FFFF;
9074 rv = 0;
9075 }
9076 break;
9077 default:
9078 DPRINTF(WM_DEBUG_NVM,
9079 ("NVM word 0x%02x is not mapped.\n", offset));
9080 *data = NVM_RESERVED_WORD;
9081 break;
9082 }
9083 }
9084
9085 return rv;
9086 }
9087
9088 /* Lock, detecting NVM type, validate checksum, version and read */
9089
9090 /*
9091 * wm_nvm_acquire:
9092 *
9093 * Perform the EEPROM handshake required on some chips.
9094 */
9095 static int
9096 wm_nvm_acquire(struct wm_softc *sc)
9097 {
9098 uint32_t reg;
9099 int x;
9100 int ret = 0;
9101
9102 	/* Flash-type NVM needs no handshake, so this always succeeds */
9103 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
9104 return 0;
9105
9106 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
9107 ret = wm_get_swfwhw_semaphore(sc);
9108 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
9109 /* This will also do wm_get_swsm_semaphore() if needed */
9110 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
9111 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
9112 ret = wm_get_swsm_semaphore(sc);
9113 }
9114
9115 if (ret) {
9116 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9117 __func__);
9118 return 1;
9119 }
9120
9121 if (sc->sc_flags & WM_F_LOCK_EECD) {
9122 reg = CSR_READ(sc, WMREG_EECD);
9123
9124 /* Request EEPROM access. */
9125 reg |= EECD_EE_REQ;
9126 CSR_WRITE(sc, WMREG_EECD, reg);
9127
9128 /* ..and wait for it to be granted. */
9129 for (x = 0; x < 1000; x++) {
9130 reg = CSR_READ(sc, WMREG_EECD);
9131 if (reg & EECD_EE_GNT)
9132 break;
9133 delay(5);
9134 }
9135 if ((reg & EECD_EE_GNT) == 0) {
9136 aprint_error_dev(sc->sc_dev,
9137 "could not acquire EEPROM GNT\n");
9138 reg &= ~EECD_EE_REQ;
9139 CSR_WRITE(sc, WMREG_EECD, reg);
9140 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9141 wm_put_swfwhw_semaphore(sc);
9142 if (sc->sc_flags & WM_F_LOCK_SWFW)
9143 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
9144 else if (sc->sc_flags & WM_F_LOCK_SWSM)
9145 wm_put_swsm_semaphore(sc);
9146 return 1;
9147 }
9148 }
9149
9150 return 0;
9151 }
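
/*
 * Locking order used above: take the chip-specific outer lock first
 * (EXTCNF, SWFW or SWSM, as flagged for the part), then, on parts with
 * WM_F_LOCK_EECD, perform the EECD request/grant handshake.
 * wm_nvm_release() below undoes this in the reverse order.
 */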
9152
9153 /*
9154 * wm_nvm_release:
9155 *
9156 * Release the EEPROM mutex.
9157 */
9158 static void
9159 wm_nvm_release(struct wm_softc *sc)
9160 {
9161 uint32_t reg;
9162
9163 	/* Flash-type NVM needs no handshake, so there is nothing to release */
9164 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
9165 return;
9166
9167 if (sc->sc_flags & WM_F_LOCK_EECD) {
9168 reg = CSR_READ(sc, WMREG_EECD);
9169 reg &= ~EECD_EE_REQ;
9170 CSR_WRITE(sc, WMREG_EECD, reg);
9171 }
9172
9173 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9174 wm_put_swfwhw_semaphore(sc);
9175 if (sc->sc_flags & WM_F_LOCK_SWFW)
9176 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
9177 else if (sc->sc_flags & WM_F_LOCK_SWSM)
9178 wm_put_swsm_semaphore(sc);
9179 }
9180
9181 static int
9182 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
9183 {
9184 uint32_t eecd = 0;
9185
9186 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
9187 || sc->sc_type == WM_T_82583) {
9188 eecd = CSR_READ(sc, WMREG_EECD);
9189
9190 /* Isolate bits 15 & 16 */
9191 eecd = ((eecd >> 15) & 0x03);
9192
9193 /* If both bits are set, device is Flash type */
9194 if (eecd == 0x03)
9195 return 0;
9196 }
9197 return 1;
9198 }
9199
9200 static int
9201 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
9202 {
9203 uint32_t eec;
9204
9205 eec = CSR_READ(sc, WMREG_EEC);
9206 if ((eec & EEC_FLASH_DETECTED) != 0)
9207 return 1;
9208
9209 return 0;
9210 }
9211
9212 /*
9213 * wm_nvm_validate_checksum
9214 *
9215 * The checksum is defined as the sum of the first 64 (16 bit) words.
9216 */
9217 static int
9218 wm_nvm_validate_checksum(struct wm_softc *sc)
9219 {
9220 uint16_t checksum;
9221 uint16_t eeprom_data;
9222 #ifdef WM_DEBUG
9223 uint16_t csum_wordaddr, valid_checksum;
9224 #endif
9225 int i;
9226
9227 checksum = 0;
9228
9229 /* Don't check for I211 */
9230 if (sc->sc_type == WM_T_I211)
9231 return 0;
9232
9233 #ifdef WM_DEBUG
9234 if (sc->sc_type == WM_T_PCH_LPT) {
9235 csum_wordaddr = NVM_OFF_COMPAT;
9236 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
9237 } else {
9238 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
9239 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
9240 }
9241
9242 /* Dump EEPROM image for debug */
9243 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9244 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9245 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
9246 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
9247 if ((eeprom_data & valid_checksum) == 0) {
9248 DPRINTF(WM_DEBUG_NVM,
9249 ("%s: NVM need to be updated (%04x != %04x)\n",
9250 device_xname(sc->sc_dev), eeprom_data,
9251 valid_checksum));
9252 }
9253 }
9254
9255 if ((wm_debug & WM_DEBUG_NVM) != 0) {
9256 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
9257 for (i = 0; i < NVM_SIZE; i++) {
9258 if (wm_nvm_read(sc, i, 1, &eeprom_data))
9259 printf("XXXX ");
9260 else
9261 printf("%04hx ", eeprom_data);
9262 if (i % 8 == 7)
9263 printf("\n");
9264 }
9265 }
9266
9267 #endif /* WM_DEBUG */
9268
9269 for (i = 0; i < NVM_SIZE; i++) {
9270 if (wm_nvm_read(sc, i, 1, &eeprom_data))
9271 return 1;
9272 checksum += eeprom_data;
9273 }
9274
9275 if (checksum != (uint16_t) NVM_CHECKSUM) {
9276 #ifdef WM_DEBUG
9277 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
9278 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
9279 #endif
9280 }
9281
9282 return 0;
9283 }
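
/*
 * Note on the check above: the words should sum to NVM_CHECKSUM
 * (conventionally 0xbaba on this hardware family), but as the code
 * stands a mismatch is only reported under WM_DEBUG and is never
 * treated as fatal.
 */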
9284
9285 static void
9286 wm_nvm_version(struct wm_softc *sc)
9287 {
9288 uint16_t major, minor, build, patch;
9289 uint16_t uid0, uid1;
9290 uint16_t nvm_data;
9291 uint16_t off;
9292 bool check_version = false;
9293 bool check_optionrom = false;
9294
9295 wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
9296 switch (sc->sc_type) {
9297 case WM_T_82575:
9298 case WM_T_82576:
9299 case WM_T_82580:
9300 if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
9301 check_version = true;
9302 break;
9303 case WM_T_I211:
9304 /* XXX wm_nvm_version_invm(sc); */
9305 return;
9306 case WM_T_I210:
9307 if (!wm_nvm_get_flash_presence_i210(sc)) {
9308 /* XXX wm_nvm_version_invm(sc); */
9309 return;
9310 }
9311 /* FALLTHROUGH */
9312 case WM_T_I350:
9313 case WM_T_I354:
9314 check_version = true;
9315 check_optionrom = true;
9316 break;
9317 default:
9318 /* XXX Should we print PXE boot agent's version? */
9319 return;
9320 }
9321 if (check_version) {
9322 bool have_build = false;
9323 wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
9324 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
9325 if ((nvm_data & 0x0f00) == 0x0000)
9326 minor = nvm_data & 0x00ff;
9327 else {
9328 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
9329 build = nvm_data & NVM_BUILD_MASK;
9330 have_build = true;
9331 }
9332 /* Decimal */
9333 minor = (minor / 16) * 10 + (minor % 16);
9334
9335 aprint_verbose(", version %d.%d", major, minor);
9336 if (have_build)
9337 aprint_verbose(" build %d", build);
9338 sc->sc_nvm_ver_major = major;
9339 sc->sc_nvm_ver_minor = minor;
9340 }
9341 if (check_optionrom) {
9342 wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
9343 /* Option ROM Version */
9344 if ((off != 0x0000) && (off != 0xffff)) {
9345 off += NVM_COMBO_VER_OFF;
9346 wm_nvm_read(sc, off + 1, 1, &uid1);
9347 wm_nvm_read(sc, off, 1, &uid0);
9348 if ((uid0 != 0) && (uid0 != 0xffff)
9349 && (uid1 != 0) && (uid1 != 0xffff)) {
9350 /* 16bits */
9351 major = uid0 >> 8;
9352 build = (uid0 << 8) | (uid1 >> 8);
9353 patch = uid1 & 0x00ff;
9354 aprint_verbose(", option ROM Version %d.%d.%d",
9355 major, build, patch);
9356 }
9357 }
9358 }
9359
9360 wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
9361 aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
9362 }
9363
9364 /*
9365 * wm_nvm_read:
9366 *
9367 * Read data from the serial EEPROM.
9368 */
9369 static int
9370 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9371 {
9372 int rv;
9373
9374 if (sc->sc_flags & WM_F_EEPROM_INVALID)
9375 return 1;
9376
9377 if (wm_nvm_acquire(sc))
9378 return 1;
9379
9380 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9381 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9382 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
9383 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
9384 else if (sc->sc_flags & WM_F_EEPROM_INVM)
9385 rv = wm_nvm_read_invm(sc, word, wordcnt, data);
9386 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
9387 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
9388 else if (sc->sc_flags & WM_F_EEPROM_SPI)
9389 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
9390 else
9391 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
9392
9393 wm_nvm_release(sc);
9394 return rv;
9395 }
9396
9397 /*
9398 * Hardware semaphores.
9399 * Very complex...
9400 */
9401
9402 static int
9403 wm_get_swsm_semaphore(struct wm_softc *sc)
9404 {
9405 int32_t timeout;
9406 uint32_t swsm;
9407
9408 if (sc->sc_flags & WM_F_LOCK_SWSM) {
9409 /* Get the SW semaphore. */
9410 timeout = sc->sc_nvm_wordsize + 1;
9411 while (timeout) {
9412 swsm = CSR_READ(sc, WMREG_SWSM);
9413
9414 if ((swsm & SWSM_SMBI) == 0)
9415 break;
9416
9417 delay(50);
9418 timeout--;
9419 }
9420
9421 if (timeout == 0) {
9422 aprint_error_dev(sc->sc_dev,
9423 "could not acquire SWSM SMBI\n");
9424 return 1;
9425 }
9426 }
9427
9428 /* Get the FW semaphore. */
9429 timeout = sc->sc_nvm_wordsize + 1;
9430 while (timeout) {
9431 swsm = CSR_READ(sc, WMREG_SWSM);
9432 swsm |= SWSM_SWESMBI;
9433 CSR_WRITE(sc, WMREG_SWSM, swsm);
9434 /* If we managed to set the bit we got the semaphore. */
9435 swsm = CSR_READ(sc, WMREG_SWSM);
9436 if (swsm & SWSM_SWESMBI)
9437 break;
9438
9439 delay(50);
9440 timeout--;
9441 }
9442
9443 if (timeout == 0) {
9444 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
9445 /* Release semaphores */
9446 wm_put_swsm_semaphore(sc);
9447 return 1;
9448 }
9449 return 0;
9450 }
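
/*
 * The acquire above is a two-stage handshake: first wait for the SMBI
 * bit to clear (the inter-software semaphore), then set SWESMBI and
 * read it back to claim the software/firmware semaphore; if the bit
 * doesn't read back as set, firmware currently owns it.
 */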
9451
9452 static void
9453 wm_put_swsm_semaphore(struct wm_softc *sc)
9454 {
9455 uint32_t swsm;
9456
9457 swsm = CSR_READ(sc, WMREG_SWSM);
9458 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
9459 CSR_WRITE(sc, WMREG_SWSM, swsm);
9460 }
9461
9462 static int
9463 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
9464 {
9465 uint32_t swfw_sync;
9466 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
9467 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
9468 	int timeout;
9469
9470 for (timeout = 0; timeout < 200; timeout++) {
9471 if (sc->sc_flags & WM_F_LOCK_SWSM) {
9472 if (wm_get_swsm_semaphore(sc)) {
9473 aprint_error_dev(sc->sc_dev,
9474 "%s: failed to get semaphore\n",
9475 __func__);
9476 return 1;
9477 }
9478 }
9479 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
9480 if ((swfw_sync & (swmask | fwmask)) == 0) {
9481 swfw_sync |= swmask;
9482 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
9483 if (sc->sc_flags & WM_F_LOCK_SWSM)
9484 wm_put_swsm_semaphore(sc);
9485 return 0;
9486 }
9487 if (sc->sc_flags & WM_F_LOCK_SWSM)
9488 wm_put_swsm_semaphore(sc);
9489 delay(5000);
9490 }
9491 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
9492 device_xname(sc->sc_dev), mask, swfw_sync);
9493 return 1;
9494 }
9495
9496 static void
9497 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
9498 {
9499 uint32_t swfw_sync;
9500
9501 if (sc->sc_flags & WM_F_LOCK_SWSM) {
9502 while (wm_get_swsm_semaphore(sc) != 0)
9503 continue;
9504 }
9505 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
9506 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
9507 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
9508 if (sc->sc_flags & WM_F_LOCK_SWSM)
9509 wm_put_swsm_semaphore(sc);
9510 }
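
/*
 * SW_FW_SYNC layout note: each resource has both a software bit
 * (mask << SWFW_SOFT_SHIFT) and a firmware bit (mask << SWFW_FIRM_SHIFT),
 * so the acquire loop above proceeds only when neither side holds the
 * resource, and the release here clears only the software bit.
 */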
9511
9512 static int
9513 wm_get_swfwhw_semaphore(struct wm_softc *sc)
9514 {
9515 uint32_t ext_ctrl;
9516 	int timeout;
9517
9518 for (timeout = 0; timeout < 200; timeout++) {
9519 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
9520 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
9521 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
9522
9523 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
9524 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
9525 return 0;
9526 delay(5000);
9527 }
9528 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
9529 device_xname(sc->sc_dev), ext_ctrl);
9530 return 1;
9531 }
9532
9533 static void
9534 wm_put_swfwhw_semaphore(struct wm_softc *sc)
9535 {
9536 uint32_t ext_ctrl;
9537 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
9538 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
9539 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
9540 }

static int
wm_get_hw_semaphore_82573(struct wm_softc *sc)
{
	int i = 0;
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
	do {
		CSR_WRITE(sc, WMREG_EXTCNFCTR,
		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
			break;
		delay(2*1000);
		i++;
	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);

	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
		wm_put_hw_semaphore_82573(sc);
		log(LOG_ERR, "%s: Driver can't access the PHY\n",
		    device_xname(sc->sc_dev));
		return -1;
	}

	return 0;
}

static void
wm_put_hw_semaphore_82573(struct wm_softc *sc)
{
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
}

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */

static int
wm_check_mng_mode(struct wm_softc *sc)
{
	int rv;

	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		rv = wm_check_mng_mode_ich8lan(sc);
		break;
	case WM_T_82574:
	case WM_T_82583:
		rv = wm_check_mng_mode_82574(sc);
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_80003:
		rv = wm_check_mng_mode_generic(sc);
		break;
	default:
		/* nothing to do */
		rv = 0;
		break;
	}

	return rv;
}

static int
wm_check_mng_mode_ich8lan(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);

	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
		return 1;

	return 0;
}

static int
wm_check_mng_mode_82574(struct wm_softc *sc)
{
	uint16_t data;

	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);

	if ((data & NVM_CFG2_MNGM_MASK) != 0)
		return 1;

	return 0;
}

static int
wm_check_mng_mode_generic(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);

	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
		return 1;

	return 0;
}

static int
wm_enable_mng_pass_thru(struct wm_softc *sc)
{
	uint32_t manc, fwsm, factps;

	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
		return 0;

	manc = CSR_READ(sc, WMREG_MANC);

	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
		device_xname(sc->sc_dev), manc));
	if ((manc & MANC_RECV_TCO_EN) == 0)
		return 0;

	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
		fwsm = CSR_READ(sc, WMREG_FWSM);
		factps = CSR_READ(sc, WMREG_FACTPS);
		if (((factps & FACTPS_MNGCG) == 0)
		    && ((fwsm & FWSM_MODE_MASK)
			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
			return 1;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
		uint16_t data;

		factps = CSR_READ(sc, WMREG_FACTPS);
		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS=%08x, CFG2=%04x\n",
			device_xname(sc->sc_dev), factps, data));
		if (((factps & FACTPS_MNGCG) == 0)
		    && ((data & NVM_CFG2_MNGM_MASK)
			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
			return 1;
	} else if (((manc & MANC_SMBUS_EN) != 0)
	    && ((manc & MANC_ASF_EN) == 0))
		return 1;

	return 0;
}

static int
wm_check_reset_block(struct wm_softc *sc)
{
	uint32_t reg;

	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		reg = CSR_READ(sc, WMREG_FWSM);
		if ((reg & FWSM_RSPCIPHY) != 0)
			return 0;
		else
			return -1;
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
		reg = CSR_READ(sc, WMREG_MANC);
		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
			return -1;
		else
			return 0;
		break;
	default:
		/* no problem */
		break;
	}

	return 0;
}
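
/*
 * Illustrative sketch only: callers are expected to consult
 * wm_check_reset_block() before resetting the PHY, along these lines.
 */
#if 0
	if (wm_check_reset_block(sc) != 0)
		return;		/* manageability firmware owns the PHY */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
#endif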

static void
wm_get_hw_control(struct wm_softc *sc)
{
	uint32_t reg;

	switch (sc->sc_type) {
	case WM_T_82573:
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
		break;
	default:
		break;
	}
}

static void
wm_release_hw_control(struct wm_softc *sc)
{
	uint32_t reg;

	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
		return;

	if (sc->sc_type == WM_T_82573) {
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
	} else {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
	}
}
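
/*
 * Illustrative sketch only: the DRV_LOAD handshake implemented by the
 * two functions above pairs up across the driver's life cycle.
 */
#if 0
	wm_get_hw_control(sc);		/* attach/init: take the NIC over */
	/* ... normal operation ... */
	wm_release_hw_control(sc);	/* detach/shutdown: hand it back */
#endif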

static void
wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
{
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_EXTCNFCTR);

	if (on != 0)
		reg |= EXTCNFCTR_GATE_PHY_CFG;
	else
		reg &= ~EXTCNFCTR_GATE_PHY_CFG;

	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
}

static void
wm_smbustopci(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);
	if (((fwsm & FWSM_FW_VALID) == 0)
	    && (wm_check_reset_block(sc) == 0)) {
		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		CSR_WRITE_FLUSH(sc);
		delay(10);
		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		CSR_WRITE_FLUSH(sc);
		delay(50*1000);

		/*
		 * Gate automatic PHY configuration by hardware on
		 * non-managed 82579.
		 */
		if (sc->sc_type == WM_T_PCH2)
			wm_gate_hw_phy_config_ich8lan(sc, 1);
	}
}

static void
wm_init_manageability(struct wm_softc *sc)
{

	if (sc->sc_flags & WM_F_HAS_MANAGE) {
		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
		uint32_t manc = CSR_READ(sc, WMREG_MANC);

		/* Disable hardware interception of ARP */
		manc &= ~MANC_ARP_EN;

		/* Enable receiving management packets to the host */
		if (sc->sc_type >= WM_T_82571) {
			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
		}

		CSR_WRITE(sc, WMREG_MANC, manc);
	}
}

static void
wm_release_manageability(struct wm_softc *sc)
{

	if (sc->sc_flags & WM_F_HAS_MANAGE) {
		uint32_t manc = CSR_READ(sc, WMREG_MANC);

		manc |= MANC_ARP_EN;
		if (sc->sc_type >= WM_T_82571)
			manc &= ~MANC_EN_MNG2HOST;

		CSR_WRITE(sc, WMREG_MANC, manc);
	}
}

static void
wm_get_wakeup(struct wm_softc *sc)
{

	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82583:
		sc->sc_flags |= WM_F_HAS_AMT;
		/* FALLTHROUGH */
	case WM_T_80003:
	case WM_T_82541:
	case WM_T_82547:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_I350:
	case WM_T_I354:
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		sc->sc_flags |= WM_F_HAS_AMT;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	default:
		break;
	}

	/* 1: HAS_MANAGE */
	if (wm_enable_mng_pass_thru(sc) != 0)
		sc->sc_flags |= WM_F_HAS_MANAGE;

#ifdef WM_DEBUG
	printf("\n");
	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
		printf("HAS_AMT,");
	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
		printf("ARC_SUBSYS_VALID,");
	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
		printf("ASF_FIRMWARE_PRES,");
	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
		printf("HAS_MANAGE,");
	printf("\n");
#endif
	/*
	 * Note that the WOL flag is set only after the EEPROM/NVM
	 * reset has been done.
	 */
}

#ifdef WM_WOL
/* WOL in the newer chipset interfaces (pchlan) */
static void
wm_enable_phy_wakeup(struct wm_softc *sc)
{
#if 0
	uint16_t preg;

	/* Copy MAC RARs to PHY RARs */

	/* Copy MAC MTA to PHY MTA */

	/* Configure PHY Rx Control register */

	/* Enable PHY wakeup in MAC register */

	/* Configure and enable PHY wakeup in PHY registers */

	/* Activate PHY wakeup */

	/* XXX */
#endif
}

/* Power down workaround on D3 */
static void
wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	for (i = 0; i < 2; i++) {
		/* Disable link */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (sc->sc_type == WM_T_ICH8)
			wm_gig_downshift_workaround_ich8lan(sc);

		/* Write VR power-down enable */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);

		/* Read it back and test */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
			break;

		/* Issue PHY reset and repeat at most one more time */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	}
}

static void
wm_enable_wakeup(struct wm_softc *sc)
{
	uint32_t reg, pmreg;
	pcireg_t pmode;

	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return;

	/* Advertise the wakeup capability */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
	    | CTRL_SWDPIN(3));
	CSR_WRITE(sc, WMREG_WUC, WUC_APME);

	/* ICH workaround */
	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		/* Disable gig during WOL */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
		if (sc->sc_type == WM_T_PCH)
			wm_gmii_reset(sc);

		/* Power down workaround */
		if (sc->sc_phytype == WMPHY_82577) {
			struct mii_softc *child;

			/* Assume that the PHY is copper */
			child = LIST_FIRST(&sc->sc_mii.mii_phys);
			if (child->mii_mpd_rev <= 2)
				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
				    (768 << 5) | 25, 0x0444); /* magic num */
		}
		break;
	default:
		break;
	}

	/* Keep the laser running on fiber adapters */
	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_SWDPIN(3);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
	}

	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
#if 0 /* for the multicast packet */
	reg |= WUFC_MC;
	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
#endif

	if (sc->sc_type == WM_T_PCH) {
		wm_enable_phy_wakeup(sc);
	} else {
		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
		CSR_WRITE(sc, WMREG_WUFC, reg);
	}

	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2))
	    && (sc->sc_phytype == WMPHY_IGP_3))
		wm_igp3_phy_powerdown_workaround_ich8lan(sc);

	/* Request PME */
	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
#if 0
	/* Disable WOL */
	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
#else
	/* For WOL */
	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
#endif
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
}
#endif /* WM_WOL */

/* EEE */

static void
wm_set_eee_i350(struct wm_softc *sc)
{
	uint32_t ipcnfg, eeer;

	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
	eeer = CSR_READ(sc, WMREG_EEER);

	if ((sc->sc_flags & WM_F_EEE) != 0) {
		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC);
	} else {
		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		ipcnfg &= ~IPCNFG_10BASE_TE;
		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC);
	}

	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
	CSR_WRITE(sc, WMREG_EEER, eeer);
	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
}
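
/*
 * Illustrative sketch only: WM_F_EEE selects between the two branches
 * above, so enabling or disabling EEE at runtime would amount to
 * flipping the flag and re-running wm_set_eee_i350().
 */
#if 0
	sc->sc_flags |= WM_F_EEE;	/* or: sc->sc_flags &= ~WM_F_EEE; */
	wm_set_eee_i350(sc);
#endif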

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */

/* Workaround for 82566 Kumeran PCS lock loss */
static void
wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
{
	int miistatus, active, i;
	int reg;

	miistatus = sc->sc_mii.mii_media_status;

	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
		return;

	active = sc->sc_mii.mii_media_active;

	/* Nothing to do if the link speed is other than 1Gbps */
	if (IFM_SUBTYPE(active) != IFM_1000_T)
		return;

	for (i = 0; i < 10; i++) {
		/* read twice */
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
			goto out; /* GOOD! No lock loss detected. */

		/* Reset the PHY */
		wm_gmii_reset(sc);
		delay(5*1000);
	}

	/* Disable GigE link negotiation */
	reg = CSR_READ(sc, WMREG_PHY_CTRL);
	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

	/*
	 * Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers.
	 */
	wm_gig_downshift_workaround_ich8lan(sc);

out:
	return;
}

/* WOL from S5 stops working */
static void
wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
{
	uint16_t kmrn_reg;

	/* Only for igp3 */
	if (sc->sc_phytype == WMPHY_IGP_3) {
		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
	}
}

/*
 * Workaround for pch's PHYs
 * XXX should be moved to new PHY driver?
 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{
	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/* (PCH rev. 2) && (82577 && (phy rev 2 or 3)) */

	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		/* PCH rev. < 3 */
		if (sc->sc_rev < 3) {
			/* XXX 6 bit shift? Why? Is it page2? */
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
			    0x66c0);
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
			    0xffff);
		}

		/* XXX phy rev. < 2 */
	}

	/* Select page 0 */

	/* XXX acquire semaphore */
	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	/* XXX release semaphore */

	/*
	 * Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}

static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	wm_set_mdio_slow_mode_hv(sc);
}

static void
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	/* XXX acquire semaphore */

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);

	/* XXX release semaphore */
}

static void
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	uint32_t reg;

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

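	/*
	 * Note (added): the block below momentarily forces the MAC
	 * speed selection (speed bits cleared, CTRL_FRCSPD set, with
	 * CTRL_EXT_SPD_BYPS asserted) and then restores the original
	 * CTRL/CTRL_EXT values, apparently so the new K1 setting takes
	 * effect without renegotiating the link.
	 */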
	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);
}

/* Special case - for 82575 - need to do manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
static void
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	int phyval;
	bool wa_done = false;
	int i;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return;
	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			break; /* OK */
		}

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
}