/*	$NetBSD: if_wm.c,v 1.327 2015/06/06 03:37:01 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- EEE (Energy Efficient Ethernet)
 *	- MSI/MSI-X
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.327 2015/06/06 03:37:01 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
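
/*
 * Usage sketch for DPRINTF() (illustrative only, not a real call site):
 * the second argument carries its own parentheses, since it is pasted
 * verbatim after printf, e.g.
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n",
 *	    device_xname(sc->sc_dev)));
 */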

#ifdef NET_MPSAFE
#define	WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size. Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet. Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */
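
/*
 * Worked example (illustrative only): because the ring sizes are powers
 * of two, the index arithmetic above wraps with a mask instead of a
 * branch. With WM_NTXDESC(sc) == 4096 the mask is 0xfff, so
 *
 *	WM_NEXTTX(sc, 4095) == (4095 + 1) & 0xfff == 0
 *
 * which is why sc_ntxdesc and sc_txnum must be powers of two.
 */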

/*
 * Receive descriptor list size. We have one Rx buffer for normal
 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
 * packet. We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip. We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors. Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
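
/*
 * Sketch of how the offset macros compose (illustrative only): the bus
 * address of Tx descriptor 5 is the DMA address of the control-data
 * clump plus the offset of that descriptor within the structure, i.e.
 *
 *	sc->sc_cddma + WM_CDTXOFF(5)
 *	    == sc->sc_cddma + offsetof(struct wm_control_data_82544,
 *		wdc_u.wcdu_txdescs[5])
 */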

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t	swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
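
/*
 * A hedged usage sketch (not a quote from this driver): the SWFW
 * semaphore bit for this port's PHY is selected by function ID, along
 * the lines of
 *
 *	wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
 */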

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define	WM_TX_LOCK(_sc)		if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define	WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define	WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define	WM_RX_LOCK(_sc)		if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define	WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define	WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define	WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define	WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define	WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
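
/*
 * A minimal usage sketch for the lock macros (illustrative, not an
 * actual driver path):
 *
 *	WM_TX_LOCK(sc);
 *	if (!sc->sc_stopping)
 *		wm_start_locked(ifp);
 *	WM_TX_UNLOCK(sc);
 *
 * When sc_tx_lock/sc_rx_lock are NULL (no NET_MPSAFE), the macros
 * degrade to no-ops and the LOCKED() predicates are always true.
 */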

#ifdef WM_MPSAFE
#define	CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define	CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
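
/*
 * WM_RXCHAIN_LINK() keeps sc_rxtailp aimed at the m_next field of the
 * last mbuf (or at sc_rxhead while the chain is empty), making each
 * link O(1). A minimal sketch of chaining a two-buffer frame:
 *
 *	WM_RXCHAIN_RESET(sc);
 *	WM_RXCHAIN_LINK(sc, m0);	sc_rxhead is now m0
 *	WM_RXCHAIN_LINK(sc, m1);	m0->m_next is now m1
 */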

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
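
/*
 * CSR_WRITE_FLUSH() forces posted bus writes to complete by reading a
 * harmless register (STATUS). An illustrative sequence (a sketch, not
 * a quote from this driver):
 *
 *	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10);
 */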

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
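
/*
 * Worked example: if WM_CDTXADDR(sc, 0) evaluates to 0x123456789 on a
 * platform with a 64-bit bus_addr_t, the two halves programmed into
 * the chip are
 *
 *	WM_CDTXADDR_LO(sc, 0) == 0x23456789
 *	WM_CDTXADDR_HI(sc, 0) == 0x1
 *
 * On 32-bit platforms the high half is always 0.
 */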

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
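
/*
 * Worked example: WM_CDTXSYNC(sc, 4094, 4, ops) on a 4096-entry ring
 * wraps; it first syncs descriptors 4094-4095 (the tail of the ring)
 * and then descriptors 0-1.
 */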

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register. We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)! For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU. On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
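
/*
 * Numeric example of the scoot: with sc_align_tweak == 2, the 14-byte
 * Ethernet header occupies buffer offsets 2-15, so the IP header that
 * follows starts at offset 16 and is 4-byte aligned.
 */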

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
	uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
	uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
	unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Locking, NVM type detection, checksum validation and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define	WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, nvmword;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device. All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access. It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
			 * That's not a problem, because newer chips don't
			 * have this bug.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering. Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
#ifdef WM_MPSAFE
	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
#endif
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1555 /*
1556 * CSA (Communication Streaming Architecture) is about as fast
1557 		 * as a 32-bit 66MHz PCI bus.
1558 */
1559 sc->sc_flags |= WM_F_CSA;
1560 sc->sc_bus_speed = 66;
1561 aprint_verbose_dev(sc->sc_dev,
1562 "Communication Streaming Architecture\n");
1563 if (sc->sc_type == WM_T_82547) {
1564 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1565 callout_setfunc(&sc->sc_txfifo_ch,
1566 wm_82547_txfifo_stall, sc);
1567 aprint_verbose_dev(sc->sc_dev,
1568 "using 82547 Tx FIFO stall work-around\n");
1569 }
1570 } else if (sc->sc_type >= WM_T_82571) {
1571 sc->sc_flags |= WM_F_PCIE;
1572 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1573 && (sc->sc_type != WM_T_ICH10)
1574 && (sc->sc_type != WM_T_PCH)
1575 && (sc->sc_type != WM_T_PCH2)
1576 && (sc->sc_type != WM_T_PCH_LPT)) {
1577 /* ICH* and PCH* have no PCIe capability registers */
1578 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1579 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1580 NULL) == 0)
1581 aprint_error_dev(sc->sc_dev,
1582 "unable to find PCIe capability\n");
1583 }
1584 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1585 } else {
1586 reg = CSR_READ(sc, WMREG_STATUS);
1587 if (reg & STATUS_BUS64)
1588 sc->sc_flags |= WM_F_BUS64;
1589 if ((reg & STATUS_PCIX_MODE) != 0) {
1590 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1591
1592 sc->sc_flags |= WM_F_PCIX;
1593 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1594 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1595 aprint_error_dev(sc->sc_dev,
1596 "unable to find PCIX capability\n");
1597 else if (sc->sc_type != WM_T_82545_3 &&
1598 sc->sc_type != WM_T_82546_3) {
1599 /*
1600 * Work around a problem caused by the BIOS
1601 * setting the max memory read byte count
1602 * incorrectly.
1603 */
1604 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1605 sc->sc_pcixe_capoff + PCIX_CMD);
1606 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1607 sc->sc_pcixe_capoff + PCIX_STATUS);
1608
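				/*
				 * Both fields encode a maximum memory read
				 * byte count of (512 << n), so 0..3 maps to
				 * 512..4096 bytes.  If the BIOS-programmed
				 * command value exceeds the maximum the
				 * device advertises in its status register,
				 * clamp it down below.
				 */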
1609 bytecnt =
1610 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1611 PCIX_CMD_BYTECNT_SHIFT;
1612 maxb =
1613 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1614 PCIX_STATUS_MAXB_SHIFT;
1615 if (bytecnt > maxb) {
1616 aprint_verbose_dev(sc->sc_dev,
1617 "resetting PCI-X MMRBC: %d -> %d\n",
1618 512 << bytecnt, 512 << maxb);
1619 pcix_cmd = (pcix_cmd &
1620 ~PCIX_CMD_BYTECNT_MASK) |
1621 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1622 pci_conf_write(pa->pa_pc, pa->pa_tag,
1623 sc->sc_pcixe_capoff + PCIX_CMD,
1624 pcix_cmd);
1625 }
1626 }
1627 }
1628 /*
1629 * The quad port adapter is special; it has a PCIX-PCIX
1630 * bridge on the board, and can run the secondary bus at
1631 * a higher speed.
1632 */
1633 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1634 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1635 : 66;
1636 } else if (sc->sc_flags & WM_F_PCIX) {
1637 switch (reg & STATUS_PCIXSPD_MASK) {
1638 case STATUS_PCIXSPD_50_66:
1639 sc->sc_bus_speed = 66;
1640 break;
1641 case STATUS_PCIXSPD_66_100:
1642 sc->sc_bus_speed = 100;
1643 break;
1644 case STATUS_PCIXSPD_100_133:
1645 sc->sc_bus_speed = 133;
1646 break;
1647 default:
1648 aprint_error_dev(sc->sc_dev,
1649 "unknown PCIXSPD %d; assuming 66MHz\n",
1650 reg & STATUS_PCIXSPD_MASK);
1651 sc->sc_bus_speed = 66;
1652 break;
1653 }
1654 } else
1655 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1656 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1657 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1658 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1659 }
1660
1661 /*
1662 * Allocate the control data structures, and create and load the
1663 * DMA map for it.
1664 *
1665 * NOTE: All Tx descriptors must be in the same 4G segment of
1666 * memory. So must Rx descriptors. We simplify by allocating
1667 * both sets within the same 4G segment.
1668 */
1669 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1670 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1671 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1672 sizeof(struct wm_control_data_82542) :
1673 sizeof(struct wm_control_data_82544);
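	/*
	 * The 4G boundary argument passed to bus_dmamem_alloc() below is
	 * what enforces the "same 4G segment" constraint noted above.
	 */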
1674 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1675 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1676 &sc->sc_cd_rseg, 0)) != 0) {
1677 aprint_error_dev(sc->sc_dev,
1678 "unable to allocate control data, error = %d\n",
1679 error);
1680 goto fail_0;
1681 }
1682
1683 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1684 sc->sc_cd_rseg, sc->sc_cd_size,
1685 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1686 aprint_error_dev(sc->sc_dev,
1687 "unable to map control data, error = %d\n", error);
1688 goto fail_1;
1689 }
1690
1691 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1692 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1693 aprint_error_dev(sc->sc_dev,
1694 "unable to create control data DMA map, error = %d\n",
1695 error);
1696 goto fail_2;
1697 }
1698
1699 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1700 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1701 aprint_error_dev(sc->sc_dev,
1702 "unable to load control data DMA map, error = %d\n",
1703 error);
1704 goto fail_3;
1705 }
1706
1707 /* Create the transmit buffer DMA maps. */
1708 WM_TXQUEUELEN(sc) =
1709 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1710 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1711 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1712 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1713 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1714 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1715 aprint_error_dev(sc->sc_dev,
1716 "unable to create Tx DMA map %d, error = %d\n",
1717 i, error);
1718 goto fail_4;
1719 }
1720 }
1721
1722 /* Create the receive buffer DMA maps. */
1723 for (i = 0; i < WM_NRXDESC; i++) {
1724 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1725 MCLBYTES, 0, 0,
1726 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1727 aprint_error_dev(sc->sc_dev,
1728 "unable to create Rx DMA map %d error = %d\n",
1729 i, error);
1730 goto fail_5;
1731 }
1732 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1733 }
1734
1735 /* clear interesting stat counters */
1736 CSR_READ(sc, WMREG_COLC);
1737 CSR_READ(sc, WMREG_RXERRC);
1738
1739 /* get PHY control from SMBus to PCIe */
1740 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1741 || (sc->sc_type == WM_T_PCH_LPT))
1742 wm_smbustopci(sc);
1743
1744 /* Reset the chip to a known state. */
1745 wm_reset(sc);
1746
1747 /* Get some information about the EEPROM. */
1748 switch (sc->sc_type) {
1749 case WM_T_82542_2_0:
1750 case WM_T_82542_2_1:
1751 case WM_T_82543:
1752 case WM_T_82544:
1753 /* Microwire */
1754 sc->sc_nvm_wordsize = 64;
1755 sc->sc_nvm_addrbits = 6;
1756 break;
1757 case WM_T_82540:
1758 case WM_T_82545:
1759 case WM_T_82545_3:
1760 case WM_T_82546:
1761 case WM_T_82546_3:
1762 /* Microwire */
1763 reg = CSR_READ(sc, WMREG_EECD);
1764 if (reg & EECD_EE_SIZE) {
1765 sc->sc_nvm_wordsize = 256;
1766 sc->sc_nvm_addrbits = 8;
1767 } else {
1768 sc->sc_nvm_wordsize = 64;
1769 sc->sc_nvm_addrbits = 6;
1770 }
1771 sc->sc_flags |= WM_F_LOCK_EECD;
1772 break;
1773 case WM_T_82541:
1774 case WM_T_82541_2:
1775 case WM_T_82547:
1776 case WM_T_82547_2:
1777 sc->sc_flags |= WM_F_LOCK_EECD;
1778 reg = CSR_READ(sc, WMREG_EECD);
1779 if (reg & EECD_EE_TYPE) {
1780 /* SPI */
1781 sc->sc_flags |= WM_F_EEPROM_SPI;
1782 wm_nvm_set_addrbits_size_eecd(sc);
1783 } else {
1784 /* Microwire */
1785 if ((reg & EECD_EE_ABITS) != 0) {
1786 sc->sc_nvm_wordsize = 256;
1787 sc->sc_nvm_addrbits = 8;
1788 } else {
1789 sc->sc_nvm_wordsize = 64;
1790 sc->sc_nvm_addrbits = 6;
1791 }
1792 }
1793 break;
1794 case WM_T_82571:
1795 case WM_T_82572:
1796 /* SPI */
1797 sc->sc_flags |= WM_F_EEPROM_SPI;
1798 wm_nvm_set_addrbits_size_eecd(sc);
1799 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1800 break;
1801 case WM_T_82573:
1802 sc->sc_flags |= WM_F_LOCK_SWSM;
1803 /* FALLTHROUGH */
1804 case WM_T_82574:
1805 case WM_T_82583:
1806 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1807 sc->sc_flags |= WM_F_EEPROM_FLASH;
1808 sc->sc_nvm_wordsize = 2048;
1809 } else {
1810 /* SPI */
1811 sc->sc_flags |= WM_F_EEPROM_SPI;
1812 wm_nvm_set_addrbits_size_eecd(sc);
1813 }
1814 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1815 break;
1816 case WM_T_82575:
1817 case WM_T_82576:
1818 case WM_T_82580:
1819 case WM_T_I350:
1820 case WM_T_I354:
1821 case WM_T_80003:
1822 /* SPI */
1823 sc->sc_flags |= WM_F_EEPROM_SPI;
1824 wm_nvm_set_addrbits_size_eecd(sc);
1825 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1826 | WM_F_LOCK_SWSM;
1827 break;
1828 case WM_T_ICH8:
1829 case WM_T_ICH9:
1830 case WM_T_ICH10:
1831 case WM_T_PCH:
1832 case WM_T_PCH2:
1833 case WM_T_PCH_LPT:
1834 /* FLASH */
1835 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1836 sc->sc_nvm_wordsize = 2048;
1837 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1838 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1839 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1840 aprint_error_dev(sc->sc_dev,
1841 "can't map FLASH registers\n");
1842 goto fail_5;
1843 }
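		/*
		 * GFPREG encodes the first and last flash sectors of the
		 * gigabit NVM region.  The region apparently holds two
		 * banks, so the per-bank size computed below converts
		 * sectors to bytes, then divides by two for the banks and
		 * by two again to express it in 16-bit words.
		 */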
1844 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1845 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1846 ICH_FLASH_SECTOR_SIZE;
1847 sc->sc_ich8_flash_bank_size =
1848 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1849 sc->sc_ich8_flash_bank_size -=
1850 (reg & ICH_GFPREG_BASE_MASK);
1851 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1852 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1853 break;
1854 case WM_T_I210:
1855 case WM_T_I211:
1856 if (wm_nvm_get_flash_presence_i210(sc)) {
1857 wm_nvm_set_addrbits_size_eecd(sc);
1858 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1859 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1860 } else {
1861 sc->sc_nvm_wordsize = INVM_SIZE;
1862 sc->sc_flags |= WM_F_EEPROM_INVM;
1863 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1864 }
1865 break;
1866 default:
1867 break;
1868 }
1869
1870 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1871 switch (sc->sc_type) {
1872 case WM_T_82571:
1873 case WM_T_82572:
1874 reg = CSR_READ(sc, WMREG_SWSM2);
1875 if ((reg & SWSM2_LOCK) == 0) {
1876 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1877 force_clear_smbi = true;
1878 } else
1879 force_clear_smbi = false;
1880 break;
1881 case WM_T_82573:
1882 case WM_T_82574:
1883 case WM_T_82583:
1884 force_clear_smbi = true;
1885 break;
1886 default:
1887 force_clear_smbi = false;
1888 break;
1889 }
1890 if (force_clear_smbi) {
1891 reg = CSR_READ(sc, WMREG_SWSM);
1892 if ((reg & SWSM_SMBI) != 0)
1893 aprint_error_dev(sc->sc_dev,
1894 "Please update the Bootagent\n");
1895 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1896 }
1897
1898 /*
1899 	 * Defer printing the EEPROM type until after verifying the checksum.
1900 * This allows the EEPROM type to be printed correctly in the case
1901 * that no EEPROM is attached.
1902 */
1903 /*
1904 * Validate the EEPROM checksum. If the checksum fails, flag
1905 * this for later, so we can fail future reads from the EEPROM.
1906 */
1907 if (wm_nvm_validate_checksum(sc)) {
1908 /*
1909 		 * Validate again, because some PCI-e parts fail the
1910 		 * first check due to the link being in a sleep state.
1911 */
1912 if (wm_nvm_validate_checksum(sc))
1913 sc->sc_flags |= WM_F_EEPROM_INVALID;
1914 }
1915
1916 /* Set device properties (macflags) */
1917 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1918
1919 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1920 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1921 else {
1922 aprint_verbose_dev(sc->sc_dev, "%u words ",
1923 sc->sc_nvm_wordsize);
1924 if (sc->sc_flags & WM_F_EEPROM_INVM)
1925 aprint_verbose("iNVM\n");
1926 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
1927 aprint_verbose("FLASH(HW)\n");
1928 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
1929 aprint_verbose("FLASH\n");
1930 else {
1931 if (sc->sc_flags & WM_F_EEPROM_SPI)
1932 eetype = "SPI";
1933 else
1934 eetype = "MicroWire";
1935 aprint_verbose("(%d address bits) %s EEPROM\n",
1936 sc->sc_nvm_addrbits, eetype);
1937 }
1938 }
1939
1940 switch (sc->sc_type) {
1941 case WM_T_82571:
1942 case WM_T_82572:
1943 case WM_T_82573:
1944 case WM_T_82574:
1945 case WM_T_82583:
1946 case WM_T_80003:
1947 case WM_T_ICH8:
1948 case WM_T_ICH9:
1949 case WM_T_ICH10:
1950 case WM_T_PCH:
1951 case WM_T_PCH2:
1952 case WM_T_PCH_LPT:
1953 if (wm_check_mng_mode(sc) != 0)
1954 wm_get_hw_control(sc);
1955 break;
1956 default:
1957 break;
1958 }
1959 wm_get_wakeup(sc);
1960 /*
1961 * Read the Ethernet address from the EEPROM, if not first found
1962 * in device properties.
1963 */
1964 ea = prop_dictionary_get(dict, "mac-address");
1965 if (ea != NULL) {
1966 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1967 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1968 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1969 } else {
1970 if (wm_read_mac_addr(sc, enaddr) != 0) {
1971 aprint_error_dev(sc->sc_dev,
1972 "unable to read Ethernet address\n");
1973 goto fail_5;
1974 }
1975 }
1976
1977 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1978 ether_sprintf(enaddr));
1979
1980 /*
1981 * Read the config info from the EEPROM, and set up various
1982 * bits in the control registers based on their contents.
1983 */
1984 pn = prop_dictionary_get(dict, "i82543-cfg1");
1985 if (pn != NULL) {
1986 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1987 cfg1 = (uint16_t) prop_number_integer_value(pn);
1988 } else {
1989 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
1990 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1991 goto fail_5;
1992 }
1993 }
1994
1995 pn = prop_dictionary_get(dict, "i82543-cfg2");
1996 if (pn != NULL) {
1997 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1998 cfg2 = (uint16_t) prop_number_integer_value(pn);
1999 } else {
2000 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2001 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2002 goto fail_5;
2003 }
2004 }
2005
2006 /* check for WM_F_WOL */
2007 switch (sc->sc_type) {
2008 case WM_T_82542_2_0:
2009 case WM_T_82542_2_1:
2010 case WM_T_82543:
2011 /* dummy? */
2012 eeprom_data = 0;
2013 apme_mask = NVM_CFG3_APME;
2014 break;
2015 case WM_T_82544:
2016 apme_mask = NVM_CFG2_82544_APM_EN;
2017 eeprom_data = cfg2;
2018 break;
2019 case WM_T_82546:
2020 case WM_T_82546_3:
2021 case WM_T_82571:
2022 case WM_T_82572:
2023 case WM_T_82573:
2024 case WM_T_82574:
2025 case WM_T_82583:
2026 case WM_T_80003:
2027 default:
2028 apme_mask = NVM_CFG3_APME;
2029 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2030 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2031 break;
2032 case WM_T_82575:
2033 case WM_T_82576:
2034 case WM_T_82580:
2035 case WM_T_I350:
2036 case WM_T_I354: /* XXX ok? */
2037 case WM_T_ICH8:
2038 case WM_T_ICH9:
2039 case WM_T_ICH10:
2040 case WM_T_PCH:
2041 case WM_T_PCH2:
2042 case WM_T_PCH_LPT:
2043 /* XXX The funcid should be checked on some devices */
2044 apme_mask = WUC_APME;
2045 eeprom_data = CSR_READ(sc, WMREG_WUC);
2046 break;
2047 }
2048
2049 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2050 if ((eeprom_data & apme_mask) != 0)
2051 sc->sc_flags |= WM_F_WOL;
2052 #ifdef WM_DEBUG
2053 if ((sc->sc_flags & WM_F_WOL) != 0)
2054 printf("WOL\n");
2055 #endif
2056
2057 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2058 /* Check NVM for autonegotiation */
2059 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2060 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2061 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2062 }
2063 }
2064
2065 /*
2066 	 * XXX need special handling for some multi-port cards
2067 	 * to disable a particular port.
2068 */
2069
2070 if (sc->sc_type >= WM_T_82544) {
2071 pn = prop_dictionary_get(dict, "i82543-swdpin");
2072 if (pn != NULL) {
2073 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2074 swdpin = (uint16_t) prop_number_integer_value(pn);
2075 } else {
2076 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2077 aprint_error_dev(sc->sc_dev,
2078 "unable to read SWDPIN\n");
2079 goto fail_5;
2080 }
2081 }
2082 }
2083
2084 if (cfg1 & NVM_CFG1_ILOS)
2085 sc->sc_ctrl |= CTRL_ILOS;
2086
2087 /*
2088 * XXX
2089 	 * This code isn't correct because pins 2 and 3 are located
2090 	 * at different positions on newer chips. Check all the datasheets.
2091 	 *
2092 	 * Until this problem is resolved, only do this on chips up to
2093 	 * the 82580.
2093 */
2094 if (sc->sc_type <= WM_T_82580) {
2095 if (sc->sc_type >= WM_T_82544) {
2096 sc->sc_ctrl |=
2097 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2098 CTRL_SWDPIO_SHIFT;
2099 sc->sc_ctrl |=
2100 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2101 CTRL_SWDPINS_SHIFT;
2102 } else {
2103 sc->sc_ctrl |=
2104 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2105 CTRL_SWDPIO_SHIFT;
2106 }
2107 }
2108
2109 /* XXX For other than 82580? */
2110 if (sc->sc_type == WM_T_82580) {
2111 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2112 printf("CFG3 = %08x\n", (uint32_t)nvmword);
2113 if (nvmword & __BIT(13)) {
2114 printf("SET ILOS\n");
2115 sc->sc_ctrl |= CTRL_ILOS;
2116 }
2117 }
2118
2119 #if 0
2120 if (sc->sc_type >= WM_T_82544) {
2121 if (cfg1 & NVM_CFG1_IPS0)
2122 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2123 if (cfg1 & NVM_CFG1_IPS1)
2124 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2125 sc->sc_ctrl_ext |=
2126 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2127 CTRL_EXT_SWDPIO_SHIFT;
2128 sc->sc_ctrl_ext |=
2129 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2130 CTRL_EXT_SWDPINS_SHIFT;
2131 } else {
2132 sc->sc_ctrl_ext |=
2133 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2134 CTRL_EXT_SWDPIO_SHIFT;
2135 }
2136 #endif
2137
2138 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2139 #if 0
2140 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2141 #endif
2142
2143 /*
2144 * Set up some register offsets that are different between
2145 * the i82542 and the i82543 and later chips.
2146 */
2147 if (sc->sc_type < WM_T_82543) {
2148 sc->sc_rdt_reg = WMREG_OLD_RDT0;
2149 sc->sc_tdt_reg = WMREG_OLD_TDT;
2150 } else {
2151 sc->sc_rdt_reg = WMREG_RDT;
2152 sc->sc_tdt_reg = WMREG_TDT;
2153 }
2154
2155 if (sc->sc_type == WM_T_PCH) {
2156 uint16_t val;
2157
2158 /* Save the NVM K1 bit setting */
2159 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2160
2161 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2162 sc->sc_nvm_k1_enabled = 1;
2163 else
2164 sc->sc_nvm_k1_enabled = 0;
2165 }
2166
2167 /*
2168 * Determine if we're TBI,GMII or SGMII mode, and initialize the
2169 * media structures accordingly.
2170 */
2171 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2172 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2173 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2174 || sc->sc_type == WM_T_82573
2175 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2176 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2177 wm_gmii_mediainit(sc, wmp->wmp_product);
2178 } else if (sc->sc_type < WM_T_82543 ||
2179 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2180 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2181 aprint_error_dev(sc->sc_dev,
2182 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2183 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2184 }
2185 wm_tbi_mediainit(sc);
2186 } else {
2187 switch (sc->sc_type) {
2188 case WM_T_82575:
2189 case WM_T_82576:
2190 case WM_T_82580:
2191 case WM_T_I350:
2192 case WM_T_I354:
2193 case WM_T_I210:
2194 case WM_T_I211:
2195 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2196 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
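			/*
			 * The link-mode field of CTRL_EXT tells us whether
			 * the port is wired for 1000KX, SGMII, or PCIe
			 * SERDES; for the latter two the SFP module (probed
			 * below) decides the final media type.
			 */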
2197 switch (link_mode) {
2198 case CTRL_EXT_LINK_MODE_1000KX:
2199 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2200 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2201 break;
2202 case CTRL_EXT_LINK_MODE_SGMII:
2203 if (wm_sgmii_uses_mdio(sc)) {
2204 aprint_verbose_dev(sc->sc_dev,
2205 "SGMII(MDIO)\n");
2206 sc->sc_flags |= WM_F_SGMII;
2207 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2208 break;
2209 }
2210 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2211 /*FALLTHROUGH*/
2212 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2213 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2214 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2215 if (link_mode
2216 == CTRL_EXT_LINK_MODE_SGMII) {
2217 sc->sc_mediatype
2218 = WM_MEDIATYPE_COPPER;
2219 sc->sc_flags |= WM_F_SGMII;
2220 } else {
2221 sc->sc_mediatype
2222 = WM_MEDIATYPE_SERDES;
2223 aprint_verbose_dev(sc->sc_dev,
2224 "SERDES\n");
2225 }
2226 break;
2227 }
2228 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2229 aprint_verbose_dev(sc->sc_dev,
2230 "SERDES\n");
2231
2232 /* Change current link mode setting */
2233 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2234 switch (sc->sc_mediatype) {
2235 case WM_MEDIATYPE_COPPER:
2236 reg |= CTRL_EXT_LINK_MODE_SGMII;
2237 break;
2238 case WM_MEDIATYPE_SERDES:
2239 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2240 break;
2241 default:
2242 break;
2243 }
2244 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2245 break;
2246 case CTRL_EXT_LINK_MODE_GMII:
2247 default:
2248 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2249 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2250 break;
2251 }
2252
2253 reg &= ~CTRL_EXT_I2C_ENA;
2254 if ((sc->sc_flags & WM_F_SGMII) != 0)
2255 reg |= CTRL_EXT_I2C_ENA;
2256 else
2257 reg &= ~CTRL_EXT_I2C_ENA;
2258 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2259
2260 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2261 wm_gmii_mediainit(sc, wmp->wmp_product);
2262 else
2263 wm_tbi_mediainit(sc);
2264 break;
2265 default:
2266 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2267 aprint_error_dev(sc->sc_dev,
2268 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2269 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2270 wm_gmii_mediainit(sc, wmp->wmp_product);
2271 }
2272 }
2273
2274 ifp = &sc->sc_ethercom.ec_if;
2275 xname = device_xname(sc->sc_dev);
2276 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2277 ifp->if_softc = sc;
2278 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2279 ifp->if_ioctl = wm_ioctl;
2280 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2281 ifp->if_start = wm_nq_start;
2282 else
2283 ifp->if_start = wm_start;
2284 ifp->if_watchdog = wm_watchdog;
2285 ifp->if_init = wm_init;
2286 ifp->if_stop = wm_stop;
2287 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2288 IFQ_SET_READY(&ifp->if_snd);
2289
2290 /* Check for jumbo frame */
2291 switch (sc->sc_type) {
2292 case WM_T_82573:
2293 /* XXX limited to 9234 if ASPM is disabled */
2294 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2295 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2296 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2297 break;
2298 case WM_T_82571:
2299 case WM_T_82572:
2300 case WM_T_82574:
2301 case WM_T_82575:
2302 case WM_T_82576:
2303 case WM_T_82580:
2304 case WM_T_I350:
2305 	case WM_T_I354: /* XXX ok? */
2306 case WM_T_I210:
2307 case WM_T_I211:
2308 case WM_T_80003:
2309 case WM_T_ICH9:
2310 case WM_T_ICH10:
2311 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2312 case WM_T_PCH_LPT:
2313 /* XXX limited to 9234 */
2314 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2315 break;
2316 case WM_T_PCH:
2317 /* XXX limited to 4096 */
2318 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2319 break;
2320 case WM_T_82542_2_0:
2321 case WM_T_82542_2_1:
2322 case WM_T_82583:
2323 case WM_T_ICH8:
2324 /* No support for jumbo frame */
2325 break;
2326 default:
2327 /* ETHER_MAX_LEN_JUMBO */
2328 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2329 break;
2330 }
2331
2332 	/* If we're an i82543 or greater, we can support VLANs. */
2333 if (sc->sc_type >= WM_T_82543)
2334 sc->sc_ethercom.ec_capabilities |=
2335 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2336
2337 /*
2338 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
2339 	 * on the i82543 and later.
2340 */
2341 if (sc->sc_type >= WM_T_82543) {
2342 ifp->if_capabilities |=
2343 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2344 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2345 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2346 IFCAP_CSUM_TCPv6_Tx |
2347 IFCAP_CSUM_UDPv6_Tx;
2348 }
2349
2350 /*
2351 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2352 *
2353 * 82541GI (8086:1076) ... no
2354 * 82572EI (8086:10b9) ... yes
2355 */
2356 if (sc->sc_type >= WM_T_82571) {
2357 ifp->if_capabilities |=
2358 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2359 }
2360
2361 /*
2362 	 * If we're an i82544 or greater (except i82547), we can do
2363 * TCP segmentation offload.
2364 */
2365 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2366 ifp->if_capabilities |= IFCAP_TSOv4;
2367 }
2368
2369 if (sc->sc_type >= WM_T_82571) {
2370 ifp->if_capabilities |= IFCAP_TSOv6;
2371 }
2372
2373 #ifdef WM_MPSAFE
2374 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2375 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2376 #else
2377 sc->sc_tx_lock = NULL;
2378 sc->sc_rx_lock = NULL;
2379 #endif
2380
2381 /* Attach the interface. */
2382 if_attach(ifp);
2383 ether_ifattach(ifp, enaddr);
2384 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2385 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2386 RND_FLAG_DEFAULT);
2387
2388 #ifdef WM_EVENT_COUNTERS
2389 /* Attach event counters. */
2390 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2391 NULL, xname, "txsstall");
2392 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2393 NULL, xname, "txdstall");
2394 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2395 NULL, xname, "txfifo_stall");
2396 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2397 NULL, xname, "txdw");
2398 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2399 NULL, xname, "txqe");
2400 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2401 NULL, xname, "rxintr");
2402 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2403 NULL, xname, "linkintr");
2404
2405 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2406 NULL, xname, "rxipsum");
2407 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2408 NULL, xname, "rxtusum");
2409 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2410 NULL, xname, "txipsum");
2411 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2412 NULL, xname, "txtusum");
2413 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2414 NULL, xname, "txtusum6");
2415
2416 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2417 NULL, xname, "txtso");
2418 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2419 NULL, xname, "txtso6");
2420 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2421 NULL, xname, "txtsopain");
2422
2423 for (i = 0; i < WM_NTXSEGS; i++) {
2424 snprintf(wm_txseg_evcnt_names[i],
2425 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2426 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2427 NULL, xname, wm_txseg_evcnt_names[i]);
2428 }
2429
2430 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2431 NULL, xname, "txdrop");
2432
2433 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2434 NULL, xname, "tu");
2435
2436 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2437 NULL, xname, "tx_xoff");
2438 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2439 NULL, xname, "tx_xon");
2440 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2441 NULL, xname, "rx_xoff");
2442 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2443 NULL, xname, "rx_xon");
2444 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2445 NULL, xname, "rx_macctl");
2446 #endif /* WM_EVENT_COUNTERS */
2447
2448 if (pmf_device_register(self, wm_suspend, wm_resume))
2449 pmf_class_network_register(self, ifp);
2450 else
2451 aprint_error_dev(self, "couldn't establish power handler\n");
2452
2453 sc->sc_flags |= WM_F_ATTACHED;
2454 return;
2455
2456 /*
2457 * Free any resources we've allocated during the failed attach
2458 * attempt. Do this in reverse order and fall through.
2459 */
2460 fail_5:
2461 for (i = 0; i < WM_NRXDESC; i++) {
2462 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2463 bus_dmamap_destroy(sc->sc_dmat,
2464 sc->sc_rxsoft[i].rxs_dmamap);
2465 }
2466 fail_4:
2467 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2468 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2469 bus_dmamap_destroy(sc->sc_dmat,
2470 sc->sc_txsoft[i].txs_dmamap);
2471 }
2472 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2473 fail_3:
2474 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2475 fail_2:
2476 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2477 sc->sc_cd_size);
2478 fail_1:
2479 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2480 fail_0:
2481 return;
2482 }
2483
2484 /* The detach function (ca_detach) */
2485 static int
2486 wm_detach(device_t self, int flags __unused)
2487 {
2488 struct wm_softc *sc = device_private(self);
2489 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2490 int i;
2491 #ifndef WM_MPSAFE
2492 int s;
2493 #endif
2494
2495 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2496 return 0;
2497
2498 #ifndef WM_MPSAFE
2499 s = splnet();
2500 #endif
2501 /* Stop the interface. Callouts are stopped in it. */
2502 wm_stop(ifp, 1);
2503
2504 #ifndef WM_MPSAFE
2505 splx(s);
2506 #endif
2507
2508 pmf_device_deregister(self);
2509
2510 /* Tell the firmware about the release */
2511 WM_BOTH_LOCK(sc);
2512 wm_release_manageability(sc);
2513 wm_release_hw_control(sc);
2514 WM_BOTH_UNLOCK(sc);
2515
2516 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2517
2518 /* Delete all remaining media. */
2519 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2520
2521 ether_ifdetach(ifp);
2522 if_detach(ifp);
2523
2524
2525 /* Unload RX dmamaps and free mbufs */
2526 WM_RX_LOCK(sc);
2527 wm_rxdrain(sc);
2528 WM_RX_UNLOCK(sc);
2529 /* Must unlock here */
2530
2531 	/* Free DMA maps. This is the same as the end of the wm_attach() function */
2532 for (i = 0; i < WM_NRXDESC; i++) {
2533 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2534 bus_dmamap_destroy(sc->sc_dmat,
2535 sc->sc_rxsoft[i].rxs_dmamap);
2536 }
2537 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2538 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2539 bus_dmamap_destroy(sc->sc_dmat,
2540 sc->sc_txsoft[i].txs_dmamap);
2541 }
2542 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2543 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2544 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2545 sc->sc_cd_size);
2546 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2547
2548 /* Disestablish the interrupt handler */
2549 if (sc->sc_ih != NULL) {
2550 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2551 sc->sc_ih = NULL;
2552 }
2553
2554 /* Unmap the registers */
2555 if (sc->sc_ss) {
2556 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2557 sc->sc_ss = 0;
2558 }
2559
2560 if (sc->sc_ios) {
2561 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2562 sc->sc_ios = 0;
2563 }
2564
2565 if (sc->sc_tx_lock)
2566 mutex_obj_free(sc->sc_tx_lock);
2567 if (sc->sc_rx_lock)
2568 mutex_obj_free(sc->sc_rx_lock);
2569
2570 return 0;
2571 }
2572
2573 static bool
2574 wm_suspend(device_t self, const pmf_qual_t *qual)
2575 {
2576 struct wm_softc *sc = device_private(self);
2577
2578 wm_release_manageability(sc);
2579 wm_release_hw_control(sc);
2580 #ifdef WM_WOL
2581 wm_enable_wakeup(sc);
2582 #endif
2583
2584 return true;
2585 }
2586
2587 static bool
2588 wm_resume(device_t self, const pmf_qual_t *qual)
2589 {
2590 struct wm_softc *sc = device_private(self);
2591
2592 wm_init_manageability(sc);
2593
2594 return true;
2595 }
2596
2597 /*
2598 * wm_watchdog: [ifnet interface function]
2599 *
2600 * Watchdog timer handler.
2601 */
2602 static void
2603 wm_watchdog(struct ifnet *ifp)
2604 {
2605 struct wm_softc *sc = ifp->if_softc;
2606
2607 /*
2608 * Since we're using delayed interrupts, sweep up
2609 * before we report an error.
2610 */
2611 WM_TX_LOCK(sc);
2612 wm_txintr(sc);
2613 WM_TX_UNLOCK(sc);
2614
2615 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2616 #ifdef WM_DEBUG
2617 int i, j;
2618 struct wm_txsoft *txs;
2619 #endif
2620 log(LOG_ERR,
2621 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2622 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2623 sc->sc_txnext);
2624 ifp->if_oerrors++;
2625 #ifdef WM_DEBUG
2626 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2627 i = WM_NEXTTXS(sc, i)) {
2628 txs = &sc->sc_txsoft[i];
2629 printf("txs %d tx %d -> %d\n",
2630 i, txs->txs_firstdesc, txs->txs_lastdesc);
2631 for (j = txs->txs_firstdesc; ;
2632 j = WM_NEXTTX(sc, j)) {
2633 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2634 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2635 printf("\t %#08x%08x\n",
2636 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2637 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2638 if (j == txs->txs_lastdesc)
2639 break;
2640 }
2641 }
2642 #endif
2643 /* Reset the interface. */
2644 (void) wm_init(ifp);
2645 }
2646
2647 /* Try to get more packets going. */
2648 ifp->if_start(ifp);
2649 }
2650
2651 /*
2652 * wm_tick:
2653 *
2654 * One second timer, used to check link status, sweep up
2655 * completed transmit jobs, etc.
2656 */
2657 static void
2658 wm_tick(void *arg)
2659 {
2660 struct wm_softc *sc = arg;
2661 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2662 #ifndef WM_MPSAFE
2663 int s;
2664
2665 s = splnet();
2666 #endif
2667
2668 WM_TX_LOCK(sc);
2669
2670 if (sc->sc_stopping)
2671 goto out;
2672
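	/*
	 * The MAC statistics registers read below are clear-on-read, so
	 * each read returns the count accumulated since the last tick.
	 */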
2673 if (sc->sc_type >= WM_T_82542_2_1) {
2674 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2675 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2676 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2677 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2678 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2679 }
2680
2681 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2682 ifp->if_ierrors += 0ULL + /* ensure quad_t */
2683 + CSR_READ(sc, WMREG_CRCERRS)
2684 + CSR_READ(sc, WMREG_ALGNERRC)
2685 + CSR_READ(sc, WMREG_SYMERRC)
2686 + CSR_READ(sc, WMREG_RXERRC)
2687 + CSR_READ(sc, WMREG_SEC)
2688 + CSR_READ(sc, WMREG_CEXTERR)
2689 + CSR_READ(sc, WMREG_RLEC);
2690 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2691
2692 if (sc->sc_flags & WM_F_HAS_MII)
2693 mii_tick(&sc->sc_mii);
2694 else if ((sc->sc_type >= WM_T_82575)
2695 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2696 wm_serdes_tick(sc);
2697 else
2698 wm_tbi_tick(sc);
2699
2700 out:
2701 WM_TX_UNLOCK(sc);
2702 #ifndef WM_MPSAFE
2703 splx(s);
2704 #endif
2705
2706 if (!sc->sc_stopping)
2707 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2708 }
2709
2710 static int
2711 wm_ifflags_cb(struct ethercom *ec)
2712 {
2713 struct ifnet *ifp = &ec->ec_if;
2714 struct wm_softc *sc = ifp->if_softc;
2715 int change = ifp->if_flags ^ sc->sc_if_flags;
2716 int rc = 0;
2717
2718 WM_BOTH_LOCK(sc);
2719
2720 if (change != 0)
2721 sc->sc_if_flags = ifp->if_flags;
2722
2723 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2724 rc = ENETRESET;
2725 goto out;
2726 }
2727
2728 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2729 wm_set_filter(sc);
2730
2731 wm_set_vlan(sc);
2732
2733 out:
2734 WM_BOTH_UNLOCK(sc);
2735
2736 return rc;
2737 }
2738
2739 /*
2740 * wm_ioctl: [ifnet interface function]
2741 *
2742 * Handle control requests from the operator.
2743 */
2744 static int
2745 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2746 {
2747 struct wm_softc *sc = ifp->if_softc;
2748 struct ifreq *ifr = (struct ifreq *) data;
2749 struct ifaddr *ifa = (struct ifaddr *)data;
2750 struct sockaddr_dl *sdl;
2751 int s, error;
2752
2753 #ifndef WM_MPSAFE
2754 s = splnet();
2755 #endif
2756 switch (cmd) {
2757 case SIOCSIFMEDIA:
2758 case SIOCGIFMEDIA:
2759 WM_BOTH_LOCK(sc);
2760 /* Flow control requires full-duplex mode. */
2761 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2762 (ifr->ifr_media & IFM_FDX) == 0)
2763 ifr->ifr_media &= ~IFM_ETH_FMASK;
2764 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2765 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2766 /* We can do both TXPAUSE and RXPAUSE. */
2767 ifr->ifr_media |=
2768 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2769 }
2770 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2771 }
2772 WM_BOTH_UNLOCK(sc);
2773 #ifdef WM_MPSAFE
2774 s = splnet();
2775 #endif
2776 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2777 #ifdef WM_MPSAFE
2778 splx(s);
2779 #endif
2780 break;
2781 case SIOCINITIFADDR:
2782 WM_BOTH_LOCK(sc);
2783 if (ifa->ifa_addr->sa_family == AF_LINK) {
2784 sdl = satosdl(ifp->if_dl->ifa_addr);
2785 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2786 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2787 /* unicast address is first multicast entry */
2788 wm_set_filter(sc);
2789 error = 0;
2790 WM_BOTH_UNLOCK(sc);
2791 break;
2792 }
2793 WM_BOTH_UNLOCK(sc);
2794 /*FALLTHROUGH*/
2795 default:
2796 #ifdef WM_MPSAFE
2797 s = splnet();
2798 #endif
2799 /* It may call wm_start, so unlock here */
2800 error = ether_ioctl(ifp, cmd, data);
2801 #ifdef WM_MPSAFE
2802 splx(s);
2803 #endif
2804 if (error != ENETRESET)
2805 break;
2806
2807 error = 0;
2808
2809 if (cmd == SIOCSIFCAP) {
2810 error = (*ifp->if_init)(ifp);
2811 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2812 ;
2813 else if (ifp->if_flags & IFF_RUNNING) {
2814 /*
2815 * Multicast list has changed; set the hardware filter
2816 * accordingly.
2817 */
2818 WM_BOTH_LOCK(sc);
2819 wm_set_filter(sc);
2820 WM_BOTH_UNLOCK(sc);
2821 }
2822 break;
2823 }
2824
2825 /* Try to get more packets going. */
2826 ifp->if_start(ifp);
2827
2828 #ifndef WM_MPSAFE
2829 splx(s);
2830 #endif
2831 return error;
2832 }
2833
2834 /* MAC address related */
2835
2836 /*
2837  * Get the offset of the MAC address and return it.
2838  * If an error occurs, use offset 0.
2839 */
2840 static uint16_t
2841 wm_check_alt_mac_addr(struct wm_softc *sc)
2842 {
2843 uint16_t myea[ETHER_ADDR_LEN / 2];
2844 uint16_t offset = NVM_OFF_MACADDR;
2845
2846 /* Try to read alternative MAC address pointer */
2847 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2848 return 0;
2849
2850 	/* Check whether the pointer is valid. */
2851 if ((offset == 0x0000) || (offset == 0xffff))
2852 return 0;
2853
2854 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
2855 /*
2856 	 * Check whether the alternative MAC address is valid. Some
2857 	 * cards have a non-0xffff pointer but don't actually use an
2858 	 * alternative MAC address.
2859 	 *
2860 	 * A valid address must have the multicast (group) bit clear.
2861 */
2862 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2863 if (((myea[0] & 0xff) & 0x01) == 0)
2864 return offset; /* Found */
2865
2866 /* Not found */
2867 return 0;
2868 }
2869
2870 static int
2871 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2872 {
2873 uint16_t myea[ETHER_ADDR_LEN / 2];
2874 uint16_t offset = NVM_OFF_MACADDR;
2875 int do_invert = 0;
2876
2877 switch (sc->sc_type) {
2878 case WM_T_82580:
2879 case WM_T_I350:
2880 case WM_T_I354:
2881 /* EEPROM Top Level Partitioning */
2882 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
2883 break;
2884 case WM_T_82571:
2885 case WM_T_82575:
2886 case WM_T_82576:
2887 case WM_T_80003:
2888 case WM_T_I210:
2889 case WM_T_I211:
2890 offset = wm_check_alt_mac_addr(sc);
2891 if (offset == 0)
2892 if ((sc->sc_funcid & 0x01) == 1)
2893 do_invert = 1;
2894 break;
2895 default:
2896 if ((sc->sc_funcid & 0x01) == 1)
2897 do_invert = 1;
2898 break;
2899 }
2900
2901 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2902 myea) != 0)
2903 goto bad;
2904
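	/* The NVM stores the address as three little-endian 16-bit words. */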
2905 enaddr[0] = myea[0] & 0xff;
2906 enaddr[1] = myea[0] >> 8;
2907 enaddr[2] = myea[1] & 0xff;
2908 enaddr[3] = myea[1] >> 8;
2909 enaddr[4] = myea[2] & 0xff;
2910 enaddr[5] = myea[2] >> 8;
2911
2912 /*
2913 * Toggle the LSB of the MAC address on the second port
2914 * of some dual port cards.
2915 */
2916 if (do_invert != 0)
2917 enaddr[5] ^= 1;
2918
2919 return 0;
2920
2921 bad:
2922 return -1;
2923 }
2924
2925 /*
2926 * wm_set_ral:
2927 *
2928  * Set an entry in the receive address list.
2929 */
2930 static void
2931 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2932 {
2933 uint32_t ral_lo, ral_hi;
2934
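	/*
	 * RAL_LO takes the first four bytes of the address and RAL_HI the
	 * last two, together with the Address Valid bit; writing zeroes
	 * disables the slot.
	 */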
2935 if (enaddr != NULL) {
2936 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2937 (enaddr[3] << 24);
2938 ral_hi = enaddr[4] | (enaddr[5] << 8);
2939 ral_hi |= RAL_AV;
2940 } else {
2941 ral_lo = 0;
2942 ral_hi = 0;
2943 }
2944
2945 if (sc->sc_type >= WM_T_82544) {
2946 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2947 ral_lo);
2948 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2949 ral_hi);
2950 } else {
2951 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2952 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2953 }
2954 }
2955
2956 /*
2957 * wm_mchash:
2958 *
2959 * Compute the hash of the multicast address for the 4096-bit
2960 * multicast filter.
2961 */
2962 static uint32_t
2963 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2964 {
2965 static const int lo_shift[4] = { 4, 3, 2, 0 };
2966 static const int hi_shift[4] = { 4, 5, 6, 8 };
2967 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2968 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
2969 uint32_t hash;
2970
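	/*
	 * The hash is built from bits of the last two address bytes;
	 * sc_mchash_type selects one of four bit windows, yielding a
	 * 12-bit table index (10-bit on the ICH/PCH variants).
	 */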
2971 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2972 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2973 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
2974 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
2975 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
2976 return (hash & 0x3ff);
2977 }
2978 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2979 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2980
2981 return (hash & 0xfff);
2982 }
2983
2984 /*
2985 * wm_set_filter:
2986 *
2987 * Set up the receive filter.
2988 */
2989 static void
2990 wm_set_filter(struct wm_softc *sc)
2991 {
2992 struct ethercom *ec = &sc->sc_ethercom;
2993 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2994 struct ether_multi *enm;
2995 struct ether_multistep step;
2996 bus_addr_t mta_reg;
2997 uint32_t hash, reg, bit;
2998 int i, size;
2999
3000 if (sc->sc_type >= WM_T_82544)
3001 mta_reg = WMREG_CORDOVA_MTA;
3002 else
3003 mta_reg = WMREG_MTA;
3004
3005 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3006
3007 if (ifp->if_flags & IFF_BROADCAST)
3008 sc->sc_rctl |= RCTL_BAM;
3009 if (ifp->if_flags & IFF_PROMISC) {
3010 sc->sc_rctl |= RCTL_UPE;
3011 goto allmulti;
3012 }
3013
3014 /*
3015 * Set the station address in the first RAL slot, and
3016 * clear the remaining slots.
3017 */
3018 if (sc->sc_type == WM_T_ICH8)
3019 		size = WM_RAL_TABSIZE_ICH8 - 1;
3020 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3021 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3022 || (sc->sc_type == WM_T_PCH_LPT))
3023 size = WM_RAL_TABSIZE_ICH8;
3024 else if (sc->sc_type == WM_T_82575)
3025 size = WM_RAL_TABSIZE_82575;
3026 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3027 size = WM_RAL_TABSIZE_82576;
3028 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3029 size = WM_RAL_TABSIZE_I350;
3030 else
3031 size = WM_RAL_TABSIZE;
3032 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3033 for (i = 1; i < size; i++)
3034 wm_set_ral(sc, NULL, i);
3035
3036 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3037 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3038 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
3039 size = WM_ICH8_MC_TABSIZE;
3040 else
3041 size = WM_MC_TABSIZE;
3042 /* Clear out the multicast table. */
3043 for (i = 0; i < size; i++)
3044 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3045
3046 ETHER_FIRST_MULTI(step, ec, enm);
3047 while (enm != NULL) {
3048 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3049 /*
3050 * We must listen to a range of multicast addresses.
3051 * For now, just accept all multicasts, rather than
3052 * trying to set only those filter bits needed to match
3053 * the range. (At this time, the only use of address
3054 * ranges is for IP multicast routing, for which the
3055 * range is big enough to require all bits set.)
3056 */
3057 goto allmulti;
3058 }
3059
3060 hash = wm_mchash(sc, enm->enm_addrlo);
3061
3062 reg = (hash >> 5);
3063 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3064 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3065 || (sc->sc_type == WM_T_PCH2)
3066 || (sc->sc_type == WM_T_PCH_LPT))
3067 reg &= 0x1f;
3068 else
3069 reg &= 0x7f;
3070 bit = hash & 0x1f;
3071
3072 hash = CSR_READ(sc, mta_reg + (reg << 2));
3073 hash |= 1U << bit;
3074
3075 /* XXX Hardware bug?? */
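		/*
		 * Writing some MTA words on the 82544 seems to clobber
		 * the neighbouring word, so save the previous word and
		 * rewrite it after the update.
		 */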
3076 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3077 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3078 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3079 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3080 } else
3081 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3082
3083 ETHER_NEXT_MULTI(step, enm);
3084 }
3085
3086 ifp->if_flags &= ~IFF_ALLMULTI;
3087 goto setit;
3088
3089 allmulti:
3090 ifp->if_flags |= IFF_ALLMULTI;
3091 sc->sc_rctl |= RCTL_MPE;
3092
3093 setit:
3094 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3095 }
3096
3097 /* Reset and init related */
3098
3099 static void
3100 wm_set_vlan(struct wm_softc *sc)
3101 {
3102 /* Deal with VLAN enables. */
3103 if (VLAN_ATTACHED(&sc->sc_ethercom))
3104 sc->sc_ctrl |= CTRL_VME;
3105 else
3106 sc->sc_ctrl &= ~CTRL_VME;
3107
3108 /* Write the control registers. */
3109 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3110 }
3111
3112 static void
3113 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3114 {
3115 uint32_t gcr;
3116 pcireg_t ctrl2;
3117
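	/*
	 * If a completion timeout is already configured, leave it alone.
	 * Otherwise, devices without capability version 2 take the value
	 * from GCR (10ms here), while version-2 devices program it
	 * through the PCIe Device Control 2 register (16ms here).
	 */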
3118 gcr = CSR_READ(sc, WMREG_GCR);
3119
3120 /* Only take action if timeout value is defaulted to 0 */
3121 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3122 goto out;
3123
3124 if ((gcr & GCR_CAP_VER2) == 0) {
3125 gcr |= GCR_CMPL_TMOUT_10MS;
3126 goto out;
3127 }
3128
3129 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3130 sc->sc_pcixe_capoff + PCIE_DCSR2);
3131 ctrl2 |= WM_PCIE_DCSR2_16MS;
3132 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3133 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3134
3135 out:
3136 /* Disable completion timeout resend */
3137 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3138
3139 CSR_WRITE(sc, WMREG_GCR, gcr);
3140 }
3141
3142 void
3143 wm_get_auto_rd_done(struct wm_softc *sc)
3144 {
3145 int i;
3146
3147 /* wait for eeprom to reload */
3148 switch (sc->sc_type) {
3149 case WM_T_82571:
3150 case WM_T_82572:
3151 case WM_T_82573:
3152 case WM_T_82574:
3153 case WM_T_82583:
3154 case WM_T_82575:
3155 case WM_T_82576:
3156 case WM_T_82580:
3157 case WM_T_I350:
3158 case WM_T_I354:
3159 case WM_T_I210:
3160 case WM_T_I211:
3161 case WM_T_80003:
3162 case WM_T_ICH8:
3163 case WM_T_ICH9:
3164 for (i = 0; i < 10; i++) {
3165 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3166 break;
3167 delay(1000);
3168 }
3169 if (i == 10) {
3170 log(LOG_ERR, "%s: auto read from eeprom failed to "
3171 "complete\n", device_xname(sc->sc_dev));
3172 }
3173 break;
3174 default:
3175 break;
3176 }
3177 }
3178
3179 void
3180 wm_lan_init_done(struct wm_softc *sc)
3181 {
3182 uint32_t reg = 0;
3183 int i;
3184
3185 /* wait for eeprom to reload */
3186 switch (sc->sc_type) {
3187 case WM_T_ICH10:
3188 case WM_T_PCH:
3189 case WM_T_PCH2:
3190 case WM_T_PCH_LPT:
3191 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3192 reg = CSR_READ(sc, WMREG_STATUS);
3193 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3194 break;
3195 delay(100);
3196 }
3197 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3198 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3199 "complete\n", device_xname(sc->sc_dev), __func__);
3200 }
3201 break;
3202 default:
3203 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3204 __func__);
3205 break;
3206 }
3207
3208 reg &= ~STATUS_LAN_INIT_DONE;
3209 CSR_WRITE(sc, WMREG_STATUS, reg);
3210 }
3211
3212 void
3213 wm_get_cfg_done(struct wm_softc *sc)
3214 {
3215 int mask;
3216 uint32_t reg;
3217 int i;
3218
3219 /* wait for eeprom to reload */
3220 switch (sc->sc_type) {
3221 case WM_T_82542_2_0:
3222 case WM_T_82542_2_1:
3223 /* null */
3224 break;
3225 case WM_T_82543:
3226 case WM_T_82544:
3227 case WM_T_82540:
3228 case WM_T_82545:
3229 case WM_T_82545_3:
3230 case WM_T_82546:
3231 case WM_T_82546_3:
3232 case WM_T_82541:
3233 case WM_T_82541_2:
3234 case WM_T_82547:
3235 case WM_T_82547_2:
3236 case WM_T_82573:
3237 case WM_T_82574:
3238 case WM_T_82583:
3239 /* generic */
3240 delay(10*1000);
3241 break;
3242 case WM_T_80003:
3243 case WM_T_82571:
3244 case WM_T_82572:
3245 case WM_T_82575:
3246 case WM_T_82576:
3247 case WM_T_82580:
3248 case WM_T_I350:
3249 case WM_T_I354:
3250 case WM_T_I210:
3251 case WM_T_I211:
3252 if (sc->sc_type == WM_T_82571) {
3253 /* Only 82571 shares port 0 */
3254 mask = EEMNGCTL_CFGDONE_0;
3255 } else
3256 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3257 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3258 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3259 break;
3260 delay(1000);
3261 }
3262 if (i >= WM_PHY_CFG_TIMEOUT) {
3263 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3264 device_xname(sc->sc_dev), __func__));
3265 }
3266 break;
3267 case WM_T_ICH8:
3268 case WM_T_ICH9:
3269 case WM_T_ICH10:
3270 case WM_T_PCH:
3271 case WM_T_PCH2:
3272 case WM_T_PCH_LPT:
3273 delay(10*1000);
3274 if (sc->sc_type >= WM_T_ICH10)
3275 wm_lan_init_done(sc);
3276 else
3277 wm_get_auto_rd_done(sc);
3278
3279 reg = CSR_READ(sc, WMREG_STATUS);
3280 if ((reg & STATUS_PHYRA) != 0)
3281 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3282 break;
3283 default:
3284 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3285 __func__);
3286 break;
3287 }
3288 }
3289
3290 /* Init hardware bits */
3291 void
3292 wm_initialize_hardware_bits(struct wm_softc *sc)
3293 {
3294 uint32_t tarc0, tarc1, reg;
3295
3296 /* For 82571 variant, 80003 and ICHs */
3297 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3298 || (sc->sc_type >= WM_T_80003)) {
3299
3300 /* Transmit Descriptor Control 0 */
3301 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3302 reg |= TXDCTL_COUNT_DESC;
3303 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3304
3305 /* Transmit Descriptor Control 1 */
3306 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3307 reg |= TXDCTL_COUNT_DESC;
3308 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3309
3310 /* TARC0 */
3311 tarc0 = CSR_READ(sc, WMREG_TARC0);
3312 switch (sc->sc_type) {
3313 case WM_T_82571:
3314 case WM_T_82572:
3315 case WM_T_82573:
3316 case WM_T_82574:
3317 case WM_T_82583:
3318 case WM_T_80003:
3319 /* Clear bits 30..27 */
3320 tarc0 &= ~__BITS(30, 27);
3321 break;
3322 default:
3323 break;
3324 }
3325
3326 switch (sc->sc_type) {
3327 case WM_T_82571:
3328 case WM_T_82572:
3329 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3330
3331 tarc1 = CSR_READ(sc, WMREG_TARC1);
3332 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3333 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3334 /* 8257[12] Errata No.7 */
3335 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
3336
3337 /* TARC1 bit 28 */
3338 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3339 tarc1 &= ~__BIT(28);
3340 else
3341 tarc1 |= __BIT(28);
3342 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3343
3344 /*
3345 * 8257[12] Errata No.13
3346 			 * Disable Dynamic Clock Gating.
3347 */
3348 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3349 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3350 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3351 break;
3352 case WM_T_82573:
3353 case WM_T_82574:
3354 case WM_T_82583:
3355 if ((sc->sc_type == WM_T_82574)
3356 || (sc->sc_type == WM_T_82583))
3357 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3358
3359 /* Extended Device Control */
3360 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3361 reg &= ~__BIT(23); /* Clear bit 23 */
3362 reg |= __BIT(22); /* Set bit 22 */
3363 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3364
3365 /* Device Control */
3366 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3367 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3368
3369 /* PCIe Control Register */
3370 if ((sc->sc_type == WM_T_82574)
3371 || (sc->sc_type == WM_T_82583)) {
3372 /*
3373 * Document says this bit must be set for
3374 * proper operation.
3375 */
3376 reg = CSR_READ(sc, WMREG_GCR);
3377 reg |= __BIT(22);
3378 CSR_WRITE(sc, WMREG_GCR, reg);
3379
3380 /*
3381 				 * Apply a workaround for a hardware erratum
3382 				 * documented in the errata docs. This fixes an
3383 				 * issue where error-prone or unreliable PCIe
3384 				 * completions occur, particularly with ASPM
3385 				 * enabled. Without the fix, the issue can
3386 				 * cause Tx timeouts.
3387 */
3388 reg = CSR_READ(sc, WMREG_GCR2);
3389 reg |= __BIT(0);
3390 CSR_WRITE(sc, WMREG_GCR2, reg);
3391 }
3392 break;
3393 case WM_T_80003:
3394 /* TARC0 */
3395 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3396 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3397 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3398
3399 /* TARC1 bit 28 */
3400 tarc1 = CSR_READ(sc, WMREG_TARC1);
3401 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3402 tarc1 &= ~__BIT(28);
3403 else
3404 tarc1 |= __BIT(28);
3405 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3406 break;
3407 case WM_T_ICH8:
3408 case WM_T_ICH9:
3409 case WM_T_ICH10:
3410 case WM_T_PCH:
3411 case WM_T_PCH2:
3412 case WM_T_PCH_LPT:
3413 /* TARC 0 */
3414 if (sc->sc_type == WM_T_ICH8) {
3415 /* Set TARC0 bits 29 and 28 */
3416 tarc0 |= __BITS(29, 28);
3417 }
3418 /* Set TARC0 bits 23,24,26,27 */
3419 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3420
3421 /* CTRL_EXT */
3422 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3423 reg |= __BIT(22); /* Set bit 22 */
3424 /*
3425 * Enable PHY low-power state when MAC is at D3
3426 * w/o WoL
3427 */
3428 if (sc->sc_type >= WM_T_PCH)
3429 reg |= CTRL_EXT_PHYPDEN;
3430 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3431
3432 /* TARC1 */
3433 tarc1 = CSR_READ(sc, WMREG_TARC1);
3434 /* bit 28 */
3435 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3436 tarc1 &= ~__BIT(28);
3437 else
3438 tarc1 |= __BIT(28);
3439 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3440 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3441
3442 /* Device Status */
3443 if (sc->sc_type == WM_T_ICH8) {
3444 reg = CSR_READ(sc, WMREG_STATUS);
3445 reg &= ~__BIT(31);
3446 CSR_WRITE(sc, WMREG_STATUS, reg);
3447
3448 }
3449
3450 /*
3451 			 * To work around a descriptor data corruption issue
3452 			 * during NFS v2 UDP traffic, just disable the NFS
3453 			 * filtering capability.
3454 */
3455 reg = CSR_READ(sc, WMREG_RFCTL);
3456 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3457 CSR_WRITE(sc, WMREG_RFCTL, reg);
3458 break;
3459 default:
3460 break;
3461 }
3462 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3463
3464 /*
3465 * 8257[12] Errata No.52 and some others.
3466 * Avoid RSS Hash Value bug.
3467 */
3468 switch (sc->sc_type) {
3469 case WM_T_82571:
3470 case WM_T_82572:
3471 case WM_T_82573:
3472 case WM_T_80003:
3473 case WM_T_ICH8:
3474 reg = CSR_READ(sc, WMREG_RFCTL);
3475 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
3476 CSR_WRITE(sc, WMREG_RFCTL, reg);
3477 break;
3478 default:
3479 break;
3480 }
3481 }
3482 }
3483
3484 static uint32_t
3485 wm_rxpbs_adjust_82580(uint32_t val)
3486 {
3487 uint32_t rv = 0;
3488
3489 if (val < __arraycount(wm_82580_rxpbs_table))
3490 rv = wm_82580_rxpbs_table[val];
3491
3492 return rv;
3493 }
3494
3495 /*
3496 * wm_reset:
3497 *
3498 * Reset the i82542 chip.
3499 */
3500 static void
3501 wm_reset(struct wm_softc *sc)
3502 {
3503 int phy_reset = 0;
3504 int error = 0;
3505 uint32_t reg, mask;
3506
3507 /*
3508 * Allocate on-chip memory according to the MTU size.
3509 * The Packet Buffer Allocation register must be written
3510 * before the chip is reset.
3511 */
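	/*
	 * The value written to PBA below is the receive share of the
	 * packet buffer; on the 82547 the remainder of the (apparently
	 * 40KB) buffer becomes the Tx FIFO used by the stall work-around
	 * state set up here.
	 */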
3512 switch (sc->sc_type) {
3513 case WM_T_82547:
3514 case WM_T_82547_2:
3515 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3516 PBA_22K : PBA_30K;
3517 sc->sc_txfifo_head = 0;
3518 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3519 sc->sc_txfifo_size =
3520 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3521 sc->sc_txfifo_stall = 0;
3522 break;
3523 case WM_T_82571:
3524 case WM_T_82572:
3525 	case WM_T_82575: /* XXX need special handling for jumbo frames */
3526 case WM_T_80003:
3527 sc->sc_pba = PBA_32K;
3528 break;
3529 case WM_T_82573:
3530 sc->sc_pba = PBA_12K;
3531 break;
3532 case WM_T_82574:
3533 case WM_T_82583:
3534 sc->sc_pba = PBA_20K;
3535 break;
3536 case WM_T_82576:
3537 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3538 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3539 break;
3540 case WM_T_82580:
3541 case WM_T_I350:
3542 case WM_T_I354:
3543 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3544 break;
3545 case WM_T_I210:
3546 case WM_T_I211:
3547 sc->sc_pba = PBA_34K;
3548 break;
3549 case WM_T_ICH8:
3550 /* Workaround for a bit corruption issue in FIFO memory */
3551 sc->sc_pba = PBA_8K;
3552 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3553 break;
3554 case WM_T_ICH9:
3555 case WM_T_ICH10:
3556 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3557 PBA_14K : PBA_10K;
3558 break;
3559 case WM_T_PCH:
3560 case WM_T_PCH2:
3561 case WM_T_PCH_LPT:
3562 sc->sc_pba = PBA_26K;
3563 break;
3564 default:
3565 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3566 PBA_40K : PBA_48K;
3567 break;
3568 }
3569 /*
3570 * Only old or non-multiqueue devices have the PBA register
3571 * XXX Need special handling for 82575.
3572 */
3573 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3574 || (sc->sc_type == WM_T_82575))
3575 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3576
3577 /* Prevent the PCI-E bus from sticking */
3578 if (sc->sc_flags & WM_F_PCIE) {
3579 int timeout = 800;
3580
3581 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3582 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3583
3584 while (timeout--) {
3585 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3586 == 0)
3587 break;
3588 delay(100);
3589 }
3590 }
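	/*
	 * The loop above implements the usual GIO master disable
	 * handshake: request the disable via CTRL_GIO_M_DIS, then poll
	 * STATUS_GIO_M_ENA until the hardware reports that all pending
	 * PCIe master requests have drained.  The bound works out to
	 * 800 iterations * 100us = 80ms before we give up and reset
	 * anyway.
	 */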
3591
3592	/* Set the completion timeout for the interface */
3593 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3594 || (sc->sc_type == WM_T_82580)
3595 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3596 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3597 wm_set_pcie_completion_timeout(sc);
3598
3599 /* Clear interrupt */
3600 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3601
3602 /* Stop the transmit and receive processes. */
3603 CSR_WRITE(sc, WMREG_RCTL, 0);
3604 sc->sc_rctl &= ~RCTL_EN;
3605 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3606 CSR_WRITE_FLUSH(sc);
3607
3608 /* XXX set_tbi_sbp_82543() */
3609
3610 delay(10*1000);
3611
3612 /* Must acquire the MDIO ownership before MAC reset */
3613 switch (sc->sc_type) {
3614 case WM_T_82573:
3615 case WM_T_82574:
3616 case WM_T_82583:
3617 error = wm_get_hw_semaphore_82573(sc);
3618 break;
3619 default:
3620 break;
3621 }
3622
3623 /*
3624 * 82541 Errata 29? & 82547 Errata 28?
3625 * See also the description about PHY_RST bit in CTRL register
3626 * in 8254x_GBe_SDM.pdf.
3627 */
3628 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3629 CSR_WRITE(sc, WMREG_CTRL,
3630 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3631 CSR_WRITE_FLUSH(sc);
3632 delay(5000);
3633 }
3634
3635 switch (sc->sc_type) {
3636 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3637 case WM_T_82541:
3638 case WM_T_82541_2:
3639 case WM_T_82547:
3640 case WM_T_82547_2:
3641 /*
3642 * On some chipsets, a reset through a memory-mapped write
3643 * cycle can cause the chip to reset before completing the
3644		 * write cycle. This causes major headaches that can be
3645 * avoided by issuing the reset via indirect register writes
3646 * through I/O space.
3647 *
3648 * So, if we successfully mapped the I/O BAR at attach time,
3649 * use that. Otherwise, try our luck with a memory-mapped
3650 * reset.
3651 */
3652 if (sc->sc_flags & WM_F_IOH_VALID)
3653 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3654 else
3655 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3656 break;
3657 case WM_T_82545_3:
3658 case WM_T_82546_3:
3659 /* Use the shadow control register on these chips. */
3660 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3661 break;
3662 case WM_T_80003:
3663 mask = swfwphysem[sc->sc_funcid];
3664 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3665 wm_get_swfw_semaphore(sc, mask);
3666 CSR_WRITE(sc, WMREG_CTRL, reg);
3667 wm_put_swfw_semaphore(sc, mask);
3668 break;
3669 case WM_T_ICH8:
3670 case WM_T_ICH9:
3671 case WM_T_ICH10:
3672 case WM_T_PCH:
3673 case WM_T_PCH2:
3674 case WM_T_PCH_LPT:
3675 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3676 if (wm_check_reset_block(sc) == 0) {
3677 /*
3678 * Gate automatic PHY configuration by hardware on
3679 * non-managed 82579
3680 */
3681 if ((sc->sc_type == WM_T_PCH2)
3682 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3683 != 0))
3684 wm_gate_hw_phy_config_ich8lan(sc, 1);
3685
3687 reg |= CTRL_PHY_RESET;
3688 phy_reset = 1;
3689 }
3690 wm_get_swfwhw_semaphore(sc);
3691 CSR_WRITE(sc, WMREG_CTRL, reg);
3692		/* Don't insert a completion barrier during reset */
3693 delay(20*1000);
3694 wm_put_swfwhw_semaphore(sc);
3695 break;
3696 case WM_T_82580:
3697 case WM_T_I350:
3698 case WM_T_I354:
3699 case WM_T_I210:
3700 case WM_T_I211:
3701 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3702 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3703 CSR_WRITE_FLUSH(sc);
3704 delay(5000);
3705 break;
3706 case WM_T_82542_2_0:
3707 case WM_T_82542_2_1:
3708 case WM_T_82543:
3709 case WM_T_82540:
3710 case WM_T_82545:
3711 case WM_T_82546:
3712 case WM_T_82571:
3713 case WM_T_82572:
3714 case WM_T_82573:
3715 case WM_T_82574:
3716 case WM_T_82575:
3717 case WM_T_82576:
3718 case WM_T_82583:
3719 default:
3720 /* Everything else can safely use the documented method. */
3721 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3722 break;
3723 }
3724
3725 /* Must release the MDIO ownership after MAC reset */
3726 switch (sc->sc_type) {
3727 case WM_T_82573:
3728 case WM_T_82574:
3729 case WM_T_82583:
3730 if (error == 0)
3731 wm_put_hw_semaphore_82573(sc);
3732 break;
3733 default:
3734 break;
3735 }
3736
3737 if (phy_reset != 0)
3738 wm_get_cfg_done(sc);
3739
3740 /* reload EEPROM */
3741 switch (sc->sc_type) {
3742 case WM_T_82542_2_0:
3743 case WM_T_82542_2_1:
3744 case WM_T_82543:
3745 case WM_T_82544:
3746 delay(10);
3747 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3748 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3749 CSR_WRITE_FLUSH(sc);
3750 delay(2000);
3751 break;
3752 case WM_T_82540:
3753 case WM_T_82545:
3754 case WM_T_82545_3:
3755 case WM_T_82546:
3756 case WM_T_82546_3:
3757 delay(5*1000);
3758 /* XXX Disable HW ARPs on ASF enabled adapters */
3759 break;
3760 case WM_T_82541:
3761 case WM_T_82541_2:
3762 case WM_T_82547:
3763 case WM_T_82547_2:
3764 delay(20000);
3765 /* XXX Disable HW ARPs on ASF enabled adapters */
3766 break;
3767 case WM_T_82571:
3768 case WM_T_82572:
3769 case WM_T_82573:
3770 case WM_T_82574:
3771 case WM_T_82583:
3772 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3773 delay(10);
3774 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3775 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3776 CSR_WRITE_FLUSH(sc);
3777 }
3778 /* check EECD_EE_AUTORD */
3779 wm_get_auto_rd_done(sc);
3780 /*
3781		 * PHY configuration from NVM starts just after EECD_AUTO_RD
3782 * is set.
3783 */
3784 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3785 || (sc->sc_type == WM_T_82583))
3786 delay(25*1000);
3787 break;
3788 case WM_T_82575:
3789 case WM_T_82576:
3790 case WM_T_82580:
3791 case WM_T_I350:
3792 case WM_T_I354:
3793 case WM_T_I210:
3794 case WM_T_I211:
3795 case WM_T_80003:
3796 /* check EECD_EE_AUTORD */
3797 wm_get_auto_rd_done(sc);
3798 break;
3799 case WM_T_ICH8:
3800 case WM_T_ICH9:
3801 case WM_T_ICH10:
3802 case WM_T_PCH:
3803 case WM_T_PCH2:
3804 case WM_T_PCH_LPT:
3805 break;
3806 default:
3807 panic("%s: unknown type\n", __func__);
3808 }
3809
3810 /* Check whether EEPROM is present or not */
3811 switch (sc->sc_type) {
3812 case WM_T_82575:
3813 case WM_T_82576:
3814 case WM_T_82580:
3815 case WM_T_I350:
3816 case WM_T_I354:
3817 case WM_T_ICH8:
3818 case WM_T_ICH9:
3819 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3820 /* Not found */
3821 sc->sc_flags |= WM_F_EEPROM_INVALID;
3822 if (sc->sc_type == WM_T_82575)
3823 wm_reset_init_script_82575(sc);
3824 }
3825 break;
3826 default:
3827 break;
3828 }
3829
3830 if ((sc->sc_type == WM_T_82580)
3831 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3832 /* clear global device reset status bit */
3833 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3834 }
3835
3836 /* Clear any pending interrupt events. */
3837 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3838 reg = CSR_READ(sc, WMREG_ICR);
3839
3840 /* reload sc_ctrl */
3841 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3842
3843 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
3844 wm_set_eee_i350(sc);
3845
3846 /* dummy read from WUC */
3847 if (sc->sc_type == WM_T_PCH)
3848 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3849 /*
3850 * For PCH, this write will make sure that any noise will be detected
3851 * as a CRC error and be dropped rather than show up as a bad packet
3852	 * as a CRC error and be dropped rather than show up as a bad packet
3853	 * to the DMA engine.
3854 if (sc->sc_type == WM_T_PCH)
3855 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3856
3857 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3858 CSR_WRITE(sc, WMREG_WUC, 0);
3859
3860 wm_reset_mdicnfg_82580(sc);
3861 }
3862
3863 /*
3864 * wm_add_rxbuf:
3865 *
3866	 *	Add a receive buffer to the indicated descriptor.
3867 */
3868 static int
3869 wm_add_rxbuf(struct wm_softc *sc, int idx)
3870 {
3871 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3872 struct mbuf *m;
3873 int error;
3874
3875 KASSERT(WM_RX_LOCKED(sc));
3876
3877 MGETHDR(m, M_DONTWAIT, MT_DATA);
3878 if (m == NULL)
3879 return ENOBUFS;
3880
3881 MCLGET(m, M_DONTWAIT);
3882 if ((m->m_flags & M_EXT) == 0) {
3883 m_freem(m);
3884 return ENOBUFS;
3885 }
3886
3887 if (rxs->rxs_mbuf != NULL)
3888 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3889
3890 rxs->rxs_mbuf = m;
3891
3892 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3893 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3894 BUS_DMA_READ|BUS_DMA_NOWAIT);
3895 if (error) {
3896 /* XXX XXX XXX */
3897 aprint_error_dev(sc->sc_dev,
3898 "unable to load rx DMA map %d, error = %d\n",
3899 idx, error);
3900 panic("wm_add_rxbuf");
3901 }
3902
3903 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3904 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3905
3906 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3907 if ((sc->sc_rctl & RCTL_EN) != 0)
3908 WM_INIT_RXDESC(sc, idx);
3909 } else
3910 WM_INIT_RXDESC(sc, idx);
3911
3912 return 0;
3913 }
3914
3915 /*
3916 * wm_rxdrain:
3917 *
3918 * Drain the receive queue.
3919 */
3920 static void
3921 wm_rxdrain(struct wm_softc *sc)
3922 {
3923 struct wm_rxsoft *rxs;
3924 int i;
3925
3926 KASSERT(WM_RX_LOCKED(sc));
3927
3928 for (i = 0; i < WM_NRXDESC; i++) {
3929 rxs = &sc->sc_rxsoft[i];
3930 if (rxs->rxs_mbuf != NULL) {
3931 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3932 m_freem(rxs->rxs_mbuf);
3933 rxs->rxs_mbuf = NULL;
3934 }
3935 }
3936 }
3937
3938 /*
3939 * wm_init: [ifnet interface function]
3940 *
3941 * Initialize the interface.
3942 */
3943 static int
3944 wm_init(struct ifnet *ifp)
3945 {
3946 struct wm_softc *sc = ifp->if_softc;
3947 int ret;
3948
3949 WM_BOTH_LOCK(sc);
3950 ret = wm_init_locked(ifp);
3951 WM_BOTH_UNLOCK(sc);
3952
3953 return ret;
3954 }
3955
3956 static int
3957 wm_init_locked(struct ifnet *ifp)
3958 {
3959 struct wm_softc *sc = ifp->if_softc;
3960 struct wm_rxsoft *rxs;
3961 int i, j, trynum, error = 0;
3962 uint32_t reg;
3963
3964 KASSERT(WM_BOTH_LOCKED(sc));
3965 /*
3966	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3967	 * There is a small but measurable benefit to avoiding the adjustment
3968 * of the descriptor so that the headers are aligned, for normal mtu,
3969 * on such platforms. One possibility is that the DMA itself is
3970 * slightly more efficient if the front of the entire packet (instead
3971 * of the front of the headers) is aligned.
3972 *
3973 * Note we must always set align_tweak to 0 if we are using
3974 * jumbo frames.
3975 */
3976 #ifdef __NO_STRICT_ALIGNMENT
3977 sc->sc_align_tweak = 0;
3978 #else
3979 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3980 sc->sc_align_tweak = 0;
3981 else
3982 sc->sc_align_tweak = 2;
3983 #endif /* __NO_STRICT_ALIGNMENT */
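	/*
	 * Example of the tweak: with sc_align_tweak = 2, the 14-byte
	 * Ethernet header starts 2 bytes into the receive buffer, which
	 * leaves the IP header that follows it on a 4-byte boundary
	 * (2 + 14 = 16) for strict-alignment platforms.
	 */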
3984
3985 /* Cancel any pending I/O. */
3986 wm_stop_locked(ifp, 0);
3987
3988 /* update statistics before reset */
3989 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3990 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3991
3992 /* Reset the chip to a known state. */
3993 wm_reset(sc);
3994
3995 switch (sc->sc_type) {
3996 case WM_T_82571:
3997 case WM_T_82572:
3998 case WM_T_82573:
3999 case WM_T_82574:
4000 case WM_T_82583:
4001 case WM_T_80003:
4002 case WM_T_ICH8:
4003 case WM_T_ICH9:
4004 case WM_T_ICH10:
4005 case WM_T_PCH:
4006 case WM_T_PCH2:
4007 case WM_T_PCH_LPT:
4008 if (wm_check_mng_mode(sc) != 0)
4009 wm_get_hw_control(sc);
4010 break;
4011 default:
4012 break;
4013 }
4014
4015 /* Init hardware bits */
4016 wm_initialize_hardware_bits(sc);
4017
4018 /* Reset the PHY. */
4019 if (sc->sc_flags & WM_F_HAS_MII)
4020 wm_gmii_reset(sc);
4021
4022 /* Calculate (E)ITR value */
4023 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4024 sc->sc_itr = 450; /* For EITR */
4025 } else if (sc->sc_type >= WM_T_82543) {
4026 /*
4027 * Set up the interrupt throttling register (units of 256ns)
4028 * Note that a footnote in Intel's documentation says this
4029 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4030 * or 10Mbit mode. Empirically, it appears to be the case
4031 * that that is also true for the 1024ns units of the other
4032 * interrupt-related timer registers -- so, really, we ought
4033 * to divide this value by 4 when the link speed is low.
4034 *
4035 * XXX implement this division at link speed change!
4036 */
4037
4038 /*
4039 * For N interrupts/sec, set this value to:
4040 * 1000000000 / (N * 256). Note that we set the
4041 * absolute and packet timer values to this value
4042 * divided by 4 to get "simple timer" behavior.
4043 */
4044
4045 sc->sc_itr = 1500; /* 2604 ints/sec */
4046 }
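	/*
	 * Checking the arithmetic on the value above: sc_itr = 1500 in
	 * 256ns units is an interval of 384us, i.e. roughly
	 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec, matching
	 * the comment.  The "simple timer" registers (TIDV/TADV below)
	 * count in 1024ns units, so the same interval is 1500 / 4 = 375.
	 */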
4047
4048 /* Initialize the transmit descriptor ring. */
4049 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4050 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4051 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4052 sc->sc_txfree = WM_NTXDESC(sc);
4053 sc->sc_txnext = 0;
4054
4055 if (sc->sc_type < WM_T_82543) {
4056 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4057 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4058 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4059 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4060 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4061 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4062 } else {
4063 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4064 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4065 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4066 CSR_WRITE(sc, WMREG_TDH, 0);
4067
4068 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4069 /*
4070 * Don't write TDT before TCTL.EN is set.
4071 * See the document.
4072 */
4073 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_QUEUE_ENABLE
4074 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4075 | TXDCTL_WTHRESH(0));
4076 else {
4077 /* ITR / 4 */
4078 CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
4079 if (sc->sc_type >= WM_T_82540) {
4080 /* should be same */
4081 CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
4082 }
4083
4084 CSR_WRITE(sc, WMREG_TDT, 0);
4085 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) |
4086 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4087 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4088 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4089 }
4090 }
4091
4092 /* Initialize the transmit job descriptors. */
4093 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4094 sc->sc_txsoft[i].txs_mbuf = NULL;
4095 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4096 sc->sc_txsnext = 0;
4097 sc->sc_txsdirty = 0;
4098
4099 /*
4100 * Initialize the receive descriptor and receive job
4101 * descriptor rings.
4102 */
4103 if (sc->sc_type < WM_T_82543) {
4104 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4105 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4106 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4107 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4108 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4109 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4110
4111 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4112 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4113 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4114 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4115 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4116 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4117 } else {
4118 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4119 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4120 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4121
4122 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4123 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4124				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
4125 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4126 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4127 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4128 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4129 | RXDCTL_WTHRESH(1));
4130 } else {
4131 CSR_WRITE(sc, WMREG_RDH, 0);
4132 CSR_WRITE(sc, WMREG_RDT, 0);
4133 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4134 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
4135 }
4136 }
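	/*
	 * A note on the SRRCTL write above: the packet buffer size
	 * field counts in units of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes,
	 * which is why the code panics if MCLBYTES is not a multiple of
	 * that unit.  Assuming the usual 1KB granularity, the default
	 * MCLBYTES of 2048 programs the field with 2.
	 */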
4137 for (i = 0; i < WM_NRXDESC; i++) {
4138 rxs = &sc->sc_rxsoft[i];
4139 if (rxs->rxs_mbuf == NULL) {
4140 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4141 log(LOG_ERR, "%s: unable to allocate or map "
4142 "rx buffer %d, error = %d\n",
4143 device_xname(sc->sc_dev), i, error);
4144 /*
4145 * XXX Should attempt to run with fewer receive
4146 * XXX buffers instead of just failing.
4147 */
4148 wm_rxdrain(sc);
4149 goto out;
4150 }
4151 } else {
4152 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4153 WM_INIT_RXDESC(sc, i);
4154 /*
4155			 * For 82575 and newer devices, the RX descriptors
4156 * must be initialized after the setting of RCTL.EN in
4157 * wm_set_filter()
4158 */
4159 }
4160 }
4161 sc->sc_rxptr = 0;
4162 sc->sc_rxdiscard = 0;
4163 WM_RXCHAIN_RESET(sc);
4164
4165 /*
4166 * Clear out the VLAN table -- we don't use it (yet).
4167 */
4168 CSR_WRITE(sc, WMREG_VET, 0);
4169 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4170 trynum = 10; /* Due to hw errata */
4171 else
4172 trynum = 1;
4173 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4174 for (j = 0; j < trynum; j++)
4175 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4176
4177 /*
4178 * Set up flow-control parameters.
4179 *
4180 * XXX Values could probably stand some tuning.
4181 */
4182 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4183 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4184 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4185 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4186 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4187 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4188 }
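	/*
	 * FCAL/FCAH/FCT tell the MAC how to recognize incoming 802.3x
	 * pause frames: the constants correspond to the reserved
	 * multicast destination address 01:80:c2:00:00:01 and the MAC
	 * control ethertype 0x8808.
	 */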
4189
4190 sc->sc_fcrtl = FCRTL_DFLT;
4191 if (sc->sc_type < WM_T_82543) {
4192 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4193 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4194 } else {
4195 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4196 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4197 }
4198
4199 if (sc->sc_type == WM_T_80003)
4200 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4201 else
4202 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4203
4204 /* Writes the control register. */
4205 wm_set_vlan(sc);
4206
4207 if (sc->sc_flags & WM_F_HAS_MII) {
4208 int val;
4209
4210 switch (sc->sc_type) {
4211 case WM_T_80003:
4212 case WM_T_ICH8:
4213 case WM_T_ICH9:
4214 case WM_T_ICH10:
4215 case WM_T_PCH:
4216 case WM_T_PCH2:
4217 case WM_T_PCH_LPT:
4218 /*
4219			 * Set the MAC to wait the maximum time between each
4220 * iteration and increase the max iterations when
4221 * polling the phy; this fixes erroneous timeouts at
4222 * 10Mbps.
4223 */
4224 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4225 0xFFFF);
4226 val = wm_kmrn_readreg(sc,
4227 KUMCTRLSTA_OFFSET_INB_PARAM);
4228 val |= 0x3F;
4229 wm_kmrn_writereg(sc,
4230 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4231 break;
4232 default:
4233 break;
4234 }
4235
4236 if (sc->sc_type == WM_T_80003) {
4237 val = CSR_READ(sc, WMREG_CTRL_EXT);
4238 val &= ~CTRL_EXT_LINK_MODE_MASK;
4239 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4240
4241 /* Bypass RX and TX FIFO's */
4242 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4243 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4244 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4245 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4246 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4247 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4248 }
4249 }
4250 #if 0
4251 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4252 #endif
4253
4254 /* Set up checksum offload parameters. */
4255 reg = CSR_READ(sc, WMREG_RXCSUM);
4256 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4257 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4258 reg |= RXCSUM_IPOFL;
4259 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4260 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4261 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4262 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4263 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4264
4265 /* Set up the interrupt registers. */
4266 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4267 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4268 ICR_RXO | ICR_RXT0;
4269 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4270
4271 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4272 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4273 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4274 reg = CSR_READ(sc, WMREG_KABGTXD);
4275 reg |= KABGTXD_BGSQLBIAS;
4276 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4277 }
4278
4279 /* Set up the inter-packet gap. */
4280 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4281
4282 if (sc->sc_type >= WM_T_82543) {
4283 /*
4284 * XXX 82574 has both ITR and EITR. SET EITR when we use
4285 * the multi queue function with MSI-X.
4286 */
4287 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4288 CSR_WRITE(sc, WMREG_EITR(0), sc->sc_itr);
4289 else
4290 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4291 }
4292
4293 /* Set the VLAN ethernetype. */
4294 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4295
4296 /*
4297 * Set up the transmit control register; we start out with
4298	 * a collision distance suitable for FDX, but update it when
4299 * we resolve the media type.
4300 */
4301 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4302 | TCTL_CT(TX_COLLISION_THRESHOLD)
4303 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4304 if (sc->sc_type >= WM_T_82571)
4305 sc->sc_tctl |= TCTL_MULR;
4306 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4307
4308 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4309 /* Write TDT after TCTL.EN is set. See the document. */
4310 CSR_WRITE(sc, WMREG_TDT, 0);
4311 }
4312
4313 if (sc->sc_type == WM_T_80003) {
4314 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4315 reg &= ~TCTL_EXT_GCEX_MASK;
4316 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4317 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4318 }
4319
4320 /* Set the media. */
4321 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4322 goto out;
4323
4324 /* Configure for OS presence */
4325 wm_init_manageability(sc);
4326
4327 /*
4328 * Set up the receive control register; we actually program
4329 * the register when we set the receive filter. Use multicast
4330 * address offset type 0.
4331 *
4332 * Only the i82544 has the ability to strip the incoming
4333 * CRC, so we don't enable that feature.
4334 */
4335 sc->sc_mchash_type = 0;
4336 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4337 | RCTL_MO(sc->sc_mchash_type);
4338
4339 /*
4340 * The I350 has a bug where it always strips the CRC whether
4341	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
4342 */
4343 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4344 || (sc->sc_type == WM_T_I210))
4345 sc->sc_rctl |= RCTL_SECRC;
4346
4347 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4348 && (ifp->if_mtu > ETHERMTU)) {
4349 sc->sc_rctl |= RCTL_LPE;
4350 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4351 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4352 }
4353
4354 if (MCLBYTES == 2048) {
4355 sc->sc_rctl |= RCTL_2k;
4356 } else {
4357 if (sc->sc_type >= WM_T_82543) {
4358 switch (MCLBYTES) {
4359 case 4096:
4360 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4361 break;
4362 case 8192:
4363 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4364 break;
4365 case 16384:
4366 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4367 break;
4368 default:
4369 panic("wm_init: MCLBYTES %d unsupported",
4370 MCLBYTES);
4371 break;
4372 }
4373 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4374 }
4375
4376 /* Set the receive filter. */
4377 wm_set_filter(sc);
4378
4379 /* Enable ECC */
4380 switch (sc->sc_type) {
4381 case WM_T_82571:
4382 reg = CSR_READ(sc, WMREG_PBA_ECC);
4383 reg |= PBA_ECC_CORR_EN;
4384 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4385 break;
4386 case WM_T_PCH_LPT:
4387 reg = CSR_READ(sc, WMREG_PBECCSTS);
4388 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4389 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4390
4391 reg = CSR_READ(sc, WMREG_CTRL);
4392 reg |= CTRL_MEHE;
4393 CSR_WRITE(sc, WMREG_CTRL, reg);
4394 break;
4395 default:
4396 break;
4397 }
4398
4399	/* On 82575 and later, set RDT only if RX is enabled */
4400 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4401 for (i = 0; i < WM_NRXDESC; i++)
4402 WM_INIT_RXDESC(sc, i);
4403
4404 sc->sc_stopping = false;
4405
4406 /* Start the one second link check clock. */
4407 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4408
4409 /* ...all done! */
4410 ifp->if_flags |= IFF_RUNNING;
4411 ifp->if_flags &= ~IFF_OACTIVE;
4412
4413 out:
4414 sc->sc_if_flags = ifp->if_flags;
4415 if (error)
4416 log(LOG_ERR, "%s: interface not running\n",
4417 device_xname(sc->sc_dev));
4418 return error;
4419 }
4420
4421 /*
4422 * wm_stop: [ifnet interface function]
4423 *
4424 * Stop transmission on the interface.
4425 */
4426 static void
4427 wm_stop(struct ifnet *ifp, int disable)
4428 {
4429 struct wm_softc *sc = ifp->if_softc;
4430
4431 WM_BOTH_LOCK(sc);
4432 wm_stop_locked(ifp, disable);
4433 WM_BOTH_UNLOCK(sc);
4434 }
4435
4436 static void
4437 wm_stop_locked(struct ifnet *ifp, int disable)
4438 {
4439 struct wm_softc *sc = ifp->if_softc;
4440 struct wm_txsoft *txs;
4441 int i;
4442
4443 KASSERT(WM_BOTH_LOCKED(sc));
4444
4445 sc->sc_stopping = true;
4446
4447 /* Stop the one second clock. */
4448 callout_stop(&sc->sc_tick_ch);
4449
4450 /* Stop the 82547 Tx FIFO stall check timer. */
4451 if (sc->sc_type == WM_T_82547)
4452 callout_stop(&sc->sc_txfifo_ch);
4453
4454 if (sc->sc_flags & WM_F_HAS_MII) {
4455 /* Down the MII. */
4456 mii_down(&sc->sc_mii);
4457 } else {
4458 #if 0
4459 /* Should we clear PHY's status properly? */
4460 wm_reset(sc);
4461 #endif
4462 }
4463
4464 /* Stop the transmit and receive processes. */
4465 CSR_WRITE(sc, WMREG_TCTL, 0);
4466 CSR_WRITE(sc, WMREG_RCTL, 0);
4467 sc->sc_rctl &= ~RCTL_EN;
4468
4469 /*
4470 * Clear the interrupt mask to ensure the device cannot assert its
4471 * interrupt line.
4472 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4473 * any currently pending or shared interrupt.
4474 */
4475 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4476 sc->sc_icr = 0;
4477
4478 /* Release any queued transmit buffers. */
4479 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4480 txs = &sc->sc_txsoft[i];
4481 if (txs->txs_mbuf != NULL) {
4482 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4483 m_freem(txs->txs_mbuf);
4484 txs->txs_mbuf = NULL;
4485 }
4486 }
4487
4488 /* Mark the interface as down and cancel the watchdog timer. */
4489 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4490 ifp->if_timer = 0;
4491
4492 if (disable)
4493 wm_rxdrain(sc);
4494
4495 #if 0 /* notyet */
4496 if (sc->sc_type >= WM_T_82544)
4497 CSR_WRITE(sc, WMREG_WUC, 0);
4498 #endif
4499 }
4500
4501 /*
4502 * wm_tx_offload:
4503 *
4504 * Set up TCP/IP checksumming parameters for the
4505 * specified packet.
4506 */
4507 static int
4508 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4509 uint8_t *fieldsp)
4510 {
4511 struct mbuf *m0 = txs->txs_mbuf;
4512 struct livengood_tcpip_ctxdesc *t;
4513 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4514 uint32_t ipcse;
4515 struct ether_header *eh;
4516 int offset, iphl;
4517 uint8_t fields;
4518
4519 /*
4520 * XXX It would be nice if the mbuf pkthdr had offset
4521 * fields for the protocol headers.
4522 */
4523
4524 eh = mtod(m0, struct ether_header *);
4525 switch (htons(eh->ether_type)) {
4526 case ETHERTYPE_IP:
4527 case ETHERTYPE_IPV6:
4528 offset = ETHER_HDR_LEN;
4529 break;
4530
4531 case ETHERTYPE_VLAN:
4532 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4533 break;
4534
4535 default:
4536 /*
4537 * Don't support this protocol or encapsulation.
4538 */
4539 *fieldsp = 0;
4540 *cmdp = 0;
4541 return 0;
4542 }
4543
4544 if ((m0->m_pkthdr.csum_flags &
4545 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4546 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4547 } else {
4548 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4549 }
4550 ipcse = offset + iphl - 1;
4551
4552 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4553 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4554 seg = 0;
4555 fields = 0;
4556
4557 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4558 int hlen = offset + iphl;
4559 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4560
4561 if (__predict_false(m0->m_len <
4562 (hlen + sizeof(struct tcphdr)))) {
4563 /*
4564 * TCP/IP headers are not in the first mbuf; we need
4565 * to do this the slow and painful way. Let's just
4566 * hope this doesn't happen very often.
4567 */
4568 struct tcphdr th;
4569
4570 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4571
4572 m_copydata(m0, hlen, sizeof(th), &th);
4573 if (v4) {
4574 struct ip ip;
4575
4576 m_copydata(m0, offset, sizeof(ip), &ip);
4577 ip.ip_len = 0;
4578 m_copyback(m0,
4579 offset + offsetof(struct ip, ip_len),
4580 sizeof(ip.ip_len), &ip.ip_len);
4581 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4582 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4583 } else {
4584 struct ip6_hdr ip6;
4585
4586 m_copydata(m0, offset, sizeof(ip6), &ip6);
4587 ip6.ip6_plen = 0;
4588 m_copyback(m0,
4589 offset + offsetof(struct ip6_hdr, ip6_plen),
4590 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4591 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4592 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4593 }
4594 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4595 sizeof(th.th_sum), &th.th_sum);
4596
4597 hlen += th.th_off << 2;
4598 } else {
4599 /*
4600 * TCP/IP headers are in the first mbuf; we can do
4601 * this the easy way.
4602 */
4603 struct tcphdr *th;
4604
4605 if (v4) {
4606 struct ip *ip =
4607 (void *)(mtod(m0, char *) + offset);
4608 th = (void *)(mtod(m0, char *) + hlen);
4609
4610 ip->ip_len = 0;
4611 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4612 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4613 } else {
4614 struct ip6_hdr *ip6 =
4615 (void *)(mtod(m0, char *) + offset);
4616 th = (void *)(mtod(m0, char *) + hlen);
4617
4618 ip6->ip6_plen = 0;
4619 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4620 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4621 }
4622 hlen += th->th_off << 2;
4623 }
4624
4625 if (v4) {
4626 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4627 cmdlen |= WTX_TCPIP_CMD_IP;
4628 } else {
4629 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4630 ipcse = 0;
4631 }
4632 cmd |= WTX_TCPIP_CMD_TSE;
4633 cmdlen |= WTX_TCPIP_CMD_TSE |
4634 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4635 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4636 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4637 }
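	/*
	 * To recap what the two branches above arrange for TSO: the IP
	 * length field is zeroed and the TCP checksum is seeded with a
	 * pseudo-header sum that excludes the length, because the
	 * hardware fills in the per-segment lengths and finishes the
	 * checksum itself as it carves the frame into MSS-sized
	 * segments.
	 */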
4638
4639 /*
4640 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4641 * offload feature, if we load the context descriptor, we
4642 * MUST provide valid values for IPCSS and TUCSS fields.
4643 */
4644
4645 ipcs = WTX_TCPIP_IPCSS(offset) |
4646 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4647 WTX_TCPIP_IPCSE(ipcse);
4648 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
4649 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
4650 fields |= WTX_IXSM;
4651 }
4652
4653 offset += iphl;
4654
4655 if (m0->m_pkthdr.csum_flags &
4656 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
4657 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4658 fields |= WTX_TXSM;
4659 tucs = WTX_TCPIP_TUCSS(offset) |
4660 WTX_TCPIP_TUCSO(offset +
4661 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
4662 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4663 } else if ((m0->m_pkthdr.csum_flags &
4664 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
4665 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4666 fields |= WTX_TXSM;
4667 tucs = WTX_TCPIP_TUCSS(offset) |
4668 WTX_TCPIP_TUCSO(offset +
4669 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
4670 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4671 } else {
4672 /* Just initialize it to a valid TCP context. */
4673 tucs = WTX_TCPIP_TUCSS(offset) |
4674 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
4675 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4676 }
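	/*
	 * A worked example of the fields above for a plain IPv4/TCP
	 * checksum offload (no VLAN): offset = ETHER_HDR_LEN = 14 and
	 * iphl = 20, so IPCSS = 14, IPCSO = 14 + 10 = 24 (ip_sum),
	 * IPCSE = 33 (the last byte of the IP header), TUCSS = 34,
	 * TUCSO = 34 + 16 = 50 (th_sum) and TUCSE = 0, meaning
	 * "checksum to the end of the packet".
	 */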
4677
4678 /* Fill in the context descriptor. */
4679 t = (struct livengood_tcpip_ctxdesc *)
4680 &sc->sc_txdescs[sc->sc_txnext];
4681 t->tcpip_ipcs = htole32(ipcs);
4682 t->tcpip_tucs = htole32(tucs);
4683 t->tcpip_cmdlen = htole32(cmdlen);
4684 t->tcpip_seg = htole32(seg);
4685 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4686
4687 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4688 txs->txs_ndesc++;
4689
4690 *cmdp = cmd;
4691 *fieldsp = fields;
4692
4693 return 0;
4694 }
4695
4696 static void
4697 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
4698 {
4699 struct mbuf *m;
4700 int i;
4701
4702 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
4703 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
4704 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
4705 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
4706 m->m_data, m->m_len, m->m_flags);
4707 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
4708 i, i == 1 ? "" : "s");
4709 }
4710
4711 /*
4712 * wm_82547_txfifo_stall:
4713 *
4714 * Callout used to wait for the 82547 Tx FIFO to drain,
4715 * reset the FIFO pointers, and restart packet transmission.
4716 */
4717 static void
4718 wm_82547_txfifo_stall(void *arg)
4719 {
4720 struct wm_softc *sc = arg;
4721 #ifndef WM_MPSAFE
4722 int s;
4723
4724 s = splnet();
4725 #endif
4726 WM_TX_LOCK(sc);
4727
4728 if (sc->sc_stopping)
4729 goto out;
4730
4731 if (sc->sc_txfifo_stall) {
4732 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
4733 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
4734 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
4735 /*
4736 * Packets have drained. Stop transmitter, reset
4737 * FIFO pointers, restart transmitter, and kick
4738 * the packet queue.
4739 */
4740 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
4741 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
4742 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
4743 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
4744 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
4745 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
4746 CSR_WRITE(sc, WMREG_TCTL, tctl);
4747 CSR_WRITE_FLUSH(sc);
4748
4749 sc->sc_txfifo_head = 0;
4750 sc->sc_txfifo_stall = 0;
4751 wm_start_locked(&sc->sc_ethercom.ec_if);
4752 } else {
4753 /*
4754 * Still waiting for packets to drain; try again in
4755 * another tick.
4756 */
4757 callout_schedule(&sc->sc_txfifo_ch, 1);
4758 }
4759 }
4760
4761 out:
4762 WM_TX_UNLOCK(sc);
4763 #ifndef WM_MPSAFE
4764 splx(s);
4765 #endif
4766 }
4767
4768 /*
4769 * wm_82547_txfifo_bugchk:
4770 *
4771 * Check for bug condition in the 82547 Tx FIFO. We need to
4772 * prevent enqueueing a packet that would wrap around the end
4773 * of the Tx FIFO ring buffer, otherwise the chip will croak.
4774 *
4775 * We do this by checking the amount of space before the end
4776 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
4777 * the Tx FIFO, wait for all remaining packets to drain, reset
4778 * the internal FIFO pointers to the beginning, and restart
4779 * transmission on the interface.
4780 */
4781 #define WM_FIFO_HDR 0x10
4782 #define WM_82547_PAD_LEN 0x3e0
4783 static int
4784 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
4785 {
4786 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
4787 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
4788
4789 /* Just return if already stalled. */
4790 if (sc->sc_txfifo_stall)
4791 return 1;
4792
4793 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4794 /* Stall only occurs in half-duplex mode. */
4795 goto send_packet;
4796 }
4797
4798 if (len >= WM_82547_PAD_LEN + space) {
4799 sc->sc_txfifo_stall = 1;
4800 callout_schedule(&sc->sc_txfifo_ch, 1);
4801 return 1;
4802 }
4803
4804 send_packet:
4805 sc->sc_txfifo_head += len;
4806 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
4807 sc->sc_txfifo_head -= sc->sc_txfifo_size;
4808
4809 return 0;
4810 }
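/*
 * A worked example of the accounting above: a 1514-byte frame occupies
 * len = roundup(1514 + 0x10, 0x10) = 1536 bytes of FIFO space.  In
 * half-duplex mode the packet is deferred (and the stall callout armed)
 * when len >= WM_82547_PAD_LEN + space; otherwise the head pointer
 * advances by 1536 and wraps modulo sc_txfifo_size.
 */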
4811
4812 /*
4813 * wm_start: [ifnet interface function]
4814 *
4815 * Start packet transmission on the interface.
4816 */
4817 static void
4818 wm_start(struct ifnet *ifp)
4819 {
4820 struct wm_softc *sc = ifp->if_softc;
4821
4822 WM_TX_LOCK(sc);
4823 if (!sc->sc_stopping)
4824 wm_start_locked(ifp);
4825 WM_TX_UNLOCK(sc);
4826 }
4827
4828 static void
4829 wm_start_locked(struct ifnet *ifp)
4830 {
4831 struct wm_softc *sc = ifp->if_softc;
4832 struct mbuf *m0;
4833 struct m_tag *mtag;
4834 struct wm_txsoft *txs;
4835 bus_dmamap_t dmamap;
4836 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
4837 bus_addr_t curaddr;
4838 bus_size_t seglen, curlen;
4839 uint32_t cksumcmd;
4840 uint8_t cksumfields;
4841
4842 KASSERT(WM_TX_LOCKED(sc));
4843
4844 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4845 return;
4846
4847 /* Remember the previous number of free descriptors. */
4848 ofree = sc->sc_txfree;
4849
4850 /*
4851 * Loop through the send queue, setting up transmit descriptors
4852 * until we drain the queue, or use up all available transmit
4853 * descriptors.
4854 */
4855 for (;;) {
4856 m0 = NULL;
4857
4858 /* Get a work queue entry. */
4859 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
4860 wm_txintr(sc);
4861 if (sc->sc_txsfree == 0) {
4862 DPRINTF(WM_DEBUG_TX,
4863 ("%s: TX: no free job descriptors\n",
4864 device_xname(sc->sc_dev)));
4865 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
4866 break;
4867 }
4868 }
4869
4870 /* Grab a packet off the queue. */
4871 IFQ_DEQUEUE(&ifp->if_snd, m0);
4872 if (m0 == NULL)
4873 break;
4874
4875 DPRINTF(WM_DEBUG_TX,
4876 ("%s: TX: have packet to transmit: %p\n",
4877 device_xname(sc->sc_dev), m0));
4878
4879 txs = &sc->sc_txsoft[sc->sc_txsnext];
4880 dmamap = txs->txs_dmamap;
4881
4882 use_tso = (m0->m_pkthdr.csum_flags &
4883 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
4884
4885 /*
4886 * So says the Linux driver:
4887 * The controller does a simple calculation to make sure
4888 * there is enough room in the FIFO before initiating the
4889 * DMA for each buffer. The calc is:
4890 * 4 = ceil(buffer len / MSS)
4891 * To make sure we don't overrun the FIFO, adjust the max
4892 * buffer len if the MSS drops.
4893 */
4894 dmamap->dm_maxsegsz =
4895 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
4896 ? m0->m_pkthdr.segsz << 2
4897 : WTX_MAX_LEN;
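		/*
		 * Worked example of the clamp above: for a typical TSO
		 * MSS of 1460, segsz << 2 = 5840, so each DMA segment
		 * is limited to 4 * MSS = 5840 bytes, provided that is
		 * still below WTX_MAX_LEN; larger MSS values simply get
		 * the WTX_MAX_LEN cap.
		 */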
4898
4899 /*
4900 * Load the DMA map. If this fails, the packet either
4901 * didn't fit in the allotted number of segments, or we
4902 * were short on resources. For the too-many-segments
4903 * case, we simply report an error and drop the packet,
4904 * since we can't sanely copy a jumbo packet to a single
4905 * buffer.
4906 */
4907 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
4908 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
4909 if (error) {
4910 if (error == EFBIG) {
4911 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4912 log(LOG_ERR, "%s: Tx packet consumes too many "
4913 "DMA segments, dropping...\n",
4914 device_xname(sc->sc_dev));
4915 wm_dump_mbuf_chain(sc, m0);
4916 m_freem(m0);
4917 continue;
4918 }
4919 /* Short on resources, just stop for now. */
4920 DPRINTF(WM_DEBUG_TX,
4921 ("%s: TX: dmamap load failed: %d\n",
4922 device_xname(sc->sc_dev), error));
4923 break;
4924 }
4925
4926 segs_needed = dmamap->dm_nsegs;
4927 if (use_tso) {
4928 /* For sentinel descriptor; see below. */
4929 segs_needed++;
4930 }
4931
4932 /*
4933 * Ensure we have enough descriptors free to describe
4934 * the packet. Note, we always reserve one descriptor
4935 * at the end of the ring due to the semantics of the
4936 * TDT register, plus one more in the event we need
4937 * to load offload context.
4938 */
4939 if (segs_needed > sc->sc_txfree - 2) {
4940 /*
4941 * Not enough free descriptors to transmit this
4942 * packet. We haven't committed anything yet,
4943 * so just unload the DMA map, put the packet
4944			 * back on the queue, and punt. Notify the upper
4945 * layer that there are no more slots left.
4946 */
4947 DPRINTF(WM_DEBUG_TX,
4948 ("%s: TX: need %d (%d) descriptors, have %d\n",
4949 device_xname(sc->sc_dev), dmamap->dm_nsegs,
4950 segs_needed, sc->sc_txfree - 1));
4951 ifp->if_flags |= IFF_OACTIVE;
4952 bus_dmamap_unload(sc->sc_dmat, dmamap);
4953 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
4954 break;
4955 }
4956
4957 /*
4958 * Check for 82547 Tx FIFO bug. We need to do this
4959 * once we know we can transmit the packet, since we
4960 * do some internal FIFO space accounting here.
4961 */
4962 if (sc->sc_type == WM_T_82547 &&
4963 wm_82547_txfifo_bugchk(sc, m0)) {
4964 DPRINTF(WM_DEBUG_TX,
4965 ("%s: TX: 82547 Tx FIFO bug detected\n",
4966 device_xname(sc->sc_dev)));
4967 ifp->if_flags |= IFF_OACTIVE;
4968 bus_dmamap_unload(sc->sc_dmat, dmamap);
4969 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
4970 break;
4971 }
4972
4973 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
4974
4975 DPRINTF(WM_DEBUG_TX,
4976 ("%s: TX: packet has %d (%d) DMA segments\n",
4977 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
4978
4979 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
4980
4981 /*
4982 * Store a pointer to the packet so that we can free it
4983 * later.
4984 *
4985 * Initially, we consider the number of descriptors the
4986 * packet uses the number of DMA segments. This may be
4987 * incremented by 1 if we do checksum offload (a descriptor
4988 * is used to set the checksum context).
4989 */
4990 txs->txs_mbuf = m0;
4991 txs->txs_firstdesc = sc->sc_txnext;
4992 txs->txs_ndesc = segs_needed;
4993
4994 /* Set up offload parameters for this packet. */
4995 if (m0->m_pkthdr.csum_flags &
4996 (M_CSUM_TSOv4|M_CSUM_TSOv6|
4997 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
4998 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
4999 if (wm_tx_offload(sc, txs, &cksumcmd,
5000 &cksumfields) != 0) {
5001 /* Error message already displayed. */
5002 bus_dmamap_unload(sc->sc_dmat, dmamap);
5003 continue;
5004 }
5005 } else {
5006 cksumcmd = 0;
5007 cksumfields = 0;
5008 }
5009
5010 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
5011
5012 /* Sync the DMA map. */
5013 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5014 BUS_DMASYNC_PREWRITE);
5015
5016 /* Initialize the transmit descriptor. */
5017 for (nexttx = sc->sc_txnext, seg = 0;
5018 seg < dmamap->dm_nsegs; seg++) {
5019 for (seglen = dmamap->dm_segs[seg].ds_len,
5020 curaddr = dmamap->dm_segs[seg].ds_addr;
5021 seglen != 0;
5022 curaddr += curlen, seglen -= curlen,
5023 nexttx = WM_NEXTTX(sc, nexttx)) {
5024 curlen = seglen;
5025
5026 /*
5027 * So says the Linux driver:
5028 * Work around for premature descriptor
5029 * write-backs in TSO mode. Append a
5030 * 4-byte sentinel descriptor.
5031 */
5032 if (use_tso &&
5033 seg == dmamap->dm_nsegs - 1 &&
5034 curlen > 8)
5035 curlen -= 4;
5036
5037 wm_set_dma_addr(
5038 &sc->sc_txdescs[nexttx].wtx_addr,
5039 curaddr);
5040 sc->sc_txdescs[nexttx].wtx_cmdlen =
5041 htole32(cksumcmd | curlen);
5042 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
5043 0;
5044 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
5045 cksumfields;
5046 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5047 lasttx = nexttx;
5048
5049 DPRINTF(WM_DEBUG_TX,
5050 ("%s: TX: desc %d: low %#" PRIx64 ", "
5051 "len %#04zx\n",
5052 device_xname(sc->sc_dev), nexttx,
5053 (uint64_t)curaddr, curlen));
5054 }
5055 }
5056
5057 KASSERT(lasttx != -1);
5058
5059 /*
5060 * Set up the command byte on the last descriptor of
5061 * the packet. If we're in the interrupt delay window,
5062 * delay the interrupt.
5063 */
5064 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5065 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5066
5067 /*
5068 * If VLANs are enabled and the packet has a VLAN tag, set
5069 * up the descriptor to encapsulate the packet for us.
5070 *
5071 * This is only valid on the last descriptor of the packet.
5072 */
5073 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5074 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5075 htole32(WTX_CMD_VLE);
5076 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
5077 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5078 }
5079
5080 txs->txs_lastdesc = lasttx;
5081
5082 DPRINTF(WM_DEBUG_TX,
5083 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5084 device_xname(sc->sc_dev),
5085 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5086
5087 /* Sync the descriptors we're using. */
5088 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5089 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5090
5091 /* Give the packet to the chip. */
5092 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5093
5094 DPRINTF(WM_DEBUG_TX,
5095 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5096
5097 DPRINTF(WM_DEBUG_TX,
5098 ("%s: TX: finished transmitting packet, job %d\n",
5099 device_xname(sc->sc_dev), sc->sc_txsnext));
5100
5101 /* Advance the tx pointer. */
5102 sc->sc_txfree -= txs->txs_ndesc;
5103 sc->sc_txnext = nexttx;
5104
5105 sc->sc_txsfree--;
5106 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5107
5108 /* Pass the packet to any BPF listeners. */
5109 bpf_mtap(ifp, m0);
5110 }
5111
5112 if (m0 != NULL) {
5113 ifp->if_flags |= IFF_OACTIVE;
5114 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5115 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5116 m_freem(m0);
5117 }
5118
5119 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5120 /* No more slots; notify upper layer. */
5121 ifp->if_flags |= IFF_OACTIVE;
5122 }
5123
5124 if (sc->sc_txfree != ofree) {
5125 /* Set a watchdog timer in case the chip flakes out. */
5126 ifp->if_timer = 5;
5127 }
5128 }
5129
5130 /*
5131 * wm_nq_tx_offload:
5132 *
5133 * Set up TCP/IP checksumming parameters for the
5134 * specified packet, for NEWQUEUE devices
5135 */
5136 static int
5137 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
5138 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
5139 {
5140 struct mbuf *m0 = txs->txs_mbuf;
5141 struct m_tag *mtag;
5142 uint32_t vl_len, mssidx, cmdc;
5143 struct ether_header *eh;
5144 int offset, iphl;
5145
5146 /*
5147 * XXX It would be nice if the mbuf pkthdr had offset
5148 * fields for the protocol headers.
5149 */
5150 *cmdlenp = 0;
5151 *fieldsp = 0;
5152
5153 eh = mtod(m0, struct ether_header *);
5154 switch (htons(eh->ether_type)) {
5155 case ETHERTYPE_IP:
5156 case ETHERTYPE_IPV6:
5157 offset = ETHER_HDR_LEN;
5158 break;
5159
5160 case ETHERTYPE_VLAN:
5161 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5162 break;
5163
5164 default:
5165 /* Don't support this protocol or encapsulation. */
5166 *do_csum = false;
5167 return 0;
5168 }
5169 *do_csum = true;
5170 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
5171 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
5172
5173 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
5174 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
5175
5176 if ((m0->m_pkthdr.csum_flags &
5177 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
5178 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5179 } else {
5180 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5181 }
5182 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
5183 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
5184
5185 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5186 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
5187 << NQTXC_VLLEN_VLAN_SHIFT);
5188 *cmdlenp |= NQTX_CMD_VLE;
5189 }
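	/*
	 * Example of the packing above: for an untagged IPv4 frame,
	 * vl_len ends up as (14 << NQTXC_VLLEN_MACLEN_SHIFT) |
	 * (20 << NQTXC_VLLEN_IPLEN_SHIFT), i.e. the MAC and IP header
	 * lengths in their respective fields; a tagged frame would add
	 * the 802.1Q tag in the VLAN field and set NQTX_CMD_VLE.
	 */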
5190
5191 mssidx = 0;
5192
5193 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5194 int hlen = offset + iphl;
5195 int tcp_hlen;
5196 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5197
5198 if (__predict_false(m0->m_len <
5199 (hlen + sizeof(struct tcphdr)))) {
5200 /*
5201 * TCP/IP headers are not in the first mbuf; we need
5202 * to do this the slow and painful way. Let's just
5203 * hope this doesn't happen very often.
5204 */
5205 struct tcphdr th;
5206
5207 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5208
5209 m_copydata(m0, hlen, sizeof(th), &th);
5210 if (v4) {
5211 struct ip ip;
5212
5213 m_copydata(m0, offset, sizeof(ip), &ip);
5214 ip.ip_len = 0;
5215 m_copyback(m0,
5216 offset + offsetof(struct ip, ip_len),
5217 sizeof(ip.ip_len), &ip.ip_len);
5218 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5219 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5220 } else {
5221 struct ip6_hdr ip6;
5222
5223 m_copydata(m0, offset, sizeof(ip6), &ip6);
5224 ip6.ip6_plen = 0;
5225 m_copyback(m0,
5226 offset + offsetof(struct ip6_hdr, ip6_plen),
5227 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5228 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5229 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5230 }
5231 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5232 sizeof(th.th_sum), &th.th_sum);
5233
5234 tcp_hlen = th.th_off << 2;
5235 } else {
5236 /*
5237 * TCP/IP headers are in the first mbuf; we can do
5238 * this the easy way.
5239 */
5240 struct tcphdr *th;
5241
5242 if (v4) {
5243 struct ip *ip =
5244 (void *)(mtod(m0, char *) + offset);
5245 th = (void *)(mtod(m0, char *) + hlen);
5246
5247 ip->ip_len = 0;
5248 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5249 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5250 } else {
5251 struct ip6_hdr *ip6 =
5252 (void *)(mtod(m0, char *) + offset);
5253 th = (void *)(mtod(m0, char *) + hlen);
5254
5255 ip6->ip6_plen = 0;
5256 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5257 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5258 }
5259 tcp_hlen = th->th_off << 2;
5260 }
5261 hlen += tcp_hlen;
5262 *cmdlenp |= NQTX_CMD_TSE;
5263
5264 if (v4) {
5265 WM_EVCNT_INCR(&sc->sc_ev_txtso);
5266 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
5267 } else {
5268 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5269 *fieldsp |= NQTXD_FIELDS_TUXSM;
5270 }
5271 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
5272 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5273 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
5274 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
5275 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
5276 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
5277 } else {
5278 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
5279 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5280 }
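	/*
	 * Example of the TSO fields above: with a common 1448-byte MSS
	 * and a 32-byte TCP header (timestamp option in use), mssidx
	 * packs 1448 into the MSS field and 32 into the L4LEN field,
	 * while PAYLEN is the TCP payload only, m0->m_pkthdr.len minus
	 * all of the headers.  In the non-TSO case PAYLEN is simply the
	 * whole packet length.
	 */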
5281
5282 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
5283 *fieldsp |= NQTXD_FIELDS_IXSM;
5284 cmdc |= NQTXC_CMD_IP4;
5285 }
5286
5287 if (m0->m_pkthdr.csum_flags &
5288 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5289 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5290 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5291 cmdc |= NQTXC_CMD_TCP;
5292 } else {
5293 cmdc |= NQTXC_CMD_UDP;
5294 }
5295 cmdc |= NQTXC_CMD_IP4;
5296 *fieldsp |= NQTXD_FIELDS_TUXSM;
5297 }
5298 if (m0->m_pkthdr.csum_flags &
5299 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5300 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5301 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5302 cmdc |= NQTXC_CMD_TCP;
5303 } else {
5304 cmdc |= NQTXC_CMD_UDP;
5305 }
5306 cmdc |= NQTXC_CMD_IP6;
5307 *fieldsp |= NQTXD_FIELDS_TUXSM;
5308 }
5309
5310 /* Fill in the context descriptor. */
5311 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5312 htole32(vl_len);
5313 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5314 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5315 htole32(cmdc);
5316 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5317 htole32(mssidx);
5318 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5319 DPRINTF(WM_DEBUG_TX,
5320 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5321 sc->sc_txnext, 0, vl_len));
5322 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5323 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5324 txs->txs_ndesc++;
5325 return 0;
5326 }
5327
5328 /*
5329 * wm_nq_start: [ifnet interface function]
5330 *
5331 * Start packet transmission on the interface for NEWQUEUE devices
5332 */
5333 static void
5334 wm_nq_start(struct ifnet *ifp)
5335 {
5336 struct wm_softc *sc = ifp->if_softc;
5337
5338 WM_TX_LOCK(sc);
5339 if (!sc->sc_stopping)
5340 wm_nq_start_locked(ifp);
5341 WM_TX_UNLOCK(sc);
5342 }
5343
5344 static void
5345 wm_nq_start_locked(struct ifnet *ifp)
5346 {
5347 struct wm_softc *sc = ifp->if_softc;
5348 struct mbuf *m0;
5349 struct m_tag *mtag;
5350 struct wm_txsoft *txs;
5351 bus_dmamap_t dmamap;
5352 int error, nexttx, lasttx = -1, seg, segs_needed;
5353 bool do_csum, sent;
5354
5355 KASSERT(WM_TX_LOCKED(sc));
5356
5357 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5358 return;
5359
5360 sent = false;
5361
5362 /*
5363 * Loop through the send queue, setting up transmit descriptors
5364 * until we drain the queue, or use up all available transmit
5365 * descriptors.
5366 */
5367 for (;;) {
5368 m0 = NULL;
5369
5370 /* Get a work queue entry. */
5371 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5372 wm_txintr(sc);
5373 if (sc->sc_txsfree == 0) {
5374 DPRINTF(WM_DEBUG_TX,
5375 ("%s: TX: no free job descriptors\n",
5376 device_xname(sc->sc_dev)));
5377 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5378 break;
5379 }
5380 }
5381
5382 /* Grab a packet off the queue. */
5383 IFQ_DEQUEUE(&ifp->if_snd, m0);
5384 if (m0 == NULL)
5385 break;
5386
5387 DPRINTF(WM_DEBUG_TX,
5388 ("%s: TX: have packet to transmit: %p\n",
5389 device_xname(sc->sc_dev), m0));
5390
5391 txs = &sc->sc_txsoft[sc->sc_txsnext];
5392 dmamap = txs->txs_dmamap;
5393
5394 /*
5395 * Load the DMA map. If this fails, the packet either
5396 * didn't fit in the allotted number of segments, or we
5397 * were short on resources. For the too-many-segments
5398 * case, we simply report an error and drop the packet,
5399 * since we can't sanely copy a jumbo packet to a single
5400 * buffer.
5401 */
5402 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5403 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5404 if (error) {
5405 if (error == EFBIG) {
5406 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5407 log(LOG_ERR, "%s: Tx packet consumes too many "
5408 "DMA segments, dropping...\n",
5409 device_xname(sc->sc_dev));
5410 wm_dump_mbuf_chain(sc, m0);
5411 m_freem(m0);
5412 continue;
5413 }
5414 /* Short on resources, just stop for now. */
5415 DPRINTF(WM_DEBUG_TX,
5416 ("%s: TX: dmamap load failed: %d\n",
5417 device_xname(sc->sc_dev), error));
5418 break;
5419 }
5420
5421 segs_needed = dmamap->dm_nsegs;
5422
5423 /*
5424 * Ensure we have enough descriptors free to describe
5425 * the packet. Note, we always reserve one descriptor
5426 * at the end of the ring due to the semantics of the
5427 * TDT register, plus one more in the event we need
5428 * to load offload context.
5429 */
5430 if (segs_needed > sc->sc_txfree - 2) {
5431 /*
5432 * Not enough free descriptors to transmit this
5433 * packet. We haven't committed anything yet,
5434 * so just unload the DMA map, put the packet
5435			 * back on the queue, and punt. Notify the upper
5436 * layer that there are no more slots left.
5437 */
5438 DPRINTF(WM_DEBUG_TX,
5439 ("%s: TX: need %d (%d) descriptors, have %d\n",
5440 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5441 segs_needed, sc->sc_txfree - 1));
5442 ifp->if_flags |= IFF_OACTIVE;
5443 bus_dmamap_unload(sc->sc_dmat, dmamap);
5444 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5445 break;
5446 }
5447
5448 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5449
5450 DPRINTF(WM_DEBUG_TX,
5451 ("%s: TX: packet has %d (%d) DMA segments\n",
5452 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5453
5454 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5455
5456 /*
5457 * Store a pointer to the packet so that we can free it
5458 * later.
5459 *
5460 * Initially, we consider the number of descriptors the
5461 * packet uses the number of DMA segments. This may be
5462 * incremented by 1 if we do checksum offload (a descriptor
5463 * is used to set the checksum context).
5464 */
5465 txs->txs_mbuf = m0;
5466 txs->txs_firstdesc = sc->sc_txnext;
5467 txs->txs_ndesc = segs_needed;
5468
5469 /* Set up offload parameters for this packet. */
5470 uint32_t cmdlen, fields, dcmdlen;
5471 if (m0->m_pkthdr.csum_flags &
5472 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5473 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5474 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5475 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5476 &do_csum) != 0) {
5477 /* Error message already displayed. */
5478 bus_dmamap_unload(sc->sc_dmat, dmamap);
5479 continue;
5480 }
5481 } else {
5482 do_csum = false;
5483 cmdlen = 0;
5484 fields = 0;
5485 }
5486
5487 /* Sync the DMA map. */
5488 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5489 BUS_DMASYNC_PREWRITE);
5490
5491 /* Initialize the first transmit descriptor. */
5492 nexttx = sc->sc_txnext;
5493 if (!do_csum) {
5494 /* setup a legacy descriptor */
5495 wm_set_dma_addr(
5496 &sc->sc_txdescs[nexttx].wtx_addr,
5497 dmamap->dm_segs[0].ds_addr);
5498 sc->sc_txdescs[nexttx].wtx_cmdlen =
5499 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5500 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5501 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5502 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5503 NULL) {
5504 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5505 htole32(WTX_CMD_VLE);
5506 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5507 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5508 } else {
5509 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5510 }
5511 dcmdlen = 0;
5512 } else {
5513 /* setup an advanced data descriptor */
5514 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5515 htole64(dmamap->dm_segs[0].ds_addr);
5516 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5517 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5518 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
5519 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5520 htole32(fields);
5521 DPRINTF(WM_DEBUG_TX,
5522 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5523 device_xname(sc->sc_dev), nexttx,
5524 (uint64_t)dmamap->dm_segs[0].ds_addr));
5525 DPRINTF(WM_DEBUG_TX,
5526 ("\t 0x%08x%08x\n", fields,
5527 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5528 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5529 }
5530
5531 lasttx = nexttx;
5532 nexttx = WM_NEXTTX(sc, nexttx);
5533 		/*
5534 		 * Fill in the next descriptors.  Legacy and advanced
5535 		 * format descriptors are the same here.
5536 		 */
5537 for (seg = 1; seg < dmamap->dm_nsegs;
5538 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5539 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5540 htole64(dmamap->dm_segs[seg].ds_addr);
5541 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5542 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5543 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5544 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5545 lasttx = nexttx;
5546
5547 DPRINTF(WM_DEBUG_TX,
5548 ("%s: TX: desc %d: %#" PRIx64 ", "
5549 "len %#04zx\n",
5550 device_xname(sc->sc_dev), nexttx,
5551 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5552 dmamap->dm_segs[seg].ds_len));
5553 }
5554
5555 KASSERT(lasttx != -1);
5556
5557 /*
5558 * Set up the command byte on the last descriptor of
5559 * the packet. If we're in the interrupt delay window,
5560 * delay the interrupt.
5561 */
5562 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5563 (NQTX_CMD_EOP | NQTX_CMD_RS));
5564 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5565 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5566
5567 txs->txs_lastdesc = lasttx;
5568
5569 DPRINTF(WM_DEBUG_TX,
5570 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5571 device_xname(sc->sc_dev),
5572 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5573
5574 /* Sync the descriptors we're using. */
5575 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5576 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5577
5578 /* Give the packet to the chip. */
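		/*
		 * Writing the new tail index to TDT transfers ownership
		 * of every descriptor up to (but not including) that
		 * index to the hardware, which is why one descriptor is
		 * always kept in reserve above.
		 */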
5579 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5580 sent = true;
5581
5582 DPRINTF(WM_DEBUG_TX,
5583 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5584
5585 DPRINTF(WM_DEBUG_TX,
5586 ("%s: TX: finished transmitting packet, job %d\n",
5587 device_xname(sc->sc_dev), sc->sc_txsnext));
5588
5589 /* Advance the tx pointer. */
5590 sc->sc_txfree -= txs->txs_ndesc;
5591 sc->sc_txnext = nexttx;
5592
5593 sc->sc_txsfree--;
5594 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5595
5596 /* Pass the packet to any BPF listeners. */
5597 bpf_mtap(ifp, m0);
5598 }
5599
5600 if (m0 != NULL) {
5601 ifp->if_flags |= IFF_OACTIVE;
5602 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5603 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5604 m_freem(m0);
5605 }
5606
5607 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5608 /* No more slots; notify upper layer. */
5609 ifp->if_flags |= IFF_OACTIVE;
5610 }
5611
5612 if (sent) {
5613 /* Set a watchdog timer in case the chip flakes out. */
5614 ifp->if_timer = 5;
5615 }
5616 }
5617
5618 /* Interrupt */
5619
5620 /*
5621 * wm_txintr:
5622 *
5623 * Helper; handle transmit interrupts.
5624 */
5625 static void
5626 wm_txintr(struct wm_softc *sc)
5627 {
5628 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5629 struct wm_txsoft *txs;
5630 uint8_t status;
5631 int i;
5632
5633 if (sc->sc_stopping)
5634 return;
5635
5636 ifp->if_flags &= ~IFF_OACTIVE;
5637
5638 /*
5639 * Go through the Tx list and free mbufs for those
5640 * frames which have been transmitted.
5641 */
5642 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5643 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
5644 txs = &sc->sc_txsoft[i];
5645
5646 DPRINTF(WM_DEBUG_TX,
5647 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
5648
5649 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
5650 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5651
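		/*
		 * The chip sets the DD (descriptor done) bit in the
		 * status byte of a job's last descriptor once it has
		 * finished processing the frame; if DD is still clear,
		 * the job isn't complete, so re-sync the descriptor
		 * and check again later.
		 */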
5652 status =
5653 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
5654 if ((status & WTX_ST_DD) == 0) {
5655 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
5656 BUS_DMASYNC_PREREAD);
5657 break;
5658 }
5659
5660 DPRINTF(WM_DEBUG_TX,
5661 ("%s: TX: job %d done: descs %d..%d\n",
5662 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
5663 txs->txs_lastdesc));
5664
5665 /*
5666 * XXX We should probably be using the statistics
5667 * XXX registers, but I don't know if they exist
5668 * XXX on chips before the i82544.
5669 */
5670
5671 #ifdef WM_EVENT_COUNTERS
5672 if (status & WTX_ST_TU)
5673 WM_EVCNT_INCR(&sc->sc_ev_tu);
5674 #endif /* WM_EVENT_COUNTERS */
5675
5676 if (status & (WTX_ST_EC|WTX_ST_LC)) {
5677 ifp->if_oerrors++;
5678 if (status & WTX_ST_LC)
5679 log(LOG_WARNING, "%s: late collision\n",
5680 device_xname(sc->sc_dev));
5681 else if (status & WTX_ST_EC) {
5682 ifp->if_collisions += 16;
5683 log(LOG_WARNING, "%s: excessive collisions\n",
5684 device_xname(sc->sc_dev));
5685 }
5686 } else
5687 ifp->if_opackets++;
5688
5689 sc->sc_txfree += txs->txs_ndesc;
5690 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
5691 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5692 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5693 m_freem(txs->txs_mbuf);
5694 txs->txs_mbuf = NULL;
5695 }
5696
5697 /* Update the dirty transmit buffer pointer. */
5698 sc->sc_txsdirty = i;
5699 DPRINTF(WM_DEBUG_TX,
5700 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
5701
5702 /*
5703 * If there are no more pending transmissions, cancel the watchdog
5704 * timer.
5705 */
5706 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
5707 ifp->if_timer = 0;
5708 }
5709
5710 /*
5711 * wm_rxintr:
5712 *
5713 * Helper; handle receive interrupts.
5714 */
5715 static void
5716 wm_rxintr(struct wm_softc *sc)
5717 {
5718 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5719 struct wm_rxsoft *rxs;
5720 struct mbuf *m;
5721 int i, len;
5722 uint8_t status, errors;
5723 uint16_t vlantag;
5724
5725 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
5726 rxs = &sc->sc_rxsoft[i];
5727
5728 DPRINTF(WM_DEBUG_RX,
5729 ("%s: RX: checking descriptor %d\n",
5730 device_xname(sc->sc_dev), i));
5731
5732 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5733
5734 status = sc->sc_rxdescs[i].wrx_status;
5735 errors = sc->sc_rxdescs[i].wrx_errors;
5736 len = le16toh(sc->sc_rxdescs[i].wrx_len);
5737 vlantag = sc->sc_rxdescs[i].wrx_special;
5738
5739 if ((status & WRX_ST_DD) == 0) {
5740 /* We have processed all of the receive descriptors. */
5741 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
5742 break;
5743 }
5744
5745 if (__predict_false(sc->sc_rxdiscard)) {
5746 DPRINTF(WM_DEBUG_RX,
5747 ("%s: RX: discarding contents of descriptor %d\n",
5748 device_xname(sc->sc_dev), i));
5749 WM_INIT_RXDESC(sc, i);
5750 if (status & WRX_ST_EOP) {
5751 /* Reset our state. */
5752 DPRINTF(WM_DEBUG_RX,
5753 ("%s: RX: resetting rxdiscard -> 0\n",
5754 device_xname(sc->sc_dev)));
5755 sc->sc_rxdiscard = 0;
5756 }
5757 continue;
5758 }
5759
5760 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5761 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
5762
5763 m = rxs->rxs_mbuf;
5764
5765 /*
5766 * Add a new receive buffer to the ring, unless of
5767 * course the length is zero. Treat the latter as a
5768 * failed mapping.
5769 */
5770 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
5771 /*
5772 * Failed, throw away what we've done so
5773 * far, and discard the rest of the packet.
5774 */
5775 ifp->if_ierrors++;
5776 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5777 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5778 WM_INIT_RXDESC(sc, i);
5779 if ((status & WRX_ST_EOP) == 0)
5780 sc->sc_rxdiscard = 1;
5781 if (sc->sc_rxhead != NULL)
5782 m_freem(sc->sc_rxhead);
5783 WM_RXCHAIN_RESET(sc);
5784 DPRINTF(WM_DEBUG_RX,
5785 ("%s: RX: Rx buffer allocation failed, "
5786 "dropping packet%s\n", device_xname(sc->sc_dev),
5787 sc->sc_rxdiscard ? " (discard)" : ""));
5788 continue;
5789 }
5790
5791 m->m_len = len;
5792 sc->sc_rxlen += len;
5793 DPRINTF(WM_DEBUG_RX,
5794 ("%s: RX: buffer at %p len %d\n",
5795 device_xname(sc->sc_dev), m->m_data, len));
5796
5797 /* If this is not the end of the packet, keep looking. */
5798 if ((status & WRX_ST_EOP) == 0) {
5799 WM_RXCHAIN_LINK(sc, m);
5800 DPRINTF(WM_DEBUG_RX,
5801 ("%s: RX: not yet EOP, rxlen -> %d\n",
5802 device_xname(sc->sc_dev), sc->sc_rxlen));
5803 continue;
5804 }
5805
5806 		/*
5807 		 * Okay, we have the entire packet now.  The chip includes
5808 		 * the FCS except on the I350/I354 and I21[01] (not all
5809 		 * chips can be configured to strip it), so we need to
5810 		 * trim it off.  We may also need to adjust the length
5811 		 * of the previous mbuf in the chain if the current mbuf
5812 		 * is too short.  Due to an erratum, the RCTL_SECRC bit
5813 		 * in the RCTL register is always set on the I350, so
5814 		 * the FCS is already stripped there and we don't trim it.
5815 		 */
5816 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
5817 && (sc->sc_type != WM_T_I210)
5818 && (sc->sc_type != WM_T_I211)) {
5819 if (m->m_len < ETHER_CRC_LEN) {
5820 sc->sc_rxtail->m_len
5821 -= (ETHER_CRC_LEN - m->m_len);
5822 m->m_len = 0;
5823 } else
5824 m->m_len -= ETHER_CRC_LEN;
5825 len = sc->sc_rxlen - ETHER_CRC_LEN;
5826 } else
5827 len = sc->sc_rxlen;
5828
5829 WM_RXCHAIN_LINK(sc, m);
5830
5831 *sc->sc_rxtailp = NULL;
5832 m = sc->sc_rxhead;
5833
5834 WM_RXCHAIN_RESET(sc);
5835
5836 DPRINTF(WM_DEBUG_RX,
5837 ("%s: RX: have entire packet, len -> %d\n",
5838 device_xname(sc->sc_dev), len));
5839
5840 /* If an error occurred, update stats and drop the packet. */
5841 if (errors &
5842 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
5843 if (errors & WRX_ER_SE)
5844 log(LOG_WARNING, "%s: symbol error\n",
5845 device_xname(sc->sc_dev));
5846 else if (errors & WRX_ER_SEQ)
5847 log(LOG_WARNING, "%s: receive sequence error\n",
5848 device_xname(sc->sc_dev));
5849 else if (errors & WRX_ER_CE)
5850 log(LOG_WARNING, "%s: CRC error\n",
5851 device_xname(sc->sc_dev));
5852 m_freem(m);
5853 continue;
5854 }
5855
5856 /* No errors. Receive the packet. */
5857 m->m_pkthdr.rcvif = ifp;
5858 m->m_pkthdr.len = len;
5859
5860 /*
5861 * If VLANs are enabled, VLAN packets have been unwrapped
5862 * for us. Associate the tag with the packet.
5863 */
5864 		/* XXX should check for I350 and I354 */
5865 if ((status & WRX_ST_VP) != 0) {
5866 VLAN_INPUT_TAG(ifp, m,
5867 le16toh(vlantag),
5868 continue);
5869 }
5870
5871 /* Set up checksum info for this packet. */
5872 if ((status & WRX_ST_IXSM) == 0) {
5873 if (status & WRX_ST_IPCS) {
5874 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
5875 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
5876 if (errors & WRX_ER_IPE)
5877 m->m_pkthdr.csum_flags |=
5878 M_CSUM_IPv4_BAD;
5879 }
5880 if (status & WRX_ST_TCPCS) {
5881 /*
5882 * Note: we don't know if this was TCP or UDP,
5883 * so we just set both bits, and expect the
5884 * upper layers to deal.
5885 */
5886 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
5887 m->m_pkthdr.csum_flags |=
5888 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
5889 M_CSUM_TCPv6 | M_CSUM_UDPv6;
5890 if (errors & WRX_ER_TCPE)
5891 m->m_pkthdr.csum_flags |=
5892 M_CSUM_TCP_UDP_BAD;
5893 }
5894 }
5895
5896 ifp->if_ipackets++;
5897
5898 WM_RX_UNLOCK(sc);
5899
5900 /* Pass this up to any BPF listeners. */
5901 bpf_mtap(ifp, m);
5902
5903 /* Pass it on. */
5904 (*ifp->if_input)(ifp, m);
5905
5906 WM_RX_LOCK(sc);
5907
5908 if (sc->sc_stopping)
5909 break;
5910 }
5911
5912 /* Update the receive pointer. */
5913 sc->sc_rxptr = i;
5914
5915 DPRINTF(WM_DEBUG_RX,
5916 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
5917 }
5918
5919 /*
5920 * wm_linkintr_gmii:
5921 *
5922 * Helper; handle link interrupts for GMII.
5923 */
5924 static void
5925 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
5926 {
5927
5928 KASSERT(WM_TX_LOCKED(sc));
5929
5930 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5931 __func__));
5932
5933 if (icr & ICR_LSC) {
5934 DPRINTF(WM_DEBUG_LINK,
5935 ("%s: LINK: LSC -> mii_pollstat\n",
5936 device_xname(sc->sc_dev)));
5937 mii_pollstat(&sc->sc_mii);
5938 if (sc->sc_type == WM_T_82543) {
5939 int miistatus, active;
5940
5941 /*
5942 * With 82543, we need to force speed and
5943 * duplex on the MAC equal to what the PHY
5944 * speed and duplex configuration is.
5945 */
5946 miistatus = sc->sc_mii.mii_media_status;
5947
5948 if (miistatus & IFM_ACTIVE) {
5949 active = sc->sc_mii.mii_media_active;
5950 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5951 switch (IFM_SUBTYPE(active)) {
5952 case IFM_10_T:
5953 sc->sc_ctrl |= CTRL_SPEED_10;
5954 break;
5955 case IFM_100_TX:
5956 sc->sc_ctrl |= CTRL_SPEED_100;
5957 break;
5958 case IFM_1000_T:
5959 sc->sc_ctrl |= CTRL_SPEED_1000;
5960 break;
5961 default:
5962 /*
5963 * fiber?
5964 					 * Should not enter here.
5965 */
5966 printf("unknown media (%x)\n",
5967 active);
5968 break;
5969 }
5970 if (active & IFM_FDX)
5971 sc->sc_ctrl |= CTRL_FD;
5972 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5973 }
5974 } else if ((sc->sc_type == WM_T_ICH8)
5975 && (sc->sc_phytype == WMPHY_IGP_3)) {
5976 wm_kmrn_lock_loss_workaround_ich8lan(sc);
5977 } else if (sc->sc_type == WM_T_PCH) {
5978 wm_k1_gig_workaround_hv(sc,
5979 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
5980 }
5981
5982 if ((sc->sc_phytype == WMPHY_82578)
5983 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
5984 == IFM_1000_T)) {
5985
5986 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
5987 delay(200*1000); /* XXX too big */
5988
5989 /* Link stall fix for link up */
5990 wm_gmii_hv_writereg(sc->sc_dev, 1,
5991 HV_MUX_DATA_CTRL,
5992 HV_MUX_DATA_CTRL_GEN_TO_MAC
5993 | HV_MUX_DATA_CTRL_FORCE_SPEED);
5994 wm_gmii_hv_writereg(sc->sc_dev, 1,
5995 HV_MUX_DATA_CTRL,
5996 HV_MUX_DATA_CTRL_GEN_TO_MAC);
5997 }
5998 }
5999 } else if (icr & ICR_RXSEQ) {
6000 DPRINTF(WM_DEBUG_LINK,
6001 		    ("%s: LINK: Receive sequence error\n",
6002 device_xname(sc->sc_dev)));
6003 }
6004 }
6005
6006 /*
6007 * wm_linkintr_tbi:
6008 *
6009 * Helper; handle link interrupts for TBI mode.
6010 */
6011 static void
6012 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
6013 {
6014 uint32_t status;
6015
6016 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6017 __func__));
6018
6019 status = CSR_READ(sc, WMREG_STATUS);
6020 if (icr & ICR_LSC) {
6021 if (status & STATUS_LU) {
6022 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
6023 device_xname(sc->sc_dev),
6024 (status & STATUS_FD) ? "FDX" : "HDX"));
6025 /*
6026 * NOTE: CTRL will update TFCE and RFCE automatically,
6027 * so we should update sc->sc_ctrl
6028 */
6029
6030 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
6031 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6032 sc->sc_fcrtl &= ~FCRTL_XONE;
6033 if (status & STATUS_FD)
6034 sc->sc_tctl |=
6035 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6036 else
6037 sc->sc_tctl |=
6038 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6039 if (sc->sc_ctrl & CTRL_TFCE)
6040 sc->sc_fcrtl |= FCRTL_XONE;
6041 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6042 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
6043 WMREG_OLD_FCRTL : WMREG_FCRTL,
6044 sc->sc_fcrtl);
6045 sc->sc_tbi_linkup = 1;
6046 } else {
6047 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
6048 device_xname(sc->sc_dev)));
6049 sc->sc_tbi_linkup = 0;
6050 }
6051 /* Update LED */
6052 wm_tbi_serdes_set_linkled(sc);
6053 } else if (icr & ICR_RXSEQ) {
6054 DPRINTF(WM_DEBUG_LINK,
6055 ("%s: LINK: Receive sequence error\n",
6056 device_xname(sc->sc_dev)));
6057 }
6058 }
6059
6060 /*
6061 * wm_linkintr_serdes:
6062 *
6063  *	Helper; handle link interrupts for SERDES mode.
6064 */
6065 static void
6066 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
6067 {
6068 struct mii_data *mii = &sc->sc_mii;
6069 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6070 uint32_t pcs_adv, pcs_lpab, reg;
6071
6072 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6073 __func__));
6074
6075 if (icr & ICR_LSC) {
6076 /* Check PCS */
6077 reg = CSR_READ(sc, WMREG_PCS_LSTS);
6078 if ((reg & PCS_LSTS_LINKOK) != 0) {
6079 mii->mii_media_status |= IFM_ACTIVE;
6080 sc->sc_tbi_linkup = 1;
6081 } else {
6082 			mii->mii_media_active |= IFM_NONE;
6083 sc->sc_tbi_linkup = 0;
6084 wm_tbi_serdes_set_linkled(sc);
6085 return;
6086 }
6087 mii->mii_media_active |= IFM_1000_SX;
6088 if ((reg & PCS_LSTS_FDX) != 0)
6089 mii->mii_media_active |= IFM_FDX;
6090 else
6091 mii->mii_media_active |= IFM_HDX;
6092 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6093 /* Check flow */
6094 reg = CSR_READ(sc, WMREG_PCS_LSTS);
6095 if ((reg & PCS_LSTS_AN_COMP) == 0) {
6096 DPRINTF(WM_DEBUG_LINK,
6097 ("XXX LINKOK but not ACOMP\n"));
6098 return;
6099 }
6100 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
6101 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
6102 DPRINTF(WM_DEBUG_LINK,
6103 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
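			/*
			 * Resolve the pause abilities as in IEEE 802.3
			 * annex 28B: symmetric pause if both ends
			 * advertise symmetric, otherwise TX-only or
			 * RX-only pause depending on the sym/asym
			 * advertisement combination below.
			 */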
6104 if ((pcs_adv & TXCW_SYM_PAUSE)
6105 && (pcs_lpab & TXCW_SYM_PAUSE)) {
6106 mii->mii_media_active |= IFM_FLOW
6107 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
6108 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
6109 && (pcs_adv & TXCW_ASYM_PAUSE)
6110 && (pcs_lpab & TXCW_SYM_PAUSE)
6111 && (pcs_lpab & TXCW_ASYM_PAUSE))
6112 mii->mii_media_active |= IFM_FLOW
6113 | IFM_ETH_TXPAUSE;
6114 else if ((pcs_adv & TXCW_SYM_PAUSE)
6115 && (pcs_adv & TXCW_ASYM_PAUSE)
6116 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
6117 && (pcs_lpab & TXCW_ASYM_PAUSE))
6118 mii->mii_media_active |= IFM_FLOW
6119 | IFM_ETH_RXPAUSE;
6120 }
6121 /* Update LED */
6122 wm_tbi_serdes_set_linkled(sc);
6123 } else {
6124 DPRINTF(WM_DEBUG_LINK,
6125 ("%s: LINK: Receive sequence error\n",
6126 device_xname(sc->sc_dev)));
6127 }
6128 }
6129
6130 /*
6131 * wm_linkintr:
6132 *
6133 * Helper; handle link interrupts.
6134 */
6135 static void
6136 wm_linkintr(struct wm_softc *sc, uint32_t icr)
6137 {
6138
6139 if (sc->sc_flags & WM_F_HAS_MII)
6140 wm_linkintr_gmii(sc, icr);
6141 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
6142 && (sc->sc_type >= WM_T_82575))
6143 wm_linkintr_serdes(sc, icr);
6144 else
6145 wm_linkintr_tbi(sc, icr);
6146 }
6147
6148 /*
6149 * wm_intr:
6150 *
6151 * Interrupt service routine.
6152 */
6153 static int
6154 wm_intr(void *arg)
6155 {
6156 struct wm_softc *sc = arg;
6157 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6158 uint32_t icr;
6159 int handled = 0;
6160
6161 while (1 /* CONSTCOND */) {
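		/*
		 * Reading ICR acknowledges (clears) the asserted
		 * interrupt causes, so keep looping until no enabled
		 * cause remains.
		 */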
6162 icr = CSR_READ(sc, WMREG_ICR);
6163 if ((icr & sc->sc_icr) == 0)
6164 break;
6165 rnd_add_uint32(&sc->rnd_source, icr);
6166
6167 WM_RX_LOCK(sc);
6168
6169 if (sc->sc_stopping) {
6170 WM_RX_UNLOCK(sc);
6171 break;
6172 }
6173
6174 handled = 1;
6175
6176 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6177 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
6178 DPRINTF(WM_DEBUG_RX,
6179 ("%s: RX: got Rx intr 0x%08x\n",
6180 device_xname(sc->sc_dev),
6181 icr & (ICR_RXDMT0|ICR_RXT0)));
6182 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
6183 }
6184 #endif
6185 wm_rxintr(sc);
6186
6187 WM_RX_UNLOCK(sc);
6188 WM_TX_LOCK(sc);
6189
6190 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6191 if (icr & ICR_TXDW) {
6192 DPRINTF(WM_DEBUG_TX,
6193 ("%s: TX: got TXDW interrupt\n",
6194 device_xname(sc->sc_dev)));
6195 WM_EVCNT_INCR(&sc->sc_ev_txdw);
6196 }
6197 #endif
6198 wm_txintr(sc);
6199
6200 if (icr & (ICR_LSC|ICR_RXSEQ)) {
6201 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
6202 wm_linkintr(sc, icr);
6203 }
6204
6205 WM_TX_UNLOCK(sc);
6206
6207 if (icr & ICR_RXO) {
6208 #if defined(WM_DEBUG)
6209 log(LOG_WARNING, "%s: Receive overrun\n",
6210 device_xname(sc->sc_dev));
6211 #endif /* defined(WM_DEBUG) */
6212 }
6213 }
6214
6215 if (handled) {
6216 /* Try to get more packets going. */
6217 ifp->if_start(ifp);
6218 }
6219
6220 return handled;
6221 }
6222
6223 /*
6224 * Media related.
6225 * GMII, SGMII, TBI (and SERDES)
6226 */
6227
6228 /* Common */
6229
6230 /*
6231 * wm_tbi_serdes_set_linkled:
6232 *
6233 * Update the link LED on TBI and SERDES devices.
6234 */
6235 static void
6236 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
6237 {
6238
6239 if (sc->sc_tbi_linkup)
6240 sc->sc_ctrl |= CTRL_SWDPIN(0);
6241 else
6242 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
6243
6244 /* 82540 or newer devices are active low */
6245 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
6246
6247 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6248 }
6249
6250 /* GMII related */
6251
6252 /*
6253 * wm_gmii_reset:
6254 *
6255 * Reset the PHY.
6256 */
6257 static void
6258 wm_gmii_reset(struct wm_softc *sc)
6259 {
6260 uint32_t reg;
6261 int rv;
6262
6263 /* get phy semaphore */
6264 switch (sc->sc_type) {
6265 case WM_T_82571:
6266 case WM_T_82572:
6267 case WM_T_82573:
6268 case WM_T_82574:
6269 case WM_T_82583:
6270 /* XXX should get sw semaphore, too */
6271 rv = wm_get_swsm_semaphore(sc);
6272 break;
6273 case WM_T_82575:
6274 case WM_T_82576:
6275 case WM_T_82580:
6276 case WM_T_I350:
6277 case WM_T_I354:
6278 case WM_T_I210:
6279 case WM_T_I211:
6280 case WM_T_80003:
6281 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6282 break;
6283 case WM_T_ICH8:
6284 case WM_T_ICH9:
6285 case WM_T_ICH10:
6286 case WM_T_PCH:
6287 case WM_T_PCH2:
6288 case WM_T_PCH_LPT:
6289 rv = wm_get_swfwhw_semaphore(sc);
6290 break;
6291 default:
6292 		/* nothing to do */
6293 rv = 0;
6294 break;
6295 }
6296 if (rv != 0) {
6297 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6298 __func__);
6299 return;
6300 }
6301
6302 switch (sc->sc_type) {
6303 case WM_T_82542_2_0:
6304 case WM_T_82542_2_1:
6305 /* null */
6306 break;
6307 case WM_T_82543:
6308 /*
6309 * With 82543, we need to force speed and duplex on the MAC
6310 * equal to what the PHY speed and duplex configuration is.
6311 * In addition, we need to perform a hardware reset on the PHY
6312 * to take it out of reset.
6313 */
6314 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6315 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6316
6317 /* The PHY reset pin is active-low. */
6318 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6319 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6320 CTRL_EXT_SWDPIN(4));
6321 reg |= CTRL_EXT_SWDPIO(4);
6322
6323 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6324 CSR_WRITE_FLUSH(sc);
6325 delay(10*1000);
6326
6327 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6328 CSR_WRITE_FLUSH(sc);
6329 delay(150);
6330 #if 0
6331 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6332 #endif
6333 delay(20*1000); /* XXX extra delay to get PHY ID? */
6334 break;
6335 case WM_T_82544: /* reset 10000us */
6336 case WM_T_82540:
6337 case WM_T_82545:
6338 case WM_T_82545_3:
6339 case WM_T_82546:
6340 case WM_T_82546_3:
6341 case WM_T_82541:
6342 case WM_T_82541_2:
6343 case WM_T_82547:
6344 case WM_T_82547_2:
6345 case WM_T_82571: /* reset 100us */
6346 case WM_T_82572:
6347 case WM_T_82573:
6348 case WM_T_82574:
6349 case WM_T_82575:
6350 case WM_T_82576:
6351 case WM_T_82580:
6352 case WM_T_I350:
6353 case WM_T_I354:
6354 case WM_T_I210:
6355 case WM_T_I211:
6356 case WM_T_82583:
6357 case WM_T_80003:
6358 /* generic reset */
6359 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6360 CSR_WRITE_FLUSH(sc);
6361 delay(20000);
6362 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6363 CSR_WRITE_FLUSH(sc);
6364 delay(20000);
6365
6366 if ((sc->sc_type == WM_T_82541)
6367 || (sc->sc_type == WM_T_82541_2)
6368 || (sc->sc_type == WM_T_82547)
6369 || (sc->sc_type == WM_T_82547_2)) {
6370 			/* workarounds for igp are done in igp_reset() */
6371 /* XXX add code to set LED after phy reset */
6372 }
6373 break;
6374 case WM_T_ICH8:
6375 case WM_T_ICH9:
6376 case WM_T_ICH10:
6377 case WM_T_PCH:
6378 case WM_T_PCH2:
6379 case WM_T_PCH_LPT:
6380 /* generic reset */
6381 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6382 CSR_WRITE_FLUSH(sc);
6383 delay(100);
6384 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6385 CSR_WRITE_FLUSH(sc);
6386 delay(150);
6387 break;
6388 default:
6389 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6390 __func__);
6391 break;
6392 }
6393
6394 /* release PHY semaphore */
6395 switch (sc->sc_type) {
6396 case WM_T_82571:
6397 case WM_T_82572:
6398 case WM_T_82573:
6399 case WM_T_82574:
6400 case WM_T_82583:
6401 /* XXX should put sw semaphore, too */
6402 wm_put_swsm_semaphore(sc);
6403 break;
6404 case WM_T_82575:
6405 case WM_T_82576:
6406 case WM_T_82580:
6407 case WM_T_I350:
6408 case WM_T_I354:
6409 case WM_T_I210:
6410 case WM_T_I211:
6411 case WM_T_80003:
6412 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6413 break;
6414 case WM_T_ICH8:
6415 case WM_T_ICH9:
6416 case WM_T_ICH10:
6417 case WM_T_PCH:
6418 case WM_T_PCH2:
6419 case WM_T_PCH_LPT:
6420 wm_put_swfwhw_semaphore(sc);
6421 break;
6422 default:
6423 		/* nothing to do */
6424 rv = 0;
6425 break;
6426 }
6427
6428 /* get_cfg_done */
6429 wm_get_cfg_done(sc);
6430
6431 /* extra setup */
6432 switch (sc->sc_type) {
6433 case WM_T_82542_2_0:
6434 case WM_T_82542_2_1:
6435 case WM_T_82543:
6436 case WM_T_82544:
6437 case WM_T_82540:
6438 case WM_T_82545:
6439 case WM_T_82545_3:
6440 case WM_T_82546:
6441 case WM_T_82546_3:
6442 case WM_T_82541_2:
6443 case WM_T_82547_2:
6444 case WM_T_82571:
6445 case WM_T_82572:
6446 case WM_T_82573:
6447 case WM_T_82574:
6448 case WM_T_82575:
6449 case WM_T_82576:
6450 case WM_T_82580:
6451 case WM_T_I350:
6452 case WM_T_I354:
6453 case WM_T_I210:
6454 case WM_T_I211:
6455 case WM_T_82583:
6456 case WM_T_80003:
6457 /* null */
6458 break;
6459 case WM_T_82541:
6460 case WM_T_82547:
6461 		/* XXX Configure activity LED after PHY reset */
6462 break;
6463 case WM_T_ICH8:
6464 case WM_T_ICH9:
6465 case WM_T_ICH10:
6466 case WM_T_PCH:
6467 case WM_T_PCH2:
6468 case WM_T_PCH_LPT:
6469 		/* Allow time for h/w to get to a quiescent state after reset */
6470 delay(10*1000);
6471
6472 if (sc->sc_type == WM_T_PCH)
6473 wm_hv_phy_workaround_ich8lan(sc);
6474
6475 if (sc->sc_type == WM_T_PCH2)
6476 wm_lv_phy_workaround_ich8lan(sc);
6477
6478 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6479 /*
6480 * dummy read to clear the phy wakeup bit after lcd
6481 * reset
6482 */
6483 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6484 }
6485
6486 /*
6487 		 * XXX Configure the LCD with the extended configuration region
6488 * in NVM
6489 */
6490
6491 /* Configure the LCD with the OEM bits in NVM */
6492 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6493 || (sc->sc_type == WM_T_PCH_LPT)) {
6494 /*
6495 * Disable LPLU.
6496 * XXX It seems that 82567 has LPLU, too.
6497 */
6498 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6499 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6500 reg |= HV_OEM_BITS_ANEGNOW;
6501 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6502 }
6503 break;
6504 default:
6505 panic("%s: unknown type\n", __func__);
6506 break;
6507 }
6508 }
6509
6510 /*
6511 * wm_get_phy_id_82575:
6512 *
6513 * Return PHY ID. Return -1 if it failed.
6514 */
6515 static int
6516 wm_get_phy_id_82575(struct wm_softc *sc)
6517 {
6518 uint32_t reg;
6519 int phyid = -1;
6520
6521 /* XXX */
6522 if ((sc->sc_flags & WM_F_SGMII) == 0)
6523 return -1;
6524
6525 if (wm_sgmii_uses_mdio(sc)) {
6526 switch (sc->sc_type) {
6527 case WM_T_82575:
6528 case WM_T_82576:
6529 reg = CSR_READ(sc, WMREG_MDIC);
6530 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6531 break;
6532 case WM_T_82580:
6533 case WM_T_I350:
6534 case WM_T_I354:
6535 case WM_T_I210:
6536 case WM_T_I211:
6537 reg = CSR_READ(sc, WMREG_MDICNFG);
6538 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6539 break;
6540 default:
6541 return -1;
6542 }
6543 }
6544
6545 return phyid;
6546 }
6547
6548
6549 /*
6550 * wm_gmii_mediainit:
6551 *
6552 * Initialize media for use on 1000BASE-T devices.
6553 */
6554 static void
6555 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6556 {
6557 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6558 struct mii_data *mii = &sc->sc_mii;
6559 uint32_t reg;
6560
6561 /* We have GMII. */
6562 sc->sc_flags |= WM_F_HAS_MII;
6563
6564 if (sc->sc_type == WM_T_80003)
6565 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6566 else
6567 sc->sc_tipg = TIPG_1000T_DFLT;
6568
6569 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
6570 if ((sc->sc_type == WM_T_82580)
6571 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6572 || (sc->sc_type == WM_T_I211)) {
6573 reg = CSR_READ(sc, WMREG_PHPM);
6574 reg &= ~PHPM_GO_LINK_D;
6575 CSR_WRITE(sc, WMREG_PHPM, reg);
6576 }
6577
6578 /*
6579 * Let the chip set speed/duplex on its own based on
6580 * signals from the PHY.
6581 * XXXbouyer - I'm not sure this is right for the 80003,
6582 * the em driver only sets CTRL_SLU here - but it seems to work.
6583 */
6584 sc->sc_ctrl |= CTRL_SLU;
6585 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6586
6587 /* Initialize our media structures and probe the GMII. */
6588 mii->mii_ifp = ifp;
6589
6590 /*
6591 * Determine the PHY access method.
6592 *
6593 * For SGMII, use SGMII specific method.
6594 *
6595 * For some devices, we can determine the PHY access method
6596 * from sc_type.
6597 *
6598 * For ICH and PCH variants, it's difficult to determine the PHY
6599 * access method by sc_type, so use the PCI product ID for some
6600 * devices.
6601 	 * For other ICH8 variants, try the IGP method first.  If the
6602 	 * PHY can't be detected that way, fall back to the BM method.
6603 */
6604 switch (prodid) {
6605 case PCI_PRODUCT_INTEL_PCH_M_LM:
6606 case PCI_PRODUCT_INTEL_PCH_M_LC:
6607 /* 82577 */
6608 sc->sc_phytype = WMPHY_82577;
6609 break;
6610 case PCI_PRODUCT_INTEL_PCH_D_DM:
6611 case PCI_PRODUCT_INTEL_PCH_D_DC:
6612 /* 82578 */
6613 sc->sc_phytype = WMPHY_82578;
6614 break;
6615 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6616 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6617 /* 82579 */
6618 sc->sc_phytype = WMPHY_82579;
6619 break;
6620 case PCI_PRODUCT_INTEL_82801I_BM:
6621 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6622 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6623 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6624 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6625 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6626 /* 82567 */
6627 sc->sc_phytype = WMPHY_BM;
6628 mii->mii_readreg = wm_gmii_bm_readreg;
6629 mii->mii_writereg = wm_gmii_bm_writereg;
6630 break;
6631 default:
6632 if (((sc->sc_flags & WM_F_SGMII) != 0)
6633 && !wm_sgmii_uses_mdio(sc)){
6634 mii->mii_readreg = wm_sgmii_readreg;
6635 mii->mii_writereg = wm_sgmii_writereg;
6636 } else if (sc->sc_type >= WM_T_80003) {
6637 mii->mii_readreg = wm_gmii_i80003_readreg;
6638 mii->mii_writereg = wm_gmii_i80003_writereg;
6639 } else if (sc->sc_type >= WM_T_I210) {
6640 mii->mii_readreg = wm_gmii_i82544_readreg;
6641 mii->mii_writereg = wm_gmii_i82544_writereg;
6642 } else if (sc->sc_type >= WM_T_82580) {
6643 sc->sc_phytype = WMPHY_82580;
6644 mii->mii_readreg = wm_gmii_82580_readreg;
6645 mii->mii_writereg = wm_gmii_82580_writereg;
6646 } else if (sc->sc_type >= WM_T_82544) {
6647 mii->mii_readreg = wm_gmii_i82544_readreg;
6648 mii->mii_writereg = wm_gmii_i82544_writereg;
6649 } else {
6650 mii->mii_readreg = wm_gmii_i82543_readreg;
6651 mii->mii_writereg = wm_gmii_i82543_writereg;
6652 }
6653 break;
6654 }
6655 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
6656 /* All PCH* use _hv_ */
6657 mii->mii_readreg = wm_gmii_hv_readreg;
6658 mii->mii_writereg = wm_gmii_hv_writereg;
6659 }
6660 mii->mii_statchg = wm_gmii_statchg;
6661
6662 wm_gmii_reset(sc);
6663
6664 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6665 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6666 wm_gmii_mediastatus);
6667
6668 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6669 || (sc->sc_type == WM_T_82580)
6670 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6671 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6672 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6673 /* Attach only one port */
6674 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6675 MII_OFFSET_ANY, MIIF_DOPAUSE);
6676 } else {
6677 int i, id;
6678 uint32_t ctrl_ext;
6679
6680 id = wm_get_phy_id_82575(sc);
6681 if (id != -1) {
6682 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6683 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6684 }
6685 if ((id == -1)
6686 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6687 /* Power on sgmii phy if it is disabled */
6688 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6689 CSR_WRITE(sc, WMREG_CTRL_EXT,
6690 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6691 CSR_WRITE_FLUSH(sc);
6692 delay(300*1000); /* XXX too long */
6693
6694 				/* Try PHY addresses 1 through 7 */
6695 for (i = 1; i < 8; i++)
6696 mii_attach(sc->sc_dev, &sc->sc_mii,
6697 0xffffffff, i, MII_OFFSET_ANY,
6698 MIIF_DOPAUSE);
6699
6700 /* restore previous sfp cage power state */
6701 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6702 }
6703 }
6704 } else {
6705 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6706 MII_OFFSET_ANY, MIIF_DOPAUSE);
6707 }
6708
6709 /*
6710 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6711 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6712 */
6713 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6714 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6715 wm_set_mdio_slow_mode_hv(sc);
6716 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6717 MII_OFFSET_ANY, MIIF_DOPAUSE);
6718 }
6719
6720 /*
6721 * (For ICH8 variants)
6722 * If PHY detection failed, use BM's r/w function and retry.
6723 */
6724 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6725 /* if failed, retry with *_bm_* */
6726 mii->mii_readreg = wm_gmii_bm_readreg;
6727 mii->mii_writereg = wm_gmii_bm_writereg;
6728
6729 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6730 MII_OFFSET_ANY, MIIF_DOPAUSE);
6731 }
6732
6733 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6734 		/* No PHY was found */
6735 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6736 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6737 sc->sc_phytype = WMPHY_NONE;
6738 } else {
6739 /*
6740 * PHY Found!
6741 * Check PHY type.
6742 */
6743 uint32_t model;
6744 struct mii_softc *child;
6745
6746 child = LIST_FIRST(&mii->mii_phys);
6747 if (device_is_a(child->mii_dev, "igphy")) {
6748 struct igphy_softc *isc = (struct igphy_softc *)child;
6749
6750 model = isc->sc_mii.mii_mpd_model;
6751 if (model == MII_MODEL_yyINTEL_I82566)
6752 sc->sc_phytype = WMPHY_IGP_3;
6753 }
6754
6755 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6756 }
6757 }
6758
6759 /*
6760 * wm_gmii_mediachange: [ifmedia interface function]
6761 *
6762 * Set hardware to newly-selected media on a 1000BASE-T device.
6763 */
6764 static int
6765 wm_gmii_mediachange(struct ifnet *ifp)
6766 {
6767 struct wm_softc *sc = ifp->if_softc;
6768 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6769 int rc;
6770
6771 if ((ifp->if_flags & IFF_UP) == 0)
6772 return 0;
6773
6774 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6775 sc->sc_ctrl |= CTRL_SLU;
6776 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6777 || (sc->sc_type > WM_T_82543)) {
6778 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6779 } else {
6780 sc->sc_ctrl &= ~CTRL_ASDE;
6781 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6782 if (ife->ifm_media & IFM_FDX)
6783 sc->sc_ctrl |= CTRL_FD;
6784 switch (IFM_SUBTYPE(ife->ifm_media)) {
6785 case IFM_10_T:
6786 sc->sc_ctrl |= CTRL_SPEED_10;
6787 break;
6788 case IFM_100_TX:
6789 sc->sc_ctrl |= CTRL_SPEED_100;
6790 break;
6791 case IFM_1000_T:
6792 sc->sc_ctrl |= CTRL_SPEED_1000;
6793 break;
6794 default:
6795 panic("wm_gmii_mediachange: bad media 0x%x",
6796 ife->ifm_media);
6797 }
6798 }
6799 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6800 if (sc->sc_type <= WM_T_82543)
6801 wm_gmii_reset(sc);
6802
6803 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6804 return 0;
6805 return rc;
6806 }
6807
6808 /*
6809 * wm_gmii_mediastatus: [ifmedia interface function]
6810 *
6811 * Get the current interface media status on a 1000BASE-T device.
6812 */
6813 static void
6814 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6815 {
6816 struct wm_softc *sc = ifp->if_softc;
6817
6818 ether_mediastatus(ifp, ifmr);
6819 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6820 | sc->sc_flowflags;
6821 }
6822
6823 #define MDI_IO CTRL_SWDPIN(2)
6824 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6825 #define MDI_CLK CTRL_SWDPIN(3)
6826
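/*
 * wm_i82543_mii_sendbits:
 *
 *	Clock out `nbits' bits of `data', MSB first, on the bit-banged
 *	MDIO interface: each bit is driven on MDI_IO and latched by
 *	pulsing MDI_CLK once.
 */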
6827 static void
6828 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6829 {
6830 uint32_t i, v;
6831
6832 v = CSR_READ(sc, WMREG_CTRL);
6833 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6834 v |= MDI_DIR | CTRL_SWDPIO(3);
6835
6836 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6837 if (data & i)
6838 v |= MDI_IO;
6839 else
6840 v &= ~MDI_IO;
6841 CSR_WRITE(sc, WMREG_CTRL, v);
6842 CSR_WRITE_FLUSH(sc);
6843 delay(10);
6844 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6845 CSR_WRITE_FLUSH(sc);
6846 delay(10);
6847 CSR_WRITE(sc, WMREG_CTRL, v);
6848 CSR_WRITE_FLUSH(sc);
6849 delay(10);
6850 }
6851 }
6852
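/*
 * wm_i82543_mii_recvbits:
 *
 *	Turn the MDIO data line around and clock in a 16-bit value
 *	from the PHY, one bit per MDI_CLK pulse, MSB first.
 */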
6853 static uint32_t
6854 wm_i82543_mii_recvbits(struct wm_softc *sc)
6855 {
6856 uint32_t v, i, data = 0;
6857
6858 v = CSR_READ(sc, WMREG_CTRL);
6859 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6860 v |= CTRL_SWDPIO(3);
6861
6862 CSR_WRITE(sc, WMREG_CTRL, v);
6863 CSR_WRITE_FLUSH(sc);
6864 delay(10);
6865 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6866 CSR_WRITE_FLUSH(sc);
6867 delay(10);
6868 CSR_WRITE(sc, WMREG_CTRL, v);
6869 CSR_WRITE_FLUSH(sc);
6870 delay(10);
6871
6872 for (i = 0; i < 16; i++) {
6873 data <<= 1;
6874 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6875 CSR_WRITE_FLUSH(sc);
6876 delay(10);
6877 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6878 data |= 1;
6879 CSR_WRITE(sc, WMREG_CTRL, v);
6880 CSR_WRITE_FLUSH(sc);
6881 delay(10);
6882 }
6883
6884 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6885 CSR_WRITE_FLUSH(sc);
6886 delay(10);
6887 CSR_WRITE(sc, WMREG_CTRL, v);
6888 CSR_WRITE_FLUSH(sc);
6889 delay(10);
6890
6891 return data;
6892 }
6893
6894 #undef MDI_IO
6895 #undef MDI_DIR
6896 #undef MDI_CLK
6897
6898 /*
6899 * wm_gmii_i82543_readreg: [mii interface function]
6900 *
6901 * Read a PHY register on the GMII (i82543 version).
6902 */
6903 static int
6904 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6905 {
6906 struct wm_softc *sc = device_private(self);
6907 int rv;
6908
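	/*
	 * Send the 32-bit preamble of all ones, then the first 14 bits
	 * of the IEEE 802.3 clause 22 read frame: start and read-opcode
	 * bits in the top positions, followed by the PHY and register
	 * addresses; the PHY then drives the 16 data bits back to us.
	 */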
6909 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6910 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
6911 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6912 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
6913
6914 DPRINTF(WM_DEBUG_GMII,
6915 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6916 device_xname(sc->sc_dev), phy, reg, rv));
6917
6918 return rv;
6919 }
6920
6921 /*
6922 * wm_gmii_i82543_writereg: [mii interface function]
6923 *
6924 * Write a PHY register on the GMII (i82543 version).
6925 */
6926 static void
6927 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6928 {
6929 struct wm_softc *sc = device_private(self);
6930
6931 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6932 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6933 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6934 (MII_COMMAND_START << 30), 32);
6935 }
6936
6937 /*
6938 * wm_gmii_i82544_readreg: [mii interface function]
6939 *
6940 * Read a PHY register on the GMII.
6941 */
6942 static int
6943 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6944 {
6945 struct wm_softc *sc = device_private(self);
6946 uint32_t mdic = 0;
6947 int i, rv;
6948
6949 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6950 MDIC_REGADD(reg));
6951
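	/* Poll until the hardware sets MDIC_READY (or we time out). */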
6952 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6953 mdic = CSR_READ(sc, WMREG_MDIC);
6954 if (mdic & MDIC_READY)
6955 break;
6956 delay(50);
6957 }
6958
6959 if ((mdic & MDIC_READY) == 0) {
6960 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6961 device_xname(sc->sc_dev), phy, reg);
6962 rv = 0;
6963 } else if (mdic & MDIC_E) {
6964 #if 0 /* This is normal if no PHY is present. */
6965 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6966 device_xname(sc->sc_dev), phy, reg);
6967 #endif
6968 rv = 0;
6969 } else {
6970 rv = MDIC_DATA(mdic);
6971 if (rv == 0xffff)
6972 rv = 0;
6973 }
6974
6975 return rv;
6976 }
6977
6978 /*
6979 * wm_gmii_i82544_writereg: [mii interface function]
6980 *
6981 * Write a PHY register on the GMII.
6982 */
6983 static void
6984 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6985 {
6986 struct wm_softc *sc = device_private(self);
6987 uint32_t mdic = 0;
6988 int i;
6989
6990 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6991 MDIC_REGADD(reg) | MDIC_DATA(val));
6992
6993 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6994 mdic = CSR_READ(sc, WMREG_MDIC);
6995 if (mdic & MDIC_READY)
6996 break;
6997 delay(50);
6998 }
6999
7000 if ((mdic & MDIC_READY) == 0)
7001 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
7002 device_xname(sc->sc_dev), phy, reg);
7003 else if (mdic & MDIC_E)
7004 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
7005 device_xname(sc->sc_dev), phy, reg);
7006 }
7007
7008 /*
7009 * wm_gmii_i80003_readreg: [mii interface function]
7010 *
7011  *	Read a PHY register on the Kumeran bus.
7012  *	This could be handled by the PHY layer if we didn't have to lock
7013  *	the resource ...
7014 */
7015 static int
7016 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
7017 {
7018 struct wm_softc *sc = device_private(self);
7019 int sem;
7020 int rv;
7021
7022 if (phy != 1) /* only one PHY on kumeran bus */
7023 return 0;
7024
7025 sem = swfwphysem[sc->sc_funcid];
7026 if (wm_get_swfw_semaphore(sc, sem)) {
7027 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7028 __func__);
7029 return 0;
7030 }
7031
7032 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7033 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7034 reg >> GG82563_PAGE_SHIFT);
7035 } else {
7036 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7037 reg >> GG82563_PAGE_SHIFT);
7038 }
7039 	/* Wait 200us more to work around a bug in the MDIC ready bit */
7040 delay(200);
7041 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7042 delay(200);
7043
7044 wm_put_swfw_semaphore(sc, sem);
7045 return rv;
7046 }
7047
7048 /*
7049 * wm_gmii_i80003_writereg: [mii interface function]
7050 *
7051  *	Write a PHY register on the Kumeran bus.
7052  *	This could be handled by the PHY layer if we didn't have to lock
7053  *	the resource ...
7054 */
7055 static void
7056 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
7057 {
7058 struct wm_softc *sc = device_private(self);
7059 int sem;
7060
7061 if (phy != 1) /* only one PHY on kumeran bus */
7062 return;
7063
7064 sem = swfwphysem[sc->sc_funcid];
7065 if (wm_get_swfw_semaphore(sc, sem)) {
7066 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7067 __func__);
7068 return;
7069 }
7070
7071 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7072 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7073 reg >> GG82563_PAGE_SHIFT);
7074 } else {
7075 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7076 reg >> GG82563_PAGE_SHIFT);
7077 }
7078 	/* Wait 200us more to work around a bug in the MDIC ready bit */
7079 delay(200);
7080 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7081 delay(200);
7082
7083 wm_put_swfw_semaphore(sc, sem);
7084 }
7085
7086 /*
7087 * wm_gmii_bm_readreg: [mii interface function]
7088 *
7089  *	Read a PHY register on the BM PHY.
7090  *	This could be handled by the PHY layer if we didn't have to lock
7091  *	the resource ...
7092 */
7093 static int
7094 wm_gmii_bm_readreg(device_t self, int phy, int reg)
7095 {
7096 struct wm_softc *sc = device_private(self);
7097 int sem;
7098 int rv;
7099
7100 sem = swfwphysem[sc->sc_funcid];
7101 if (wm_get_swfw_semaphore(sc, sem)) {
7102 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7103 __func__);
7104 return 0;
7105 }
7106
7107 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7108 if (phy == 1)
7109 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7110 reg);
7111 else
7112 wm_gmii_i82544_writereg(self, phy,
7113 GG82563_PHY_PAGE_SELECT,
7114 reg >> GG82563_PAGE_SHIFT);
7115 }
7116
7117 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7118 wm_put_swfw_semaphore(sc, sem);
7119 return rv;
7120 }
7121
7122 /*
7123 * wm_gmii_bm_writereg: [mii interface function]
7124 *
7125  *	Write a PHY register on the BM PHY.
7126  *	This could be handled by the PHY layer if we didn't have to lock
7127  *	the resource ...
7128 */
7129 static void
7130 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
7131 {
7132 struct wm_softc *sc = device_private(self);
7133 int sem;
7134
7135 sem = swfwphysem[sc->sc_funcid];
7136 if (wm_get_swfw_semaphore(sc, sem)) {
7137 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7138 __func__);
7139 return;
7140 }
7141
7142 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7143 if (phy == 1)
7144 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7145 reg);
7146 else
7147 wm_gmii_i82544_writereg(self, phy,
7148 GG82563_PHY_PAGE_SELECT,
7149 reg >> GG82563_PAGE_SHIFT);
7150 }
7151
7152 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7153 wm_put_swfw_semaphore(sc, sem);
7154 }
7155
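/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register (page 800).  Accessing
 *	page 800 requires first enabling host wakeup register access on
 *	the enable page (769), then selecting page 800 and issuing the
 *	address and data opcodes; the original enable bits are restored
 *	afterwards.
 */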
7156 static void
7157 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
7158 {
7159 struct wm_softc *sc = device_private(self);
7160 uint16_t regnum = BM_PHY_REG_NUM(offset);
7161 uint16_t wuce;
7162
7163 /* XXX Gig must be disabled for MDIO accesses to page 800 */
7164 if (sc->sc_type == WM_T_PCH) {
7165 		/* XXX The e1000 driver does nothing here... why? */
7166 }
7167
7168 /* Set page 769 */
7169 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7170 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7171
7172 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
7173
7174 wuce &= ~BM_WUC_HOST_WU_BIT;
7175 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
7176 wuce | BM_WUC_ENABLE_BIT);
7177
7178 /* Select page 800 */
7179 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7180 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
7181
7182 /* Write page 800 */
7183 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
7184
7185 if (rd)
7186 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7187 else
7188 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7189
7190 /* Set page 769 */
7191 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7192 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7193
7194 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7195 }
7196
7197 /*
7198 * wm_gmii_hv_readreg: [mii interface function]
7199 *
7200  *	Read a PHY register on the HV (PCH and newer) PHY.
7201  *	This could be handled by the PHY layer if we didn't have to lock
7202  *	the resource ...
7203 */
7204 static int
7205 wm_gmii_hv_readreg(device_t self, int phy, int reg)
7206 {
7207 struct wm_softc *sc = device_private(self);
7208 uint16_t page = BM_PHY_REG_PAGE(reg);
7209 uint16_t regnum = BM_PHY_REG_NUM(reg);
7210 uint16_t val;
7211 int rv;
7212
7213 if (wm_get_swfwhw_semaphore(sc)) {
7214 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7215 __func__);
7216 return 0;
7217 }
7218
7219 /* XXX Workaround failure in MDIO access while cable is disconnected */
7220 if (sc->sc_phytype == WMPHY_82577) {
7221 /* XXX must write */
7222 }
7223
7224 /* Page 800 works differently than the rest so it has its own func */
7225 if (page == BM_WUC_PAGE) {
7226 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
7227 return val;
7228 }
7229
7230 /*
7231 	 * Pages lower than 768 work differently from the rest, so they
7232 	 * would need their own function (unimplemented here)
7233 */
7234 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7235 printf("gmii_hv_readreg!!!\n");
7236 return 0;
7237 }
7238
7239 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7240 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7241 page << BME1000_PAGE_SHIFT);
7242 }
7243
7244 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7245 wm_put_swfwhw_semaphore(sc);
7246 return rv;
7247 }
7248
7249 /*
7250 * wm_gmii_hv_writereg: [mii interface function]
7251 *
7252  *	Write a PHY register on the HV (PCH and newer) PHY.
7253  *	This could be handled by the PHY layer if we didn't have to lock
7254  *	the resource ...
7255 */
7256 static void
7257 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7258 {
7259 struct wm_softc *sc = device_private(self);
7260 uint16_t page = BM_PHY_REG_PAGE(reg);
7261 uint16_t regnum = BM_PHY_REG_NUM(reg);
7262
7263 if (wm_get_swfwhw_semaphore(sc)) {
7264 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7265 __func__);
7266 return;
7267 }
7268
7269 /* XXX Workaround failure in MDIO access while cable is disconnected */
7270
7271 /* Page 800 works differently than the rest so it has its own func */
7272 if (page == BM_WUC_PAGE) {
7273 uint16_t tmp;
7274
7275 tmp = val;
7276 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
7277 return;
7278 }
7279
7280 /*
7281 	 * Pages lower than 768 work differently from the rest, so they
7282 	 * would need their own function (unimplemented here)
7283 */
7284 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7285 printf("gmii_hv_writereg!!!\n");
7286 return;
7287 }
7288
7289 /*
7290 * XXX Workaround MDIO accesses being disabled after entering IEEE
7291 * Power Down (whenever bit 11 of the PHY control register is set)
7292 */
7293
7294 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7295 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7296 page << BME1000_PAGE_SHIFT);
7297 }
7298
7299 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7300 wm_put_swfwhw_semaphore(sc);
7301 }
7302
7303 /*
7304 * wm_gmii_82580_readreg: [mii interface function]
7305 *
7306 * Read a PHY register on the 82580 and I350.
7307  *	This could be handled by the PHY layer if we didn't have to lock
7308  *	the resource ...
7309 */
7310 static int
7311 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7312 {
7313 struct wm_softc *sc = device_private(self);
7314 int sem;
7315 int rv;
7316
7317 sem = swfwphysem[sc->sc_funcid];
7318 if (wm_get_swfw_semaphore(sc, sem)) {
7319 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7320 __func__);
7321 return 0;
7322 }
7323
7324 rv = wm_gmii_i82544_readreg(self, phy, reg);
7325
7326 wm_put_swfw_semaphore(sc, sem);
7327 return rv;
7328 }
7329
7330 /*
7331 * wm_gmii_82580_writereg: [mii interface function]
7332 *
7333 * Write a PHY register on the 82580 and I350.
7334  *	This could be handled by the PHY layer if we didn't have to lock
7335  *	the resource ...
7336 */
7337 static void
7338 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7339 {
7340 struct wm_softc *sc = device_private(self);
7341 int sem;
7342
7343 sem = swfwphysem[sc->sc_funcid];
7344 if (wm_get_swfw_semaphore(sc, sem)) {
7345 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7346 __func__);
7347 return;
7348 }
7349
7350 wm_gmii_i82544_writereg(self, phy, reg, val);
7351
7352 wm_put_swfw_semaphore(sc, sem);
7353 }
7354
7355 /*
7356 * wm_gmii_statchg: [mii interface function]
7357 *
7358 * Callback from MII layer when media changes.
7359 */
7360 static void
7361 wm_gmii_statchg(struct ifnet *ifp)
7362 {
7363 struct wm_softc *sc = ifp->if_softc;
7364 struct mii_data *mii = &sc->sc_mii;
7365
7366 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7367 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7368 sc->sc_fcrtl &= ~FCRTL_XONE;
7369
7370 /*
7371 * Get flow control negotiation result.
7372 */
7373 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7374 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7375 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7376 mii->mii_media_active &= ~IFM_ETH_FMASK;
7377 }
7378
7379 if (sc->sc_flowflags & IFM_FLOW) {
7380 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7381 sc->sc_ctrl |= CTRL_TFCE;
7382 sc->sc_fcrtl |= FCRTL_XONE;
7383 }
7384 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7385 sc->sc_ctrl |= CTRL_RFCE;
7386 }
7387
7388 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7389 DPRINTF(WM_DEBUG_LINK,
7390 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7391 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7392 } else {
7393 DPRINTF(WM_DEBUG_LINK,
7394 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7395 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7396 }
7397
7398 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7399 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7400 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7401 : WMREG_FCRTL, sc->sc_fcrtl);
7402 if (sc->sc_type == WM_T_80003) {
7403 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7404 case IFM_1000_T:
7405 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7406 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7407 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7408 break;
7409 default:
7410 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7411 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7412 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7413 break;
7414 }
7415 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7416 }
7417 }
7418
7419 /*
7420 * wm_kmrn_readreg:
7421 *
7422 * Read a kumeran register
7423 */
7424 static int
7425 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7426 {
7427 int rv;
7428
7429 if (sc->sc_flags & WM_F_LOCK_SWFW) {
7430 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7431 aprint_error_dev(sc->sc_dev,
7432 "%s: failed to get semaphore\n", __func__);
7433 return 0;
7434 }
7435 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
7436 if (wm_get_swfwhw_semaphore(sc)) {
7437 aprint_error_dev(sc->sc_dev,
7438 "%s: failed to get semaphore\n", __func__);
7439 return 0;
7440 }
7441 }
7442
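	/* Kick off the read: write the offset with the read-enable bit. */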
7443 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7444 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7445 KUMCTRLSTA_REN);
7446 CSR_WRITE_FLUSH(sc);
7447 delay(2);
7448
7449 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7450
7451 if (sc->sc_flags & WM_F_LOCK_SWFW)
7452 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7453 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
7454 wm_put_swfwhw_semaphore(sc);
7455
7456 return rv;
7457 }
7458
7459 /*
7460 * wm_kmrn_writereg:
7461 *
7462 * Write a kumeran register
7463 */
7464 static void
7465 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7466 {
7467
7468 if (sc->sc_flags & WM_F_LOCK_SWFW) {
7469 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7470 aprint_error_dev(sc->sc_dev,
7471 "%s: failed to get semaphore\n", __func__);
7472 return;
7473 }
7474 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
7475 if (wm_get_swfwhw_semaphore(sc)) {
7476 aprint_error_dev(sc->sc_dev,
7477 "%s: failed to get semaphore\n", __func__);
7478 return;
7479 }
7480 }
7481
7482 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7483 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7484 (val & KUMCTRLSTA_MASK));
7485
7486 if (sc->sc_flags & WM_F_LOCK_SWFW)
7487 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7488 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
7489 wm_put_swfwhw_semaphore(sc);
7490 }
7491
7492 /* SGMII related */
7493
7494 /*
7495 * wm_sgmii_uses_mdio
7496 *
7497 * Check whether the transaction is to the internal PHY or the external
7498 * MDIO interface. Return true if it's MDIO.
7499 */
7500 static bool
7501 wm_sgmii_uses_mdio(struct wm_softc *sc)
7502 {
7503 uint32_t reg;
7504 bool ismdio = false;
7505
7506 switch (sc->sc_type) {
7507 case WM_T_82575:
7508 case WM_T_82576:
7509 reg = CSR_READ(sc, WMREG_MDIC);
7510 ismdio = ((reg & MDIC_DEST) != 0);
7511 break;
7512 case WM_T_82580:
7513 case WM_T_I350:
7514 case WM_T_I354:
7515 case WM_T_I210:
7516 case WM_T_I211:
7517 reg = CSR_READ(sc, WMREG_MDICNFG);
7518 ismdio = ((reg & MDICNFG_DEST) != 0);
7519 break;
7520 default:
7521 break;
7522 }
7523
7524 return ismdio;
7525 }
7526
7527 /*
7528 * wm_sgmii_readreg: [mii interface function]
7529 *
7530 * Read a PHY register on the SGMII
7531 * This could be handled by the PHY layer if we didn't have to lock the
7532  * resource ...
7533 */
7534 static int
7535 wm_sgmii_readreg(device_t self, int phy, int reg)
7536 {
7537 struct wm_softc *sc = device_private(self);
7538 uint32_t i2ccmd;
7539 int i, rv;
7540
7541 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7542 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7543 __func__);
7544 return 0;
7545 }
7546
7547 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7548 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7549 | I2CCMD_OPCODE_READ;
7550 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7551
7552 /* Poll the ready bit */
7553 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7554 delay(50);
7555 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7556 if (i2ccmd & I2CCMD_READY)
7557 break;
7558 }
7559 if ((i2ccmd & I2CCMD_READY) == 0)
7560 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7561 if ((i2ccmd & I2CCMD_ERROR) != 0)
7562 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7563
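	/* The I2C interface returns the data MSB first; swap to host order. */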
7564 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7565
7566 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7567 return rv;
7568 }
7569
7570 /*
7571 * wm_sgmii_writereg: [mii interface function]
7572 *
7573 * Write a PHY register on the SGMII.
7574 * This could be handled by the PHY layer if we didn't have to lock the
7575  * resource ...
7576 */
7577 static void
7578 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7579 {
7580 struct wm_softc *sc = device_private(self);
7581 uint32_t i2ccmd;
7582 int i;
7583 int val_swapped;
7584
7585 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7586 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7587 __func__);
7588 return;
7589 }
7590 /* Swap the data bytes for the I2C interface */
7591 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
7592 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7593 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7594 | I2CCMD_OPCODE_WRITE | val_swapped;
7595 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7596
7597 /* Poll the ready bit */
7598 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7599 delay(50);
7600 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7601 if (i2ccmd & I2CCMD_READY)
7602 break;
7603 }
7604 if ((i2ccmd & I2CCMD_READY) == 0)
7605 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7606 if ((i2ccmd & I2CCMD_ERROR) != 0)
7607 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7608
7609 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7610 }
7611
7612 /* TBI related */
7613
7614 /*
7615 * wm_tbi_mediainit:
7616 *
7617 * Initialize media for use on 1000BASE-X devices.
7618 */
7619 static void
7620 wm_tbi_mediainit(struct wm_softc *sc)
7621 {
7622 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7623 const char *sep = "";
7624
7625 if (sc->sc_type < WM_T_82543)
7626 sc->sc_tipg = TIPG_WM_DFLT;
7627 else
7628 sc->sc_tipg = TIPG_LG_DFLT;
7629
7630 sc->sc_tbi_serdes_anegticks = 5;
7631
7632 /* Initialize our media structures */
7633 sc->sc_mii.mii_ifp = ifp;
7634 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7635
7636 if ((sc->sc_type >= WM_T_82575)
7637 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
7638 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
7639 wm_serdes_mediachange, wm_serdes_mediastatus);
7640 else
7641 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
7642 wm_tbi_mediachange, wm_tbi_mediastatus);
7643
7644 /*
7645 * SWD Pins:
7646 *
7647 * 0 = Link LED (output)
7648 * 1 = Loss Of Signal (input)
7649 */
7650 sc->sc_ctrl |= CTRL_SWDPIO(0);
7651
7652 /* XXX Perhaps this is only for TBI */
7653 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
7654 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
7655
7656 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7657 sc->sc_ctrl &= ~CTRL_LRST;
7658
7659 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7660
7661 #define ADD(ss, mm, dd) \
7662 do { \
7663 aprint_normal("%s%s", sep, ss); \
7664 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
7665 sep = ", "; \
7666 } while (/*CONSTCOND*/0)
7667
7668 aprint_normal_dev(sc->sc_dev, "");
7669
7670 /* Only 82545 is LX */
7671 if (sc->sc_type == WM_T_82545) {
7672 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
7673 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
7674 } else {
7675 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
7676 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
7677 }
7678 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
7679 aprint_normal("\n");
7680
7681 #undef ADD
7682
7683 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
7684 }
7685
7686 /*
7687 * wm_tbi_mediachange: [ifmedia interface function]
7688 *
7689 * Set hardware to newly-selected media on a 1000BASE-X device.
7690 */
7691 static int
7692 wm_tbi_mediachange(struct ifnet *ifp)
7693 {
7694 struct wm_softc *sc = ifp->if_softc;
7695 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7696 uint32_t status;
7697 int i;
7698
7699 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
7700 /* XXX need some work for >= 82571 and < 82575 */
7701 if (sc->sc_type < WM_T_82575)
7702 return 0;
7703 }
7704
7705 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
7706 || (sc->sc_type >= WM_T_82575))
7707 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
7708
7709 sc->sc_ctrl &= ~CTRL_LRST;
7710 sc->sc_txcw = TXCW_ANE;
7711 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7712 sc->sc_txcw |= TXCW_FD | TXCW_HD;
7713 else if (ife->ifm_media & IFM_FDX)
7714 sc->sc_txcw |= TXCW_FD;
7715 else
7716 sc->sc_txcw |= TXCW_HD;
7717
7718 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
7719 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
7720
7721 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
7722 device_xname(sc->sc_dev), sc->sc_txcw));
7723 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7724 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7725 CSR_WRITE_FLUSH(sc);
7726 delay(1000);
7727
7728 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
7729 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
7730
7731 /*
7732 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
7733 	 * optics detect a signal; on older chips the sense is inverted.
7734 */
7735 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
7736 /* Have signal; wait for the link to come up. */
7737 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
7738 delay(10000);
7739 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
7740 break;
7741 }
7742
7743 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
7744 device_xname(sc->sc_dev),i));
7745
7746 status = CSR_READ(sc, WMREG_STATUS);
7747 DPRINTF(WM_DEBUG_LINK,
7748 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
7749 device_xname(sc->sc_dev),status, STATUS_LU));
7750 if (status & STATUS_LU) {
7751 /* Link is up. */
7752 DPRINTF(WM_DEBUG_LINK,
7753 ("%s: LINK: set media -> link up %s\n",
7754 device_xname(sc->sc_dev),
7755 (status & STATUS_FD) ? "FDX" : "HDX"));
7756
7757 /*
7758 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
7759 			 * automatically, so refresh the cached sc->sc_ctrl.
7760 */
7761 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7762 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7763 sc->sc_fcrtl &= ~FCRTL_XONE;
7764 if (status & STATUS_FD)
7765 sc->sc_tctl |=
7766 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7767 else
7768 sc->sc_tctl |=
7769 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7770 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
7771 sc->sc_fcrtl |= FCRTL_XONE;
7772 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7773 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7774 WMREG_OLD_FCRTL : WMREG_FCRTL,
7775 sc->sc_fcrtl);
7776 sc->sc_tbi_linkup = 1;
7777 } else {
7778 if (i == WM_LINKUP_TIMEOUT)
7779 wm_check_for_link(sc);
7780 /* Link is down. */
7781 DPRINTF(WM_DEBUG_LINK,
7782 ("%s: LINK: set media -> link down\n",
7783 device_xname(sc->sc_dev)));
7784 sc->sc_tbi_linkup = 0;
7785 }
7786 } else {
7787 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
7788 device_xname(sc->sc_dev)));
7789 sc->sc_tbi_linkup = 0;
7790 }
7791
7792 wm_tbi_serdes_set_linkled(sc);
7793
7794 return 0;
7795 }
7796
7797 /*
7798 * wm_tbi_mediastatus: [ifmedia interface function]
7799 *
7800 * Get the current interface media status on a 1000BASE-X device.
7801 */
7802 static void
7803 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7804 {
7805 struct wm_softc *sc = ifp->if_softc;
7806 uint32_t ctrl, status;
7807
7808 ifmr->ifm_status = IFM_AVALID;
7809 ifmr->ifm_active = IFM_ETHER;
7810
7811 status = CSR_READ(sc, WMREG_STATUS);
7812 if ((status & STATUS_LU) == 0) {
7813 ifmr->ifm_active |= IFM_NONE;
7814 return;
7815 }
7816
7817 ifmr->ifm_status |= IFM_ACTIVE;
7818 /* Only 82545 is LX */
7819 if (sc->sc_type == WM_T_82545)
7820 ifmr->ifm_active |= IFM_1000_LX;
7821 else
7822 ifmr->ifm_active |= IFM_1000_SX;
7823 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
7824 ifmr->ifm_active |= IFM_FDX;
7825 else
7826 ifmr->ifm_active |= IFM_HDX;
7827 ctrl = CSR_READ(sc, WMREG_CTRL);
7828 if (ctrl & CTRL_RFCE)
7829 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
7830 if (ctrl & CTRL_TFCE)
7831 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
7832 }
7833
7834 /* XXX TBI only */
7835 static int
7836 wm_check_for_link(struct wm_softc *sc)
7837 {
7838 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7839 uint32_t rxcw;
7840 uint32_t ctrl;
7841 uint32_t status;
7842 uint32_t sig;
7843
7844 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
7845 /* XXX need some work for >= 82571 */
7846 if (sc->sc_type >= WM_T_82571) {
7847 sc->sc_tbi_linkup = 1;
7848 return 0;
7849 }
7850 }
7851
7852 rxcw = CSR_READ(sc, WMREG_RXCW);
7853 ctrl = CSR_READ(sc, WMREG_CTRL);
7854 status = CSR_READ(sc, WMREG_STATUS);
7855
7856 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7857
7858 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7859 device_xname(sc->sc_dev), __func__,
7860 ((ctrl & CTRL_SWDPIN(1)) == sig),
7861 ((status & STATUS_LU) != 0),
7862 ((rxcw & RXCW_C) != 0)
7863 ));
7864
7865 /*
7866 * SWDPIN LU RXCW
7867 * 0 0 0
7868 * 0 0 1 (should not happen)
7869 * 0 1 0 (should not happen)
7870 * 0 1 1 (should not happen)
7871 * 1 0 0 Disable autonego and force linkup
7872 * 1 0 1 got /C/ but not linkup yet
7873 * 1 1 0 (linkup)
7874 * 1 1 1 If IFM_AUTO, back to autonego
7875 *
7876 */
7877 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7878 && ((status & STATUS_LU) == 0)
7879 && ((rxcw & RXCW_C) == 0)) {
7880 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7881 __func__));
7882 sc->sc_tbi_linkup = 0;
7883 /* Disable auto-negotiation in the TXCW register */
7884 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7885
7886 /*
7887 * Force link-up and also force full-duplex.
7888 *
7889 		 * NOTE: the hardware has already updated TFCE and RFCE in
7890 		 * CTRL, so re-read it into the cached sc->sc_ctrl.
7891 */
7892 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7893 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7894 } else if (((status & STATUS_LU) != 0)
7895 && ((rxcw & RXCW_C) != 0)
7896 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7897 sc->sc_tbi_linkup = 1;
7898 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7899 __func__));
7900 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7901 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7902 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7903 && ((rxcw & RXCW_C) != 0)) {
7904 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7905 } else {
7906 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7907 status));
7908 }
7909
7910 return 0;
7911 }
7912
7913 /*
7914 * wm_tbi_tick:
7915 *
7916 * Check the link on TBI devices.
7917 * This function acts as mii_tick().
7918 */
7919 static void
7920 wm_tbi_tick(struct wm_softc *sc)
7921 {
7922 struct mii_data *mii = &sc->sc_mii;
7923 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
7924 uint32_t status;
7925
7926 KASSERT(WM_TX_LOCKED(sc));
7927
7928 status = CSR_READ(sc, WMREG_STATUS);
7929
7930 /* XXX is this needed? */
7931 (void)CSR_READ(sc, WMREG_RXCW);
7932 (void)CSR_READ(sc, WMREG_CTRL);
7933
7934 /* set link status */
7935 if ((status & STATUS_LU) == 0) {
7936 DPRINTF(WM_DEBUG_LINK,
7937 ("%s: LINK: checklink -> down\n",
7938 device_xname(sc->sc_dev)));
7939 sc->sc_tbi_linkup = 0;
7940 } else if (sc->sc_tbi_linkup == 0) {
7941 DPRINTF(WM_DEBUG_LINK,
7942 ("%s: LINK: checklink -> up %s\n",
7943 device_xname(sc->sc_dev),
7944 (status & STATUS_FD) ? "FDX" : "HDX"));
7945 sc->sc_tbi_linkup = 1;
7946 sc->sc_tbi_serdes_ticks = 0;
7947 }
7948
7949 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
7950 goto setled;
7951
7952 if ((status & STATUS_LU) == 0) {
7953 sc->sc_tbi_linkup = 0;
7954 /* If the timer expired, retry autonegotiation */
7955 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7956 && (++sc->sc_tbi_serdes_ticks
7957 >= sc->sc_tbi_serdes_anegticks)) {
7958 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
7959 sc->sc_tbi_serdes_ticks = 0;
7960 /*
7961 * Reset the link, and let autonegotiation do
7962 * its thing
7963 */
7964 sc->sc_ctrl |= CTRL_LRST;
7965 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7966 CSR_WRITE_FLUSH(sc);
7967 delay(1000);
7968 sc->sc_ctrl &= ~CTRL_LRST;
7969 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7970 CSR_WRITE_FLUSH(sc);
7971 delay(1000);
7972 CSR_WRITE(sc, WMREG_TXCW,
7973 sc->sc_txcw & ~TXCW_ANE);
7974 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7975 }
7976 }
7977
7978 setled:
7979 wm_tbi_serdes_set_linkled(sc);
7980 }
7981
7982 /* SERDES related */
7983 static void
7984 wm_serdes_power_up_link_82575(struct wm_softc *sc)
7985 {
7986 uint32_t reg;
7987
7988 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
7989 && ((sc->sc_flags & WM_F_SGMII) == 0))
7990 return;
7991
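	/*
	 * Enable the PCS and drive SDP3 low; on these adapters SDP3 acts
	 * as the external power-down signal, so clearing it powers the
	 * SerDes/SFP link back up.
	 */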
7992 reg = CSR_READ(sc, WMREG_PCS_CFG);
7993 reg |= PCS_CFG_PCS_EN;
7994 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
7995
7996 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7997 reg &= ~CTRL_EXT_SWDPIN(3);
7998 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7999 CSR_WRITE_FLUSH(sc);
8000 }
8001
8002 static int
8003 wm_serdes_mediachange(struct ifnet *ifp)
8004 {
8005 struct wm_softc *sc = ifp->if_softc;
8006 bool pcs_autoneg = true; /* XXX */
8007 uint32_t ctrl_ext, pcs_lctl, reg;
8008
8009 /* XXX Currently, this function is not called on 8257[12] */
8010 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
8011 || (sc->sc_type >= WM_T_82575))
8012 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
8013
8014 wm_serdes_power_up_link_82575(sc);
8015
8016 sc->sc_ctrl |= CTRL_SLU;
8017
8018 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
8019 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
8020
8021 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8022 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
8023 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
8024 case CTRL_EXT_LINK_MODE_SGMII:
8025 pcs_autoneg = true;
8026 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
8027 break;
8028 case CTRL_EXT_LINK_MODE_1000KX:
8029 pcs_autoneg = false;
8030 /* FALLTHROUGH */
8031 default:
8032 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)){
8033 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
8034 pcs_autoneg = false;
8035 }
8036 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
8037 | CTRL_FRCFDX;
8038 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
8039 }
8040 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8041
8042 if (pcs_autoneg) {
8043 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
8044 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
8045
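		/*
		 * Advertise both symmetric and asymmetric pause in the PCS
		 * autonegotiation word.  The clear-then-set keeps the shape
		 * of the usual flow-control cases even though both bits are
		 * currently always set.
		 */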
8046 reg = CSR_READ(sc, WMREG_PCS_ANADV);
8047 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
8048 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
8049 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
8050 } else
8051 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
8052
8053 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
8054
8056 return 0;
8057 }
8058
8059 static void
8060 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8061 {
8062 struct wm_softc *sc = ifp->if_softc;
8063 struct mii_data *mii = &sc->sc_mii;
8064 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8065 uint32_t pcs_adv, pcs_lpab, reg;
8066
8067 ifmr->ifm_status = IFM_AVALID;
8068 ifmr->ifm_active = IFM_ETHER;
8069
8070 /* Check PCS */
8071 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8072 if ((reg & PCS_LSTS_LINKOK) == 0) {
8073 ifmr->ifm_active |= IFM_NONE;
8074 sc->sc_tbi_linkup = 0;
8075 goto setled;
8076 }
8077
8078 sc->sc_tbi_linkup = 1;
8079 ifmr->ifm_status |= IFM_ACTIVE;
8080 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
8081 if ((reg & PCS_LSTS_FDX) != 0)
8082 ifmr->ifm_active |= IFM_FDX;
8083 else
8084 ifmr->ifm_active |= IFM_HDX;
8085 mii->mii_media_active &= ~IFM_ETH_FMASK;
8086 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
8087 /* Check flow */
8088 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8089 if ((reg & PCS_LSTS_AN_COMP) == 0) {
8090 printf("XXX LINKOK but not ACOMP\n");
8091 goto setled;
8092 }
8093 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
8094 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
8095 printf("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab);
8096 if ((pcs_adv & TXCW_SYM_PAUSE)
8097 && (pcs_lpab & TXCW_SYM_PAUSE)) {
8098 mii->mii_media_active |= IFM_FLOW
8099 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
8100 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
8101 && (pcs_adv & TXCW_ASYM_PAUSE)
8102 && (pcs_lpab & TXCW_SYM_PAUSE)
8103 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
8104 mii->mii_media_active |= IFM_FLOW
8105 | IFM_ETH_TXPAUSE;
8106 } else if ((pcs_adv & TXCW_SYM_PAUSE)
8107 && (pcs_adv & TXCW_ASYM_PAUSE)
8108 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
8109 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
8110 mii->mii_media_active |= IFM_FLOW
8111 | IFM_ETH_RXPAUSE;
8112 } else {
8113 }
8114 }
8115 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8116 | (mii->mii_media_active & IFM_ETH_FMASK);
8117 setled:
8118 wm_tbi_serdes_set_linkled(sc);
8119 }
8120
8121 /*
8122 * wm_serdes_tick:
8123 *
8124 * Check the link on serdes devices.
8125 */
8126 static void
8127 wm_serdes_tick(struct wm_softc *sc)
8128 {
8129 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8130 struct mii_data *mii = &sc->sc_mii;
8131 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
8132 uint32_t reg;
8133
8134 KASSERT(WM_TX_LOCKED(sc));
8135
8136 mii->mii_media_status = IFM_AVALID;
8137 mii->mii_media_active = IFM_ETHER;
8138
8139 /* Check PCS */
8140 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8141 if ((reg & PCS_LSTS_LINKOK) != 0) {
8142 mii->mii_media_status |= IFM_ACTIVE;
8143 sc->sc_tbi_linkup = 1;
8144 sc->sc_tbi_serdes_ticks = 0;
8145 mii->mii_media_active |= IFM_1000_SX; /* XXX */
8146 if ((reg & PCS_LSTS_FDX) != 0)
8147 mii->mii_media_active |= IFM_FDX;
8148 else
8149 mii->mii_media_active |= IFM_HDX;
8150 } else {
8151 mii->mii_media_status |= IFM_NONE;
8152 sc->sc_tbi_linkup = 0;
8153 /* If the timer expired, retry autonegotiation */
8154 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8155 && (++sc->sc_tbi_serdes_ticks
8156 >= sc->sc_tbi_serdes_anegticks)) {
8157 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
8158 sc->sc_tbi_serdes_ticks = 0;
8159 /* XXX */
8160 wm_serdes_mediachange(ifp);
8161 }
8162 }
8163
8164 wm_tbi_serdes_set_linkled(sc);
8165 }
8166
8167 /* SFP related */
8168
8169 static int
8170 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
8171 {
8172 uint32_t i2ccmd;
8173 int i;
8174
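	/*
	 * Start a one-byte read on the I2C interface and poll the READY
	 * bit at 50us intervals, up to I2CCMD_PHY_TIMEOUT times.
	 */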
8175 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
8176 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8177
8178 /* Poll the ready bit */
8179 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8180 delay(50);
8181 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8182 if (i2ccmd & I2CCMD_READY)
8183 break;
8184 }
8185 if ((i2ccmd & I2CCMD_READY) == 0)
8186 return -1;
8187 if ((i2ccmd & I2CCMD_ERROR) != 0)
8188 return -1;
8189
8190 *data = i2ccmd & 0x00ff;
8191
8192 return 0;
8193 }
8194
8195 static uint32_t
8196 wm_sfp_get_media_type(struct wm_softc *sc)
8197 {
8198 uint32_t ctrl_ext;
8199 uint8_t val = 0;
8200 int timeout = 3;
8201 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
8202 int rv = -1;
8203
8204 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8205 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
8206 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
8207 CSR_WRITE_FLUSH(sc);
8208
8209 /* Read SFP module data */
8210 while (timeout) {
8211 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
8212 if (rv == 0)
8213 break;
8214 delay(100*1000); /* XXX too big */
8215 timeout--;
8216 }
8217 if (rv != 0)
8218 goto out;
8219 switch (val) {
8220 case SFF_SFP_ID_SFF:
8221 aprint_normal_dev(sc->sc_dev,
8222 "Module/Connector soldered to board\n");
8223 break;
8224 case SFF_SFP_ID_SFP:
8225 aprint_normal_dev(sc->sc_dev, "SFP\n");
8226 break;
8227 case SFF_SFP_ID_UNKNOWN:
8228 goto out;
8229 default:
8230 break;
8231 }
8232
8233 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
8234 if (rv != 0) {
8235 goto out;
8236 }
8237
8238 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
8239 mediatype = WM_MEDIATYPE_SERDES;
8240 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
8241 sc->sc_flags |= WM_F_SGMII;
8242 mediatype = WM_MEDIATYPE_COPPER;
8243 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
8244 sc->sc_flags |= WM_F_SGMII;
8245 mediatype = WM_MEDIATYPE_SERDES;
8246 }
8247
8248 out:
8249 /* Restore I2C interface setting */
8250 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8251
8252 return mediatype;
8253 }
8254 /*
8255 * NVM related.
8256 * Microwire, SPI (w/wo EERD) and Flash.
8257 */
8258
8259 /* Both spi and uwire */
8260
8261 /*
8262 * wm_eeprom_sendbits:
8263 *
8264 * Send a series of bits to the EEPROM.
8265 */
8266 static void
8267 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
8268 {
8269 uint32_t reg;
8270 int x;
8271
8272 reg = CSR_READ(sc, WMREG_EECD);
8273
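	/*
	 * Clock the bits out MSB first: present each bit on DI, then
	 * pulse SK high and low with a 2us delay around each edge.
	 */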
8274 for (x = nbits; x > 0; x--) {
8275 if (bits & (1U << (x - 1)))
8276 reg |= EECD_DI;
8277 else
8278 reg &= ~EECD_DI;
8279 CSR_WRITE(sc, WMREG_EECD, reg);
8280 CSR_WRITE_FLUSH(sc);
8281 delay(2);
8282 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
8283 CSR_WRITE_FLUSH(sc);
8284 delay(2);
8285 CSR_WRITE(sc, WMREG_EECD, reg);
8286 CSR_WRITE_FLUSH(sc);
8287 delay(2);
8288 }
8289 }
8290
8291 /*
8292 * wm_eeprom_recvbits:
8293 *
8294 * Receive a series of bits from the EEPROM.
8295 */
8296 static void
8297 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
8298 {
8299 uint32_t reg, val;
8300 int x;
8301
8302 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
8303
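	/*
	 * Clock the bits in MSB first: raise SK, sample DO, then lower
	 * SK, with a 2us delay around each edge.
	 */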
8304 val = 0;
8305 for (x = nbits; x > 0; x--) {
8306 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
8307 CSR_WRITE_FLUSH(sc);
8308 delay(2);
8309 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
8310 val |= (1U << (x - 1));
8311 CSR_WRITE(sc, WMREG_EECD, reg);
8312 CSR_WRITE_FLUSH(sc);
8313 delay(2);
8314 }
8315 *valp = val;
8316 }
8317
8318 /* Microwire */
8319
8320 /*
8321 * wm_nvm_read_uwire:
8322 *
8323 * Read a word from the EEPROM using the MicroWire protocol.
8324 */
8325 static int
8326 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8327 {
8328 uint32_t reg, val;
8329 int i;
8330
8331 for (i = 0; i < wordcnt; i++) {
8332 /* Clear SK and DI. */
8333 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
8334 CSR_WRITE(sc, WMREG_EECD, reg);
8335
8336 /*
8337 * XXX: workaround for a bug in qemu-0.12.x and prior
8338 * and Xen.
8339 *
8340 * We use this workaround only for 82540 because qemu's
8341 * e1000 act as 82540.
8342 */
8343 if (sc->sc_type == WM_T_82540) {
8344 reg |= EECD_SK;
8345 CSR_WRITE(sc, WMREG_EECD, reg);
8346 reg &= ~EECD_SK;
8347 CSR_WRITE(sc, WMREG_EECD, reg);
8348 CSR_WRITE_FLUSH(sc);
8349 delay(2);
8350 }
8351 /* XXX: end of workaround */
8352
8353 /* Set CHIP SELECT. */
8354 reg |= EECD_CS;
8355 CSR_WRITE(sc, WMREG_EECD, reg);
8356 CSR_WRITE_FLUSH(sc);
8357 delay(2);
8358
8359 /* Shift in the READ command. */
8360 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
8361
8362 /* Shift in address. */
8363 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
8364
8365 /* Shift out the data. */
8366 wm_eeprom_recvbits(sc, &val, 16);
8367 data[i] = val & 0xffff;
8368
8369 /* Clear CHIP SELECT. */
8370 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
8371 CSR_WRITE(sc, WMREG_EECD, reg);
8372 CSR_WRITE_FLUSH(sc);
8373 delay(2);
8374 }
8375
8376 return 0;
8377 }
8378
8379 /* SPI */
8380
8381 /*
8382 * Set SPI and FLASH related information from the EECD register.
8383 * For 82541 and 82547, the word size is taken from EEPROM.
8384 */
8385 static int
8386 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
8387 {
8388 int size;
8389 uint32_t reg;
8390 uint16_t data;
8391
8392 reg = CSR_READ(sc, WMREG_EECD);
8393 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
8394
8395 /* Read the size of NVM from EECD by default */
8396 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8397 switch (sc->sc_type) {
8398 case WM_T_82541:
8399 case WM_T_82541_2:
8400 case WM_T_82547:
8401 case WM_T_82547_2:
8402 /* Set dummy value to access EEPROM */
8403 sc->sc_nvm_wordsize = 64;
8404 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
8405 reg = data;
8406 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8407 if (size == 0)
8408 size = 6; /* 64 word size */
8409 else
8410 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
8411 break;
8412 case WM_T_80003:
8413 case WM_T_82571:
8414 case WM_T_82572:
8415 case WM_T_82573: /* SPI case */
8416 case WM_T_82574: /* SPI case */
8417 case WM_T_82583: /* SPI case */
8418 size += NVM_WORD_SIZE_BASE_SHIFT;
8419 if (size > 14)
8420 size = 14;
8421 break;
8422 case WM_T_82575:
8423 case WM_T_82576:
8424 case WM_T_82580:
8425 case WM_T_I350:
8426 case WM_T_I354:
8427 case WM_T_I210:
8428 case WM_T_I211:
8429 size += NVM_WORD_SIZE_BASE_SHIFT;
8430 if (size > 15)
8431 size = 15;
8432 break;
8433 default:
8434 aprint_error_dev(sc->sc_dev,
8435 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
8436 return -1;
8437 break;
8438 }
8439
8440 sc->sc_nvm_wordsize = 1 << size;
8441
8442 return 0;
8443 }
8444
8445 /*
8446 * wm_nvm_ready_spi:
8447 *
8448 * Wait for a SPI EEPROM to be ready for commands.
8449 */
8450 static int
8451 wm_nvm_ready_spi(struct wm_softc *sc)
8452 {
8453 uint32_t val;
8454 int usec;
8455
8456 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
8457 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
8458 wm_eeprom_recvbits(sc, &val, 8);
8459 if ((val & SPI_SR_RDY) == 0)
8460 break;
8461 }
8462 if (usec >= SPI_MAX_RETRIES) {
8463 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
8464 return 1;
8465 }
8466 return 0;
8467 }
8468
8469 /*
8470 * wm_nvm_read_spi:
8471 *
8472  * Read a word from the EEPROM using the SPI protocol.
8473 */
8474 static int
8475 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8476 {
8477 uint32_t reg, val;
8478 int i;
8479 uint8_t opc;
8480
8481 /* Clear SK and CS. */
8482 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
8483 CSR_WRITE(sc, WMREG_EECD, reg);
8484 CSR_WRITE_FLUSH(sc);
8485 delay(2);
8486
8487 if (wm_nvm_ready_spi(sc))
8488 return 1;
8489
8490 /* Toggle CS to flush commands. */
8491 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
8492 CSR_WRITE_FLUSH(sc);
8493 delay(2);
8494 CSR_WRITE(sc, WMREG_EECD, reg);
8495 CSR_WRITE_FLUSH(sc);
8496 delay(2);
8497
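	/*
	 * For small parts with 8 address bits, address bit 8 is carried
	 * in the A8 bit of the READ opcode.  The device is byte
	 * addressed, hence the word << 1 below.
	 */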
8498 opc = SPI_OPC_READ;
8499 if (sc->sc_nvm_addrbits == 8 && word >= 128)
8500 opc |= SPI_OPC_A8;
8501
8502 wm_eeprom_sendbits(sc, opc, 8);
8503 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
8504
8505 for (i = 0; i < wordcnt; i++) {
8506 wm_eeprom_recvbits(sc, &val, 16);
8507 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
8508 }
8509
8510 /* Raise CS and clear SK. */
8511 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
8512 CSR_WRITE(sc, WMREG_EECD, reg);
8513 CSR_WRITE_FLUSH(sc);
8514 delay(2);
8515
8516 return 0;
8517 }
8518
8519 /* Using with EERD */
8520
8521 static int
8522 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
8523 {
8524 uint32_t attempts = 100000;
8525 uint32_t i, reg = 0;
8526 int32_t done = -1;
8527
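	/* Poll the DONE bit at 5us intervals; ~500ms worst case. */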
8528 for (i = 0; i < attempts; i++) {
8529 reg = CSR_READ(sc, rw);
8530
8531 if (reg & EERD_DONE) {
8532 done = 0;
8533 break;
8534 }
8535 delay(5);
8536 }
8537
8538 return done;
8539 }
8540
8541 static int
8542 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
8543 uint16_t *data)
8544 {
8545 int i, eerd = 0;
8546 int error = 0;
8547
8548 for (i = 0; i < wordcnt; i++) {
8549 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
8550
8551 CSR_WRITE(sc, WMREG_EERD, eerd);
8552 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
8553 if (error != 0)
8554 break;
8555
8556 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
8557 }
8558
8559 return error;
8560 }
8561
8562 /* Flash */
8563
8564 static int
8565 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
8566 {
8567 uint32_t eecd;
8568 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
8569 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
8570 uint8_t sig_byte = 0;
8571
8572 switch (sc->sc_type) {
8573 case WM_T_ICH8:
8574 case WM_T_ICH9:
8575 eecd = CSR_READ(sc, WMREG_EECD);
8576 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
8577 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
8578 return 0;
8579 }
8580 /* FALLTHROUGH */
8581 default:
8582 /* Default to 0 */
8583 *bank = 0;
8584
8585 /* Check bank 0 */
8586 wm_read_ich8_byte(sc, act_offset, &sig_byte);
8587 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8588 *bank = 0;
8589 return 0;
8590 }
8591
8592 /* Check bank 1 */
8593 wm_read_ich8_byte(sc, act_offset + bank1_offset,
8594 &sig_byte);
8595 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8596 *bank = 1;
8597 return 0;
8598 }
8599 }
8600
8601 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
8602 device_xname(sc->sc_dev)));
8603 return -1;
8604 }
8605
8606 /******************************************************************************
8607 * This function does initial flash setup so that a new read/write/erase cycle
8608 * can be started.
8609 *
8610 * sc - The pointer to the hw structure
8611 ****************************************************************************/
8612 static int32_t
8613 wm_ich8_cycle_init(struct wm_softc *sc)
8614 {
8615 uint16_t hsfsts;
8616 int32_t error = 1;
8617 int32_t i = 0;
8618
8619 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8620
8621 	/* Check the Flash Descriptor Valid bit in the HW status register */
8622 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
8623 return error;
8624 }
8625
8626 /* Clear FCERR in Hw status by writing 1 */
8627 /* Clear DAEL in Hw status by writing a 1 */
8628 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
8629
8630 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8631
8632 	/*
8633 	 * Either we should have a hardware SPI cycle-in-progress bit to
8634 	 * check against before starting a new cycle, or the FDONE bit
8635 	 * should read as 1 after a hardware reset so that it can indicate
8636 	 * whether a cycle is in progress or has completed.  We should
8637 	 * also have a software semaphore mechanism guarding FDONE or the
8638 	 * cycle-in-progress bit so that two threads' accesses are
8639 	 * serialized and cannot start a cycle at the same time.
8640 	 */
8642
8643 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8644 /*
8645 * There is no cycle running at present, so we can start a
8646 * cycle
8647 */
8648
8649 /* Begin by setting Flash Cycle Done. */
8650 hsfsts |= HSFSTS_DONE;
8651 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8652 error = 0;
8653 } else {
8654 /*
8655 * otherwise poll for sometime so the current cycle has a
8656 * chance to end before giving up.
8657 */
8658 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
8659 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8660 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8661 error = 0;
8662 break;
8663 }
8664 delay(1);
8665 }
8666 if (error == 0) {
8667 /*
8668 			 * The previous cycle completed before our timeout;
8669 			 * now set the Flash Cycle Done.
8670 */
8671 hsfsts |= HSFSTS_DONE;
8672 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8673 }
8674 }
8675 return error;
8676 }
8677
8678 /******************************************************************************
8679 * This function starts a flash cycle and waits for its completion
8680 *
8681 * sc - The pointer to the hw structure
8682 ****************************************************************************/
8683 static int32_t
8684 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
8685 {
8686 uint16_t hsflctl;
8687 uint16_t hsfsts;
8688 int32_t error = 1;
8689 uint32_t i = 0;
8690
8691 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
8692 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8693 hsflctl |= HSFCTL_GO;
8694 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8695
8696 /* Wait till FDONE bit is set to 1 */
8697 do {
8698 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8699 if (hsfsts & HSFSTS_DONE)
8700 break;
8701 delay(1);
8702 i++;
8703 } while (i < timeout);
8704 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
8705 error = 0;
8706
8707 return error;
8708 }
8709
8710 /******************************************************************************
8711 * Reads a byte or word from the NVM using the ICH8 flash access registers.
8712 *
8713 * sc - The pointer to the hw structure
8714 * index - The index of the byte or word to read.
8715 * size - Size of data to read, 1=byte 2=word
8716 * data - Pointer to the word to store the value read.
8717 *****************************************************************************/
8718 static int32_t
8719 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
8720 uint32_t size, uint16_t *data)
8721 {
8722 uint16_t hsfsts;
8723 uint16_t hsflctl;
8724 uint32_t flash_linear_address;
8725 uint32_t flash_data = 0;
8726 int32_t error = 1;
8727 int32_t count = 0;
8728
8729 	if (size < 1 || size > 2 || data == NULL ||
8730 index > ICH_FLASH_LINEAR_ADDR_MASK)
8731 return error;
8732
8733 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
8734 sc->sc_ich8_flash_base;
8735
8736 do {
8737 delay(1);
8738 /* Steps */
8739 error = wm_ich8_cycle_init(sc);
8740 if (error)
8741 break;
8742
8743 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8744 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
8745 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
8746 & HSFCTL_BCOUNT_MASK;
8747 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
8748 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8749
8750 /*
8751 * Write the last 24 bits of index into Flash Linear address
8752 * field in Flash Address
8753 */
8754 /* TODO: TBD maybe check the index against the size of flash */
8755
8756 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
8757
8758 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
8759
8760 /*
8761 		 * If FCERR is set, clear it and retry the whole sequence a
8762 		 * few more times; otherwise read the result out of Flash
8763 		 * Data0, which returns the data least-significant byte
8764 		 * first.
8765 */
8766 if (error == 0) {
8767 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
8768 if (size == 1)
8769 *data = (uint8_t)(flash_data & 0x000000FF);
8770 else if (size == 2)
8771 *data = (uint16_t)(flash_data & 0x0000FFFF);
8772 break;
8773 } else {
8774 /*
8775 * If we've gotten here, then things are probably
8776 * completely hosed, but if the error condition is
8777 * detected, it won't hurt to give it another try...
8778 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
8779 */
8780 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8781 if (hsfsts & HSFSTS_ERR) {
8782 /* Repeat for some time before giving up. */
8783 continue;
8784 } else if ((hsfsts & HSFSTS_DONE) == 0)
8785 break;
8786 }
8787 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8788
8789 return error;
8790 }
8791
8792 /******************************************************************************
8793 * Reads a single byte from the NVM using the ICH8 flash access registers.
8794 *
8795 * sc - pointer to wm_hw structure
8796 * index - The index of the byte to read.
8797 * data - Pointer to a byte to store the value read.
8798 *****************************************************************************/
8799 static int32_t
8800 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
8801 {
8802 int32_t status;
8803 uint16_t word = 0;
8804
8805 status = wm_read_ich8_data(sc, index, 1, &word);
8806 if (status == 0)
8807 *data = (uint8_t)word;
8808 else
8809 *data = 0;
8810
8811 return status;
8812 }
8813
8814 /******************************************************************************
8815 * Reads a word from the NVM using the ICH8 flash access registers.
8816 *
8817 * sc - pointer to wm_hw structure
8818 * index - The starting byte index of the word to read.
8819 * data - Pointer to a word to store the value read.
8820 *****************************************************************************/
8821 static int32_t
8822 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8823 {
8824 int32_t status;
8825
8826 status = wm_read_ich8_data(sc, index, 2, data);
8827 return status;
8828 }
8829
8830 /******************************************************************************
8831 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
8832 * register.
8833 *
8834 * sc - Struct containing variables accessed by shared code
8835 * offset - offset of word in the EEPROM to read
8836 * data - word read from the EEPROM
8837 * words - number of words to read
8838 *****************************************************************************/
8839 static int
8840 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
8841 {
8842 int32_t error = 0;
8843 uint32_t flash_bank = 0;
8844 uint32_t act_offset = 0;
8845 uint32_t bank_offset = 0;
8846 uint16_t word = 0;
8847 uint16_t i = 0;
8848
8849 /*
8850 * We need to know which is the valid flash bank. In the event
8851 * that we didn't allocate eeprom_shadow_ram, we may not be
8852 * managing flash_bank. So it cannot be trusted and needs
8853 * to be updated with each read.
8854 */
8855 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
8856 if (error) {
8857 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
8858 device_xname(sc->sc_dev)));
8859 flash_bank = 0;
8860 }
8861
8862 /*
8863 * Adjust offset appropriately if we're on bank 1 - adjust for word
8864 * size
8865 */
8866 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
8867
8868 error = wm_get_swfwhw_semaphore(sc);
8869 if (error) {
8870 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8871 __func__);
8872 return error;
8873 }
8874
8875 for (i = 0; i < words; i++) {
8876 /* The NVM part needs a byte offset, hence * 2 */
8877 act_offset = bank_offset + ((offset + i) * 2);
8878 error = wm_read_ich8_word(sc, act_offset, &word);
8879 if (error) {
8880 aprint_error_dev(sc->sc_dev,
8881 "%s: failed to read NVM\n", __func__);
8882 break;
8883 }
8884 data[i] = word;
8885 }
8886
8887 wm_put_swfwhw_semaphore(sc);
8888 return error;
8889 }
8890
8891 /* iNVM */
8892
8893 static int
8894 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
8895 {
8896 	int32_t rv = -1;	/* fail unless the word is found */
8897 uint32_t invm_dword;
8898 uint16_t i;
8899 uint8_t record_type, word_address;
8900
8901 for (i = 0; i < INVM_SIZE; i++) {
8902 invm_dword = CSR_READ(sc, E1000_INVM_DATA_REG(i));
8903 /* Get record type */
8904 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
8905 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
8906 break;
8907 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
8908 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
8909 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
8910 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
8911 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
8912 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
8913 if (word_address == address) {
8914 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
8915 rv = 0;
8916 break;
8917 }
8918 }
8919 }
8920
8921 return rv;
8922 }
8923
8924 static int
8925 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
8926 {
8927 int rv = 0;
8928 int i;
8929
8930 for (i = 0; i < words; i++) {
8931 switch (offset + i) {
8932 case NVM_OFF_MACADDR:
8933 case NVM_OFF_MACADDR1:
8934 case NVM_OFF_MACADDR2:
8935 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
8936 if (rv != 0) {
8937 data[i] = 0xffff;
8938 rv = -1;
8939 }
8940 break;
8941 case NVM_OFF_CFG2:
8942 rv = wm_nvm_read_word_invm(sc, offset, data);
8943 if (rv != 0) {
8944 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
8945 rv = 0;
8946 }
8947 break;
8948 case NVM_OFF_CFG4:
8949 rv = wm_nvm_read_word_invm(sc, offset, data);
8950 if (rv != 0) {
8951 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
8952 rv = 0;
8953 }
8954 break;
8955 case NVM_OFF_LED_1_CFG:
8956 rv = wm_nvm_read_word_invm(sc, offset, data);
8957 if (rv != 0) {
8958 *data = NVM_LED_1_CFG_DEFAULT_I211;
8959 rv = 0;
8960 }
8961 break;
8962 case NVM_OFF_LED_0_2_CFG:
8963 rv = wm_nvm_read_word_invm(sc, offset, data);
8964 if (rv != 0) {
8965 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
8966 rv = 0;
8967 }
8968 break;
8969 case NVM_OFF_ID_LED_SETTINGS:
8970 rv = wm_nvm_read_word_invm(sc, offset, data);
8971 if (rv != 0) {
8972 *data = ID_LED_RESERVED_FFFF;
8973 rv = 0;
8974 }
8975 break;
8976 default:
8977 DPRINTF(WM_DEBUG_NVM,
8978 ("NVM word 0x%02x is not mapped.\n", offset));
8979 *data = NVM_RESERVED_WORD;
8980 break;
8981 }
8982 }
8983
8984 return rv;
8985 }
8986
8987 /* Lock, detecting NVM type, validate checksum and read */
8988
8989 /*
8990 * wm_nvm_acquire:
8991 *
8992 * Perform the EEPROM handshake required on some chips.
8993 */
8994 static int
8995 wm_nvm_acquire(struct wm_softc *sc)
8996 {
8997 uint32_t reg;
8998 int x;
8999 int ret = 0;
9000
9001 	/* Flash-type NVM needs no handshake; always succeeds */
9002 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
9003 return 0;
9004
9005 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
9006 ret = wm_get_swfwhw_semaphore(sc);
9007 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
9008 /* This will also do wm_get_swsm_semaphore() if needed */
9009 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
9010 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
9011 ret = wm_get_swsm_semaphore(sc);
9012 }
9013
9014 if (ret) {
9015 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9016 __func__);
9017 return 1;
9018 }
9019
9020 if (sc->sc_flags & WM_F_LOCK_EECD) {
9021 reg = CSR_READ(sc, WMREG_EECD);
9022
9023 /* Request EEPROM access. */
9024 reg |= EECD_EE_REQ;
9025 CSR_WRITE(sc, WMREG_EECD, reg);
9026
9027 /* ..and wait for it to be granted. */
9028 for (x = 0; x < 1000; x++) {
9029 reg = CSR_READ(sc, WMREG_EECD);
9030 if (reg & EECD_EE_GNT)
9031 break;
9032 delay(5);
9033 }
9034 if ((reg & EECD_EE_GNT) == 0) {
9035 aprint_error_dev(sc->sc_dev,
9036 "could not acquire EEPROM GNT\n");
9037 reg &= ~EECD_EE_REQ;
9038 CSR_WRITE(sc, WMREG_EECD, reg);
9039 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9040 wm_put_swfwhw_semaphore(sc);
9041 if (sc->sc_flags & WM_F_LOCK_SWFW)
9042 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
9043 else if (sc->sc_flags & WM_F_LOCK_SWSM)
9044 wm_put_swsm_semaphore(sc);
9045 return 1;
9046 }
9047 }
9048
9049 return 0;
9050 }
9051
9052 /*
9053 * wm_nvm_release:
9054 *
9055 * Release the EEPROM mutex.
9056 */
9057 static void
9058 wm_nvm_release(struct wm_softc *sc)
9059 {
9060 uint32_t reg;
9061
9062 	/* Flash-type NVM needs no handshake; nothing to release */
9063 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
9064 return;
9065
9066 if (sc->sc_flags & WM_F_LOCK_EECD) {
9067 reg = CSR_READ(sc, WMREG_EECD);
9068 reg &= ~EECD_EE_REQ;
9069 CSR_WRITE(sc, WMREG_EECD, reg);
9070 }
9071
9072 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9073 wm_put_swfwhw_semaphore(sc);
9074 if (sc->sc_flags & WM_F_LOCK_SWFW)
9075 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
9076 else if (sc->sc_flags & WM_F_LOCK_SWSM)
9077 wm_put_swsm_semaphore(sc);
9078 }
9079
9080 static int
9081 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
9082 {
9083 uint32_t eecd = 0;
9084
9085 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
9086 || sc->sc_type == WM_T_82583) {
9087 eecd = CSR_READ(sc, WMREG_EECD);
9088
9089 /* Isolate bits 15 & 16 */
9090 eecd = ((eecd >> 15) & 0x03);
9091
9092 /* If both bits are set, device is Flash type */
9093 if (eecd == 0x03)
9094 return 0;
9095 }
9096 return 1;
9097 }
9098
9099 static int
9100 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
9101 {
9102 uint32_t eec;
9103
9104 eec = CSR_READ(sc, WMREG_EEC);
9105 if ((eec & EEC_FLASH_DETECTED) != 0)
9106 return 1;
9107
9108 return 0;
9109 }
9110
9111 /*
9112 * wm_nvm_validate_checksum
9113 *
9114 * The checksum is defined as the sum of the first 64 (16 bit) words.
9115 */
9116 static int
9117 wm_nvm_validate_checksum(struct wm_softc *sc)
9118 {
9119 uint16_t checksum;
9120 uint16_t eeprom_data;
9121 #ifdef WM_DEBUG
9122 uint16_t csum_wordaddr, valid_checksum;
9123 #endif
9124 int i;
9125
9126 checksum = 0;
9127
9128 /* Don't check for I211 */
9129 if (sc->sc_type == WM_T_I211)
9130 return 0;
9131
9132 #ifdef WM_DEBUG
9133 if (sc->sc_type == WM_T_PCH_LPT) {
9134 csum_wordaddr = NVM_OFF_COMPAT;
9135 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
9136 } else {
9137 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
9138 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
9139 }
9140
9141 /* Dump EEPROM image for debug */
9142 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9143 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9144 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
9145 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
9146 if ((eeprom_data & valid_checksum) == 0) {
9147 DPRINTF(WM_DEBUG_NVM,
9148 ("%s: NVM need to be updated (%04x != %04x)\n",
9149 device_xname(sc->sc_dev), eeprom_data,
9150 valid_checksum));
9151 }
9152 }
9153
9154 if ((wm_debug & WM_DEBUG_NVM) != 0) {
9155 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
9156 for (i = 0; i < NVM_SIZE; i++) {
9157 if (wm_nvm_read(sc, i, 1, &eeprom_data))
9158 printf("XXXX ");
9159 else
9160 printf("%04hx ", eeprom_data);
9161 if (i % 8 == 7)
9162 printf("\n");
9163 }
9164 }
9165
9166 #endif /* WM_DEBUG */
9167
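	/* The sum of the NVM_SIZE words must equal NVM_CHECKSUM (0xbaba). */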
9168 for (i = 0; i < NVM_SIZE; i++) {
9169 if (wm_nvm_read(sc, i, 1, &eeprom_data))
9170 return 1;
9171 checksum += eeprom_data;
9172 }
9173
9174 if (checksum != (uint16_t) NVM_CHECKSUM) {
9175 #ifdef WM_DEBUG
9176 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
9177 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
9178 #endif
9179 }
9180
9181 return 0;
9182 }
9183
9184 /*
9185 * wm_nvm_read:
9186 *
9187 * Read data from the serial EEPROM.
9188 */
9189 static int
9190 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9191 {
9192 int rv;
9193
9194 if (sc->sc_flags & WM_F_EEPROM_INVALID)
9195 return 1;
9196
9197 if (wm_nvm_acquire(sc))
9198 return 1;
9199
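	/* Dispatch on NVM type: ICH/PCH flash, iNVM, EERD, SPI or Microwire */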
9200 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9201 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9202 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
9203 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
9204 else if (sc->sc_flags & WM_F_EEPROM_INVM)
9205 rv = wm_nvm_read_invm(sc, word, wordcnt, data);
9206 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
9207 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
9208 else if (sc->sc_flags & WM_F_EEPROM_SPI)
9209 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
9210 else
9211 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
9212
9213 wm_nvm_release(sc);
9214 return rv;
9215 }
9216
9217 /*
9218 * Hardware semaphores.
9219  * Very complex...
9220 */
9221
9222 static int
9223 wm_get_swsm_semaphore(struct wm_softc *sc)
9224 {
9225 int32_t timeout;
9226 uint32_t swsm;
9227
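	/*
	 * Acquisition is two-staged: first the SW semaphore (SMBI, only
	 * on parts flagged WM_F_LOCK_SWSM), then the SW/FW semaphore
	 * (SWESMBI).  A write to SWESMBI only sticks when the firmware
	 * is not holding it, so read it back to check.
	 */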
9228 if (sc->sc_flags & WM_F_LOCK_SWSM) {
9229 /* Get the SW semaphore. */
9230 timeout = sc->sc_nvm_wordsize + 1;
9231 while (timeout) {
9232 swsm = CSR_READ(sc, WMREG_SWSM);
9233
9234 if ((swsm & SWSM_SMBI) == 0)
9235 break;
9236
9237 delay(50);
9238 timeout--;
9239 }
9240
9241 if (timeout == 0) {
9242 aprint_error_dev(sc->sc_dev,
9243 "could not acquire SWSM SMBI\n");
9244 return 1;
9245 }
9246 }
9247
9248 /* Get the FW semaphore. */
9249 timeout = sc->sc_nvm_wordsize + 1;
9250 while (timeout) {
9251 swsm = CSR_READ(sc, WMREG_SWSM);
9252 swsm |= SWSM_SWESMBI;
9253 CSR_WRITE(sc, WMREG_SWSM, swsm);
9254 /* If we managed to set the bit we got the semaphore. */
9255 swsm = CSR_READ(sc, WMREG_SWSM);
9256 if (swsm & SWSM_SWESMBI)
9257 break;
9258
9259 delay(50);
9260 timeout--;
9261 }
9262
9263 if (timeout == 0) {
9264 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
9265 /* Release semaphores */
9266 wm_put_swsm_semaphore(sc);
9267 return 1;
9268 }
9269 return 0;
9270 }
9271
9272 static void
9273 wm_put_swsm_semaphore(struct wm_softc *sc)
9274 {
9275 uint32_t swsm;
9276
9277 swsm = CSR_READ(sc, WMREG_SWSM);
9278 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
9279 CSR_WRITE(sc, WMREG_SWSM, swsm);
9280 }
9281
9282 static int
9283 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
9284 {
9285 uint32_t swfw_sync;
9286 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
9287 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
9288 	int timeout;
9289
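	/*
	 * Retry for up to a second (200 * 5ms).  Claim our software bit
	 * in SW_FW_SYNC only when neither the software nor the firmware
	 * bit for this resource is held.
	 */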
9290 for (timeout = 0; timeout < 200; timeout++) {
9291 if (sc->sc_flags & WM_F_LOCK_SWSM) {
9292 if (wm_get_swsm_semaphore(sc)) {
9293 aprint_error_dev(sc->sc_dev,
9294 "%s: failed to get semaphore\n",
9295 __func__);
9296 return 1;
9297 }
9298 }
9299 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
9300 if ((swfw_sync & (swmask | fwmask)) == 0) {
9301 swfw_sync |= swmask;
9302 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
9303 if (sc->sc_flags & WM_F_LOCK_SWSM)
9304 wm_put_swsm_semaphore(sc);
9305 return 0;
9306 }
9307 if (sc->sc_flags & WM_F_LOCK_SWSM)
9308 wm_put_swsm_semaphore(sc);
9309 delay(5000);
9310 }
9311 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
9312 device_xname(sc->sc_dev), mask, swfw_sync);
9313 return 1;
9314 }
9315
9316 static void
9317 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
9318 {
9319 uint32_t swfw_sync;
9320
9321 if (sc->sc_flags & WM_F_LOCK_SWSM) {
9322 while (wm_get_swsm_semaphore(sc) != 0)
9323 continue;
9324 }
9325 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
9326 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
9327 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
9328 if (sc->sc_flags & WM_F_LOCK_SWSM)
9329 wm_put_swsm_semaphore(sc);
9330 }
9331
9332 static int
9333 wm_get_swfwhw_semaphore(struct wm_softc *sc)
9334 {
9335 uint32_t ext_ctrl;
9336 	int timeout;
9337
9338 for (timeout = 0; timeout < 200; timeout++) {
9339 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
9340 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
9341 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
9342
9343 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
9344 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
9345 return 0;
9346 delay(5000);
9347 }
9348 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
9349 device_xname(sc->sc_dev), ext_ctrl);
9350 return 1;
9351 }
9352
9353 static void
9354 wm_put_swfwhw_semaphore(struct wm_softc *sc)
9355 {
9356 uint32_t ext_ctrl;
9357 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
9358 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
9359 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
9360 }
9361
9362 static int
9363 wm_get_hw_semaphore_82573(struct wm_softc *sc)
9364 {
9365 int i = 0;
9366 uint32_t reg;
9367
9368 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
9369 do {
9370 CSR_WRITE(sc, WMREG_EXTCNFCTR,
9371 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
9372 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
9373 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
9374 break;
9375 delay(2*1000);
9376 i++;
9377 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
9378
9379 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
9380 wm_put_hw_semaphore_82573(sc);
9381 log(LOG_ERR, "%s: Driver can't access the PHY\n",
9382 device_xname(sc->sc_dev));
9383 return -1;
9384 }
9385
9386 return 0;
9387 }
9388
9389 static void
9390 wm_put_hw_semaphore_82573(struct wm_softc *sc)
9391 {
9392 uint32_t reg;
9393
9394 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
9395 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
9396 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
9397 }
9398
9399 /*
9400 * Management mode and power management related subroutines.
9401 * BMC, AMT, suspend/resume and EEE.
9402 */
9403
9404 static int
9405 wm_check_mng_mode(struct wm_softc *sc)
9406 {
9407 int rv;
9408
9409 switch (sc->sc_type) {
9410 case WM_T_ICH8:
9411 case WM_T_ICH9:
9412 case WM_T_ICH10:
9413 case WM_T_PCH:
9414 case WM_T_PCH2:
9415 case WM_T_PCH_LPT:
9416 rv = wm_check_mng_mode_ich8lan(sc);
9417 break;
9418 case WM_T_82574:
9419 case WM_T_82583:
9420 rv = wm_check_mng_mode_82574(sc);
9421 break;
9422 case WM_T_82571:
9423 case WM_T_82572:
9424 case WM_T_82573:
9425 case WM_T_80003:
9426 rv = wm_check_mng_mode_generic(sc);
9427 break;
9428 default:
9429 		/* nothing to do */
9430 rv = 0;
9431 break;
9432 }
9433
9434 return rv;
9435 }
9436
9437 static int
9438 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
9439 {
9440 uint32_t fwsm;
9441
9442 fwsm = CSR_READ(sc, WMREG_FWSM);
9443
9444 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
9445 return 1;
9446
9447 return 0;
9448 }
9449
9450 static int
9451 wm_check_mng_mode_82574(struct wm_softc *sc)
9452 {
9453 uint16_t data;
9454
9455 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
9456
9457 if ((data & NVM_CFG2_MNGM_MASK) != 0)
9458 return 1;
9459
9460 return 0;
9461 }
9462
9463 static int
9464 wm_check_mng_mode_generic(struct wm_softc *sc)
9465 {
9466 uint32_t fwsm;
9467
9468 fwsm = CSR_READ(sc, WMREG_FWSM);
9469
9470 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
9471 return 1;
9472
9473 return 0;
9474 }
9475
9476 static int
9477 wm_enable_mng_pass_thru(struct wm_softc *sc)
9478 {
9479 uint32_t manc, fwsm, factps;
9480
9481 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
9482 return 0;
9483
9484 manc = CSR_READ(sc, WMREG_MANC);
9485
9486 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
9487 device_xname(sc->sc_dev), manc));
9488 if ((manc & MANC_RECV_TCO_EN) == 0)
9489 return 0;
9490
9491 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
9492 fwsm = CSR_READ(sc, WMREG_FWSM);
9493 factps = CSR_READ(sc, WMREG_FACTPS);
9494 if (((factps & FACTPS_MNGCG) == 0)
9495 && ((fwsm & FWSM_MODE_MASK)
9496 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
9497 return 1;
9498 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
9499 uint16_t data;
9500
9501 factps = CSR_READ(sc, WMREG_FACTPS);
9502 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
9503 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
9504 device_xname(sc->sc_dev), factps, data));
9505 if (((factps & FACTPS_MNGCG) == 0)
9506 && ((data & NVM_CFG2_MNGM_MASK)
9507 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
9508 return 1;
9509 } else if (((manc & MANC_SMBUS_EN) != 0)
9510 && ((manc & MANC_ASF_EN) == 0))
9511 return 1;
9512
9513 return 0;
9514 }
9515
9516 static int
9517 wm_check_reset_block(struct wm_softc *sc)
9518 {
9519 uint32_t reg;
9520
9521 switch (sc->sc_type) {
9522 case WM_T_ICH8:
9523 case WM_T_ICH9:
9524 case WM_T_ICH10:
9525 case WM_T_PCH:
9526 case WM_T_PCH2:
9527 case WM_T_PCH_LPT:
9528 reg = CSR_READ(sc, WMREG_FWSM);
9529 if ((reg & FWSM_RSPCIPHY) != 0)
9530 return 0;
9531 else
9532 return -1;
9533 break;
9534 case WM_T_82571:
9535 case WM_T_82572:
9536 case WM_T_82573:
9537 case WM_T_82574:
9538 case WM_T_82583:
9539 case WM_T_80003:
9540 reg = CSR_READ(sc, WMREG_MANC);
9541 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
9542 return -1;
9543 else
9544 return 0;
9545 break;
9546 default:
9547 /* no problem */
9548 break;
9549 }
9550
9551 return 0;
9552 }
9553
9554 static void
9555 wm_get_hw_control(struct wm_softc *sc)
9556 {
9557 uint32_t reg;
9558
9559 switch (sc->sc_type) {
9560 case WM_T_82573:
9561 reg = CSR_READ(sc, WMREG_SWSM);
9562 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
9563 break;
9564 case WM_T_82571:
9565 case WM_T_82572:
9566 case WM_T_82574:
9567 case WM_T_82583:
9568 case WM_T_80003:
9569 case WM_T_ICH8:
9570 case WM_T_ICH9:
9571 case WM_T_ICH10:
9572 case WM_T_PCH:
9573 case WM_T_PCH2:
9574 case WM_T_PCH_LPT:
9575 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9576 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
9577 break;
9578 default:
9579 break;
9580 }
9581 }
9582
9583 static void
9584 wm_release_hw_control(struct wm_softc *sc)
9585 {
9586 uint32_t reg;
9587
9588 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
9589 return;
9590
9591 if (sc->sc_type == WM_T_82573) {
9592 reg = CSR_READ(sc, WMREG_SWSM);
9593 reg &= ~SWSM_DRV_LOAD;
9594 		CSR_WRITE(sc, WMREG_SWSM, reg);
9595 } else {
9596 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9597 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
9598 }
9599 }
9600
9601 static void
9602 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
9603 {
9604 uint32_t reg;
9605
9606 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
9607
9608 if (on != 0)
9609 reg |= EXTCNFCTR_GATE_PHY_CFG;
9610 else
9611 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
9612
9613 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
9614 }
9615
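/*
 * On PCH-class chips the PHY strap defaults to the SMBus
 * (manageability) interface.  When no valid firmware is present and a
 * PHY reset is not blocked, pulsing CTRL_LANPHYPC_OVERRIDE with
 * CTRL_LANPHYPC_VALUE low power-cycles the LAN PHY so that it comes
 * back up on the PCIe side - hence the function name.
 */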
9616 static void
9617 wm_smbustopci(struct wm_softc *sc)
9618 {
9619 uint32_t fwsm;
9620
9621 fwsm = CSR_READ(sc, WMREG_FWSM);
9622 if (((fwsm & FWSM_FW_VALID) == 0)
9623 	    && (wm_check_reset_block(sc) == 0)) {
9624 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
9625 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
9626 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9627 CSR_WRITE_FLUSH(sc);
9628 delay(10);
9629 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
9630 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9631 CSR_WRITE_FLUSH(sc);
9632 delay(50*1000);
9633
9634 /*
9635 * Gate automatic PHY configuration by hardware on non-managed
9636 * 82579
9637 */
9638 if (sc->sc_type == WM_T_PCH2)
9639 wm_gate_hw_phy_config_ich8lan(sc, 1);
9640 }
9641 }
9642
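/*
 * Let the firmware keep its TCO traffic, but stop it from intercepting
 * ARP, and (on 82571 and later) deliver packets for the management
 * ports to the host as well.
 */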
9643 static void
9644 wm_init_manageability(struct wm_softc *sc)
9645 {
9646
9647 if (sc->sc_flags & WM_F_HAS_MANAGE) {
9648 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
9649 uint32_t manc = CSR_READ(sc, WMREG_MANC);
9650
9651 /* Disable hardware interception of ARP */
9652 manc &= ~MANC_ARP_EN;
9653
9654 /* Enable receiving management packets to the host */
9655 if (sc->sc_type >= WM_T_82571) {
9656 manc |= MANC_EN_MNG2HOST;
9657 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
9658 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
9659
9660 }
9661
9662 CSR_WRITE(sc, WMREG_MANC, manc);
9663 }
9664 }
9665
9666 static void
9667 wm_release_manageability(struct wm_softc *sc)
9668 {
9669
9670 if (sc->sc_flags & WM_F_HAS_MANAGE) {
9671 uint32_t manc = CSR_READ(sc, WMREG_MANC);
9672
9673 manc |= MANC_ARP_EN;
9674 if (sc->sc_type >= WM_T_82571)
9675 manc &= ~MANC_EN_MNG2HOST;
9676
9677 CSR_WRITE(sc, WMREG_MANC, manc);
9678 }
9679 }
9680
9681 static void
9682 wm_get_wakeup(struct wm_softc *sc)
9683 {
9684
9685 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
9686 switch (sc->sc_type) {
9687 case WM_T_82573:
9688 case WM_T_82583:
9689 sc->sc_flags |= WM_F_HAS_AMT;
9690 /* FALLTHROUGH */
9691 case WM_T_80003:
9692 case WM_T_82541:
9693 case WM_T_82547:
9694 case WM_T_82571:
9695 case WM_T_82572:
9696 case WM_T_82574:
9697 case WM_T_82575:
9698 case WM_T_82576:
9699 case WM_T_82580:
9700 case WM_T_I350:
9701 case WM_T_I354:
9702 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
9703 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
9704 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9705 break;
9706 case WM_T_ICH8:
9707 case WM_T_ICH9:
9708 case WM_T_ICH10:
9709 case WM_T_PCH:
9710 case WM_T_PCH2:
9711 case WM_T_PCH_LPT:
9712 sc->sc_flags |= WM_F_HAS_AMT;
9713 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9714 break;
9715 default:
9716 break;
9717 }
9718
9719 /* 1: HAS_MANAGE */
9720 if (wm_enable_mng_pass_thru(sc) != 0)
9721 sc->sc_flags |= WM_F_HAS_MANAGE;
9722
9723 #ifdef WM_DEBUG
9724 printf("\n");
9725 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
9726 printf("HAS_AMT,");
9727 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
9728 printf("ARC_SUBSYS_VALID,");
9729 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
9730 printf("ASF_FIRMWARE_PRES,");
9731 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
9732 printf("HAS_MANAGE,");
9733 printf("\n");
9734 #endif
9735 	/*
9736 	 * Note that the WOL flag is set after the EEPROM reset handling
9737 	 * is done.
9738 	 */
9739 }
9740
9741 #ifdef WM_WOL
9742 /* WOL in the newer chipset interfaces (pchlan) */
9743 static void
9744 wm_enable_phy_wakeup(struct wm_softc *sc)
9745 {
9746 #if 0
9747 uint16_t preg;
9748
9749 /* Copy MAC RARs to PHY RARs */
9750
9751 /* Copy MAC MTA to PHY MTA */
9752
9753 /* Configure PHY Rx Control register */
9754
9755 /* Enable PHY wakeup in MAC register */
9756
9757 /* Configure and enable PHY wakeup in PHY registers */
9758
9759 /* Activate PHY wakeup */
9760
9761 /* XXX */
9762 #endif
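	/*
	 * XXX Not implemented yet.  A full implementation would mirror
	 * the MAC's receive-address and multicast filters plus the wakeup
	 * configuration into the PHY's wakeup register space (e.g. via
	 * wm_gmii_hv_writereg()), since on PCH the PHY handles wake
	 * events while the MAC is powered down.
	 */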
9763 }
9764
9765 /* Power down workaround on D3 */
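/*
 * The loop below makes at most two attempts to put the IGP3 voltage
 * regulator into shutdown mode: if the read-back does not confirm the
 * mode after the first pass, the PHY is reset and the sequence is
 * repeated once.
 */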
9766 static void
9767 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
9768 {
9769 uint32_t reg;
9770 int i;
9771
9772 for (i = 0; i < 2; i++) {
9773 /* Disable link */
9774 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9775 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9776 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9777
9778 /*
9779 * Call gig speed drop workaround on Gig disable before
9780 * accessing any PHY registers
9781 */
9782 if (sc->sc_type == WM_T_ICH8)
9783 wm_gig_downshift_workaround_ich8lan(sc);
9784
9785 /* Write VR power-down enable */
9786 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9787 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9788 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
9789 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
9790
9791 /* Read it back and test */
9792 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9793 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9794 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
9795 break;
9796
9797 /* Issue PHY reset and repeat at most one more time */
9798 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9799 }
9800 }
9801
9802 static void
9803 wm_enable_wakeup(struct wm_softc *sc)
9804 {
9805 	uint32_t reg;
	int pmreg;
9806 pcireg_t pmode;
9807
9808 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
9809 &pmreg, NULL) == 0)
9810 return;
9811
9812 /* Advertise the wakeup capability */
9813 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
9814 | CTRL_SWDPIN(3));
9815 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
9816
9817 /* ICH workaround */
9818 switch (sc->sc_type) {
9819 case WM_T_ICH8:
9820 case WM_T_ICH9:
9821 case WM_T_ICH10:
9822 case WM_T_PCH:
9823 case WM_T_PCH2:
9824 case WM_T_PCH_LPT:
9825 /* Disable gig during WOL */
9826 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9827 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
9828 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9829 if (sc->sc_type == WM_T_PCH)
9830 wm_gmii_reset(sc);
9831
9832 /* Power down workaround */
9833 if (sc->sc_phytype == WMPHY_82577) {
9834 struct mii_softc *child;
9835
9836 /* Assume that the PHY is copper */
9837 child = LIST_FIRST(&sc->sc_mii.mii_phys);
9838 if (child->mii_mpd_rev <= 2)
9839 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
9840 (768 << 5) | 25, 0x0444); /* magic num */
9841 }
9842 break;
9843 default:
9844 break;
9845 }
9846
9847 /* Keep the laser running on fiber adapters */
9848 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
9849 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
9850 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9851 reg |= CTRL_EXT_SWDPIN(3);
9852 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9853 }
9854
9855 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
9856 #if 0 /* for the multicast packet */
9857 reg |= WUFC_MC;
9858 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
9859 #endif
9860
9861 if (sc->sc_type == WM_T_PCH) {
9862 wm_enable_phy_wakeup(sc);
9863 } else {
9864 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
9865 CSR_WRITE(sc, WMREG_WUFC, reg);
9866 }
9867
9868 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9869 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9870 || (sc->sc_type == WM_T_PCH2))
9871 && (sc->sc_phytype == WMPHY_IGP_3))
9872 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
9873
9874 /* Request PME */
9875 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
9876 #if 0
9877 /* Disable WOL */
9878 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
9879 #else
9880 /* For WOL */
9881 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
9882 #endif
9883 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
9884 }
9885 #endif /* WM_WOL */
9886
9887 /* EEE */
9888
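/*
 * Energy Efficient Ethernet on I350/I354: the IPCNFG_EEE_*_AN bits
 * advertise EEE capability during autonegotiation, and the EEER bits
 * let the transmitter/receiver enter Low Power Idle (with flow control
 * on LPI) once the link partner agrees.
 */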
9889 static void
9890 wm_set_eee_i350(struct wm_softc *sc)
9891 {
9892 uint32_t ipcnfg, eeer;
9893
9894 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
9895 eeer = CSR_READ(sc, WMREG_EEER);
9896
9897 if ((sc->sc_flags & WM_F_EEE) != 0) {
9898 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9899 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
9900 | EEER_LPI_FC);
9901 } else {
9902 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9903 ipcnfg &= ~IPCNFG_10BASE_TE;
9904 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
9905 | EEER_LPI_FC);
9906 }
9907
9908 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
9909 CSR_WRITE(sc, WMREG_EEER, eeer);
9910 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
9911 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
9912 }
9913
9914 /*
9915 * Workarounds (mainly PHY related).
9916 * Basically, PHY's workarounds are in the PHY drivers.
9917 */
9918
9919 /* Work-around for 82566 Kumeran PCS lock loss */
9920 static void
9921 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
9922 {
9923 int miistatus, active, i;
9924 int reg;
9925
9926 miistatus = sc->sc_mii.mii_media_status;
9927
9928 /* If the link is not up, do nothing */
9929 	if ((miistatus & IFM_ACTIVE) == 0)
9930 return;
9931
9932 active = sc->sc_mii.mii_media_active;
9933
9934 	/* Nothing to do unless the link is running at 1000BASE-T */
9935 if (IFM_SUBTYPE(active) != IFM_1000_T)
9936 return;
9937
9938 for (i = 0; i < 10; i++) {
9939 /* read twice */
9940 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9941 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9942 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
9943 goto out; /* GOOD! */
9944
9945 /* Reset the PHY */
9946 wm_gmii_reset(sc);
9947 delay(5*1000);
9948 }
9949
9950 /* Disable GigE link negotiation */
9951 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9952 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9953 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9954
9955 /*
9956 * Call gig speed drop workaround on Gig disable before accessing
9957 * any PHY registers.
9958 */
9959 wm_gig_downshift_workaround_ich8lan(sc);
9960
9961 out:
9962 return;
9963 }
9964
9965 /* WOL from S5 stops working */
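/*
 * The workaround sets and then clears the Kumeran near-end loopback
 * bit (KUMCTRLSTA_DIAG_NELPBK) on IGP3 PHYs; the brief loopback pulse
 * is apparently what unwedges the interface after gigabit is dropped.
 */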
9966 static void
9967 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
9968 {
9969 uint16_t kmrn_reg;
9970
9971 /* Only for igp3 */
9972 if (sc->sc_phytype == WMPHY_IGP_3) {
9973 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
9974 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
9975 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9976 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
9977 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9978 }
9979 }
9980
9981 /*
9982 * Workaround for pch's PHYs
9983 * XXX should be moved to new PHY driver?
9984 */
9985 static void
9986 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
9987 {
9988 if (sc->sc_phytype == WMPHY_82577)
9989 wm_set_mdio_slow_mode_hv(sc);
9990
9991 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
9992
9993 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
9994
9995 /* 82578 */
9996 if (sc->sc_phytype == WMPHY_82578) {
9997 /* PCH rev. < 3 */
9998 if (sc->sc_rev < 3) {
9999 			/*
			 * (1 << 6) == (2 << 5), i.e. page 2 in the
			 * (page << 5) | reg encoding used elsewhere in
			 * this driver (cf. (768 << 5) | 25 above).
			 */
10000 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
10001 0x66c0);
10002 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
10003 0xffff);
10004 }
10005
10006 /* XXX phy rev. < 2 */
10007 }
10008
10009 /* Select page 0 */
10010
10011 /* XXX acquire semaphore */
10012 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
10013 /* XXX release semaphore */
10014
10015 	/*
10016 	 * Configure the K1 Si workaround during PHY reset, assuming there
10017 	 * is link, so that K1 is disabled while the link runs at 1Gbps.
10018 	 */
10019 wm_k1_gig_workaround_hv(sc, 1);
10020 }
10021
10022 static void
10023 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
10024 {
10025
10026 wm_set_mdio_slow_mode_hv(sc);
10027 }
10028
10029 static void
10030 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
10031 {
10032 int k1_enable = sc->sc_nvm_k1_enabled;
10033
10034 /* XXX acquire semaphore */
10035
10036 if (link) {
10037 k1_enable = 0;
10038
10039 /* Link stall fix for link up */
10040 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
10041 } else {
10042 /* Link stall fix for link down */
10043 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
10044 }
10045
10046 wm_configure_k1_ich8lan(sc, k1_enable);
10047
10048 /* XXX release semaphore */
10049 }
10050
10051 static void
10052 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
10053 {
10054 uint32_t reg;
10055
10056 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
10057 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
10058 reg | HV_KMRN_MDIO_SLOW);
10059 }
10060
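/*
 * K1 is a power-save state of the Kumeran interface.  After updating
 * the K1 enable bit, the code below briefly forces the MAC speed
 * (CTRL_FRCSPD together with the CTRL_EXT_SPD_BYPS bypass), presumably
 * so the new setting is latched while the link is quiesced, and then
 * restores the original CTRL/CTRL_EXT values.
 */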
10061 static void
10062 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
10063 {
10064 uint32_t ctrl, ctrl_ext, tmp;
10065 uint16_t kmrn_reg;
10066
10067 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
10068
10069 if (k1_enable)
10070 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
10071 else
10072 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
10073
10074 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
10075
10076 delay(20);
10077
10078 ctrl = CSR_READ(sc, WMREG_CTRL);
10079 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10080
10081 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
10082 tmp |= CTRL_FRCSPD;
10083
10084 CSR_WRITE(sc, WMREG_CTRL, tmp);
10085 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
10086 CSR_WRITE_FLUSH(sc);
10087 delay(20);
10088
10089 CSR_WRITE(sc, WMREG_CTRL, ctrl);
10090 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
10091 CSR_WRITE_FLUSH(sc);
10092 delay(20);
10093 }
10094
10095 /* special case - for 82575 - need to do manual init ... */
10096 static void
10097 wm_reset_init_script_82575(struct wm_softc *sc)
10098 {
10099 /*
10100 	 * Remark: this is untested code - we have no board without EEPROM;
10101 	 * same setup as mentioned in the FreeBSD driver for the i82575.
10102 */
10103
10104 /* SerDes configuration via SERDESCTRL */
10105 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
10106 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
10107 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
10108 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
10109
10110 /* CCM configuration via CCMCTL register */
10111 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
10112 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
10113
10114 /* PCIe lanes configuration */
10115 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
10116 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
10117 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
10118 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
10119
10120 /* PCIe PLL Configuration */
10121 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
10122 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
10123 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
10124 }
10125
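/*
 * On SGMII-capable 82580 parts the MDIO routing is derived from the
 * per-function CFG3 NVM word: NVM_CFG3_PORTA_EXT_MDIO selects the
 * external MDIO interface (MDICNFG_DEST) and NVM_CFG3_PORTA_COM_MDIO
 * shares one MDIO bus among the LAN functions (MDICNFG_COM_MDIO), so
 * MDICNFG must be rebuilt after a reset.
 */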
10126 static void
10127 wm_reset_mdicnfg_82580(struct wm_softc *sc)
10128 {
10129 uint32_t reg;
10130 uint16_t nvmword;
10131 int rv;
10132
10133 if ((sc->sc_flags & WM_F_SGMII) == 0)
10134 return;
10135
10136 rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
10137 + NVM_OFF_CFG3_PORTA, 1, &nvmword);
10138 if (rv != 0) {
10139 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
10140 __func__);
10141 return;
10142 }
10143
10144 reg = CSR_READ(sc, WMREG_MDICNFG);
10145 if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
10146 reg |= MDICNFG_DEST;
10147 if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
10148 reg |= MDICNFG_COM_MDIO;
10149 CSR_WRITE(sc, WMREG_MDICNFG, reg);
10150 }
10151