/*	$NetBSD: if_wm.c,v 1.318 2015/05/04 06:51:08 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- EEE (Energy Efficient Ethernet)
 *	- MSI/MSI-X
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.318 2015/05/04 06:51:08 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
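
/*
 * Illustrative sketch (not used by the driver): because the ring and
 * queue sizes above are powers of two, the "next index" macros can use
 * a mask instead of a modulo.  E.g. with WM_NTXDESC(sc) == 4096,
 * WM_NEXTTX(sc, 4095) == (4095 + 1) & 4095 == 0, wrapping the ring.
 */
#if 0 /* example only */
static inline int
wm_example_nexttx(struct wm_softc *sc, int x)
{

	/* Equivalent to (x + 1) % WM_NTXDESC(sc) for power-of-two sizes. */
	return (x + 1) & WM_NTXDESC_MASK(sc);
}
#endif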

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
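
/*
 * Worked example of the sizing above (assuming a 9018-byte jumbo
 * frame, including headers): ceil(9018 / MCLBYTES) == ceil(9018 / 2048)
 * == 5 buffers per frame, and 256 / 5 == 51, which the comment above
 * rounds down to "room for 50 jumbo packets".
 */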

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define WM_TX_LOCK(_sc)		if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define WM_RX_LOCK(_sc)		if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
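
/*
 * Usage sketch (illustrative only; the real wm_start appears later in
 * this file): the unlocked entry points take the lock, re-check
 * sc_stopping, and call their *_locked counterparts.  When WM_MPSAFE
 * is not defined the lock pointers are NULL and the macros above
 * collapse to no-ops.
 */
#if 0 /* example only */
static void
wm_example_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	WM_TX_LOCK(sc);
	if (!sc->sc_stopping)
		wm_start_locked(ifp);
	WM_TX_UNLOCK(sc);
}
#endif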

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
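
/*
 * Example of the split above: on a 64-bit platform a descriptor at
 * bus address 0x123456000 yields WM_CDTXADDR_LO == 0x23456000 and
 * WM_CDTXADDR_HI == 0x1; on 32-bit platforms the high half is
 * always 0.
 */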

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
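
/*
 * Example: with WM_NTXDESC(sc) == 4096, WM_CDTXSYNC(sc, 4094, 4, ops)
 * first syncs descriptors 4094-4095 at the tail of the ring and then
 * descriptors 0-1, mirroring the hardware's wrap-around.
 */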

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
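
/*
 * Worked example of the align tweak: an Ethernet header is 14 bytes,
 * so with sc_align_tweak == 2 the payload starts at buffer offset
 * 2 + 14 == 16, a 4-byte boundary; with a tweak of 0 it would start
 * at the misaligned offset 14.
 */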

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_mediainit(struct wm_softc *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Reading with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
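
/*
 * Usage sketch (illustrative, not a new entry point): the two writes
 * above form an indirect access through the I/O BAR -- the register
 * offset goes into the address window at BAR offset 0 and the value
 * into the data window at offset 4.  So, for example,
 *
 *	wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
 *
 * rewrites the CTRL register without touching the memory-mapped
 * space, which is what the chip-bug workarounds need.
 */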

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
			 * That's no problem, because newer chips don't have
			 * this bug.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
#ifdef WM_MPSAFE
	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
#endif
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1538 /*
1539 		 * CSA (Communication Streaming Architecture) is about as fast
1540 		 * as a 32-bit 66MHz PCI bus.
1541 */
1542 sc->sc_flags |= WM_F_CSA;
1543 sc->sc_bus_speed = 66;
1544 aprint_verbose_dev(sc->sc_dev,
1545 "Communication Streaming Architecture\n");
1546 if (sc->sc_type == WM_T_82547) {
1547 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1548 callout_setfunc(&sc->sc_txfifo_ch,
1549 wm_82547_txfifo_stall, sc);
1550 aprint_verbose_dev(sc->sc_dev,
1551 "using 82547 Tx FIFO stall work-around\n");
1552 }
1553 } else if (sc->sc_type >= WM_T_82571) {
1554 sc->sc_flags |= WM_F_PCIE;
1555 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1556 && (sc->sc_type != WM_T_ICH10)
1557 && (sc->sc_type != WM_T_PCH)
1558 && (sc->sc_type != WM_T_PCH2)
1559 && (sc->sc_type != WM_T_PCH_LPT)) {
1560 /* ICH* and PCH* have no PCIe capability registers */
1561 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1562 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1563 NULL) == 0)
1564 aprint_error_dev(sc->sc_dev,
1565 "unable to find PCIe capability\n");
1566 }
1567 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1568 } else {
1569 reg = CSR_READ(sc, WMREG_STATUS);
1570 if (reg & STATUS_BUS64)
1571 sc->sc_flags |= WM_F_BUS64;
1572 if ((reg & STATUS_PCIX_MODE) != 0) {
1573 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1574
1575 sc->sc_flags |= WM_F_PCIX;
1576 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1577 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1578 aprint_error_dev(sc->sc_dev,
1579 "unable to find PCIX capability\n");
1580 else if (sc->sc_type != WM_T_82545_3 &&
1581 sc->sc_type != WM_T_82546_3) {
1582 /*
1583 * Work around a problem caused by the BIOS
1584 * setting the max memory read byte count
1585 * incorrectly.
1586 */
1587 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1588 sc->sc_pcixe_capoff + PCIX_CMD);
1589 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1590 sc->sc_pcixe_capoff + PCIX_STATUS);
1591
1592 bytecnt =
1593 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1594 PCIX_CMD_BYTECNT_SHIFT;
1595 maxb =
1596 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1597 PCIX_STATUS_MAXB_SHIFT;
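				/*
				 * Both fields encode the size as 512 << n
				 * bytes (cf. the aprint below).  Clamp the
				 * programmed byte count down to the device's
				 * advertised maximum.
				 */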
1598 if (bytecnt > maxb) {
1599 aprint_verbose_dev(sc->sc_dev,
1600 "resetting PCI-X MMRBC: %d -> %d\n",
1601 512 << bytecnt, 512 << maxb);
1602 pcix_cmd = (pcix_cmd &
1603 ~PCIX_CMD_BYTECNT_MASK) |
1604 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1605 pci_conf_write(pa->pa_pc, pa->pa_tag,
1606 sc->sc_pcixe_capoff + PCIX_CMD,
1607 pcix_cmd);
1608 }
1609 }
1610 }
1611 /*
1612 * The quad port adapter is special; it has a PCIX-PCIX
1613 * bridge on the board, and can run the secondary bus at
1614 * a higher speed.
1615 */
1616 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1617 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1618 : 66;
1619 } else if (sc->sc_flags & WM_F_PCIX) {
1620 switch (reg & STATUS_PCIXSPD_MASK) {
1621 case STATUS_PCIXSPD_50_66:
1622 sc->sc_bus_speed = 66;
1623 break;
1624 case STATUS_PCIXSPD_66_100:
1625 sc->sc_bus_speed = 100;
1626 break;
1627 case STATUS_PCIXSPD_100_133:
1628 sc->sc_bus_speed = 133;
1629 break;
1630 default:
1631 aprint_error_dev(sc->sc_dev,
1632 "unknown PCIXSPD %d; assuming 66MHz\n",
1633 reg & STATUS_PCIXSPD_MASK);
1634 sc->sc_bus_speed = 66;
1635 break;
1636 }
1637 } else
1638 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1639 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1640 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1641 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1642 }
1643
1644 /*
1645 * Allocate the control data structures, and create and load the
1646 * DMA map for it.
1647 *
1648 * NOTE: All Tx descriptors must be in the same 4G segment of
1649 * memory. So must Rx descriptors. We simplify by allocating
1650 * both sets within the same 4G segment.
1651 */
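	/*
	 * The 4G constraint is enforced by the 0x100000000ULL boundary
	 * argument passed to bus_dmamem_alloc() below.
	 */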
1652 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1653 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1654 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1655 sizeof(struct wm_control_data_82542) :
1656 sizeof(struct wm_control_data_82544);
1657 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1658 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1659 &sc->sc_cd_rseg, 0)) != 0) {
1660 aprint_error_dev(sc->sc_dev,
1661 "unable to allocate control data, error = %d\n",
1662 error);
1663 goto fail_0;
1664 }
1665
1666 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1667 sc->sc_cd_rseg, sc->sc_cd_size,
1668 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1669 aprint_error_dev(sc->sc_dev,
1670 "unable to map control data, error = %d\n", error);
1671 goto fail_1;
1672 }
1673
1674 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1675 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1676 aprint_error_dev(sc->sc_dev,
1677 "unable to create control data DMA map, error = %d\n",
1678 error);
1679 goto fail_2;
1680 }
1681
1682 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1683 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1684 aprint_error_dev(sc->sc_dev,
1685 "unable to load control data DMA map, error = %d\n",
1686 error);
1687 goto fail_3;
1688 }
1689
1690 /* Create the transmit buffer DMA maps. */
1691 WM_TXQUEUELEN(sc) =
1692 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1693 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1694 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1695 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1696 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1697 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1698 aprint_error_dev(sc->sc_dev,
1699 "unable to create Tx DMA map %d, error = %d\n",
1700 i, error);
1701 goto fail_4;
1702 }
1703 }
1704
1705 /* Create the receive buffer DMA maps. */
1706 for (i = 0; i < WM_NRXDESC; i++) {
1707 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1708 MCLBYTES, 0, 0,
1709 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1710 aprint_error_dev(sc->sc_dev,
1711 			    "unable to create Rx DMA map %d, error = %d\n",
1712 i, error);
1713 goto fail_5;
1714 }
1715 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1716 }
1717
1718 /* clear interesting stat counters */
1719 CSR_READ(sc, WMREG_COLC);
1720 CSR_READ(sc, WMREG_RXERRC);
1721
1722 	/* Switch PHY control from SMBus to PCIe */
1723 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1724 || (sc->sc_type == WM_T_PCH_LPT))
1725 wm_smbustopci(sc);
1726
1727 /* Reset the chip to a known state. */
1728 wm_reset(sc);
1729
1730 /* Get some information about the EEPROM. */
1731 switch (sc->sc_type) {
1732 case WM_T_82542_2_0:
1733 case WM_T_82542_2_1:
1734 case WM_T_82543:
1735 case WM_T_82544:
1736 /* Microwire */
1737 sc->sc_nvm_wordsize = 64;
1738 sc->sc_nvm_addrbits = 6;
1739 break;
1740 case WM_T_82540:
1741 case WM_T_82545:
1742 case WM_T_82545_3:
1743 case WM_T_82546:
1744 case WM_T_82546_3:
1745 /* Microwire */
1746 reg = CSR_READ(sc, WMREG_EECD);
1747 if (reg & EECD_EE_SIZE) {
1748 sc->sc_nvm_wordsize = 256;
1749 sc->sc_nvm_addrbits = 8;
1750 } else {
1751 sc->sc_nvm_wordsize = 64;
1752 sc->sc_nvm_addrbits = 6;
1753 }
1754 sc->sc_flags |= WM_F_LOCK_EECD;
1755 break;
1756 case WM_T_82541:
1757 case WM_T_82541_2:
1758 case WM_T_82547:
1759 case WM_T_82547_2:
1760 sc->sc_flags |= WM_F_LOCK_EECD;
1761 reg = CSR_READ(sc, WMREG_EECD);
1762 if (reg & EECD_EE_TYPE) {
1763 /* SPI */
1764 sc->sc_flags |= WM_F_EEPROM_SPI;
1765 wm_nvm_set_addrbits_size_eecd(sc);
1766 } else {
1767 /* Microwire */
1768 if ((reg & EECD_EE_ABITS) != 0) {
1769 sc->sc_nvm_wordsize = 256;
1770 sc->sc_nvm_addrbits = 8;
1771 } else {
1772 sc->sc_nvm_wordsize = 64;
1773 sc->sc_nvm_addrbits = 6;
1774 }
1775 }
1776 break;
1777 case WM_T_82571:
1778 case WM_T_82572:
1779 /* SPI */
1780 sc->sc_flags |= WM_F_EEPROM_SPI;
1781 wm_nvm_set_addrbits_size_eecd(sc);
1782 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1783 break;
1784 case WM_T_82573:
1785 sc->sc_flags |= WM_F_LOCK_SWSM;
1786 /* FALLTHROUGH */
1787 case WM_T_82574:
1788 case WM_T_82583:
1789 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1790 sc->sc_flags |= WM_F_EEPROM_FLASH;
1791 sc->sc_nvm_wordsize = 2048;
1792 } else {
1793 /* SPI */
1794 sc->sc_flags |= WM_F_EEPROM_SPI;
1795 wm_nvm_set_addrbits_size_eecd(sc);
1796 }
1797 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1798 break;
1799 case WM_T_82575:
1800 case WM_T_82576:
1801 case WM_T_82580:
1802 case WM_T_I350:
1803 case WM_T_I354:
1804 case WM_T_80003:
1805 /* SPI */
1806 sc->sc_flags |= WM_F_EEPROM_SPI;
1807 wm_nvm_set_addrbits_size_eecd(sc);
1808 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1809 | WM_F_LOCK_SWSM;
1810 break;
1811 case WM_T_ICH8:
1812 case WM_T_ICH9:
1813 case WM_T_ICH10:
1814 case WM_T_PCH:
1815 case WM_T_PCH2:
1816 case WM_T_PCH_LPT:
1817 /* FLASH */
1818 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1819 sc->sc_nvm_wordsize = 2048;
1820 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1821 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1822 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1823 aprint_error_dev(sc->sc_dev,
1824 "can't map FLASH registers\n");
1825 goto fail_5;
1826 }
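		/*
		 * GFPREG describes the NVM region of the flash in units of
		 * ICH_FLASH_SECTOR_SIZE: the low field holds the first
		 * sector, the field at bit 16 the last.  The region contains
		 * two banks, so halve it and convert to 16-bit words.
		 */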
1827 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1828 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1829 ICH_FLASH_SECTOR_SIZE;
1830 sc->sc_ich8_flash_bank_size =
1831 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1832 sc->sc_ich8_flash_bank_size -=
1833 (reg & ICH_GFPREG_BASE_MASK);
1834 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1835 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1836 break;
1837 case WM_T_I210:
1838 case WM_T_I211:
1839 wm_nvm_set_addrbits_size_eecd(sc);
1840 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1841 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1842 break;
1843 default:
1844 break;
1845 }
1846
1847 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1848 switch (sc->sc_type) {
1849 case WM_T_82571:
1850 case WM_T_82572:
1851 reg = CSR_READ(sc, WMREG_SWSM2);
1852 if ((reg & SWSM2_LOCK) == 0) {
1853 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1854 force_clear_smbi = true;
1855 } else
1856 force_clear_smbi = false;
1857 break;
1858 case WM_T_82573:
1859 case WM_T_82574:
1860 case WM_T_82583:
1861 force_clear_smbi = true;
1862 break;
1863 default:
1864 force_clear_smbi = false;
1865 break;
1866 }
1867 if (force_clear_smbi) {
1868 reg = CSR_READ(sc, WMREG_SWSM);
1869 if ((reg & SWSM_SMBI) != 0)
1870 aprint_error_dev(sc->sc_dev,
1871 "Please update the Bootagent\n");
1872 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1873 }
1874
1875 /*
1876 	 * Defer printing the EEPROM type until after verifying the checksum.
1877 * This allows the EEPROM type to be printed correctly in the case
1878 * that no EEPROM is attached.
1879 */
1880 /*
1881 * Validate the EEPROM checksum. If the checksum fails, flag
1882 * this for later, so we can fail future reads from the EEPROM.
1883 */
1884 if (wm_nvm_validate_checksum(sc)) {
1885 /*
1886 	 * Validate the checksum again, because some PCI-e parts fail
1887 	 * the first check due to the link being in a sleep state.
1888 */
1889 if (wm_nvm_validate_checksum(sc))
1890 sc->sc_flags |= WM_F_EEPROM_INVALID;
1891 }
1892
1893 /* Set device properties (macflags) */
1894 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1895
1896 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1897 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1898 else {
1899 aprint_verbose_dev(sc->sc_dev, "%u words ",
1900 sc->sc_nvm_wordsize);
1901 if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1902 aprint_verbose("FLASH(HW)\n");
1903 } else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1904 aprint_verbose("FLASH\n");
1905 } else {
1906 if (sc->sc_flags & WM_F_EEPROM_SPI)
1907 eetype = "SPI";
1908 else
1909 eetype = "MicroWire";
1910 aprint_verbose("(%d address bits) %s EEPROM\n",
1911 sc->sc_nvm_addrbits, eetype);
1912 }
1913 }
1914
1915 switch (sc->sc_type) {
1916 case WM_T_82571:
1917 case WM_T_82572:
1918 case WM_T_82573:
1919 case WM_T_82574:
1920 case WM_T_82583:
1921 case WM_T_80003:
1922 case WM_T_ICH8:
1923 case WM_T_ICH9:
1924 case WM_T_ICH10:
1925 case WM_T_PCH:
1926 case WM_T_PCH2:
1927 case WM_T_PCH_LPT:
1928 if (wm_check_mng_mode(sc) != 0)
1929 wm_get_hw_control(sc);
1930 break;
1931 default:
1932 break;
1933 }
1934 wm_get_wakeup(sc);
1935 /*
1936 * Read the Ethernet address from the EEPROM, if not first found
1937 * in device properties.
1938 */
1939 ea = prop_dictionary_get(dict, "mac-address");
1940 if (ea != NULL) {
1941 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1942 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1943 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1944 } else {
1945 if (wm_read_mac_addr(sc, enaddr) != 0) {
1946 aprint_error_dev(sc->sc_dev,
1947 "unable to read Ethernet address\n");
1948 goto fail_5;
1949 }
1950 }
1951
1952 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1953 ether_sprintf(enaddr));
1954
1955 /*
1956 * Read the config info from the EEPROM, and set up various
1957 * bits in the control registers based on their contents.
1958 */
1959 pn = prop_dictionary_get(dict, "i82543-cfg1");
1960 if (pn != NULL) {
1961 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1962 cfg1 = (uint16_t) prop_number_integer_value(pn);
1963 } else {
1964 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
1965 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1966 goto fail_5;
1967 }
1968 }
1969
1970 pn = prop_dictionary_get(dict, "i82543-cfg2");
1971 if (pn != NULL) {
1972 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1973 cfg2 = (uint16_t) prop_number_integer_value(pn);
1974 } else {
1975 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
1976 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1977 goto fail_5;
1978 }
1979 }
1980
1981 /* check for WM_F_WOL */
1982 switch (sc->sc_type) {
1983 case WM_T_82542_2_0:
1984 case WM_T_82542_2_1:
1985 case WM_T_82543:
1986 /* dummy? */
1987 eeprom_data = 0;
1988 apme_mask = NVM_CFG3_APME;
1989 break;
1990 case WM_T_82544:
1991 apme_mask = NVM_CFG2_82544_APM_EN;
1992 eeprom_data = cfg2;
1993 break;
1994 case WM_T_82546:
1995 case WM_T_82546_3:
1996 case WM_T_82571:
1997 case WM_T_82572:
1998 case WM_T_82573:
1999 case WM_T_82574:
2000 case WM_T_82583:
2001 case WM_T_80003:
2002 default:
2003 apme_mask = NVM_CFG3_APME;
2004 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2005 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2006 break;
2007 case WM_T_82575:
2008 case WM_T_82576:
2009 case WM_T_82580:
2010 case WM_T_I350:
2011 case WM_T_I354: /* XXX ok? */
2012 case WM_T_ICH8:
2013 case WM_T_ICH9:
2014 case WM_T_ICH10:
2015 case WM_T_PCH:
2016 case WM_T_PCH2:
2017 case WM_T_PCH_LPT:
2018 /* XXX The funcid should be checked on some devices */
2019 apme_mask = WUC_APME;
2020 eeprom_data = CSR_READ(sc, WMREG_WUC);
2021 break;
2022 }
2023
2024 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2025 if ((eeprom_data & apme_mask) != 0)
2026 sc->sc_flags |= WM_F_WOL;
2027 #ifdef WM_DEBUG
2028 if ((sc->sc_flags & WM_F_WOL) != 0)
2029 printf("WOL\n");
2030 #endif
2031
2032 /*
2033 * XXX need special handling for some multiple port cards
2034 	 * to disable a particular port.
2035 */
2036
2037 if (sc->sc_type >= WM_T_82544) {
2038 pn = prop_dictionary_get(dict, "i82543-swdpin");
2039 if (pn != NULL) {
2040 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2041 swdpin = (uint16_t) prop_number_integer_value(pn);
2042 } else {
2043 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2044 aprint_error_dev(sc->sc_dev,
2045 "unable to read SWDPIN\n");
2046 goto fail_5;
2047 }
2048 }
2049 }
2050
2051 if (cfg1 & NVM_CFG1_ILOS)
2052 sc->sc_ctrl |= CTRL_ILOS;
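	/*
	 * Copy the software-controllable pin direction (SWDPIO) and state
	 * (SWDPIN) nibbles from the NVM into the corresponding CTRL fields;
	 * pre-82544 chips carry SWDPIO in CFG1 instead.
	 */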
2053 if (sc->sc_type >= WM_T_82544) {
2054 sc->sc_ctrl |=
2055 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2056 CTRL_SWDPIO_SHIFT;
2057 sc->sc_ctrl |=
2058 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2059 CTRL_SWDPINS_SHIFT;
2060 } else {
2061 sc->sc_ctrl |=
2062 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2063 CTRL_SWDPIO_SHIFT;
2064 }
2065
2066 #if 0
2067 if (sc->sc_type >= WM_T_82544) {
2068 if (cfg1 & NVM_CFG1_IPS0)
2069 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2070 if (cfg1 & NVM_CFG1_IPS1)
2071 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2072 sc->sc_ctrl_ext |=
2073 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2074 CTRL_EXT_SWDPIO_SHIFT;
2075 sc->sc_ctrl_ext |=
2076 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2077 CTRL_EXT_SWDPINS_SHIFT;
2078 } else {
2079 sc->sc_ctrl_ext |=
2080 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2081 CTRL_EXT_SWDPIO_SHIFT;
2082 }
2083 #endif
2084
2085 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2086 #if 0
2087 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2088 #endif
2089
2090 /*
2091 * Set up some register offsets that are different between
2092 * the i82542 and the i82543 and later chips.
2093 */
2094 if (sc->sc_type < WM_T_82543) {
2095 sc->sc_rdt_reg = WMREG_OLD_RDT0;
2096 sc->sc_tdt_reg = WMREG_OLD_TDT;
2097 } else {
2098 sc->sc_rdt_reg = WMREG_RDT;
2099 sc->sc_tdt_reg = WMREG_TDT;
2100 }
2101
2102 if (sc->sc_type == WM_T_PCH) {
2103 uint16_t val;
2104
2105 /* Save the NVM K1 bit setting */
2106 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2107
2108 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2109 sc->sc_nvm_k1_enabled = 1;
2110 else
2111 sc->sc_nvm_k1_enabled = 0;
2112 }
2113
2114 /*
2115 	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
2116 * media structures accordingly.
2117 */
2118 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2119 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2120 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2121 || sc->sc_type == WM_T_82573
2122 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2123 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2124 wm_gmii_mediainit(sc, wmp->wmp_product);
2125 } else if (sc->sc_type < WM_T_82543 ||
2126 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2127 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2128 aprint_error_dev(sc->sc_dev,
2129 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2130 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2131 }
2132 wm_tbi_mediainit(sc);
2133 } else {
2134 switch (sc->sc_type) {
2135 case WM_T_82575:
2136 case WM_T_82576:
2137 case WM_T_82580:
2138 case WM_T_I350:
2139 case WM_T_I354:
2140 case WM_T_I210:
2141 case WM_T_I211:
2142 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2143 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2144 switch (link_mode) {
2145 case CTRL_EXT_LINK_MODE_1000KX:
2146 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2147 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2148 break;
2149 case CTRL_EXT_LINK_MODE_SGMII:
2150 if (wm_sgmii_uses_mdio(sc)) {
2151 aprint_verbose_dev(sc->sc_dev,
2152 "SGMII(MDIO)\n");
2153 sc->sc_flags |= WM_F_SGMII;
2154 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2155 break;
2156 }
2157 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2158 /*FALLTHROUGH*/
2159 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2160 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2161 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2162 if (link_mode
2163 == CTRL_EXT_LINK_MODE_SGMII) {
2164 sc->sc_mediatype
2165 = WM_MEDIATYPE_COPPER;
2166 sc->sc_flags |= WM_F_SGMII;
2167 } else {
2168 sc->sc_mediatype
2169 = WM_MEDIATYPE_SERDES;
2170 aprint_verbose_dev(sc->sc_dev,
2171 "SERDES\n");
2172 }
2173 break;
2174 }
2175 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2176 aprint_verbose_dev(sc->sc_dev,
2177 "SERDES\n");
2178
2179 /* Change current link mode setting */
2180 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2181 switch (sc->sc_mediatype) {
2182 case WM_MEDIATYPE_COPPER:
2183 reg |= CTRL_EXT_LINK_MODE_SGMII;
2184 break;
2185 case WM_MEDIATYPE_SERDES:
2186 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2187 break;
2188 default:
2189 break;
2190 }
2191 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2192 break;
2193 case CTRL_EXT_LINK_MODE_GMII:
2194 default:
2195 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2196 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2197 break;
2198 }
2199
2200 			/* Enable the I2C interface only when using SGMII */
2201 if ((sc->sc_flags & WM_F_SGMII) != 0)
2202 reg |= CTRL_EXT_I2C_ENA;
2203 else
2204 reg &= ~CTRL_EXT_I2C_ENA;
2205 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2206
2207 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2208 wm_gmii_mediainit(sc, wmp->wmp_product);
2209 else
2210 wm_tbi_mediainit(sc);
2211 break;
2212 default:
2213 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2214 aprint_error_dev(sc->sc_dev,
2215 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2216 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2217 wm_gmii_mediainit(sc, wmp->wmp_product);
2218 }
2219 }
2220
2221 ifp = &sc->sc_ethercom.ec_if;
2222 xname = device_xname(sc->sc_dev);
2223 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2224 ifp->if_softc = sc;
2225 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2226 ifp->if_ioctl = wm_ioctl;
2227 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2228 ifp->if_start = wm_nq_start;
2229 else
2230 ifp->if_start = wm_start;
2231 ifp->if_watchdog = wm_watchdog;
2232 ifp->if_init = wm_init;
2233 ifp->if_stop = wm_stop;
2234 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2235 IFQ_SET_READY(&ifp->if_snd);
2236
2237 /* Check for jumbo frame */
2238 switch (sc->sc_type) {
2239 case WM_T_82573:
2240 /* XXX limited to 9234 if ASPM is disabled */
2241 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &io3);
2242 if ((io3 & NVM_3GIO_3_ASPM_MASK) != 0)
2243 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2244 break;
2245 case WM_T_82571:
2246 case WM_T_82572:
2247 case WM_T_82574:
2248 case WM_T_82575:
2249 case WM_T_82576:
2250 case WM_T_82580:
2251 case WM_T_I350:
2252 case WM_T_I354: /* XXXX ok? */
2253 case WM_T_I210:
2254 case WM_T_I211:
2255 case WM_T_80003:
2256 case WM_T_ICH9:
2257 case WM_T_ICH10:
2258 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2259 case WM_T_PCH_LPT:
2260 /* XXX limited to 9234 */
2261 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2262 break;
2263 case WM_T_PCH:
2264 /* XXX limited to 4096 */
2265 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2266 break;
2267 case WM_T_82542_2_0:
2268 case WM_T_82542_2_1:
2269 case WM_T_82583:
2270 case WM_T_ICH8:
2271 /* No support for jumbo frame */
2272 break;
2273 default:
2274 /* ETHER_MAX_LEN_JUMBO */
2275 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2276 break;
2277 }
2278
2279 	/* If we're an i82543 or greater, we can support VLANs. */
2280 if (sc->sc_type >= WM_T_82543)
2281 sc->sc_ethercom.ec_capabilities |=
2282 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2283
2284 /*
2285 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
2286 	 * on the i82543 and later.
2287 */
2288 if (sc->sc_type >= WM_T_82543) {
2289 ifp->if_capabilities |=
2290 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2291 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2292 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2293 IFCAP_CSUM_TCPv6_Tx |
2294 IFCAP_CSUM_UDPv6_Tx;
2295 }
2296
2297 /*
2298 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2299 *
2300 * 82541GI (8086:1076) ... no
2301 * 82572EI (8086:10b9) ... yes
2302 */
2303 if (sc->sc_type >= WM_T_82571) {
2304 ifp->if_capabilities |=
2305 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2306 }
2307
2308 /*
2309 	 * If we're an i82544 or greater (except the i82547), we can do
2310 * TCP segmentation offload.
2311 */
2312 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2313 ifp->if_capabilities |= IFCAP_TSOv4;
2314 }
2315
2316 if (sc->sc_type >= WM_T_82571) {
2317 ifp->if_capabilities |= IFCAP_TSOv6;
2318 }
2319
2320 #ifdef WM_MPSAFE
2321 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2322 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2323 #else
2324 sc->sc_tx_lock = NULL;
2325 sc->sc_rx_lock = NULL;
2326 #endif
2327
2328 /* Attach the interface. */
2329 if_attach(ifp);
2330 ether_ifattach(ifp, enaddr);
2331 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2332 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2333 RND_FLAG_DEFAULT);
2334
2335 #ifdef WM_EVENT_COUNTERS
2336 /* Attach event counters. */
2337 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2338 NULL, xname, "txsstall");
2339 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2340 NULL, xname, "txdstall");
2341 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2342 NULL, xname, "txfifo_stall");
2343 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2344 NULL, xname, "txdw");
2345 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2346 NULL, xname, "txqe");
2347 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2348 NULL, xname, "rxintr");
2349 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2350 NULL, xname, "linkintr");
2351
2352 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2353 NULL, xname, "rxipsum");
2354 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2355 NULL, xname, "rxtusum");
2356 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2357 NULL, xname, "txipsum");
2358 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2359 NULL, xname, "txtusum");
2360 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2361 NULL, xname, "txtusum6");
2362
2363 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2364 NULL, xname, "txtso");
2365 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2366 NULL, xname, "txtso6");
2367 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2368 NULL, xname, "txtsopain");
2369
2370 for (i = 0; i < WM_NTXSEGS; i++) {
2371 snprintf(wm_txseg_evcnt_names[i],
2372 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2373 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2374 NULL, xname, wm_txseg_evcnt_names[i]);
2375 }
2376
2377 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2378 NULL, xname, "txdrop");
2379
2380 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2381 NULL, xname, "tu");
2382
2383 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2384 NULL, xname, "tx_xoff");
2385 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2386 NULL, xname, "tx_xon");
2387 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2388 NULL, xname, "rx_xoff");
2389 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2390 NULL, xname, "rx_xon");
2391 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2392 NULL, xname, "rx_macctl");
2393 #endif /* WM_EVENT_COUNTERS */
2394
2395 if (pmf_device_register(self, wm_suspend, wm_resume))
2396 pmf_class_network_register(self, ifp);
2397 else
2398 aprint_error_dev(self, "couldn't establish power handler\n");
2399
2400 sc->sc_flags |= WM_F_ATTACHED;
2401 return;
2402
2403 /*
2404 * Free any resources we've allocated during the failed attach
2405 * attempt. Do this in reverse order and fall through.
2406 */
2407 fail_5:
2408 for (i = 0; i < WM_NRXDESC; i++) {
2409 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2410 bus_dmamap_destroy(sc->sc_dmat,
2411 sc->sc_rxsoft[i].rxs_dmamap);
2412 }
2413 fail_4:
2414 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2415 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2416 bus_dmamap_destroy(sc->sc_dmat,
2417 sc->sc_txsoft[i].txs_dmamap);
2418 }
2419 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2420 fail_3:
2421 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2422 fail_2:
2423 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2424 sc->sc_cd_size);
2425 fail_1:
2426 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2427 fail_0:
2428 return;
2429 }
2430
2431 /* The detach function (ca_detach) */
2432 static int
2433 wm_detach(device_t self, int flags __unused)
2434 {
2435 struct wm_softc *sc = device_private(self);
2436 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2437 int i;
2438 #ifndef WM_MPSAFE
2439 int s;
2440 #endif
2441
2442 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2443 return 0;
2444
2445 #ifndef WM_MPSAFE
2446 s = splnet();
2447 #endif
2448 	/* Stop the interface; callouts are stopped inside wm_stop(). */
2449 wm_stop(ifp, 1);
2450
2451 #ifndef WM_MPSAFE
2452 splx(s);
2453 #endif
2454
2455 pmf_device_deregister(self);
2456
2457 /* Tell the firmware about the release */
2458 WM_BOTH_LOCK(sc);
2459 wm_release_manageability(sc);
2460 wm_release_hw_control(sc);
2461 WM_BOTH_UNLOCK(sc);
2462
2463 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2464
2465 /* Delete all remaining media. */
2466 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2467
2468 ether_ifdetach(ifp);
2469 if_detach(ifp);
2470
2471
2472 /* Unload RX dmamaps and free mbufs */
2473 WM_RX_LOCK(sc);
2474 wm_rxdrain(sc);
2475 WM_RX_UNLOCK(sc);
2476 /* Must unlock here */
2477
2478 	/* Free the DMA maps; this mirrors the cleanup at the end of wm_attach(). */
2479 for (i = 0; i < WM_NRXDESC; i++) {
2480 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2481 bus_dmamap_destroy(sc->sc_dmat,
2482 sc->sc_rxsoft[i].rxs_dmamap);
2483 }
2484 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2485 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2486 bus_dmamap_destroy(sc->sc_dmat,
2487 sc->sc_txsoft[i].txs_dmamap);
2488 }
2489 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2490 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2491 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2492 sc->sc_cd_size);
2493 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2494
2495 /* Disestablish the interrupt handler */
2496 if (sc->sc_ih != NULL) {
2497 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2498 sc->sc_ih = NULL;
2499 }
2500
2501 /* Unmap the registers */
2502 if (sc->sc_ss) {
2503 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2504 sc->sc_ss = 0;
2505 }
2506
2507 if (sc->sc_ios) {
2508 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2509 sc->sc_ios = 0;
2510 }
2511
2512 if (sc->sc_tx_lock)
2513 mutex_obj_free(sc->sc_tx_lock);
2514 if (sc->sc_rx_lock)
2515 mutex_obj_free(sc->sc_rx_lock);
2516
2517 return 0;
2518 }
2519
2520 static bool
2521 wm_suspend(device_t self, const pmf_qual_t *qual)
2522 {
2523 struct wm_softc *sc = device_private(self);
2524
2525 wm_release_manageability(sc);
2526 wm_release_hw_control(sc);
2527 #ifdef WM_WOL
2528 wm_enable_wakeup(sc);
2529 #endif
2530
2531 return true;
2532 }
2533
2534 static bool
2535 wm_resume(device_t self, const pmf_qual_t *qual)
2536 {
2537 struct wm_softc *sc = device_private(self);
2538
2539 wm_init_manageability(sc);
2540
2541 return true;
2542 }
2543
2544 /*
2545 * wm_watchdog: [ifnet interface function]
2546 *
2547 * Watchdog timer handler.
2548 */
2549 static void
2550 wm_watchdog(struct ifnet *ifp)
2551 {
2552 struct wm_softc *sc = ifp->if_softc;
2553
2554 /*
2555 * Since we're using delayed interrupts, sweep up
2556 * before we report an error.
2557 */
2558 WM_TX_LOCK(sc);
2559 wm_txintr(sc);
2560 WM_TX_UNLOCK(sc);
2561
2562 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2563 #ifdef WM_DEBUG
2564 int i, j;
2565 struct wm_txsoft *txs;
2566 #endif
2567 log(LOG_ERR,
2568 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2569 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2570 sc->sc_txnext);
2571 ifp->if_oerrors++;
2572 #ifdef WM_DEBUG
2573 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2574 i = WM_NEXTTXS(sc, i)) {
2575 txs = &sc->sc_txsoft[i];
2576 printf("txs %d tx %d -> %d\n",
2577 i, txs->txs_firstdesc, txs->txs_lastdesc);
2578 for (j = txs->txs_firstdesc; ;
2579 j = WM_NEXTTX(sc, j)) {
2580 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2581 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2582 printf("\t %#08x%08x\n",
2583 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2584 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2585 if (j == txs->txs_lastdesc)
2586 break;
2587 }
2588 }
2589 #endif
2590 /* Reset the interface. */
2591 (void) wm_init(ifp);
2592 }
2593
2594 /* Try to get more packets going. */
2595 ifp->if_start(ifp);
2596 }
2597
2598 /*
2599 * wm_tick:
2600 *
2601 * One second timer, used to check link status, sweep up
2602 * completed transmit jobs, etc.
2603 */
2604 static void
2605 wm_tick(void *arg)
2606 {
2607 struct wm_softc *sc = arg;
2608 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2609 #ifndef WM_MPSAFE
2610 int s;
2611
2612 s = splnet();
2613 #endif
2614
2615 WM_TX_LOCK(sc);
2616
2617 if (sc->sc_stopping)
2618 goto out;
2619
2620 if (sc->sc_type >= WM_T_82542_2_1) {
2621 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2622 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2623 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2624 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2625 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2626 }
2627
2628 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2629 ifp->if_ierrors += 0ULL + /* ensure quad_t */
2630 + CSR_READ(sc, WMREG_CRCERRS)
2631 + CSR_READ(sc, WMREG_ALGNERRC)
2632 + CSR_READ(sc, WMREG_SYMERRC)
2633 + CSR_READ(sc, WMREG_RXERRC)
2634 + CSR_READ(sc, WMREG_SEC)
2635 + CSR_READ(sc, WMREG_CEXTERR)
2636 + CSR_READ(sc, WMREG_RLEC);
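	/*
	 * MPC counts packets missed for lack of receive resources; RNBC
	 * counts receive-no-buffer events.
	 */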
2637 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2638
2639 if (sc->sc_flags & WM_F_HAS_MII)
2640 mii_tick(&sc->sc_mii);
2641 else
2642 wm_tbi_check_link(sc);
2643
2644 out:
2645 WM_TX_UNLOCK(sc);
2646 #ifndef WM_MPSAFE
2647 splx(s);
2648 #endif
2649
2650 if (!sc->sc_stopping)
2651 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2652 }
2653
2654 static int
2655 wm_ifflags_cb(struct ethercom *ec)
2656 {
2657 struct ifnet *ifp = &ec->ec_if;
2658 struct wm_softc *sc = ifp->if_softc;
2659 int change = ifp->if_flags ^ sc->sc_if_flags;
2660 int rc = 0;
2661
2662 WM_BOTH_LOCK(sc);
2663
2664 if (change != 0)
2665 sc->sc_if_flags = ifp->if_flags;
2666
2667 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2668 rc = ENETRESET;
2669 goto out;
2670 }
2671
2672 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2673 wm_set_filter(sc);
2674
2675 wm_set_vlan(sc);
2676
2677 out:
2678 WM_BOTH_UNLOCK(sc);
2679
2680 return rc;
2681 }
2682
2683 /*
2684 * wm_ioctl: [ifnet interface function]
2685 *
2686 * Handle control requests from the operator.
2687 */
2688 static int
2689 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2690 {
2691 struct wm_softc *sc = ifp->if_softc;
2692 struct ifreq *ifr = (struct ifreq *) data;
2693 struct ifaddr *ifa = (struct ifaddr *)data;
2694 struct sockaddr_dl *sdl;
2695 int s, error;
2696
2697 #ifndef WM_MPSAFE
2698 s = splnet();
2699 #endif
2700 switch (cmd) {
2701 case SIOCSIFMEDIA:
2702 case SIOCGIFMEDIA:
2703 WM_BOTH_LOCK(sc);
2704 /* Flow control requires full-duplex mode. */
2705 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2706 (ifr->ifr_media & IFM_FDX) == 0)
2707 ifr->ifr_media &= ~IFM_ETH_FMASK;
2708 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2709 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2710 /* We can do both TXPAUSE and RXPAUSE. */
2711 ifr->ifr_media |=
2712 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2713 }
2714 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2715 }
2716 WM_BOTH_UNLOCK(sc);
2717 #ifdef WM_MPSAFE
2718 s = splnet();
2719 #endif
2720 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2721 #ifdef WM_MPSAFE
2722 splx(s);
2723 #endif
2724 break;
2725 case SIOCINITIFADDR:
2726 WM_BOTH_LOCK(sc);
2727 if (ifa->ifa_addr->sa_family == AF_LINK) {
2728 sdl = satosdl(ifp->if_dl->ifa_addr);
2729 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2730 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2731 /* unicast address is first multicast entry */
2732 wm_set_filter(sc);
2733 error = 0;
2734 WM_BOTH_UNLOCK(sc);
2735 break;
2736 }
2737 WM_BOTH_UNLOCK(sc);
2738 /*FALLTHROUGH*/
2739 default:
2740 #ifdef WM_MPSAFE
2741 s = splnet();
2742 #endif
2743 /* It may call wm_start, so unlock here */
2744 error = ether_ioctl(ifp, cmd, data);
2745 #ifdef WM_MPSAFE
2746 splx(s);
2747 #endif
2748 if (error != ENETRESET)
2749 break;
2750
2751 error = 0;
2752
2753 if (cmd == SIOCSIFCAP) {
2754 error = (*ifp->if_init)(ifp);
2755 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2756 ;
2757 else if (ifp->if_flags & IFF_RUNNING) {
2758 /*
2759 * Multicast list has changed; set the hardware filter
2760 * accordingly.
2761 */
2762 WM_BOTH_LOCK(sc);
2763 wm_set_filter(sc);
2764 WM_BOTH_UNLOCK(sc);
2765 }
2766 break;
2767 }
2768
2769 /* Try to get more packets going. */
2770 ifp->if_start(ifp);
2771
2772 #ifndef WM_MPSAFE
2773 splx(s);
2774 #endif
2775 return error;
2776 }
2777
2778 /* MAC address related */
2779
2780 /*
2781  * Get the offset of the MAC address and return it.
2782  * If an error occurs, offset 0 is used.
2783 */
2784 static uint16_t
2785 wm_check_alt_mac_addr(struct wm_softc *sc)
2786 {
2787 uint16_t myea[ETHER_ADDR_LEN / 2];
2788 uint16_t offset = NVM_OFF_MACADDR;
2789
2790 /* Try to read alternative MAC address pointer */
2791 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2792 return 0;
2793
2794 	/* Check whether the pointer is valid. */
2795 if ((offset == 0x0000) || (offset == 0xffff))
2796 return 0;
2797
2798 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
2799 /*
2800 	 * Check whether the alternative MAC address is valid. Some
2801 	 * cards have a non-0xffff pointer but don't actually use an
2802 	 * alternative MAC address.
2803 	 *
2804 	 * The address is used only if its multicast (group) bit is clear.
2805 */
2806 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2807 if (((myea[0] & 0xff) & 0x01) == 0)
2808 return offset; /* Found */
2809
2810 /* Not found */
2811 return 0;
2812 }
2813
2814 static int
2815 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2816 {
2817 uint16_t myea[ETHER_ADDR_LEN / 2];
2818 uint16_t offset = NVM_OFF_MACADDR;
2819 int do_invert = 0;
2820
2821 switch (sc->sc_type) {
2822 case WM_T_82580:
2823 case WM_T_I350:
2824 case WM_T_I354:
2825 /* EEPROM Top Level Partitioning */
2826 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
2827 break;
2828 case WM_T_82571:
2829 case WM_T_82575:
2830 case WM_T_82576:
2831 case WM_T_80003:
2832 case WM_T_I210:
2833 case WM_T_I211:
2834 offset = wm_check_alt_mac_addr(sc);
2835 if (offset == 0)
2836 if ((sc->sc_funcid & 0x01) == 1)
2837 do_invert = 1;
2838 break;
2839 default:
2840 if ((sc->sc_funcid & 0x01) == 1)
2841 do_invert = 1;
2842 break;
2843 }
2844
2845 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2846 myea) != 0)
2847 goto bad;
2848
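	/* Each 16-bit NVM word holds two address octets, low byte first. */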
2849 enaddr[0] = myea[0] & 0xff;
2850 enaddr[1] = myea[0] >> 8;
2851 enaddr[2] = myea[1] & 0xff;
2852 enaddr[3] = myea[1] >> 8;
2853 enaddr[4] = myea[2] & 0xff;
2854 enaddr[5] = myea[2] >> 8;
2855
2856 /*
2857 * Toggle the LSB of the MAC address on the second port
2858 * of some dual port cards.
2859 */
2860 if (do_invert != 0)
2861 enaddr[5] ^= 1;
2862
2863 return 0;
2864
2865 bad:
2866 return -1;
2867 }
2868
2869 /*
2870 * wm_set_ral:
2871 *
2872  * Set an entry in the receive address list.
2873 */
2874 static void
2875 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2876 {
2877 uint32_t ral_lo, ral_hi;
2878
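	/*
	 * An entry is split across two registers: the low register holds
	 * the first four octets and the high register the last two, with
	 * RAL_AV marking the entry valid.  A NULL enaddr clears the slot.
	 */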
2879 if (enaddr != NULL) {
2880 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2881 (enaddr[3] << 24);
2882 ral_hi = enaddr[4] | (enaddr[5] << 8);
2883 ral_hi |= RAL_AV;
2884 } else {
2885 ral_lo = 0;
2886 ral_hi = 0;
2887 }
2888
2889 if (sc->sc_type >= WM_T_82544) {
2890 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2891 ral_lo);
2892 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2893 ral_hi);
2894 } else {
2895 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2896 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2897 }
2898 }
2899
2900 /*
2901 * wm_mchash:
2902 *
2903 * Compute the hash of the multicast address for the 4096-bit
2904 * multicast filter.
2905 */
2906 static uint32_t
2907 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2908 {
2909 static const int lo_shift[4] = { 4, 3, 2, 0 };
2910 static const int hi_shift[4] = { 4, 5, 6, 8 };
2911 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2912 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
2913 uint32_t hash;
2914
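	/*
	 * The shift tables select which bits of address bytes 4 and 5 form
	 * the filter index: 12 bits normally, 10 bits on ICH/PCH variants,
	 * which have a smaller multicast table.
	 */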
2915 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2916 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2917 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
2918 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
2919 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
2920 return (hash & 0x3ff);
2921 }
2922 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2923 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2924
2925 return (hash & 0xfff);
2926 }
2927
2928 /*
2929 * wm_set_filter:
2930 *
2931 * Set up the receive filter.
2932 */
2933 static void
2934 wm_set_filter(struct wm_softc *sc)
2935 {
2936 struct ethercom *ec = &sc->sc_ethercom;
2937 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2938 struct ether_multi *enm;
2939 struct ether_multistep step;
2940 bus_addr_t mta_reg;
2941 uint32_t hash, reg, bit;
2942 int i, size;
2943
2944 if (sc->sc_type >= WM_T_82544)
2945 mta_reg = WMREG_CORDOVA_MTA;
2946 else
2947 mta_reg = WMREG_MTA;
2948
2949 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2950
2951 if (ifp->if_flags & IFF_BROADCAST)
2952 sc->sc_rctl |= RCTL_BAM;
2953 if (ifp->if_flags & IFF_PROMISC) {
2954 sc->sc_rctl |= RCTL_UPE;
2955 goto allmulti;
2956 }
2957
2958 /*
2959 * Set the station address in the first RAL slot, and
2960 * clear the remaining slots.
2961 */
2962 if (sc->sc_type == WM_T_ICH8)
2963 		size = WM_RAL_TABSIZE_ICH8 - 1;
2964 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
2965 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
2966 || (sc->sc_type == WM_T_PCH_LPT))
2967 size = WM_RAL_TABSIZE_ICH8;
2968 else if (sc->sc_type == WM_T_82575)
2969 size = WM_RAL_TABSIZE_82575;
2970 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
2971 size = WM_RAL_TABSIZE_82576;
2972 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2973 size = WM_RAL_TABSIZE_I350;
2974 else
2975 size = WM_RAL_TABSIZE;
2976 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
2977 for (i = 1; i < size; i++)
2978 wm_set_ral(sc, NULL, i);
2979
2980 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2981 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2982 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
2983 size = WM_ICH8_MC_TABSIZE;
2984 else
2985 size = WM_MC_TABSIZE;
2986 /* Clear out the multicast table. */
2987 for (i = 0; i < size; i++)
2988 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2989
2990 ETHER_FIRST_MULTI(step, ec, enm);
2991 while (enm != NULL) {
2992 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2993 /*
2994 * We must listen to a range of multicast addresses.
2995 * For now, just accept all multicasts, rather than
2996 * trying to set only those filter bits needed to match
2997 * the range. (At this time, the only use of address
2998 * ranges is for IP multicast routing, for which the
2999 * range is big enough to require all bits set.)
3000 */
3001 goto allmulti;
3002 }
3003
3004 hash = wm_mchash(sc, enm->enm_addrlo);
3005
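		/*
		 * The upper bits of the hash select a 32-bit word of the
		 * multicast table; the low five bits select the bit within
		 * that word.
		 */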
3006 reg = (hash >> 5);
3007 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3008 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3009 || (sc->sc_type == WM_T_PCH2)
3010 || (sc->sc_type == WM_T_PCH_LPT))
3011 reg &= 0x1f;
3012 else
3013 reg &= 0x7f;
3014 bit = hash & 0x1f;
3015
3016 hash = CSR_READ(sc, mta_reg + (reg << 2));
3017 hash |= 1U << bit;
3018
3019 /* XXX Hardware bug?? */
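		/*
		 * Presumably an 82544 erratum: after updating certain MTA
		 * words, the previous word apparently must be rewritten to
		 * keep the neighbouring entry intact.
		 */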
3020 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3021 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3022 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3023 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3024 } else
3025 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3026
3027 ETHER_NEXT_MULTI(step, enm);
3028 }
3029
3030 ifp->if_flags &= ~IFF_ALLMULTI;
3031 goto setit;
3032
3033 allmulti:
3034 ifp->if_flags |= IFF_ALLMULTI;
3035 sc->sc_rctl |= RCTL_MPE;
3036
3037 setit:
3038 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3039 }
3040
3041 /* Reset and init related */
3042
3043 static void
3044 wm_set_vlan(struct wm_softc *sc)
3045 {
3046 /* Deal with VLAN enables. */
3047 if (VLAN_ATTACHED(&sc->sc_ethercom))
3048 sc->sc_ctrl |= CTRL_VME;
3049 else
3050 sc->sc_ctrl &= ~CTRL_VME;
3051
3052 /* Write the control registers. */
3053 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3054 }
3055
3056 static void
3057 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3058 {
3059 uint32_t gcr;
3060 pcireg_t ctrl2;
3061
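	/*
	 * Devices without a version-2 PCIe capability carry the timeout in
	 * GCR; newer devices program it through the standard Device
	 * Control 2 register instead.
	 */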
3062 gcr = CSR_READ(sc, WMREG_GCR);
3063
3064 /* Only take action if timeout value is defaulted to 0 */
3065 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3066 goto out;
3067
3068 if ((gcr & GCR_CAP_VER2) == 0) {
3069 gcr |= GCR_CMPL_TMOUT_10MS;
3070 goto out;
3071 }
3072
3073 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3074 sc->sc_pcixe_capoff + PCIE_DCSR2);
3075 ctrl2 |= WM_PCIE_DCSR2_16MS;
3076 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3077 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3078
3079 out:
3080 /* Disable completion timeout resend */
3081 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3082
3083 CSR_WRITE(sc, WMREG_GCR, gcr);
3084 }
3085
3086 void
3087 wm_get_auto_rd_done(struct wm_softc *sc)
3088 {
3089 int i;
3090
3091 /* wait for eeprom to reload */
3092 switch (sc->sc_type) {
3093 case WM_T_82571:
3094 case WM_T_82572:
3095 case WM_T_82573:
3096 case WM_T_82574:
3097 case WM_T_82583:
3098 case WM_T_82575:
3099 case WM_T_82576:
3100 case WM_T_82580:
3101 case WM_T_I350:
3102 case WM_T_I354:
3103 case WM_T_I210:
3104 case WM_T_I211:
3105 case WM_T_80003:
3106 case WM_T_ICH8:
3107 case WM_T_ICH9:
3108 for (i = 0; i < 10; i++) {
3109 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3110 break;
3111 delay(1000);
3112 }
3113 if (i == 10) {
3114 log(LOG_ERR, "%s: auto read from eeprom failed to "
3115 "complete\n", device_xname(sc->sc_dev));
3116 }
3117 break;
3118 default:
3119 break;
3120 }
3121 }
3122
3123 void
3124 wm_lan_init_done(struct wm_softc *sc)
3125 {
3126 uint32_t reg = 0;
3127 int i;
3128
3129 /* wait for eeprom to reload */
3130 switch (sc->sc_type) {
3131 case WM_T_ICH10:
3132 case WM_T_PCH:
3133 case WM_T_PCH2:
3134 case WM_T_PCH_LPT:
3135 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3136 reg = CSR_READ(sc, WMREG_STATUS);
3137 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3138 break;
3139 delay(100);
3140 }
3141 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3142 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3143 "complete\n", device_xname(sc->sc_dev), __func__);
3144 }
3145 break;
3146 default:
3147 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3148 __func__);
3149 break;
3150 }
3151
3152 reg &= ~STATUS_LAN_INIT_DONE;
3153 CSR_WRITE(sc, WMREG_STATUS, reg);
3154 }
3155
3156 void
3157 wm_get_cfg_done(struct wm_softc *sc)
3158 {
3159 int mask;
3160 uint32_t reg;
3161 int i;
3162
3163 /* wait for eeprom to reload */
3164 switch (sc->sc_type) {
3165 case WM_T_82542_2_0:
3166 case WM_T_82542_2_1:
3167 /* null */
3168 break;
3169 case WM_T_82543:
3170 case WM_T_82544:
3171 case WM_T_82540:
3172 case WM_T_82545:
3173 case WM_T_82545_3:
3174 case WM_T_82546:
3175 case WM_T_82546_3:
3176 case WM_T_82541:
3177 case WM_T_82541_2:
3178 case WM_T_82547:
3179 case WM_T_82547_2:
3180 case WM_T_82573:
3181 case WM_T_82574:
3182 case WM_T_82583:
3183 /* generic */
3184 delay(10*1000);
3185 break;
3186 case WM_T_80003:
3187 case WM_T_82571:
3188 case WM_T_82572:
3189 case WM_T_82575:
3190 case WM_T_82576:
3191 case WM_T_82580:
3192 case WM_T_I350:
3193 case WM_T_I354:
3194 case WM_T_I210:
3195 case WM_T_I211:
3196 if (sc->sc_type == WM_T_82571) {
3197 /* Only 82571 shares port 0 */
3198 mask = EEMNGCTL_CFGDONE_0;
3199 } else
3200 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3201 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3202 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3203 break;
3204 delay(1000);
3205 }
3206 if (i >= WM_PHY_CFG_TIMEOUT) {
3207 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3208 device_xname(sc->sc_dev), __func__));
3209 }
3210 break;
3211 case WM_T_ICH8:
3212 case WM_T_ICH9:
3213 case WM_T_ICH10:
3214 case WM_T_PCH:
3215 case WM_T_PCH2:
3216 case WM_T_PCH_LPT:
3217 delay(10*1000);
3218 if (sc->sc_type >= WM_T_ICH10)
3219 wm_lan_init_done(sc);
3220 else
3221 wm_get_auto_rd_done(sc);
3222
3223 reg = CSR_READ(sc, WMREG_STATUS);
3224 if ((reg & STATUS_PHYRA) != 0)
3225 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3226 break;
3227 default:
3228 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3229 __func__);
3230 break;
3231 }
3232 }
3233
3234 /* Init hardware bits */
3235 void
3236 wm_initialize_hardware_bits(struct wm_softc *sc)
3237 {
3238 uint32_t tarc0, tarc1, reg;
3239
3240 /* For 82571 variant, 80003 and ICHs */
3241 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3242 || (sc->sc_type >= WM_T_80003)) {
3243
3244 /* Transmit Descriptor Control 0 */
3245 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3246 reg |= TXDCTL_COUNT_DESC;
3247 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3248
3249 /* Transmit Descriptor Control 1 */
3250 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3251 reg |= TXDCTL_COUNT_DESC;
3252 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3253
3254 /* TARC0 */
3255 tarc0 = CSR_READ(sc, WMREG_TARC0);
3256 switch (sc->sc_type) {
3257 case WM_T_82571:
3258 case WM_T_82572:
3259 case WM_T_82573:
3260 case WM_T_82574:
3261 case WM_T_82583:
3262 case WM_T_80003:
3263 /* Clear bits 30..27 */
3264 tarc0 &= ~__BITS(30, 27);
3265 break;
3266 default:
3267 break;
3268 }
3269
3270 switch (sc->sc_type) {
3271 case WM_T_82571:
3272 case WM_T_82572:
3273 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3274
3275 tarc1 = CSR_READ(sc, WMREG_TARC1);
3276 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3277 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3278 /* 8257[12] Errata No.7 */
3279 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
3280
3281 /* TARC1 bit 28 */
3282 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3283 tarc1 &= ~__BIT(28);
3284 else
3285 tarc1 |= __BIT(28);
3286 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3287
3288 /*
3289 * 8257[12] Errata No.13
3290 			 * Disable Dynamic Clock Gating.
3291 */
3292 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3293 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3294 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3295 break;
3296 case WM_T_82573:
3297 case WM_T_82574:
3298 case WM_T_82583:
3299 if ((sc->sc_type == WM_T_82574)
3300 || (sc->sc_type == WM_T_82583))
3301 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3302
3303 /* Extended Device Control */
3304 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3305 reg &= ~__BIT(23); /* Clear bit 23 */
3306 reg |= __BIT(22); /* Set bit 22 */
3307 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3308
3309 /* Device Control */
3310 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3311 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3312
3313 /* PCIe Control Register */
3314 if ((sc->sc_type == WM_T_82574)
3315 || (sc->sc_type == WM_T_82583)) {
3316 /*
3317 * Document says this bit must be set for
3318 * proper operation.
3319 */
3320 reg = CSR_READ(sc, WMREG_GCR);
3321 reg |= __BIT(22);
3322 CSR_WRITE(sc, WMREG_GCR, reg);
3323
3324 /*
3325 				 * Apply a workaround for the hardware erratum
3326 				 * documented in the errata docs.  It fixes an
3327 				 * issue where some error-prone or unreliable
3328 				 * PCIe completions occur, particularly with
3329 				 * ASPM enabled.  Without the fix, the issue
3330 				 * can cause Tx timeouts.
3331 */
3332 reg = CSR_READ(sc, WMREG_GCR2);
3333 reg |= __BIT(0);
3334 CSR_WRITE(sc, WMREG_GCR2, reg);
3335 }
3336 break;
3337 case WM_T_80003:
3338 /* TARC0 */
3339 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3340 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3341 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3342
3343 /* TARC1 bit 28 */
3344 tarc1 = CSR_READ(sc, WMREG_TARC1);
3345 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3346 tarc1 &= ~__BIT(28);
3347 else
3348 tarc1 |= __BIT(28);
3349 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3350 break;
3351 case WM_T_ICH8:
3352 case WM_T_ICH9:
3353 case WM_T_ICH10:
3354 case WM_T_PCH:
3355 case WM_T_PCH2:
3356 case WM_T_PCH_LPT:
3357 /* TARC 0 */
3358 if (sc->sc_type == WM_T_ICH8) {
3359 /* Set TARC0 bits 29 and 28 */
3360 tarc0 |= __BITS(29, 28);
3361 }
3362 /* Set TARC0 bits 23,24,26,27 */
3363 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3364
3365 /* CTRL_EXT */
3366 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3367 reg |= __BIT(22); /* Set bit 22 */
3368 /*
3369 * Enable PHY low-power state when MAC is at D3
3370 * w/o WoL
3371 */
3372 if (sc->sc_type >= WM_T_PCH)
3373 reg |= CTRL_EXT_PHYPDEN;
3374 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3375
3376 /* TARC1 */
3377 tarc1 = CSR_READ(sc, WMREG_TARC1);
3378 /* bit 28 */
3379 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3380 tarc1 &= ~__BIT(28);
3381 else
3382 tarc1 |= __BIT(28);
3383 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3384 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3385
3386 /* Device Status */
3387 if (sc->sc_type == WM_T_ICH8) {
3388 reg = CSR_READ(sc, WMREG_STATUS);
3389 reg &= ~__BIT(31);
3390 CSR_WRITE(sc, WMREG_STATUS, reg);
3391
3392 }
3393
3394 /*
3395 			 * To work around a descriptor data corruption issue
3396 			 * seen during NFS v2 UDP traffic, just disable the
3397 			 * NFS filtering capability.
3398 */
3399 reg = CSR_READ(sc, WMREG_RFCTL);
3400 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3401 CSR_WRITE(sc, WMREG_RFCTL, reg);
3402 break;
3403 default:
3404 break;
3405 }
3406 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3407
3408 /*
3409 * 8257[12] Errata No.52 and some others.
3410 * Avoid RSS Hash Value bug.
3411 */
3412 switch (sc->sc_type) {
3413 case WM_T_82571:
3414 case WM_T_82572:
3415 case WM_T_82573:
3416 case WM_T_80003:
3417 case WM_T_ICH8:
3418 reg = CSR_READ(sc, WMREG_RFCTL);
3419 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
3420 CSR_WRITE(sc, WMREG_RFCTL, reg);
3421 break;
3422 default:
3423 break;
3424 }
3425 }
3426 }
3427
3428 /*
3429 * wm_reset:
3430 *
3431 * Reset the i82542 chip.
3432 */
3433 static void
3434 wm_reset(struct wm_softc *sc)
3435 {
3436 int phy_reset = 0;
3437 int error = 0;
3438 uint32_t reg, mask;
3439
3440 /*
3441 * Allocate on-chip memory according to the MTU size.
3442 * The Packet Buffer Allocation register must be written
3443 * before the chip is reset.
3444 */
3445 switch (sc->sc_type) {
3446 case WM_T_82547:
3447 case WM_T_82547_2:
3448 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3449 PBA_22K : PBA_30K;
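		/*
		 * On the 82547 the packet buffer is shared with the Tx
		 * FIFO: the space above the Rx allocation (sc_pba, in
		 * kilobytes) up to the 40K boundary serves as the Tx FIFO
		 * window for the stall work-around.
		 */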
3450 sc->sc_txfifo_head = 0;
3451 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3452 sc->sc_txfifo_size =
3453 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3454 sc->sc_txfifo_stall = 0;
3455 break;
3456 case WM_T_82571:
3457 case WM_T_82572:
3458 	case WM_T_82575: /* XXX need special handling for jumbo frames */
3459 case WM_T_I350:
3460 case WM_T_I354:
3461 case WM_T_80003:
3462 sc->sc_pba = PBA_32K;
3463 break;
3464 case WM_T_82580:
3465 sc->sc_pba = PBA_35K;
3466 break;
3467 case WM_T_I210:
3468 case WM_T_I211:
3469 sc->sc_pba = PBA_34K;
3470 break;
3471 case WM_T_82576:
3472 sc->sc_pba = PBA_64K;
3473 break;
3474 case WM_T_82573:
3475 sc->sc_pba = PBA_12K;
3476 break;
3477 case WM_T_82574:
3478 case WM_T_82583:
3479 sc->sc_pba = PBA_20K;
3480 break;
3481 case WM_T_ICH8:
3482 /* Workaround for a bit corruption issue in FIFO memory */
3483 sc->sc_pba = PBA_8K;
3484 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3485 break;
3486 case WM_T_ICH9:
3487 case WM_T_ICH10:
3488 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3489 PBA_14K : PBA_10K;
3490 break;
3491 case WM_T_PCH:
3492 case WM_T_PCH2:
3493 case WM_T_PCH_LPT:
3494 sc->sc_pba = PBA_26K;
3495 break;
3496 default:
3497 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3498 PBA_40K : PBA_48K;
3499 break;
3500 }
3501 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3502
3503 /* Prevent the PCI-E bus from sticking */
3504 if (sc->sc_flags & WM_F_PCIE) {
3505 int timeout = 800;
3506
3507 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3508 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3509
3510 while (timeout--) {
3511 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3512 == 0)
3513 break;
3514 delay(100);
3515 }
3516 }
3517
3518 /* Set the completion timeout for interface */
3519 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3520 || (sc->sc_type == WM_T_82580)
3521 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3522 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3523 wm_set_pcie_completion_timeout(sc);
3524
3525 /* Clear interrupt */
3526 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3527
3528 /* Stop the transmit and receive processes. */
3529 CSR_WRITE(sc, WMREG_RCTL, 0);
3530 sc->sc_rctl &= ~RCTL_EN;
3531 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3532 CSR_WRITE_FLUSH(sc);
3533
3534 /* XXX set_tbi_sbp_82543() */
3535
3536 delay(10*1000);
3537
3538 /* Must acquire the MDIO ownership before MAC reset */
3539 switch (sc->sc_type) {
3540 case WM_T_82573:
3541 case WM_T_82574:
3542 case WM_T_82583:
3543 error = wm_get_hw_semaphore_82573(sc);
3544 break;
3545 default:
3546 break;
3547 }
3548
3549 /*
3550 * 82541 Errata 29? & 82547 Errata 28?
3551 * See also the description about PHY_RST bit in CTRL register
3552 * in 8254x_GBe_SDM.pdf.
3553 */
3554 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3555 CSR_WRITE(sc, WMREG_CTRL,
3556 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3557 CSR_WRITE_FLUSH(sc);
3558 delay(5000);
3559 }
3560
3561 switch (sc->sc_type) {
3562 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3563 case WM_T_82541:
3564 case WM_T_82541_2:
3565 case WM_T_82547:
3566 case WM_T_82547_2:
3567 /*
3568 * On some chipsets, a reset through a memory-mapped write
3569 * cycle can cause the chip to reset before completing the
3570 * write cycle. This causes major headaches that can be
3571 * avoided by issuing the reset via indirect register writes
3572 * through I/O space.
3573 *
3574 * So, if we successfully mapped the I/O BAR at attach time,
3575 * use that. Otherwise, try our luck with a memory-mapped
3576 * reset.
3577 */
3578 if (sc->sc_flags & WM_F_IOH_VALID)
3579 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3580 else
3581 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3582 break;
3583 case WM_T_82545_3:
3584 case WM_T_82546_3:
3585 /* Use the shadow control register on these chips. */
3586 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3587 break;
3588 case WM_T_80003:
3589 mask = swfwphysem[sc->sc_funcid];
3590 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3591 wm_get_swfw_semaphore(sc, mask);
3592 CSR_WRITE(sc, WMREG_CTRL, reg);
3593 wm_put_swfw_semaphore(sc, mask);
3594 break;
3595 case WM_T_ICH8:
3596 case WM_T_ICH9:
3597 case WM_T_ICH10:
3598 case WM_T_PCH:
3599 case WM_T_PCH2:
3600 case WM_T_PCH_LPT:
3601 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3602 if (wm_check_reset_block(sc) == 0) {
3603 /*
3604 * Gate automatic PHY configuration by hardware on
3605 * non-managed 82579
3606 */
3607 if ((sc->sc_type == WM_T_PCH2)
3608 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3609 != 0))
3610 wm_gate_hw_phy_config_ich8lan(sc, 1);
3611
3613 reg |= CTRL_PHY_RESET;
3614 phy_reset = 1;
3615 }
3616 wm_get_swfwhw_semaphore(sc);
3617 CSR_WRITE(sc, WMREG_CTRL, reg);
3618 /* Don't insert a completion barrier during reset */
3619 delay(20*1000);
3620 wm_put_swfwhw_semaphore(sc);
3621 break;
3622 case WM_T_82580:
3623 case WM_T_I350:
3624 case WM_T_I354:
3625 case WM_T_I210:
3626 case WM_T_I211:
3627 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3628 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3629 CSR_WRITE_FLUSH(sc);
3630 delay(5000);
3631 break;
3632 case WM_T_82542_2_0:
3633 case WM_T_82542_2_1:
3634 case WM_T_82543:
3635 case WM_T_82540:
3636 case WM_T_82545:
3637 case WM_T_82546:
3638 case WM_T_82571:
3639 case WM_T_82572:
3640 case WM_T_82573:
3641 case WM_T_82574:
3642 case WM_T_82575:
3643 case WM_T_82576:
3644 case WM_T_82583:
3645 default:
3646 /* Everything else can safely use the documented method. */
3647 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3648 break;
3649 }
3650
3651 /* Must release the MDIO ownership after MAC reset */
3652 switch (sc->sc_type) {
3653 case WM_T_82573:
3654 case WM_T_82574:
3655 case WM_T_82583:
3656 if (error == 0)
3657 wm_put_hw_semaphore_82573(sc);
3658 break;
3659 default:
3660 break;
3661 }
3662
3663 if (phy_reset != 0)
3664 wm_get_cfg_done(sc);
3665
3666 /* reload EEPROM */
3667 switch (sc->sc_type) {
3668 case WM_T_82542_2_0:
3669 case WM_T_82542_2_1:
3670 case WM_T_82543:
3671 case WM_T_82544:
3672 delay(10);
3673 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3674 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3675 CSR_WRITE_FLUSH(sc);
3676 delay(2000);
3677 break;
3678 case WM_T_82540:
3679 case WM_T_82545:
3680 case WM_T_82545_3:
3681 case WM_T_82546:
3682 case WM_T_82546_3:
3683 delay(5*1000);
3684 /* XXX Disable HW ARPs on ASF enabled adapters */
3685 break;
3686 case WM_T_82541:
3687 case WM_T_82541_2:
3688 case WM_T_82547:
3689 case WM_T_82547_2:
3690 delay(20000);
3691 /* XXX Disable HW ARPs on ASF enabled adapters */
3692 break;
3693 case WM_T_82571:
3694 case WM_T_82572:
3695 case WM_T_82573:
3696 case WM_T_82574:
3697 case WM_T_82583:
3698 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3699 delay(10);
3700 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3701 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3702 CSR_WRITE_FLUSH(sc);
3703 }
3704 /* check EECD_EE_AUTORD */
3705 wm_get_auto_rd_done(sc);
3706 /*
3707 * Phy configuration from NVM just starts after EECD_AUTO_RD
3708 * is set.
3709 */
3710 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3711 || (sc->sc_type == WM_T_82583))
3712 delay(25*1000);
3713 break;
3714 case WM_T_82575:
3715 case WM_T_82576:
3716 case WM_T_82580:
3717 case WM_T_I350:
3718 case WM_T_I354:
3719 case WM_T_I210:
3720 case WM_T_I211:
3721 case WM_T_80003:
3722 /* check EECD_EE_AUTORD */
3723 wm_get_auto_rd_done(sc);
3724 break;
3725 case WM_T_ICH8:
3726 case WM_T_ICH9:
3727 case WM_T_ICH10:
3728 case WM_T_PCH:
3729 case WM_T_PCH2:
3730 case WM_T_PCH_LPT:
3731 break;
3732 default:
3733 panic("%s: unknown type\n", __func__);
3734 }
3735
3736 /* Check whether EEPROM is present or not */
3737 switch (sc->sc_type) {
3738 case WM_T_82575:
3739 case WM_T_82576:
3740 #if 0 /* XXX */
3741 case WM_T_82580:
3742 #endif
3743 case WM_T_I350:
3744 case WM_T_I354:
3745 case WM_T_ICH8:
3746 case WM_T_ICH9:
3747 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3748 /* Not found */
3749 sc->sc_flags |= WM_F_EEPROM_INVALID;
3750 if ((sc->sc_type == WM_T_82575)
3751 || (sc->sc_type == WM_T_82576)
3752 || (sc->sc_type == WM_T_82580)
3753 || (sc->sc_type == WM_T_I350)
3754 || (sc->sc_type == WM_T_I354))
3755 wm_reset_init_script_82575(sc);
3756 }
3757 break;
3758 default:
3759 break;
3760 }
3761
3762 if ((sc->sc_type == WM_T_82580)
3763 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3764 /* clear global device reset status bit */
3765 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3766 }
3767
3768 /* Clear any pending interrupt events. */
3769 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
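/* ICR is read-to-clear; this read discards any latched interrupt causes. */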
3770 reg = CSR_READ(sc, WMREG_ICR);
3771
3772 /* reload sc_ctrl */
3773 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3774
3775 if (sc->sc_type == WM_T_I350)
3776 wm_set_eee_i350(sc);
3777
3778 /* dummy read from WUC */
3779 if (sc->sc_type == WM_T_PCH)
3780 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3781 /*
3782 * For PCH, this write will make sure that any noise will be detected
3783 * as a CRC error and be dropped rather than show up as a bad packet
3784 * to the DMA engine.
3785 */
3786 if (sc->sc_type == WM_T_PCH)
3787 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3788
3789 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3790 CSR_WRITE(sc, WMREG_WUC, 0);
3791
3792 /* XXX need special handling for 82580 */
3793 }
3794
3795 /*
3796 * wm_add_rxbuf:
3797 *
3798 * Add a receive buffer to the indicated descriptor.
3799 */
3800 static int
3801 wm_add_rxbuf(struct wm_softc *sc, int idx)
3802 {
3803 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3804 struct mbuf *m;
3805 int error;
3806
3807 KASSERT(WM_RX_LOCKED(sc));
3808
3809 MGETHDR(m, M_DONTWAIT, MT_DATA);
3810 if (m == NULL)
3811 return ENOBUFS;
3812
3813 MCLGET(m, M_DONTWAIT);
3814 if ((m->m_flags & M_EXT) == 0) {
3815 m_freem(m);
3816 return ENOBUFS;
3817 }
3818
3819 if (rxs->rxs_mbuf != NULL)
3820 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3821
3822 rxs->rxs_mbuf = m;
3823
3824 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3825 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3826 BUS_DMA_READ|BUS_DMA_NOWAIT);
3827 if (error) {
3828 /* XXX XXX XXX */
3829 aprint_error_dev(sc->sc_dev,
3830 "unable to load rx DMA map %d, error = %d\n",
3831 idx, error);
3832 panic("wm_add_rxbuf");
3833 }
3834
3835 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3836 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3837
3838 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3839 if ((sc->sc_rctl & RCTL_EN) != 0)
3840 WM_INIT_RXDESC(sc, idx);
3841 } else
3842 WM_INIT_RXDESC(sc, idx);
3843
3844 return 0;
3845 }
3846
3847 /*
3848 * wm_rxdrain:
3849 *
3850 * Drain the receive queue.
3851 */
3852 static void
3853 wm_rxdrain(struct wm_softc *sc)
3854 {
3855 struct wm_rxsoft *rxs;
3856 int i;
3857
3858 KASSERT(WM_RX_LOCKED(sc));
3859
3860 for (i = 0; i < WM_NRXDESC; i++) {
3861 rxs = &sc->sc_rxsoft[i];
3862 if (rxs->rxs_mbuf != NULL) {
3863 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3864 m_freem(rxs->rxs_mbuf);
3865 rxs->rxs_mbuf = NULL;
3866 }
3867 }
3868 }
3869
3870 /*
3871 * wm_init: [ifnet interface function]
3872 *
3873 * Initialize the interface.
3874 */
3875 static int
3876 wm_init(struct ifnet *ifp)
3877 {
3878 struct wm_softc *sc = ifp->if_softc;
3879 int ret;
3880
3881 WM_BOTH_LOCK(sc);
3882 ret = wm_init_locked(ifp);
3883 WM_BOTH_UNLOCK(sc);
3884
3885 return ret;
3886 }
3887
3888 static int
3889 wm_init_locked(struct ifnet *ifp)
3890 {
3891 struct wm_softc *sc = ifp->if_softc;
3892 struct wm_rxsoft *rxs;
3893 int i, j, trynum, error = 0;
3894 uint32_t reg;
3895
3896 KASSERT(WM_BOTH_LOCKED(sc));
3897 /*
3898 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3899 * There is a small but measurable benefit to avoiding the adjustment
3900 * of the descriptor so that the headers are aligned, for normal mtu,
3901 * on such platforms. One possibility is that the DMA itself is
3902 * slightly more efficient if the front of the entire packet (instead
3903 * of the front of the headers) is aligned.
3904 *
3905 * Note we must always set align_tweak to 0 if we are using
3906 * jumbo frames.
3907 */
3908 #ifdef __NO_STRICT_ALIGNMENT
3909 sc->sc_align_tweak = 0;
3910 #else
3911 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3912 sc->sc_align_tweak = 0;
3913 else
3914 sc->sc_align_tweak = 2;
3915 #endif /* __NO_STRICT_ALIGNMENT */
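/*
 * Example: with sc_align_tweak == 2 the 14-byte Ethernet header ends
 * 16 bytes into the buffer, so the IP header that follows it is 4-byte
 * aligned on strict-alignment platforms.
 */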
3916
3917 /* Cancel any pending I/O. */
3918 wm_stop_locked(ifp, 0);
3919
3920 /* update statistics before reset */
3921 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3922 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3923
3924 /* Reset the chip to a known state. */
3925 wm_reset(sc);
3926
3927 switch (sc->sc_type) {
3928 case WM_T_82571:
3929 case WM_T_82572:
3930 case WM_T_82573:
3931 case WM_T_82574:
3932 case WM_T_82583:
3933 case WM_T_80003:
3934 case WM_T_ICH8:
3935 case WM_T_ICH9:
3936 case WM_T_ICH10:
3937 case WM_T_PCH:
3938 case WM_T_PCH2:
3939 case WM_T_PCH_LPT:
3940 if (wm_check_mng_mode(sc) != 0)
3941 wm_get_hw_control(sc);
3942 break;
3943 default:
3944 break;
3945 }
3946
3947 /* Init hardware bits */
3948 wm_initialize_hardware_bits(sc);
3949
3950 /* Reset the PHY. */
3951 if (sc->sc_flags & WM_F_HAS_MII)
3952 wm_gmii_reset(sc);
3953
3954 /* Initialize the transmit descriptor ring. */
3955 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3956 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3957 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3958 sc->sc_txfree = WM_NTXDESC(sc);
3959 sc->sc_txnext = 0;
3960
3961 if (sc->sc_type < WM_T_82543) {
3962 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
3963 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
3964 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3965 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3966 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3967 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3968 } else {
3969 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
3970 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
3971 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3972 CSR_WRITE(sc, WMREG_TDH, 0);
3973 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3974 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3975
3976 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3977 /*
3978 * Don't write TDT before TCTL.EN is set.
3979 * See the document.
3980 */
3981 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_QUEUE_ENABLE
3982 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3983 | TXDCTL_WTHRESH(0));
3984 else {
3985 CSR_WRITE(sc, WMREG_TDT, 0);
3986 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) |
3987 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3988 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3989 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3990 }
3991 }
3992
3993 /* Initialize the transmit job descriptors. */
3994 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3995 sc->sc_txsoft[i].txs_mbuf = NULL;
3996 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3997 sc->sc_txsnext = 0;
3998 sc->sc_txsdirty = 0;
3999
4000 /*
4001 * Initialize the receive descriptor and receive job
4002 * descriptor rings.
4003 */
4004 if (sc->sc_type < WM_T_82543) {
4005 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4006 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4007 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4008 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4009 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4010 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4011
4012 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4013 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4014 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4015 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4016 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4017 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4018 } else {
4019 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4020 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4021 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4022 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4023 CSR_WRITE(sc, WMREG_EITR(0), 450);
4024 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4025 panic("%s: MCLBYTES %d unsupported for i82575 or higher\n", __func__, MCLBYTES);
4026 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4027 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
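/*
 * SRRCTL expresses the packet buffer size in units of
 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, which is why MCLBYTES
 * must be a multiple of that unit (checked above).
 */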
4028 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4029 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4030 | RXDCTL_WTHRESH(1));
4031 } else {
4032 CSR_WRITE(sc, WMREG_RDH, 0);
4033 CSR_WRITE(sc, WMREG_RDT, 0);
4034 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4035 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
4036 }
4037 }
4038 for (i = 0; i < WM_NRXDESC; i++) {
4039 rxs = &sc->sc_rxsoft[i];
4040 if (rxs->rxs_mbuf == NULL) {
4041 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4042 log(LOG_ERR, "%s: unable to allocate or map "
4043 "rx buffer %d, error = %d\n",
4044 device_xname(sc->sc_dev), i, error);
4045 /*
4046 * XXX Should attempt to run with fewer receive
4047 * XXX buffers instead of just failing.
4048 */
4049 wm_rxdrain(sc);
4050 goto out;
4051 }
4052 } else {
4053 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4054 WM_INIT_RXDESC(sc, i);
4055 /*
4056 * For 82575 and newer devices, the RX descriptors
4057 * must be initialized after the setting of RCTL.EN in
4058 * wm_set_filter()
4059 */
4060 }
4061 }
4062 sc->sc_rxptr = 0;
4063 sc->sc_rxdiscard = 0;
4064 WM_RXCHAIN_RESET(sc);
4065
4066 /*
4067 * Clear out the VLAN table -- we don't use it (yet).
4068 */
4069 CSR_WRITE(sc, WMREG_VET, 0);
4070 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4071 trynum = 10; /* Due to hw errata */
4072 else
4073 trynum = 1;
4074 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4075 for (j = 0; j < trynum; j++)
4076 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4077
4078 /*
4079 * Set up flow-control parameters.
4080 *
4081 * XXX Values could probably stand some tuning.
4082 */
4083 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4084 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4085 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
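/*
 * These are the fixed 802.3x PAUSE parameters: FCAL/FCAH hold the
 * reserved multicast address 01:80:c2:00:00:01 and FCT holds the
 * MAC control ethertype, 0x8808.
 */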
4086 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4087 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4088 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4089 }
4090
4091 sc->sc_fcrtl = FCRTL_DFLT;
4092 if (sc->sc_type < WM_T_82543) {
4093 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4094 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4095 } else {
4096 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4097 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4098 }
4099
4100 if (sc->sc_type == WM_T_80003)
4101 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4102 else
4103 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4104
4105 /* Writes the control register. */
4106 wm_set_vlan(sc);
4107
4108 if (sc->sc_flags & WM_F_HAS_MII) {
4109 int val;
4110
4111 switch (sc->sc_type) {
4112 case WM_T_80003:
4113 case WM_T_ICH8:
4114 case WM_T_ICH9:
4115 case WM_T_ICH10:
4116 case WM_T_PCH:
4117 case WM_T_PCH2:
4118 case WM_T_PCH_LPT:
4119 /*
4120 * Set the mac to wait the maximum time between each
4121 * iteration and increase the max iterations when
4122 * polling the phy; this fixes erroneous timeouts at
4123 * 10Mbps.
4124 */
4125 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4126 0xFFFF);
4127 val = wm_kmrn_readreg(sc,
4128 KUMCTRLSTA_OFFSET_INB_PARAM);
4129 val |= 0x3F;
4130 wm_kmrn_writereg(sc,
4131 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4132 break;
4133 default:
4134 break;
4135 }
4136
4137 if (sc->sc_type == WM_T_80003) {
4138 val = CSR_READ(sc, WMREG_CTRL_EXT);
4139 val &= ~CTRL_EXT_LINK_MODE_MASK;
4140 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4141
4142 /* Bypass RX and TX FIFO's */
4143 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4144 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4145 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4146 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4147 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4148 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4149 }
4150 }
4151 #if 0
4152 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4153 #endif
4154
4155 /* Set up checksum offload parameters. */
4156 reg = CSR_READ(sc, WMREG_RXCSUM);
4157 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4158 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4159 reg |= RXCSUM_IPOFL;
4160 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4161 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4162 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4163 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4164 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4165
4166 /* Set up the interrupt registers. */
4167 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4168 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4169 ICR_RXO | ICR_RXT0;
4170 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4171
4172 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4173 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4174 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4175 reg = CSR_READ(sc, WMREG_KABGTXD);
4176 reg |= KABGTXD_BGSQLBIAS;
4177 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4178 }
4179
4180 /* Set up the inter-packet gap. */
4181 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4182
4183 if (sc->sc_type >= WM_T_82543) {
4184 /*
4185 * Set up the interrupt throttling register (units of 256ns)
4186 * Note that a footnote in Intel's documentation says this
4187 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4188 * or 10Mbit mode. Empirically, the same appears to hold
4189 * for the 1024ns units of the other
4190 * interrupt-related timer registers -- so, really, we ought
4191 * to divide this value by 4 when the link speed is low.
4192 *
4193 * XXX implement this division at link speed change!
4194 */
4195
4196 /*
4197 * For N interrupts/sec, set this value to:
4198 * 1000000000 / (N * 256). Note that we set the
4199 * absolute and packet timer values to this value
4200 * divided by 4 to get "simple timer" behavior.
4201 */
4202
4203 sc->sc_itr = 1500; /* 2604 ints/sec */
4204 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
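/*
 * Worked example: 1000000000 / (1500 * 256) ~= 2604 interrupts/sec,
 * which is where the figure in the comment above comes from.
 */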
4205 }
4206
4207 /* Set the VLAN ethernetype. */
4208 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4209
4210 /*
4211 * Set up the transmit control register; we start out with
4212 * a collision distance suitable for FDX, but update it when
4213 * we resolve the media type.
4214 */
4215 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4216 | TCTL_CT(TX_COLLISION_THRESHOLD)
4217 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4218 if (sc->sc_type >= WM_T_82571)
4219 sc->sc_tctl |= TCTL_MULR;
4220 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4221
4222 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4223 /* Write TDT after TCTL.EN is set. See the document. */
4224 CSR_WRITE(sc, WMREG_TDT, 0);
4225 }
4226
4227 if (sc->sc_type == WM_T_80003) {
4228 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4229 reg &= ~TCTL_EXT_GCEX_MASK;
4230 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4231 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4232 }
4233
4234 /* Set the media. */
4235 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4236 goto out;
4237
4238 /* Configure for OS presence */
4239 wm_init_manageability(sc);
4240
4241 /*
4242 * Set up the receive control register; we actually program
4243 * the register when we set the receive filter. Use multicast
4244 * address offset type 0.
4245 *
4246 * Only the i82544 has the ability to strip the incoming
4247 * CRC, so we don't enable that feature.
4248 */
4249 sc->sc_mchash_type = 0;
4250 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4251 | RCTL_MO(sc->sc_mchash_type);
4252
4253 /*
4254 * The I350 has a bug where it always strips the CRC whether
4255 * asked to or not. So ask for stripped CRC here and cope in rxeof.
4256 */
4257 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4258 || (sc->sc_type == WM_T_I210))
4259 sc->sc_rctl |= RCTL_SECRC;
4260
4261 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4262 && (ifp->if_mtu > ETHERMTU)) {
4263 sc->sc_rctl |= RCTL_LPE;
4264 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4265 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4266 }
4267
4268 if (MCLBYTES == 2048) {
4269 sc->sc_rctl |= RCTL_2k;
4270 } else {
4271 if (sc->sc_type >= WM_T_82543) {
4272 switch (MCLBYTES) {
4273 case 4096:
4274 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4275 break;
4276 case 8192:
4277 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4278 break;
4279 case 16384:
4280 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4281 break;
4282 default:
4283 panic("wm_init: MCLBYTES %d unsupported",
4284 MCLBYTES);
4285 break;
4286 }
4287 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4288 }
4289
4290 /* Set the receive filter. */
4291 wm_set_filter(sc);
4292
4293 /* Enable ECC */
4294 switch (sc->sc_type) {
4295 case WM_T_82571:
4296 reg = CSR_READ(sc, WMREG_PBA_ECC);
4297 reg |= PBA_ECC_CORR_EN;
4298 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4299 break;
4300 case WM_T_PCH_LPT:
4301 reg = CSR_READ(sc, WMREG_PBECCSTS);
4302 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4303 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4304
4305 reg = CSR_READ(sc, WMREG_CTRL);
4306 reg |= CTRL_MEHE;
4307 CSR_WRITE(sc, WMREG_CTRL, reg);
4308 break;
4309 default:
4310 break;
4311 }
4312
4313 /* On 575 and later set RDT only if RX enabled */
4314 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4315 for (i = 0; i < WM_NRXDESC; i++)
4316 WM_INIT_RXDESC(sc, i);
4317
4318 sc->sc_stopping = false;
4319
4320 /* Start the one second link check clock. */
4321 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4322
4323 /* ...all done! */
4324 ifp->if_flags |= IFF_RUNNING;
4325 ifp->if_flags &= ~IFF_OACTIVE;
4326
4327 out:
4328 sc->sc_if_flags = ifp->if_flags;
4329 if (error)
4330 log(LOG_ERR, "%s: interface not running\n",
4331 device_xname(sc->sc_dev));
4332 return error;
4333 }
4334
4335 /*
4336 * wm_stop: [ifnet interface function]
4337 *
4338 * Stop transmission on the interface.
4339 */
4340 static void
4341 wm_stop(struct ifnet *ifp, int disable)
4342 {
4343 struct wm_softc *sc = ifp->if_softc;
4344
4345 WM_BOTH_LOCK(sc);
4346 wm_stop_locked(ifp, disable);
4347 WM_BOTH_UNLOCK(sc);
4348 }
4349
4350 static void
4351 wm_stop_locked(struct ifnet *ifp, int disable)
4352 {
4353 struct wm_softc *sc = ifp->if_softc;
4354 struct wm_txsoft *txs;
4355 int i;
4356
4357 KASSERT(WM_BOTH_LOCKED(sc));
4358
4359 sc->sc_stopping = true;
4360
4361 /* Stop the one second clock. */
4362 callout_stop(&sc->sc_tick_ch);
4363
4364 /* Stop the 82547 Tx FIFO stall check timer. */
4365 if (sc->sc_type == WM_T_82547)
4366 callout_stop(&sc->sc_txfifo_ch);
4367
4368 if (sc->sc_flags & WM_F_HAS_MII) {
4369 /* Down the MII. */
4370 mii_down(&sc->sc_mii);
4371 } else {
4372 #if 0
4373 /* Should we clear PHY's status properly? */
4374 wm_reset(sc);
4375 #endif
4376 }
4377
4378 /* Stop the transmit and receive processes. */
4379 CSR_WRITE(sc, WMREG_TCTL, 0);
4380 CSR_WRITE(sc, WMREG_RCTL, 0);
4381 sc->sc_rctl &= ~RCTL_EN;
4382
4383 /*
4384 * Clear the interrupt mask to ensure the device cannot assert its
4385 * interrupt line.
4386 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4387 * any currently pending or shared interrupt.
4388 */
4389 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4390 sc->sc_icr = 0;
4391
4392 /* Release any queued transmit buffers. */
4393 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4394 txs = &sc->sc_txsoft[i];
4395 if (txs->txs_mbuf != NULL) {
4396 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4397 m_freem(txs->txs_mbuf);
4398 txs->txs_mbuf = NULL;
4399 }
4400 }
4401
4402 /* Mark the interface as down and cancel the watchdog timer. */
4403 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4404 ifp->if_timer = 0;
4405
4406 if (disable)
4407 wm_rxdrain(sc);
4408
4409 #if 0 /* notyet */
4410 if (sc->sc_type >= WM_T_82544)
4411 CSR_WRITE(sc, WMREG_WUC, 0);
4412 #endif
4413 }
4414
4415 /*
4416 * wm_tx_offload:
4417 *
4418 * Set up TCP/IP checksumming parameters for the
4419 * specified packet.
4420 */
4421 static int
4422 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4423 uint8_t *fieldsp)
4424 {
4425 struct mbuf *m0 = txs->txs_mbuf;
4426 struct livengood_tcpip_ctxdesc *t;
4427 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4428 uint32_t ipcse;
4429 struct ether_header *eh;
4430 int offset, iphl;
4431 uint8_t fields;
4432
4433 /*
4434 * XXX It would be nice if the mbuf pkthdr had offset
4435 * fields for the protocol headers.
4436 */
4437
4438 eh = mtod(m0, struct ether_header *);
4439 switch (ntohs(eh->ether_type)) {
4440 case ETHERTYPE_IP:
4441 case ETHERTYPE_IPV6:
4442 offset = ETHER_HDR_LEN;
4443 break;
4444
4445 case ETHERTYPE_VLAN:
4446 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4447 break;
4448
4449 default:
4450 /*
4451 * Don't support this protocol or encapsulation.
4452 */
4453 *fieldsp = 0;
4454 *cmdp = 0;
4455 return 0;
4456 }
4457
4458 if ((m0->m_pkthdr.csum_flags &
4459 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4460 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4461 } else {
4462 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4463 }
4464 ipcse = offset + iphl - 1;
4465
4466 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4467 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4468 seg = 0;
4469 fields = 0;
4470
4471 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4472 int hlen = offset + iphl;
4473 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4474
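/*
 * For TSO the hardware recomputes the IP total length and the TCP
 * checksum for each segment it emits, so ip_len (or ip6_plen) must be
 * zeroed and th_sum seeded with a pseudo-header checksum that
 * deliberately excludes the length.
 */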
4475 if (__predict_false(m0->m_len <
4476 (hlen + sizeof(struct tcphdr)))) {
4477 /*
4478 * TCP/IP headers are not in the first mbuf; we need
4479 * to do this the slow and painful way. Let's just
4480 * hope this doesn't happen very often.
4481 */
4482 struct tcphdr th;
4483
4484 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4485
4486 m_copydata(m0, hlen, sizeof(th), &th);
4487 if (v4) {
4488 struct ip ip;
4489
4490 m_copydata(m0, offset, sizeof(ip), &ip);
4491 ip.ip_len = 0;
4492 m_copyback(m0,
4493 offset + offsetof(struct ip, ip_len),
4494 sizeof(ip.ip_len), &ip.ip_len);
4495 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4496 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4497 } else {
4498 struct ip6_hdr ip6;
4499
4500 m_copydata(m0, offset, sizeof(ip6), &ip6);
4501 ip6.ip6_plen = 0;
4502 m_copyback(m0,
4503 offset + offsetof(struct ip6_hdr, ip6_plen),
4504 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4505 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4506 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4507 }
4508 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4509 sizeof(th.th_sum), &th.th_sum);
4510
4511 hlen += th.th_off << 2;
4512 } else {
4513 /*
4514 * TCP/IP headers are in the first mbuf; we can do
4515 * this the easy way.
4516 */
4517 struct tcphdr *th;
4518
4519 if (v4) {
4520 struct ip *ip =
4521 (void *)(mtod(m0, char *) + offset);
4522 th = (void *)(mtod(m0, char *) + hlen);
4523
4524 ip->ip_len = 0;
4525 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4526 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4527 } else {
4528 struct ip6_hdr *ip6 =
4529 (void *)(mtod(m0, char *) + offset);
4530 th = (void *)(mtod(m0, char *) + hlen);
4531
4532 ip6->ip6_plen = 0;
4533 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4534 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4535 }
4536 hlen += th->th_off << 2;
4537 }
4538
4539 if (v4) {
4540 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4541 cmdlen |= WTX_TCPIP_CMD_IP;
4542 } else {
4543 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4544 ipcse = 0;
4545 }
4546 cmd |= WTX_TCPIP_CMD_TSE;
4547 cmdlen |= WTX_TCPIP_CMD_TSE |
4548 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4549 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4550 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4551 }
4552
4553 /*
4554 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4555 * offload feature, if we load the context descriptor, we
4556 * MUST provide valid values for IPCSS and TUCSS fields.
4557 */
4558
4559 ipcs = WTX_TCPIP_IPCSS(offset) |
4560 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4561 WTX_TCPIP_IPCSE(ipcse);
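/*
 * Example: for an untagged IPv4 packet with a 20-byte header,
 * IPCSS = 14 (start of the IP header), IPCSO = 24 (14 +
 * offsetof(struct ip, ip_sum)) and IPCSE = 33 (last IP header byte).
 */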
4562 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
4563 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
4564 fields |= WTX_IXSM;
4565 }
4566
4567 offset += iphl;
4568
4569 if (m0->m_pkthdr.csum_flags &
4570 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
4571 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4572 fields |= WTX_TXSM;
4573 tucs = WTX_TCPIP_TUCSS(offset) |
4574 WTX_TCPIP_TUCSO(offset +
4575 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
4576 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4577 } else if ((m0->m_pkthdr.csum_flags &
4578 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
4579 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4580 fields |= WTX_TXSM;
4581 tucs = WTX_TCPIP_TUCSS(offset) |
4582 WTX_TCPIP_TUCSO(offset +
4583 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
4584 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4585 } else {
4586 /* Just initialize it to a valid TCP context. */
4587 tucs = WTX_TCPIP_TUCSS(offset) |
4588 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
4589 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4590 }
4591
4592 /* Fill in the context descriptor. */
4593 t = (struct livengood_tcpip_ctxdesc *)
4594 &sc->sc_txdescs[sc->sc_txnext];
4595 t->tcpip_ipcs = htole32(ipcs);
4596 t->tcpip_tucs = htole32(tucs);
4597 t->tcpip_cmdlen = htole32(cmdlen);
4598 t->tcpip_seg = htole32(seg);
4599 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4600
4601 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4602 txs->txs_ndesc++;
4603
4604 *cmdp = cmd;
4605 *fieldsp = fields;
4606
4607 return 0;
4608 }
4609
4610 static void
4611 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
4612 {
4613 struct mbuf *m;
4614 int i;
4615
4616 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
4617 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
4618 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
4619 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
4620 m->m_data, m->m_len, m->m_flags);
4621 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
4622 i, i == 1 ? "" : "s");
4623 }
4624
4625 /*
4626 * wm_82547_txfifo_stall:
4627 *
4628 * Callout used to wait for the 82547 Tx FIFO to drain,
4629 * reset the FIFO pointers, and restart packet transmission.
4630 */
4631 static void
4632 wm_82547_txfifo_stall(void *arg)
4633 {
4634 struct wm_softc *sc = arg;
4635 #ifndef WM_MPSAFE
4636 int s;
4637
4638 s = splnet();
4639 #endif
4640 WM_TX_LOCK(sc);
4641
4642 if (sc->sc_stopping)
4643 goto out;
4644
4645 if (sc->sc_txfifo_stall) {
4646 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
4647 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
4648 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
4649 /*
4650 * Packets have drained. Stop transmitter, reset
4651 * FIFO pointers, restart transmitter, and kick
4652 * the packet queue.
4653 */
4654 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
4655 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
4656 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
4657 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
4658 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
4659 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
4660 CSR_WRITE(sc, WMREG_TCTL, tctl);
4661 CSR_WRITE_FLUSH(sc);
4662
4663 sc->sc_txfifo_head = 0;
4664 sc->sc_txfifo_stall = 0;
4665 wm_start_locked(&sc->sc_ethercom.ec_if);
4666 } else {
4667 /*
4668 * Still waiting for packets to drain; try again in
4669 * another tick.
4670 */
4671 callout_schedule(&sc->sc_txfifo_ch, 1);
4672 }
4673 }
4674
4675 out:
4676 WM_TX_UNLOCK(sc);
4677 #ifndef WM_MPSAFE
4678 splx(s);
4679 #endif
4680 }
4681
4682 /*
4683 * wm_82547_txfifo_bugchk:
4684 *
4685 * Check for bug condition in the 82547 Tx FIFO. We need to
4686 * prevent enqueueing a packet that would wrap around the end
4687 * of the Tx FIFO ring buffer, otherwise the chip will croak.
4688 *
4689 * We do this by checking the amount of space before the end
4690 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
4691 * the Tx FIFO, wait for all remaining packets to drain, reset
4692 * the internal FIFO pointers to the beginning, and restart
4693 * transmission on the interface.
4694 */
4695 #define WM_FIFO_HDR 0x10
4696 #define WM_82547_PAD_LEN 0x3e0
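/*
 * Example: a 1514-byte frame occupies roundup(1514 + 0x10, 0x10) = 1536
 * FIFO bytes and stalls the queue when 1536 >= 0x3e0 + space, i.e. when
 * 544 bytes or fewer remain before the end of the FIFO.
 */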
4697 static int
4698 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
4699 {
4700 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
4701 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
4702
4703 /* Just return if already stalled. */
4704 if (sc->sc_txfifo_stall)
4705 return 1;
4706
4707 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4708 /* Stall only occurs in half-duplex mode. */
4709 goto send_packet;
4710 }
4711
4712 if (len >= WM_82547_PAD_LEN + space) {
4713 sc->sc_txfifo_stall = 1;
4714 callout_schedule(&sc->sc_txfifo_ch, 1);
4715 return 1;
4716 }
4717
4718 send_packet:
4719 sc->sc_txfifo_head += len;
4720 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
4721 sc->sc_txfifo_head -= sc->sc_txfifo_size;
4722
4723 return 0;
4724 }
4725
4726 /*
4727 * wm_start: [ifnet interface function]
4728 *
4729 * Start packet transmission on the interface.
4730 */
4731 static void
4732 wm_start(struct ifnet *ifp)
4733 {
4734 struct wm_softc *sc = ifp->if_softc;
4735
4736 WM_TX_LOCK(sc);
4737 if (!sc->sc_stopping)
4738 wm_start_locked(ifp);
4739 WM_TX_UNLOCK(sc);
4740 }
4741
4742 static void
4743 wm_start_locked(struct ifnet *ifp)
4744 {
4745 struct wm_softc *sc = ifp->if_softc;
4746 struct mbuf *m0;
4747 struct m_tag *mtag;
4748 struct wm_txsoft *txs;
4749 bus_dmamap_t dmamap;
4750 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
4751 bus_addr_t curaddr;
4752 bus_size_t seglen, curlen;
4753 uint32_t cksumcmd;
4754 uint8_t cksumfields;
4755
4756 KASSERT(WM_TX_LOCKED(sc));
4757
4758 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4759 return;
4760
4761 /* Remember the previous number of free descriptors. */
4762 ofree = sc->sc_txfree;
4763
4764 /*
4765 * Loop through the send queue, setting up transmit descriptors
4766 * until we drain the queue, or use up all available transmit
4767 * descriptors.
4768 */
4769 for (;;) {
4770 m0 = NULL;
4771
4772 /* Get a work queue entry. */
4773 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
4774 wm_txintr(sc);
4775 if (sc->sc_txsfree == 0) {
4776 DPRINTF(WM_DEBUG_TX,
4777 ("%s: TX: no free job descriptors\n",
4778 device_xname(sc->sc_dev)));
4779 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
4780 break;
4781 }
4782 }
4783
4784 /* Grab a packet off the queue. */
4785 IFQ_DEQUEUE(&ifp->if_snd, m0);
4786 if (m0 == NULL)
4787 break;
4788
4789 DPRINTF(WM_DEBUG_TX,
4790 ("%s: TX: have packet to transmit: %p\n",
4791 device_xname(sc->sc_dev), m0));
4792
4793 txs = &sc->sc_txsoft[sc->sc_txsnext];
4794 dmamap = txs->txs_dmamap;
4795
4796 use_tso = (m0->m_pkthdr.csum_flags &
4797 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
4798
4799 /*
4800 * So says the Linux driver:
4801 * The controller does a simple calculation to make sure
4802 * there is enough room in the FIFO before initiating the
4803 * DMA for each buffer. The calc is:
4804 * 4 = ceil(buffer len / MSS)
4805 * To make sure we don't overrun the FIFO, adjust the max
4806 * buffer len if the MSS drops.
4807 */
4808 dmamap->dm_maxsegsz =
4809 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
4810 ? m0->m_pkthdr.segsz << 2
4811 : WTX_MAX_LEN;
4812
4813 /*
4814 * Load the DMA map. If this fails, the packet either
4815 * didn't fit in the allotted number of segments, or we
4816 * were short on resources. For the too-many-segments
4817 * case, we simply report an error and drop the packet,
4818 * since we can't sanely copy a jumbo packet to a single
4819 * buffer.
4820 */
4821 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
4822 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
4823 if (error) {
4824 if (error == EFBIG) {
4825 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4826 log(LOG_ERR, "%s: Tx packet consumes too many "
4827 "DMA segments, dropping...\n",
4828 device_xname(sc->sc_dev));
4829 wm_dump_mbuf_chain(sc, m0);
4830 m_freem(m0);
4831 continue;
4832 }
4833 /* Short on resources, just stop for now. */
4834 DPRINTF(WM_DEBUG_TX,
4835 ("%s: TX: dmamap load failed: %d\n",
4836 device_xname(sc->sc_dev), error));
4837 break;
4838 }
4839
4840 segs_needed = dmamap->dm_nsegs;
4841 if (use_tso) {
4842 /* For sentinel descriptor; see below. */
4843 segs_needed++;
4844 }
4845
4846 /*
4847 * Ensure we have enough descriptors free to describe
4848 * the packet. Note, we always reserve one descriptor
4849 * at the end of the ring due to the semantics of the
4850 * TDT register, plus one more in the event we need
4851 * to load offload context.
4852 */
4853 if (segs_needed > sc->sc_txfree - 2) {
4854 /*
4855 * Not enough free descriptors to transmit this
4856 * packet. We haven't committed anything yet,
4857 * so just unload the DMA map, put the packet
4858 * back on the queue, and punt. Notify the upper
4859 * layer that there are no more slots left.
4860 */
4861 DPRINTF(WM_DEBUG_TX,
4862 ("%s: TX: need %d (%d) descriptors, have %d\n",
4863 device_xname(sc->sc_dev), dmamap->dm_nsegs,
4864 segs_needed, sc->sc_txfree - 1));
4865 ifp->if_flags |= IFF_OACTIVE;
4866 bus_dmamap_unload(sc->sc_dmat, dmamap);
4867 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
4868 break;
4869 }
4870
4871 /*
4872 * Check for 82547 Tx FIFO bug. We need to do this
4873 * once we know we can transmit the packet, since we
4874 * do some internal FIFO space accounting here.
4875 */
4876 if (sc->sc_type == WM_T_82547 &&
4877 wm_82547_txfifo_bugchk(sc, m0)) {
4878 DPRINTF(WM_DEBUG_TX,
4879 ("%s: TX: 82547 Tx FIFO bug detected\n",
4880 device_xname(sc->sc_dev)));
4881 ifp->if_flags |= IFF_OACTIVE;
4882 bus_dmamap_unload(sc->sc_dmat, dmamap);
4883 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
4884 break;
4885 }
4886
4887 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
4888
4889 DPRINTF(WM_DEBUG_TX,
4890 ("%s: TX: packet has %d (%d) DMA segments\n",
4891 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
4892
4893 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
4894
4895 /*
4896 * Store a pointer to the packet so that we can free it
4897 * later.
4898 *
4899 * Initially, we take the number of descriptors the
4900 * packet uses to be the number of DMA segments. This may be
4901 * incremented by 1 if we do checksum offload (a descriptor
4902 * is used to set the checksum context).
4903 */
4904 txs->txs_mbuf = m0;
4905 txs->txs_firstdesc = sc->sc_txnext;
4906 txs->txs_ndesc = segs_needed;
4907
4908 /* Set up offload parameters for this packet. */
4909 if (m0->m_pkthdr.csum_flags &
4910 (M_CSUM_TSOv4|M_CSUM_TSOv6|
4911 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
4912 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
4913 if (wm_tx_offload(sc, txs, &cksumcmd,
4914 &cksumfields) != 0) {
4915 /* Error message already displayed. */
4916 bus_dmamap_unload(sc->sc_dmat, dmamap);
4917 continue;
4918 }
4919 } else {
4920 cksumcmd = 0;
4921 cksumfields = 0;
4922 }
4923
4924 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
4925
4926 /* Sync the DMA map. */
4927 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
4928 BUS_DMASYNC_PREWRITE);
4929
4930 /* Initialize the transmit descriptor. */
4931 for (nexttx = sc->sc_txnext, seg = 0;
4932 seg < dmamap->dm_nsegs; seg++) {
4933 for (seglen = dmamap->dm_segs[seg].ds_len,
4934 curaddr = dmamap->dm_segs[seg].ds_addr;
4935 seglen != 0;
4936 curaddr += curlen, seglen -= curlen,
4937 nexttx = WM_NEXTTX(sc, nexttx)) {
4938 curlen = seglen;
4939
4940 /*
4941 * So says the Linux driver:
4942 * Work around for premature descriptor
4943 * write-backs in TSO mode. Append a
4944 * 4-byte sentinel descriptor.
4945 */
4946 if (use_tso &&
4947 seg == dmamap->dm_nsegs - 1 &&
4948 curlen > 8)
4949 curlen -= 4;
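/*
 * Note: shortening the final segment by 4 bytes forces one
 * more pass through this loop, which emits the small trailing
 * descriptor that acts as the sentinel.
 */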
4950
4951 wm_set_dma_addr(
4952 &sc->sc_txdescs[nexttx].wtx_addr,
4953 curaddr);
4954 sc->sc_txdescs[nexttx].wtx_cmdlen =
4955 htole32(cksumcmd | curlen);
4956 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
4957 0;
4958 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
4959 cksumfields;
4960 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
4961 lasttx = nexttx;
4962
4963 DPRINTF(WM_DEBUG_TX,
4964 ("%s: TX: desc %d: low %#" PRIx64 ", "
4965 "len %#04zx\n",
4966 device_xname(sc->sc_dev), nexttx,
4967 (uint64_t)curaddr, curlen));
4968 }
4969 }
4970
4971 KASSERT(lasttx != -1);
4972
4973 /*
4974 * Set up the command byte on the last descriptor of
4975 * the packet. If we're in the interrupt delay window,
4976 * delay the interrupt.
4977 */
4978 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4979 htole32(WTX_CMD_EOP | WTX_CMD_RS);
4980
4981 /*
4982 * If VLANs are enabled and the packet has a VLAN tag, set
4983 * up the descriptor to encapsulate the packet for us.
4984 *
4985 * This is only valid on the last descriptor of the packet.
4986 */
4987 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4988 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4989 htole32(WTX_CMD_VLE);
4990 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
4991 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
4992 }
4993
4994 txs->txs_lastdesc = lasttx;
4995
4996 DPRINTF(WM_DEBUG_TX,
4997 ("%s: TX: desc %d: cmdlen 0x%08x\n",
4998 device_xname(sc->sc_dev),
4999 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5000
5001 /* Sync the descriptors we're using. */
5002 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5003 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5004
5005 /* Give the packet to the chip. */
5006 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5007
5008 DPRINTF(WM_DEBUG_TX,
5009 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5010
5011 DPRINTF(WM_DEBUG_TX,
5012 ("%s: TX: finished transmitting packet, job %d\n",
5013 device_xname(sc->sc_dev), sc->sc_txsnext));
5014
5015 /* Advance the tx pointer. */
5016 sc->sc_txfree -= txs->txs_ndesc;
5017 sc->sc_txnext = nexttx;
5018
5019 sc->sc_txsfree--;
5020 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5021
5022 /* Pass the packet to any BPF listeners. */
5023 bpf_mtap(ifp, m0);
5024 }
5025
5026 if (m0 != NULL) {
5027 ifp->if_flags |= IFF_OACTIVE;
5028 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5029 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5030 m_freem(m0);
5031 }
5032
5033 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5034 /* No more slots; notify upper layer. */
5035 ifp->if_flags |= IFF_OACTIVE;
5036 }
5037
5038 if (sc->sc_txfree != ofree) {
5039 /* Set a watchdog timer in case the chip flakes out. */
5040 ifp->if_timer = 5;
5041 }
5042 }
5043
5044 /*
5045 * wm_nq_tx_offload:
5046 *
5047 * Set up TCP/IP checksumming parameters for the
5048 * specified packet, for NEWQUEUE devices
5049 */
5050 static int
5051 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
5052 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
5053 {
5054 struct mbuf *m0 = txs->txs_mbuf;
5055 struct m_tag *mtag;
5056 uint32_t vl_len, mssidx, cmdc;
5057 struct ether_header *eh;
5058 int offset, iphl;
5059
5060 /*
5061 * XXX It would be nice if the mbuf pkthdr had offset
5062 * fields for the protocol headers.
5063 */
5064 *cmdlenp = 0;
5065 *fieldsp = 0;
5066
5067 eh = mtod(m0, struct ether_header *);
5068 switch (ntohs(eh->ether_type)) {
5069 case ETHERTYPE_IP:
5070 case ETHERTYPE_IPV6:
5071 offset = ETHER_HDR_LEN;
5072 break;
5073
5074 case ETHERTYPE_VLAN:
5075 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5076 break;
5077
5078 default:
5079 /* Don't support this protocol or encapsulation. */
5080 *do_csum = false;
5081 return 0;
5082 }
5083 *do_csum = true;
5084 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
5085 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
5086
5087 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
5088 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
5089
5090 if ((m0->m_pkthdr.csum_flags &
5091 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
5092 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5093 } else {
5094 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5095 }
5096 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
5097 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
5098
5099 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5100 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
5101 << NQTXC_VLLEN_VLAN_SHIFT);
5102 *cmdlenp |= NQTX_CMD_VLE;
5103 }
5104
5105 mssidx = 0;
5106
5107 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5108 int hlen = offset + iphl;
5109 int tcp_hlen;
5110 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5111
5112 if (__predict_false(m0->m_len <
5113 (hlen + sizeof(struct tcphdr)))) {
5114 /*
5115 * TCP/IP headers are not in the first mbuf; we need
5116 * to do this the slow and painful way. Let's just
5117 * hope this doesn't happen very often.
5118 */
5119 struct tcphdr th;
5120
5121 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5122
5123 m_copydata(m0, hlen, sizeof(th), &th);
5124 if (v4) {
5125 struct ip ip;
5126
5127 m_copydata(m0, offset, sizeof(ip), &ip);
5128 ip.ip_len = 0;
5129 m_copyback(m0,
5130 offset + offsetof(struct ip, ip_len),
5131 sizeof(ip.ip_len), &ip.ip_len);
5132 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5133 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5134 } else {
5135 struct ip6_hdr ip6;
5136
5137 m_copydata(m0, offset, sizeof(ip6), &ip6);
5138 ip6.ip6_plen = 0;
5139 m_copyback(m0,
5140 offset + offsetof(struct ip6_hdr, ip6_plen),
5141 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5142 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5143 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5144 }
5145 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5146 sizeof(th.th_sum), &th.th_sum);
5147
5148 tcp_hlen = th.th_off << 2;
5149 } else {
5150 /*
5151 * TCP/IP headers are in the first mbuf; we can do
5152 * this the easy way.
5153 */
5154 struct tcphdr *th;
5155
5156 if (v4) {
5157 struct ip *ip =
5158 (void *)(mtod(m0, char *) + offset);
5159 th = (void *)(mtod(m0, char *) + hlen);
5160
5161 ip->ip_len = 0;
5162 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5163 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5164 } else {
5165 struct ip6_hdr *ip6 =
5166 (void *)(mtod(m0, char *) + offset);
5167 th = (void *)(mtod(m0, char *) + hlen);
5168
5169 ip6->ip6_plen = 0;
5170 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5171 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5172 }
5173 tcp_hlen = th->th_off << 2;
5174 }
5175 hlen += tcp_hlen;
5176 *cmdlenp |= NQTX_CMD_TSE;
5177
5178 if (v4) {
5179 WM_EVCNT_INCR(&sc->sc_ev_txtso);
5180 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
5181 } else {
5182 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5183 *fieldsp |= NQTXD_FIELDS_TUXSM;
5184 }
5185 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
5186 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5187 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
5188 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
5189 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
5190 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
5191 } else {
5192 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
5193 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5194 }
5195
5196 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
5197 *fieldsp |= NQTXD_FIELDS_IXSM;
5198 cmdc |= NQTXC_CMD_IP4;
5199 }
5200
5201 if (m0->m_pkthdr.csum_flags &
5202 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5203 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5204 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5205 cmdc |= NQTXC_CMD_TCP;
5206 } else {
5207 cmdc |= NQTXC_CMD_UDP;
5208 }
5209 cmdc |= NQTXC_CMD_IP4;
5210 *fieldsp |= NQTXD_FIELDS_TUXSM;
5211 }
5212 if (m0->m_pkthdr.csum_flags &
5213 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5214 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5215 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5216 cmdc |= NQTXC_CMD_TCP;
5217 } else {
5218 cmdc |= NQTXC_CMD_UDP;
5219 }
5220 cmdc |= NQTXC_CMD_IP6;
5221 *fieldsp |= NQTXD_FIELDS_TUXSM;
5222 }
5223
5224 /* Fill in the context descriptor. */
5225 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5226 htole32(vl_len);
5227 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5228 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5229 htole32(cmdc);
5230 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5231 htole32(mssidx);
5232 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5233 DPRINTF(WM_DEBUG_TX,
5234 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5235 sc->sc_txnext, 0, vl_len));
5236 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5237 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5238 txs->txs_ndesc++;
5239 return 0;
5240 }
5241
5242 /*
5243 * wm_nq_start: [ifnet interface function]
5244 *
5245 * Start packet transmission on the interface for NEWQUEUE devices
5246 */
5247 static void
5248 wm_nq_start(struct ifnet *ifp)
5249 {
5250 struct wm_softc *sc = ifp->if_softc;
5251
5252 WM_TX_LOCK(sc);
5253 if (!sc->sc_stopping)
5254 wm_nq_start_locked(ifp);
5255 WM_TX_UNLOCK(sc);
5256 }
5257
5258 static void
5259 wm_nq_start_locked(struct ifnet *ifp)
5260 {
5261 struct wm_softc *sc = ifp->if_softc;
5262 struct mbuf *m0;
5263 struct m_tag *mtag;
5264 struct wm_txsoft *txs;
5265 bus_dmamap_t dmamap;
5266 int error, nexttx, lasttx = -1, seg, segs_needed;
5267 bool do_csum, sent;
5268
5269 KASSERT(WM_TX_LOCKED(sc));
5270
5271 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5272 return;
5273
5274 sent = false;
5275
5276 /*
5277 * Loop through the send queue, setting up transmit descriptors
5278 * until we drain the queue, or use up all available transmit
5279 * descriptors.
5280 */
5281 for (;;) {
5282 m0 = NULL;
5283
5284 /* Get a work queue entry. */
5285 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5286 wm_txintr(sc);
5287 if (sc->sc_txsfree == 0) {
5288 DPRINTF(WM_DEBUG_TX,
5289 ("%s: TX: no free job descriptors\n",
5290 device_xname(sc->sc_dev)));
5291 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5292 break;
5293 }
5294 }
5295
5296 /* Grab a packet off the queue. */
5297 IFQ_DEQUEUE(&ifp->if_snd, m0);
5298 if (m0 == NULL)
5299 break;
5300
5301 DPRINTF(WM_DEBUG_TX,
5302 ("%s: TX: have packet to transmit: %p\n",
5303 device_xname(sc->sc_dev), m0));
5304
5305 txs = &sc->sc_txsoft[sc->sc_txsnext];
5306 dmamap = txs->txs_dmamap;
5307
5308 /*
5309 * Load the DMA map. If this fails, the packet either
5310 * didn't fit in the allotted number of segments, or we
5311 * were short on resources. For the too-many-segments
5312 * case, we simply report an error and drop the packet,
5313 * since we can't sanely copy a jumbo packet to a single
5314 * buffer.
5315 */
5316 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5317 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5318 if (error) {
5319 if (error == EFBIG) {
5320 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5321 log(LOG_ERR, "%s: Tx packet consumes too many "
5322 "DMA segments, dropping...\n",
5323 device_xname(sc->sc_dev));
5324 wm_dump_mbuf_chain(sc, m0);
5325 m_freem(m0);
5326 continue;
5327 }
5328 /* Short on resources, just stop for now. */
5329 DPRINTF(WM_DEBUG_TX,
5330 ("%s: TX: dmamap load failed: %d\n",
5331 device_xname(sc->sc_dev), error));
5332 break;
5333 }
5334
5335 segs_needed = dmamap->dm_nsegs;
5336
5337 /*
5338 * Ensure we have enough descriptors free to describe
5339 * the packet. Note, we always reserve one descriptor
5340 * at the end of the ring due to the semantics of the
5341 * TDT register, plus one more in the event we need
5342 * to load offload context.
5343 */
5344 if (segs_needed > sc->sc_txfree - 2) {
5345 /*
5346 * Not enough free descriptors to transmit this
5347 * packet. We haven't committed anything yet,
5348 * so just unload the DMA map, put the packet
5349 * back on the queue, and punt. Notify the upper
5350 * layer that there are no more slots left.
5351 */
5352 DPRINTF(WM_DEBUG_TX,
5353 ("%s: TX: need %d (%d) descriptors, have %d\n",
5354 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5355 segs_needed, sc->sc_txfree - 1));
5356 ifp->if_flags |= IFF_OACTIVE;
5357 bus_dmamap_unload(sc->sc_dmat, dmamap);
5358 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5359 break;
5360 }
5361
5362 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5363
5364 DPRINTF(WM_DEBUG_TX,
5365 ("%s: TX: packet has %d (%d) DMA segments\n",
5366 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5367
5368 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5369
5370 /*
5371 * Store a pointer to the packet so that we can free it
5372 * later.
5373 *
5374 * Initially, we take the number of descriptors the
5375 * packet uses to be the number of DMA segments. This may be
5376 * incremented by 1 if we do checksum offload (a descriptor
5377 * is used to set the checksum context).
5378 */
5379 txs->txs_mbuf = m0;
5380 txs->txs_firstdesc = sc->sc_txnext;
5381 txs->txs_ndesc = segs_needed;
5382
5383 /* Set up offload parameters for this packet. */
5384 uint32_t cmdlen, fields, dcmdlen;
5385 if (m0->m_pkthdr.csum_flags &
5386 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5387 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5388 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5389 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5390 &do_csum) != 0) {
5391 /* Error message already displayed. */
5392 bus_dmamap_unload(sc->sc_dmat, dmamap);
5393 continue;
5394 }
5395 } else {
5396 do_csum = false;
5397 cmdlen = 0;
5398 fields = 0;
5399 }
5400
5401 /* Sync the DMA map. */
5402 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5403 BUS_DMASYNC_PREWRITE);
5404
5405 /* Initialize the first transmit descriptor. */
5406 nexttx = sc->sc_txnext;
5407 if (!do_csum) {
5408 /* setup a legacy descriptor */
5409 wm_set_dma_addr(
5410 &sc->sc_txdescs[nexttx].wtx_addr,
5411 dmamap->dm_segs[0].ds_addr);
5412 sc->sc_txdescs[nexttx].wtx_cmdlen =
5413 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5414 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5415 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5416 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5417 NULL) {
5418 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5419 htole32(WTX_CMD_VLE);
5420 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5421 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5422 } else {
5423 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5424 }
5425 dcmdlen = 0;
5426 } else {
5427 /* setup an advanced data descriptor */
5428 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5429 htole64(dmamap->dm_segs[0].ds_addr);
5430 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5431 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5432 htole32(dmamap->dm_segs[0].ds_len | cmdlen);
5433 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5434 htole32(fields);
5435 DPRINTF(WM_DEBUG_TX,
5436 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5437 device_xname(sc->sc_dev), nexttx,
5438 (uint64_t)dmamap->dm_segs[0].ds_addr));
5439 DPRINTF(WM_DEBUG_TX,
5440 ("\t 0x%08x%08x\n", fields,
5441 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5442 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5443 }
5444
5445 lasttx = nexttx;
5446 nexttx = WM_NEXTTX(sc, nexttx);
5447 /*
5448 * Fill in the next descriptors. The legacy and advanced
5449 * formats are the same here.
5450 */
5451 for (seg = 1; seg < dmamap->dm_nsegs;
5452 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5453 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5454 htole64(dmamap->dm_segs[seg].ds_addr);
5455 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5456 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5457 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5458 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5459 lasttx = nexttx;
5460
5461 DPRINTF(WM_DEBUG_TX,
5462 ("%s: TX: desc %d: %#" PRIx64 ", "
5463 "len %#04zx\n",
5464 device_xname(sc->sc_dev), nexttx,
5465 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5466 dmamap->dm_segs[seg].ds_len));
5467 }
5468
5469 KASSERT(lasttx != -1);
5470
5471 /*
5472 * Set up the command byte on the last descriptor of
5473 * the packet. If we're in the interrupt delay window,
5474 * delay the interrupt.
5475 */
5476 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5477 (NQTX_CMD_EOP | NQTX_CMD_RS));
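		/*
		 * WTX_CMD_RS (report status) asks the chip to write the
		 * descriptor status back once the frame has been processed;
		 * wm_txintr() later polls the resulting WTX_ST_DD bit to
		 * decide when this job's descriptors can be reclaimed.
		 */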
5478 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5479 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5480
5481 txs->txs_lastdesc = lasttx;
5482
5483 DPRINTF(WM_DEBUG_TX,
5484 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5485 device_xname(sc->sc_dev),
5486 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5487
5488 /* Sync the descriptors we're using. */
5489 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5490 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5491
5492 /* Give the packet to the chip. */
5493 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5494 sent = true;
5495
5496 DPRINTF(WM_DEBUG_TX,
5497 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5498
5499 DPRINTF(WM_DEBUG_TX,
5500 ("%s: TX: finished transmitting packet, job %d\n",
5501 device_xname(sc->sc_dev), sc->sc_txsnext));
5502
5503 /* Advance the tx pointer. */
5504 sc->sc_txfree -= txs->txs_ndesc;
5505 sc->sc_txnext = nexttx;
5506
5507 sc->sc_txsfree--;
5508 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5509
5510 /* Pass the packet to any BPF listeners. */
5511 bpf_mtap(ifp, m0);
5512 }
5513
5514 if (m0 != NULL) {
5515 ifp->if_flags |= IFF_OACTIVE;
5516 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5517 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5518 m_freem(m0);
5519 }
5520
5521 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5522 /* No more slots; notify upper layer. */
5523 ifp->if_flags |= IFF_OACTIVE;
5524 }
5525
5526 if (sent) {
5527 /* Set a watchdog timer in case the chip flakes out. */
5528 ifp->if_timer = 5;
5529 }
5530 }
5531
5532 /* Interrupt */
5533
5534 /*
5535 * wm_txintr:
5536 *
5537 * Helper; handle transmit interrupts.
5538 */
5539 static void
5540 wm_txintr(struct wm_softc *sc)
5541 {
5542 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5543 struct wm_txsoft *txs;
5544 uint8_t status;
5545 int i;
5546
5547 if (sc->sc_stopping)
5548 return;
5549
5550 ifp->if_flags &= ~IFF_OACTIVE;
5551
5552 /*
5553 * Go through the Tx list and free mbufs for those
5554 * frames which have been transmitted.
5555 */
5556 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5557 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
5558 txs = &sc->sc_txsoft[i];
5559
5560 DPRINTF(WM_DEBUG_TX,
5561 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
5562
5563 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
5564 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5565
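		/*
		 * The chip sets WTX_ST_DD (descriptor done) in the last
		 * descriptor of a job once the frame has been handled. If
		 * it is still clear the job is pending, so hand the
		 * descriptor back to the device (PREREAD) and stop here.
		 */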
5566 status =
5567 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
5568 if ((status & WTX_ST_DD) == 0) {
5569 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
5570 BUS_DMASYNC_PREREAD);
5571 break;
5572 }
5573
5574 DPRINTF(WM_DEBUG_TX,
5575 ("%s: TX: job %d done: descs %d..%d\n",
5576 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
5577 txs->txs_lastdesc));
5578
5579 /*
5580 * XXX We should probably be using the statistics
5581 * XXX registers, but I don't know if they exist
5582 * XXX on chips before the i82544.
5583 */
5584
5585 #ifdef WM_EVENT_COUNTERS
5586 if (status & WTX_ST_TU)
5587 WM_EVCNT_INCR(&sc->sc_ev_tu);
5588 #endif /* WM_EVENT_COUNTERS */
5589
5590 if (status & (WTX_ST_EC|WTX_ST_LC)) {
5591 ifp->if_oerrors++;
5592 if (status & WTX_ST_LC)
5593 log(LOG_WARNING, "%s: late collision\n",
5594 device_xname(sc->sc_dev));
5595 else if (status & WTX_ST_EC) {
5596 ifp->if_collisions += 16;
5597 log(LOG_WARNING, "%s: excessive collisions\n",
5598 device_xname(sc->sc_dev));
5599 }
5600 } else
5601 ifp->if_opackets++;
5602
5603 sc->sc_txfree += txs->txs_ndesc;
5604 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
5605 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5606 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5607 m_freem(txs->txs_mbuf);
5608 txs->txs_mbuf = NULL;
5609 }
5610
5611 /* Update the dirty transmit buffer pointer. */
5612 sc->sc_txsdirty = i;
5613 DPRINTF(WM_DEBUG_TX,
5614 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
5615
5616 /*
5617 * If there are no more pending transmissions, cancel the watchdog
5618 * timer.
5619 */
5620 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
5621 ifp->if_timer = 0;
5622 }
5623
5624 /*
5625 * wm_rxintr:
5626 *
5627 * Helper; handle receive interrupts.
5628 */
5629 static void
5630 wm_rxintr(struct wm_softc *sc)
5631 {
5632 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5633 struct wm_rxsoft *rxs;
5634 struct mbuf *m;
5635 int i, len;
5636 uint8_t status, errors;
5637 uint16_t vlantag;
5638
5639 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
5640 rxs = &sc->sc_rxsoft[i];
5641
5642 DPRINTF(WM_DEBUG_RX,
5643 ("%s: RX: checking descriptor %d\n",
5644 device_xname(sc->sc_dev), i));
5645
5646 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5647
5648 status = sc->sc_rxdescs[i].wrx_status;
5649 errors = sc->sc_rxdescs[i].wrx_errors;
5650 len = le16toh(sc->sc_rxdescs[i].wrx_len);
5651 vlantag = sc->sc_rxdescs[i].wrx_special;
5652
5653 if ((status & WRX_ST_DD) == 0) {
5654 /* We have processed all of the receive descriptors. */
5655 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
5656 break;
5657 }
5658
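		/*
		 * sc_rxdiscard is set below when a buffer cannot be
		 * replaced mid-frame; keep dropping descriptors that
		 * belong to that frame until EOP is seen, then resume
		 * normal reception.
		 */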
5659 if (__predict_false(sc->sc_rxdiscard)) {
5660 DPRINTF(WM_DEBUG_RX,
5661 ("%s: RX: discarding contents of descriptor %d\n",
5662 device_xname(sc->sc_dev), i));
5663 WM_INIT_RXDESC(sc, i);
5664 if (status & WRX_ST_EOP) {
5665 /* Reset our state. */
5666 DPRINTF(WM_DEBUG_RX,
5667 ("%s: RX: resetting rxdiscard -> 0\n",
5668 device_xname(sc->sc_dev)));
5669 sc->sc_rxdiscard = 0;
5670 }
5671 continue;
5672 }
5673
5674 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5675 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
5676
5677 m = rxs->rxs_mbuf;
5678
5679 /*
5680 * Add a new receive buffer to the ring, unless of
5681 * course the length is zero. Treat the latter as a
5682 * failed mapping.
5683 */
5684 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
5685 /*
5686 * Failed, throw away what we've done so
5687 * far, and discard the rest of the packet.
5688 */
5689 ifp->if_ierrors++;
5690 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5691 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5692 WM_INIT_RXDESC(sc, i);
5693 if ((status & WRX_ST_EOP) == 0)
5694 sc->sc_rxdiscard = 1;
5695 if (sc->sc_rxhead != NULL)
5696 m_freem(sc->sc_rxhead);
5697 WM_RXCHAIN_RESET(sc);
5698 DPRINTF(WM_DEBUG_RX,
5699 ("%s: RX: Rx buffer allocation failed, "
5700 "dropping packet%s\n", device_xname(sc->sc_dev),
5701 sc->sc_rxdiscard ? " (discard)" : ""));
5702 continue;
5703 }
5704
5705 m->m_len = len;
5706 sc->sc_rxlen += len;
5707 DPRINTF(WM_DEBUG_RX,
5708 ("%s: RX: buffer at %p len %d\n",
5709 device_xname(sc->sc_dev), m->m_data, len));
5710
5711 /* If this is not the end of the packet, keep looking. */
5712 if ((status & WRX_ST_EOP) == 0) {
5713 WM_RXCHAIN_LINK(sc, m);
5714 DPRINTF(WM_DEBUG_RX,
5715 ("%s: RX: not yet EOP, rxlen -> %d\n",
5716 device_xname(sc->sc_dev), sc->sc_rxlen));
5717 continue;
5718 }
5719
5720 		/*
5721 		 * Okay, we have the entire packet now. The chip is
5722 		 * configured to include the FCS except on I350, I354 and
5723 		 * I21[01] (not all chips can be configured to strip it),
5724 		 * so we need to trim it. We may also need to adjust the
5725 		 * length of the previous mbuf in the chain if the current
5726 		 * mbuf is too short to hold the whole FCS.
5727 		 * Due to an erratum, the RCTL_SECRC bit is always set on
5728 		 * I350, so the FCS is already stripped and not trimmed.
5729 		 */
5730 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
5731 && (sc->sc_type != WM_T_I210)
5732 && (sc->sc_type != WM_T_I211)) {
5733 if (m->m_len < ETHER_CRC_LEN) {
5734 sc->sc_rxtail->m_len
5735 -= (ETHER_CRC_LEN - m->m_len);
5736 m->m_len = 0;
5737 } else
5738 m->m_len -= ETHER_CRC_LEN;
5739 len = sc->sc_rxlen - ETHER_CRC_LEN;
5740 } else
5741 len = sc->sc_rxlen;
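		/*
		 * Example of the trim above: with a 4-byte FCS, if the
		 * final mbuf holds only 2 bytes, both are FCS bytes and
		 * the other 2 FCS bytes sit at the end of the previous
		 * mbuf, which is why sc_rxtail is shortened when
		 * m_len < ETHER_CRC_LEN.
		 */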
5742
5743 WM_RXCHAIN_LINK(sc, m);
5744
5745 *sc->sc_rxtailp = NULL;
5746 m = sc->sc_rxhead;
5747
5748 WM_RXCHAIN_RESET(sc);
5749
5750 DPRINTF(WM_DEBUG_RX,
5751 ("%s: RX: have entire packet, len -> %d\n",
5752 device_xname(sc->sc_dev), len));
5753
5754 /* If an error occurred, update stats and drop the packet. */
5755 if (errors &
5756 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
5757 if (errors & WRX_ER_SE)
5758 log(LOG_WARNING, "%s: symbol error\n",
5759 device_xname(sc->sc_dev));
5760 else if (errors & WRX_ER_SEQ)
5761 log(LOG_WARNING, "%s: receive sequence error\n",
5762 device_xname(sc->sc_dev));
5763 else if (errors & WRX_ER_CE)
5764 log(LOG_WARNING, "%s: CRC error\n",
5765 device_xname(sc->sc_dev));
5766 m_freem(m);
5767 continue;
5768 }
5769
5770 /* No errors. Receive the packet. */
5771 m->m_pkthdr.rcvif = ifp;
5772 m->m_pkthdr.len = len;
5773
5774 /*
5775 * If VLANs are enabled, VLAN packets have been unwrapped
5776 * for us. Associate the tag with the packet.
5777 */
5778 		/* XXX should check for I350 and I354 */
5779 if ((status & WRX_ST_VP) != 0) {
5780 VLAN_INPUT_TAG(ifp, m,
5781 le16toh(vlantag),
5782 continue);
5783 }
5784
5785 /* Set up checksum info for this packet. */
5786 if ((status & WRX_ST_IXSM) == 0) {
5787 if (status & WRX_ST_IPCS) {
5788 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
5789 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
5790 if (errors & WRX_ER_IPE)
5791 m->m_pkthdr.csum_flags |=
5792 M_CSUM_IPv4_BAD;
5793 }
5794 if (status & WRX_ST_TCPCS) {
5795 /*
5796 * Note: we don't know if this was TCP or UDP,
5797 * so we just set both bits, and expect the
5798 * upper layers to deal.
5799 */
5800 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
5801 m->m_pkthdr.csum_flags |=
5802 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
5803 M_CSUM_TCPv6 | M_CSUM_UDPv6;
5804 if (errors & WRX_ER_TCPE)
5805 m->m_pkthdr.csum_flags |=
5806 M_CSUM_TCP_UDP_BAD;
5807 }
5808 }
5809
5810 ifp->if_ipackets++;
5811
5812 WM_RX_UNLOCK(sc);
5813
5814 /* Pass this up to any BPF listeners. */
5815 bpf_mtap(ifp, m);
5816
5817 /* Pass it on. */
5818 (*ifp->if_input)(ifp, m);
5819
5820 WM_RX_LOCK(sc);
5821
5822 if (sc->sc_stopping)
5823 break;
5824 }
5825
5826 /* Update the receive pointer. */
5827 sc->sc_rxptr = i;
5828
5829 DPRINTF(WM_DEBUG_RX,
5830 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
5831 }
5832
5833 /*
5834 * wm_linkintr_gmii:
5835 *
5836 * Helper; handle link interrupts for GMII.
5837 */
5838 static void
5839 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
5840 {
5841
5842 KASSERT(WM_TX_LOCKED(sc));
5843
5844 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5845 __func__));
5846
5847 if (icr & ICR_LSC) {
5848 DPRINTF(WM_DEBUG_LINK,
5849 ("%s: LINK: LSC -> mii_pollstat\n",
5850 device_xname(sc->sc_dev)));
5851 mii_pollstat(&sc->sc_mii);
5852 if (sc->sc_type == WM_T_82543) {
5853 int miistatus, active;
5854
5855 /*
5856 * With 82543, we need to force speed and
5857 * duplex on the MAC equal to what the PHY
5858 * speed and duplex configuration is.
5859 */
5860 miistatus = sc->sc_mii.mii_media_status;
5861
5862 if (miistatus & IFM_ACTIVE) {
5863 active = sc->sc_mii.mii_media_active;
5864 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5865 switch (IFM_SUBTYPE(active)) {
5866 case IFM_10_T:
5867 sc->sc_ctrl |= CTRL_SPEED_10;
5868 break;
5869 case IFM_100_TX:
5870 sc->sc_ctrl |= CTRL_SPEED_100;
5871 break;
5872 case IFM_1000_T:
5873 sc->sc_ctrl |= CTRL_SPEED_1000;
5874 break;
5875 default:
5876 /*
5877 * fiber?
5878 					 * Should not enter here.
5879 */
5880 printf("unknown media (%x)\n",
5881 active);
5882 break;
5883 }
5884 if (active & IFM_FDX)
5885 sc->sc_ctrl |= CTRL_FD;
5886 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5887 }
5888 } else if ((sc->sc_type == WM_T_ICH8)
5889 && (sc->sc_phytype == WMPHY_IGP_3)) {
5890 wm_kmrn_lock_loss_workaround_ich8lan(sc);
5891 } else if (sc->sc_type == WM_T_PCH) {
5892 wm_k1_gig_workaround_hv(sc,
5893 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
5894 }
5895
5896 if ((sc->sc_phytype == WMPHY_82578)
5897 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
5898 == IFM_1000_T)) {
5899
5900 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
5901 delay(200*1000); /* XXX too big */
5902
5903 /* Link stall fix for link up */
5904 wm_gmii_hv_writereg(sc->sc_dev, 1,
5905 HV_MUX_DATA_CTRL,
5906 HV_MUX_DATA_CTRL_GEN_TO_MAC
5907 | HV_MUX_DATA_CTRL_FORCE_SPEED);
5908 wm_gmii_hv_writereg(sc->sc_dev, 1,
5909 HV_MUX_DATA_CTRL,
5910 HV_MUX_DATA_CTRL_GEN_TO_MAC);
5911 }
5912 }
5913 } else if (icr & ICR_RXSEQ) {
5914 DPRINTF(WM_DEBUG_LINK,
5915 ("%s: LINK Receive sequence error\n",
5916 device_xname(sc->sc_dev)));
5917 }
5918 }
5919
5920 /*
5921 * wm_linkintr_tbi:
5922 *
5923 * Helper; handle link interrupts for TBI mode.
5924 */
5925 static void
5926 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
5927 {
5928 uint32_t status;
5929
5930 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5931 __func__));
5932
5933 status = CSR_READ(sc, WMREG_STATUS);
5934 if (icr & ICR_LSC) {
5935 if (status & STATUS_LU) {
5936 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
5937 device_xname(sc->sc_dev),
5938 (status & STATUS_FD) ? "FDX" : "HDX"));
5939 /*
5940 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
5941 			 * automatically, so update sc->sc_ctrl from a fresh read.
5942 */
5943
5944 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5945 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5946 sc->sc_fcrtl &= ~FCRTL_XONE;
5947 if (status & STATUS_FD)
5948 sc->sc_tctl |=
5949 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5950 else
5951 sc->sc_tctl |=
5952 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5953 if (sc->sc_ctrl & CTRL_TFCE)
5954 sc->sc_fcrtl |= FCRTL_XONE;
5955 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5956 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5957 WMREG_OLD_FCRTL : WMREG_FCRTL,
5958 sc->sc_fcrtl);
5959 sc->sc_tbi_linkup = 1;
5960 } else {
5961 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
5962 device_xname(sc->sc_dev)));
5963 sc->sc_tbi_linkup = 0;
5964 }
5965 wm_tbi_set_linkled(sc);
5966 } else if (icr & ICR_RXSEQ) {
5967 DPRINTF(WM_DEBUG_LINK,
5968 ("%s: LINK: Receive sequence error\n",
5969 device_xname(sc->sc_dev)));
5970 }
5971 }
5972
5973 /*
5974 * wm_linkintr:
5975 *
5976 * Helper; handle link interrupts.
5977 */
5978 static void
5979 wm_linkintr(struct wm_softc *sc, uint32_t icr)
5980 {
5981
5982 if (sc->sc_flags & WM_F_HAS_MII)
5983 wm_linkintr_gmii(sc, icr);
5984 else
5985 wm_linkintr_tbi(sc, icr);
5986 }
5987
5988 /*
5989 * wm_intr:
5990 *
5991 * Interrupt service routine.
5992 */
5993 static int
5994 wm_intr(void *arg)
5995 {
5996 struct wm_softc *sc = arg;
5997 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5998 uint32_t icr;
5999 int handled = 0;
6000
6001 while (1 /* CONSTCOND */) {
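		/*
		 * Reading ICR acknowledges (clears) the asserted causes on
		 * this hardware, so keep looping until no cause we care
		 * about remains set.
		 */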
6002 icr = CSR_READ(sc, WMREG_ICR);
6003 if ((icr & sc->sc_icr) == 0)
6004 break;
6005 rnd_add_uint32(&sc->rnd_source, icr);
6006
6007 WM_RX_LOCK(sc);
6008
6009 if (sc->sc_stopping) {
6010 WM_RX_UNLOCK(sc);
6011 break;
6012 }
6013
6014 handled = 1;
6015
6016 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6017 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
6018 DPRINTF(WM_DEBUG_RX,
6019 ("%s: RX: got Rx intr 0x%08x\n",
6020 device_xname(sc->sc_dev),
6021 icr & (ICR_RXDMT0|ICR_RXT0)));
6022 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
6023 }
6024 #endif
6025 wm_rxintr(sc);
6026
6027 WM_RX_UNLOCK(sc);
6028 WM_TX_LOCK(sc);
6029
6030 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6031 if (icr & ICR_TXDW) {
6032 DPRINTF(WM_DEBUG_TX,
6033 ("%s: TX: got TXDW interrupt\n",
6034 device_xname(sc->sc_dev)));
6035 WM_EVCNT_INCR(&sc->sc_ev_txdw);
6036 }
6037 #endif
6038 wm_txintr(sc);
6039
6040 if (icr & (ICR_LSC|ICR_RXSEQ)) {
6041 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
6042 wm_linkintr(sc, icr);
6043 }
6044
6045 WM_TX_UNLOCK(sc);
6046
6047 if (icr & ICR_RXO) {
6048 #if defined(WM_DEBUG)
6049 log(LOG_WARNING, "%s: Receive overrun\n",
6050 device_xname(sc->sc_dev));
6051 #endif /* defined(WM_DEBUG) */
6052 }
6053 }
6054
6055 if (handled) {
6056 /* Try to get more packets going. */
6057 ifp->if_start(ifp);
6058 }
6059
6060 return handled;
6061 }
6062
6063 /*
6064 * Media related.
6065 * GMII, SGMII, TBI (and SERDES)
6066 */
6067
6068 /* GMII related */
6069
6070 /*
6071 * wm_gmii_reset:
6072 *
6073 * Reset the PHY.
6074 */
6075 static void
6076 wm_gmii_reset(struct wm_softc *sc)
6077 {
6078 uint32_t reg;
6079 int rv;
6080
6081 /* get phy semaphore */
6082 switch (sc->sc_type) {
6083 case WM_T_82571:
6084 case WM_T_82572:
6085 case WM_T_82573:
6086 case WM_T_82574:
6087 case WM_T_82583:
6088 /* XXX should get sw semaphore, too */
6089 rv = wm_get_swsm_semaphore(sc);
6090 break;
6091 case WM_T_82575:
6092 case WM_T_82576:
6093 case WM_T_82580:
6094 case WM_T_I350:
6095 case WM_T_I354:
6096 case WM_T_I210:
6097 case WM_T_I211:
6098 case WM_T_80003:
6099 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6100 break;
6101 case WM_T_ICH8:
6102 case WM_T_ICH9:
6103 case WM_T_ICH10:
6104 case WM_T_PCH:
6105 case WM_T_PCH2:
6106 case WM_T_PCH_LPT:
6107 rv = wm_get_swfwhw_semaphore(sc);
6108 break;
6109 default:
6110 		/* nothing to do */
6111 rv = 0;
6112 break;
6113 }
6114 if (rv != 0) {
6115 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6116 __func__);
6117 return;
6118 }
6119
6120 switch (sc->sc_type) {
6121 case WM_T_82542_2_0:
6122 case WM_T_82542_2_1:
6123 /* null */
6124 break;
6125 case WM_T_82543:
6126 /*
6127 * With 82543, we need to force speed and duplex on the MAC
6128 * equal to what the PHY speed and duplex configuration is.
6129 * In addition, we need to perform a hardware reset on the PHY
6130 * to take it out of reset.
6131 */
6132 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6133 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6134
6135 /* The PHY reset pin is active-low. */
6136 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6137 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6138 CTRL_EXT_SWDPIN(4));
6139 reg |= CTRL_EXT_SWDPIO(4);
6140
6141 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6142 CSR_WRITE_FLUSH(sc);
6143 delay(10*1000);
6144
6145 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6146 CSR_WRITE_FLUSH(sc);
6147 delay(150);
6148 #if 0
6149 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6150 #endif
6151 delay(20*1000); /* XXX extra delay to get PHY ID? */
6152 break;
6153 case WM_T_82544: /* reset 10000us */
6154 case WM_T_82540:
6155 case WM_T_82545:
6156 case WM_T_82545_3:
6157 case WM_T_82546:
6158 case WM_T_82546_3:
6159 case WM_T_82541:
6160 case WM_T_82541_2:
6161 case WM_T_82547:
6162 case WM_T_82547_2:
6163 case WM_T_82571: /* reset 100us */
6164 case WM_T_82572:
6165 case WM_T_82573:
6166 case WM_T_82574:
6167 case WM_T_82575:
6168 case WM_T_82576:
6169 case WM_T_82580:
6170 case WM_T_I350:
6171 case WM_T_I354:
6172 case WM_T_I210:
6173 case WM_T_I211:
6174 case WM_T_82583:
6175 case WM_T_80003:
6176 /* generic reset */
6177 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6178 CSR_WRITE_FLUSH(sc);
6179 delay(20000);
6180 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6181 CSR_WRITE_FLUSH(sc);
6182 delay(20000);
6183
6184 if ((sc->sc_type == WM_T_82541)
6185 || (sc->sc_type == WM_T_82541_2)
6186 || (sc->sc_type == WM_T_82547)
6187 || (sc->sc_type == WM_T_82547_2)) {
6188 			/* Workarounds for igp are done in igp_reset() */
6189 /* XXX add code to set LED after phy reset */
6190 }
6191 break;
6192 case WM_T_ICH8:
6193 case WM_T_ICH9:
6194 case WM_T_ICH10:
6195 case WM_T_PCH:
6196 case WM_T_PCH2:
6197 case WM_T_PCH_LPT:
6198 /* generic reset */
6199 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6200 CSR_WRITE_FLUSH(sc);
6201 delay(100);
6202 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6203 CSR_WRITE_FLUSH(sc);
6204 delay(150);
6205 break;
6206 default:
6207 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6208 __func__);
6209 break;
6210 }
6211
6212 /* release PHY semaphore */
6213 switch (sc->sc_type) {
6214 case WM_T_82571:
6215 case WM_T_82572:
6216 case WM_T_82573:
6217 case WM_T_82574:
6218 case WM_T_82583:
6219 /* XXX should put sw semaphore, too */
6220 wm_put_swsm_semaphore(sc);
6221 break;
6222 case WM_T_82575:
6223 case WM_T_82576:
6224 case WM_T_82580:
6225 case WM_T_I350:
6226 case WM_T_I354:
6227 case WM_T_I210:
6228 case WM_T_I211:
6229 case WM_T_80003:
6230 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6231 break;
6232 case WM_T_ICH8:
6233 case WM_T_ICH9:
6234 case WM_T_ICH10:
6235 case WM_T_PCH:
6236 case WM_T_PCH2:
6237 case WM_T_PCH_LPT:
6238 wm_put_swfwhw_semaphore(sc);
6239 break;
6240 default:
6241 		/* nothing to do */
6242 rv = 0;
6243 break;
6244 }
6245
6246 /* get_cfg_done */
6247 wm_get_cfg_done(sc);
6248
6249 /* extra setup */
6250 switch (sc->sc_type) {
6251 case WM_T_82542_2_0:
6252 case WM_T_82542_2_1:
6253 case WM_T_82543:
6254 case WM_T_82544:
6255 case WM_T_82540:
6256 case WM_T_82545:
6257 case WM_T_82545_3:
6258 case WM_T_82546:
6259 case WM_T_82546_3:
6260 case WM_T_82541_2:
6261 case WM_T_82547_2:
6262 case WM_T_82571:
6263 case WM_T_82572:
6264 case WM_T_82573:
6265 case WM_T_82574:
6266 case WM_T_82575:
6267 case WM_T_82576:
6268 case WM_T_82580:
6269 case WM_T_I350:
6270 case WM_T_I354:
6271 case WM_T_I210:
6272 case WM_T_I211:
6273 case WM_T_82583:
6274 case WM_T_80003:
6275 /* null */
6276 break;
6277 case WM_T_82541:
6278 case WM_T_82547:
6279 		/* XXX Configure the LED after PHY reset */
6280 break;
6281 case WM_T_ICH8:
6282 case WM_T_ICH9:
6283 case WM_T_ICH10:
6284 case WM_T_PCH:
6285 case WM_T_PCH2:
6286 case WM_T_PCH_LPT:
6287 		/* Allow time for h/w to get to a quiescent state after reset */
6288 delay(10*1000);
6289
6290 if (sc->sc_type == WM_T_PCH)
6291 wm_hv_phy_workaround_ich8lan(sc);
6292
6293 if (sc->sc_type == WM_T_PCH2)
6294 wm_lv_phy_workaround_ich8lan(sc);
6295
6296 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6297 /*
6298 * dummy read to clear the phy wakeup bit after lcd
6299 * reset
6300 */
6301 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6302 }
6303
6304 /*
6305 		 * XXX Configure the LCD with the extended configuration
6306 		 * region in NVM
6307 */
6308
6309 /* Configure the LCD with the OEM bits in NVM */
6310 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6311 || (sc->sc_type == WM_T_PCH_LPT)) {
6312 /*
6313 * Disable LPLU.
6314 * XXX It seems that 82567 has LPLU, too.
6315 */
6316 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6317 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6318 reg |= HV_OEM_BITS_ANEGNOW;
6319 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6320 }
6321 break;
6322 default:
6323 panic("%s: unknown type\n", __func__);
6324 break;
6325 }
6326 }
6327
6328 /*
6329 * wm_get_phy_id_82575:
6330 *
6331  * Return the PHY ID, or -1 on failure.
6332 */
6333 static int
6334 wm_get_phy_id_82575(struct wm_softc *sc)
6335 {
6336 uint32_t reg;
6337 int phyid = -1;
6338
6339 /* XXX */
6340 if ((sc->sc_flags & WM_F_SGMII) == 0)
6341 return -1;
6342
6343 if (wm_sgmii_uses_mdio(sc)) {
6344 switch (sc->sc_type) {
6345 case WM_T_82575:
6346 case WM_T_82576:
6347 reg = CSR_READ(sc, WMREG_MDIC);
6348 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6349 break;
6350 case WM_T_82580:
6351 case WM_T_I350:
6352 case WM_T_I354:
6353 case WM_T_I210:
6354 case WM_T_I211:
6355 reg = CSR_READ(sc, WMREG_MDICNFG);
6356 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6357 break;
6358 default:
6359 return -1;
6360 }
6361 }
6362
6363 return phyid;
6364 }
6365
6366
6367 /*
6368 * wm_gmii_mediainit:
6369 *
6370 * Initialize media for use on 1000BASE-T devices.
6371 */
6372 static void
6373 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6374 {
6375 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6376 struct mii_data *mii = &sc->sc_mii;
6377 uint32_t reg;
6378
6379 /* We have GMII. */
6380 sc->sc_flags |= WM_F_HAS_MII;
6381
6382 if (sc->sc_type == WM_T_80003)
6383 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6384 else
6385 sc->sc_tipg = TIPG_1000T_DFLT;
6386
6387 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
6388 if ((sc->sc_type == WM_T_82580)
6389 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6390 || (sc->sc_type == WM_T_I211)) {
6391 reg = CSR_READ(sc, WMREG_PHPM);
6392 reg &= ~PHPM_GO_LINK_D;
6393 CSR_WRITE(sc, WMREG_PHPM, reg);
6394 }
6395
6396 /*
6397 * Let the chip set speed/duplex on its own based on
6398 * signals from the PHY.
6399 * XXXbouyer - I'm not sure this is right for the 80003,
6400 * the em driver only sets CTRL_SLU here - but it seems to work.
6401 */
6402 sc->sc_ctrl |= CTRL_SLU;
6403 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6404
6405 /* Initialize our media structures and probe the GMII. */
6406 mii->mii_ifp = ifp;
6407
6408 /*
6409 * Determine the PHY access method.
6410 *
6411 * For SGMII, use SGMII specific method.
6412 *
6413 * For some devices, we can determine the PHY access method
6414 * from sc_type.
6415 *
6416 * For ICH and PCH variants, it's difficult to determine the PHY
6417 * access method by sc_type, so use the PCI product ID for some
6418 * devices.
6419 	 * For other ICH8 variants, try the igp method first. If the PHY
6420 	 * can't be detected that way, fall back to the bm method.
6421 */
6422 switch (prodid) {
6423 case PCI_PRODUCT_INTEL_PCH_M_LM:
6424 case PCI_PRODUCT_INTEL_PCH_M_LC:
6425 /* 82577 */
6426 sc->sc_phytype = WMPHY_82577;
6427 break;
6428 case PCI_PRODUCT_INTEL_PCH_D_DM:
6429 case PCI_PRODUCT_INTEL_PCH_D_DC:
6430 /* 82578 */
6431 sc->sc_phytype = WMPHY_82578;
6432 break;
6433 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6434 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6435 /* 82579 */
6436 sc->sc_phytype = WMPHY_82579;
6437 break;
6438 case PCI_PRODUCT_INTEL_82801I_BM:
6439 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6440 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6441 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6442 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6443 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6444 /* 82567 */
6445 sc->sc_phytype = WMPHY_BM;
6446 mii->mii_readreg = wm_gmii_bm_readreg;
6447 mii->mii_writereg = wm_gmii_bm_writereg;
6448 break;
6449 default:
6450 if (((sc->sc_flags & WM_F_SGMII) != 0)
6451 		    && !wm_sgmii_uses_mdio(sc)) {
6452 mii->mii_readreg = wm_sgmii_readreg;
6453 mii->mii_writereg = wm_sgmii_writereg;
6454 } else if (sc->sc_type >= WM_T_80003) {
6455 mii->mii_readreg = wm_gmii_i80003_readreg;
6456 mii->mii_writereg = wm_gmii_i80003_writereg;
6457 } else if (sc->sc_type >= WM_T_I210) {
6458 mii->mii_readreg = wm_gmii_i82544_readreg;
6459 mii->mii_writereg = wm_gmii_i82544_writereg;
6460 } else if (sc->sc_type >= WM_T_82580) {
6461 sc->sc_phytype = WMPHY_82580;
6462 mii->mii_readreg = wm_gmii_82580_readreg;
6463 mii->mii_writereg = wm_gmii_82580_writereg;
6464 } else if (sc->sc_type >= WM_T_82544) {
6465 mii->mii_readreg = wm_gmii_i82544_readreg;
6466 mii->mii_writereg = wm_gmii_i82544_writereg;
6467 } else {
6468 mii->mii_readreg = wm_gmii_i82543_readreg;
6469 mii->mii_writereg = wm_gmii_i82543_writereg;
6470 }
6471 break;
6472 }
6473 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
6474 /* All PCH* use _hv_ */
6475 mii->mii_readreg = wm_gmii_hv_readreg;
6476 mii->mii_writereg = wm_gmii_hv_writereg;
6477 }
6478 mii->mii_statchg = wm_gmii_statchg;
6479
6480 wm_gmii_reset(sc);
6481
6482 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6483 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6484 wm_gmii_mediastatus);
6485
6486 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6487 || (sc->sc_type == WM_T_82580)
6488 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6489 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6490 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6491 /* Attach only one port */
6492 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6493 MII_OFFSET_ANY, MIIF_DOPAUSE);
6494 } else {
6495 int i, id;
6496 uint32_t ctrl_ext;
6497
6498 id = wm_get_phy_id_82575(sc);
6499 if (id != -1) {
6500 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6501 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6502 }
6503 if ((id == -1)
6504 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6505 				/* Power on the SGMII PHY if it is disabled */
6506 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6507 				CSR_WRITE(sc, WMREG_CTRL_EXT,
6508 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
6509 CSR_WRITE_FLUSH(sc);
6510 delay(300*1000); /* XXX too long */
6511
6512 				/* Try PHY addresses 1 through 7 */
6513 for (i = 1; i < 8; i++)
6514 mii_attach(sc->sc_dev, &sc->sc_mii,
6515 0xffffffff, i, MII_OFFSET_ANY,
6516 MIIF_DOPAUSE);
6517
6518 				/* Restore the previous SFP cage power state */
6519 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6520 }
6521 }
6522 } else {
6523 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6524 MII_OFFSET_ANY, MIIF_DOPAUSE);
6525 }
6526
6527 /*
6528 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6529 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6530 */
6531 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6532 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6533 wm_set_mdio_slow_mode_hv(sc);
6534 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6535 MII_OFFSET_ANY, MIIF_DOPAUSE);
6536 }
6537
6538 /*
6539 * (For ICH8 variants)
6540 * If PHY detection failed, use BM's r/w function and retry.
6541 */
6542 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6543 /* if failed, retry with *_bm_* */
6544 mii->mii_readreg = wm_gmii_bm_readreg;
6545 mii->mii_writereg = wm_gmii_bm_writereg;
6546
6547 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6548 MII_OFFSET_ANY, MIIF_DOPAUSE);
6549 }
6550
6551 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6552 		/* No PHY was found */
6553 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6554 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6555 sc->sc_phytype = WMPHY_NONE;
6556 } else {
6557 /*
6558 * PHY Found!
6559 * Check PHY type.
6560 */
6561 uint32_t model;
6562 struct mii_softc *child;
6563
6564 child = LIST_FIRST(&mii->mii_phys);
6565 if (device_is_a(child->mii_dev, "igphy")) {
6566 struct igphy_softc *isc = (struct igphy_softc *)child;
6567
6568 model = isc->sc_mii.mii_mpd_model;
6569 if (model == MII_MODEL_yyINTEL_I82566)
6570 sc->sc_phytype = WMPHY_IGP_3;
6571 }
6572
6573 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6574 }
6575 }
6576
6577 /*
6578 * wm_gmii_mediastatus: [ifmedia interface function]
6579 *
6580 * Get the current interface media status on a 1000BASE-T device.
6581 */
6582 static void
6583 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6584 {
6585 struct wm_softc *sc = ifp->if_softc;
6586
6587 ether_mediastatus(ifp, ifmr);
6588 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6589 | sc->sc_flowflags;
6590 }
6591
6592 /*
6593 * wm_gmii_mediachange: [ifmedia interface function]
6594 *
6595 * Set hardware to newly-selected media on a 1000BASE-T device.
6596 */
6597 static int
6598 wm_gmii_mediachange(struct ifnet *ifp)
6599 {
6600 struct wm_softc *sc = ifp->if_softc;
6601 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6602 int rc;
6603
6604 if ((ifp->if_flags & IFF_UP) == 0)
6605 return 0;
6606
6607 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6608 sc->sc_ctrl |= CTRL_SLU;
6609 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6610 || (sc->sc_type > WM_T_82543)) {
6611 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6612 } else {
6613 sc->sc_ctrl &= ~CTRL_ASDE;
6614 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6615 if (ife->ifm_media & IFM_FDX)
6616 sc->sc_ctrl |= CTRL_FD;
6617 switch (IFM_SUBTYPE(ife->ifm_media)) {
6618 case IFM_10_T:
6619 sc->sc_ctrl |= CTRL_SPEED_10;
6620 break;
6621 case IFM_100_TX:
6622 sc->sc_ctrl |= CTRL_SPEED_100;
6623 break;
6624 case IFM_1000_T:
6625 sc->sc_ctrl |= CTRL_SPEED_1000;
6626 break;
6627 default:
6628 panic("wm_gmii_mediachange: bad media 0x%x",
6629 ife->ifm_media);
6630 }
6631 }
6632 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6633 if (sc->sc_type <= WM_T_82543)
6634 wm_gmii_reset(sc);
6635
6636 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6637 return 0;
6638 return rc;
6639 }
6640
6641 #define MDI_IO CTRL_SWDPIN(2)
6642 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6643 #define MDI_CLK CTRL_SWDPIN(3)
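/*
 * The 82543 has no MDIC register, so MII management frames are
 * bit-banged through the software-definable pins above: MDI_DIR sets
 * the direction of the data pin and each bit is latched by toggling
 * MDI_CLK.
 */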
6644
6645 static void
6646 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6647 {
6648 uint32_t i, v;
6649
6650 v = CSR_READ(sc, WMREG_CTRL);
6651 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6652 v |= MDI_DIR | CTRL_SWDPIO(3);
6653
6654 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6655 if (data & i)
6656 v |= MDI_IO;
6657 else
6658 v &= ~MDI_IO;
6659 CSR_WRITE(sc, WMREG_CTRL, v);
6660 CSR_WRITE_FLUSH(sc);
6661 delay(10);
6662 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6663 CSR_WRITE_FLUSH(sc);
6664 delay(10);
6665 CSR_WRITE(sc, WMREG_CTRL, v);
6666 CSR_WRITE_FLUSH(sc);
6667 delay(10);
6668 }
6669 }
6670
6671 static uint32_t
6672 wm_i82543_mii_recvbits(struct wm_softc *sc)
6673 {
6674 uint32_t v, i, data = 0;
6675
6676 v = CSR_READ(sc, WMREG_CTRL);
6677 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6678 v |= CTRL_SWDPIO(3);
6679
6680 CSR_WRITE(sc, WMREG_CTRL, v);
6681 CSR_WRITE_FLUSH(sc);
6682 delay(10);
6683 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6684 CSR_WRITE_FLUSH(sc);
6685 delay(10);
6686 CSR_WRITE(sc, WMREG_CTRL, v);
6687 CSR_WRITE_FLUSH(sc);
6688 delay(10);
6689
6690 for (i = 0; i < 16; i++) {
6691 data <<= 1;
6692 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6693 CSR_WRITE_FLUSH(sc);
6694 delay(10);
6695 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6696 data |= 1;
6697 CSR_WRITE(sc, WMREG_CTRL, v);
6698 CSR_WRITE_FLUSH(sc);
6699 delay(10);
6700 }
6701
6702 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6703 CSR_WRITE_FLUSH(sc);
6704 delay(10);
6705 CSR_WRITE(sc, WMREG_CTRL, v);
6706 CSR_WRITE_FLUSH(sc);
6707 delay(10);
6708
6709 return data;
6710 }
6711
6712 #undef MDI_IO
6713 #undef MDI_DIR
6714 #undef MDI_CLK
6715
6716 /*
6717 * wm_gmii_i82543_readreg: [mii interface function]
6718 *
6719 * Read a PHY register on the GMII (i82543 version).
6720 */
6721 static int
6722 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6723 {
6724 struct wm_softc *sc = device_private(self);
6725 int rv;
6726
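	/*
	 * Build an IEEE 802.3 clause 22 read frame: a preamble of 32 one
	 * bits, then a 14-bit header (start 01, read opcode 10, 5-bit PHY
	 * address, 5-bit register address), after which 16 data bits are
	 * clocked back from the PHY.
	 */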
6727 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6728 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
6729 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6730 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
6731
6732 DPRINTF(WM_DEBUG_GMII,
6733 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6734 device_xname(sc->sc_dev), phy, reg, rv));
6735
6736 return rv;
6737 }
6738
6739 /*
6740 * wm_gmii_i82543_writereg: [mii interface function]
6741 *
6742 * Write a PHY register on the GMII (i82543 version).
6743 */
6744 static void
6745 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6746 {
6747 struct wm_softc *sc = device_private(self);
6748
6749 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6750 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6751 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6752 (MII_COMMAND_START << 30), 32);
6753 }
6754
6755 /*
6756 * wm_gmii_i82544_readreg: [mii interface function]
6757 *
6758 * Read a PHY register on the GMII.
6759 */
6760 static int
6761 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6762 {
6763 struct wm_softc *sc = device_private(self);
6764 uint32_t mdic = 0;
6765 int i, rv;
6766
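	/*
	 * On i82544 and later the MAC serializes the management frame
	 * itself: write the opcode, PHY and register addresses to MDIC,
	 * then poll MDIC_READY (up to WM_GEN_POLL_TIMEOUT * 3 times at
	 * 50us intervals) for completion.
	 */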
6767 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6768 MDIC_REGADD(reg));
6769
6770 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6771 mdic = CSR_READ(sc, WMREG_MDIC);
6772 if (mdic & MDIC_READY)
6773 break;
6774 delay(50);
6775 }
6776
6777 if ((mdic & MDIC_READY) == 0) {
6778 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6779 device_xname(sc->sc_dev), phy, reg);
6780 rv = 0;
6781 } else if (mdic & MDIC_E) {
6782 #if 0 /* This is normal if no PHY is present. */
6783 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6784 device_xname(sc->sc_dev), phy, reg);
6785 #endif
6786 rv = 0;
6787 } else {
6788 rv = MDIC_DATA(mdic);
6789 if (rv == 0xffff)
6790 rv = 0;
6791 }
6792
6793 return rv;
6794 }
6795
6796 /*
6797 * wm_gmii_i82544_writereg: [mii interface function]
6798 *
6799 * Write a PHY register on the GMII.
6800 */
6801 static void
6802 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6803 {
6804 struct wm_softc *sc = device_private(self);
6805 uint32_t mdic = 0;
6806 int i;
6807
6808 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6809 MDIC_REGADD(reg) | MDIC_DATA(val));
6810
6811 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6812 mdic = CSR_READ(sc, WMREG_MDIC);
6813 if (mdic & MDIC_READY)
6814 break;
6815 delay(50);
6816 }
6817
6818 if ((mdic & MDIC_READY) == 0)
6819 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6820 device_xname(sc->sc_dev), phy, reg);
6821 else if (mdic & MDIC_E)
6822 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6823 device_xname(sc->sc_dev), phy, reg);
6824 }
6825
6826 /*
6827 * wm_gmii_i80003_readreg: [mii interface function]
6828 *
6829  * Read a PHY register on the Kumeran bus (80003).
6830  * This could be handled by the PHY layer if we didn't have to lock the
6831  * resource ...
6832 */
6833 static int
6834 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6835 {
6836 struct wm_softc *sc = device_private(self);
6837 int sem;
6838 int rv;
6839
6840 if (phy != 1) /* only one PHY on kumeran bus */
6841 return 0;
6842
6843 sem = swfwphysem[sc->sc_funcid];
6844 if (wm_get_swfw_semaphore(sc, sem)) {
6845 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6846 __func__);
6847 return 0;
6848 }
6849
6850 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6851 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6852 reg >> GG82563_PAGE_SHIFT);
6853 } else {
6854 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6855 reg >> GG82563_PAGE_SHIFT);
6856 }
6857 	/* Wait another 200us to work around a bug in the MDIC ready bit */
6858 delay(200);
6859 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6860 delay(200);
6861
6862 wm_put_swfw_semaphore(sc, sem);
6863 return rv;
6864 }
6865
6866 /*
6867 * wm_gmii_i80003_writereg: [mii interface function]
6868 *
6869  * Write a PHY register on the Kumeran bus (80003).
6870  * This could be handled by the PHY layer if we didn't have to lock the
6871  * resource ...
6872 */
6873 static void
6874 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6875 {
6876 struct wm_softc *sc = device_private(self);
6877 int sem;
6878
6879 if (phy != 1) /* only one PHY on kumeran bus */
6880 return;
6881
6882 sem = swfwphysem[sc->sc_funcid];
6883 if (wm_get_swfw_semaphore(sc, sem)) {
6884 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6885 __func__);
6886 return;
6887 }
6888
6889 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6890 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6891 reg >> GG82563_PAGE_SHIFT);
6892 } else {
6893 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6894 reg >> GG82563_PAGE_SHIFT);
6895 }
6896 	/* Wait another 200us to work around a bug in the MDIC ready bit */
6897 delay(200);
6898 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6899 delay(200);
6900
6901 wm_put_swfw_semaphore(sc, sem);
6902 }
6903
6904 /*
6905 * wm_gmii_bm_readreg: [mii interface function]
6906 *
6907  * Read a PHY register on the BM PHY (82567 variants).
6908  * This could be handled by the PHY layer if we didn't have to lock the
6909  * resource ...
6910 */
6911 static int
6912 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6913 {
6914 struct wm_softc *sc = device_private(self);
6915 int sem;
6916 int rv;
6917
6918 sem = swfwphysem[sc->sc_funcid];
6919 if (wm_get_swfw_semaphore(sc, sem)) {
6920 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6921 __func__);
6922 return 0;
6923 }
6924
6925 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6926 if (phy == 1)
6927 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6928 reg);
6929 else
6930 wm_gmii_i82544_writereg(self, phy,
6931 GG82563_PHY_PAGE_SELECT,
6932 reg >> GG82563_PAGE_SHIFT);
6933 }
6934
6935 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6936 wm_put_swfw_semaphore(sc, sem);
6937 return rv;
6938 }
6939
6940 /*
6941 * wm_gmii_bm_writereg: [mii interface function]
6942 *
6943  * Write a PHY register on the BM PHY (82567 variants).
6944  * This could be handled by the PHY layer if we didn't have to lock the
6945  * resource ...
6946 */
6947 static void
6948 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6949 {
6950 struct wm_softc *sc = device_private(self);
6951 int sem;
6952
6953 sem = swfwphysem[sc->sc_funcid];
6954 if (wm_get_swfw_semaphore(sc, sem)) {
6955 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6956 __func__);
6957 return;
6958 }
6959
6960 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6961 if (phy == 1)
6962 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6963 reg);
6964 else
6965 wm_gmii_i82544_writereg(self, phy,
6966 GG82563_PHY_PAGE_SELECT,
6967 reg >> GG82563_PAGE_SHIFT);
6968 }
6969
6970 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6971 wm_put_swfw_semaphore(sc, sem);
6972 }
6973
6974 static void
6975 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6976 {
6977 struct wm_softc *sc = device_private(self);
6978 uint16_t regnum = BM_PHY_REG_NUM(offset);
6979 uint16_t wuce;
6980
6981 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6982 if (sc->sc_type == WM_T_PCH) {
6983 		/* XXX The e1000 driver does nothing here... why? */
6984 }
6985
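	/*
	 * Access sequence for the BM wakeup registers on page 800:
	 * 1. select page 769 and set the WUC enable bit,
	 * 2. select page 800 and write the target register number to the
	 *    address opcode register,
	 * 3. read or write the data opcode register,
	 * 4. go back to page 769 and restore the original WUC bits.
	 */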
6986 /* Set page 769 */
6987 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6988 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6989
6990 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6991
6992 wuce &= ~BM_WUC_HOST_WU_BIT;
6993 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6994 wuce | BM_WUC_ENABLE_BIT);
6995
6996 /* Select page 800 */
6997 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6998 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6999
7000 /* Write page 800 */
7001 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
7002
7003 if (rd)
7004 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7005 else
7006 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7007
7008 /* Set page 769 */
7009 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7010 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7011
7012 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7013 }
7014
7015 /*
7016 * wm_gmii_hv_readreg: [mii interface function]
7017 *
7018  * Read a PHY register on the HV (PCH) PHY.
7019  * This could be handled by the PHY layer if we didn't have to lock the
7020  * resource ...
7021 */
7022 static int
7023 wm_gmii_hv_readreg(device_t self, int phy, int reg)
7024 {
7025 struct wm_softc *sc = device_private(self);
7026 uint16_t page = BM_PHY_REG_PAGE(reg);
7027 uint16_t regnum = BM_PHY_REG_NUM(reg);
7028 uint16_t val;
7029 int rv;
7030
7031 if (wm_get_swfwhw_semaphore(sc)) {
7032 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7033 __func__);
7034 return 0;
7035 }
7036
7037 /* XXX Workaround failure in MDIO access while cable is disconnected */
7038 if (sc->sc_phytype == WMPHY_82577) {
7039 /* XXX must write */
7040 }
7041
7042 /* Page 800 works differently than the rest so it has its own func */
7043 if (page == BM_WUC_PAGE) {
7044 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
7045 return val;
7046 }
7047
7048 /*
7049 	 * Pages lower than 768 work differently from the rest and would
7050 	 * need their own function; that is not implemented yet.
7051 */
7052 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7053 printf("gmii_hv_readreg!!!\n");
7054 return 0;
7055 }
7056
7057 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7058 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7059 page << BME1000_PAGE_SHIFT);
7060 }
7061
7062 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7063 wm_put_swfwhw_semaphore(sc);
7064 return rv;
7065 }
7066
7067 /*
7068 * wm_gmii_hv_writereg: [mii interface function]
7069 *
7070  * Write a PHY register on the HV (PCH) PHY.
7071  * This could be handled by the PHY layer if we didn't have to lock the
7072  * resource ...
7073 */
7074 static void
7075 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7076 {
7077 struct wm_softc *sc = device_private(self);
7078 uint16_t page = BM_PHY_REG_PAGE(reg);
7079 uint16_t regnum = BM_PHY_REG_NUM(reg);
7080
7081 if (wm_get_swfwhw_semaphore(sc)) {
7082 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7083 __func__);
7084 return;
7085 }
7086
7087 /* XXX Workaround failure in MDIO access while cable is disconnected */
7088
7089 /* Page 800 works differently than the rest so it has its own func */
7090 if (page == BM_WUC_PAGE) {
7091 uint16_t tmp;
7092
7093 tmp = val;
7094 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
7095 return;
7096 }
7097
7098 /*
7099 	 * Pages lower than 768 work differently from the rest and would
7100 	 * need their own function; that is not implemented yet.
7101 */
7102 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7103 printf("gmii_hv_writereg!!!\n");
7104 return;
7105 }
7106
7107 /*
7108 * XXX Workaround MDIO accesses being disabled after entering IEEE
7109 * Power Down (whenever bit 11 of the PHY control register is set)
7110 */
7111
7112 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7113 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7114 page << BME1000_PAGE_SHIFT);
7115 }
7116
7117 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7118 wm_put_swfwhw_semaphore(sc);
7119 }
7120
7121 /*
7122 * wm_gmii_82580_readreg: [mii interface function]
7123 *
7124 * Read a PHY register on the 82580 and I350.
7125 * This could be handled by the PHY layer if we didn't have to lock the
7126  * resource ...
7127 */
7128 static int
7129 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7130 {
7131 struct wm_softc *sc = device_private(self);
7132 int sem;
7133 int rv;
7134
7135 sem = swfwphysem[sc->sc_funcid];
7136 if (wm_get_swfw_semaphore(sc, sem)) {
7137 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7138 __func__);
7139 return 0;
7140 }
7141
7142 rv = wm_gmii_i82544_readreg(self, phy, reg);
7143
7144 wm_put_swfw_semaphore(sc, sem);
7145 return rv;
7146 }
7147
7148 /*
7149 * wm_gmii_82580_writereg: [mii interface function]
7150 *
7151 * Write a PHY register on the 82580 and I350.
7152 * This could be handled by the PHY layer if we didn't have to lock the
7153  * resource ...
7154 */
7155 static void
7156 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7157 {
7158 struct wm_softc *sc = device_private(self);
7159 int sem;
7160
7161 sem = swfwphysem[sc->sc_funcid];
7162 if (wm_get_swfw_semaphore(sc, sem)) {
7163 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7164 __func__);
7165 return;
7166 }
7167
7168 wm_gmii_i82544_writereg(self, phy, reg, val);
7169
7170 wm_put_swfw_semaphore(sc, sem);
7171 }
7172
7173 /*
7174 * wm_gmii_statchg: [mii interface function]
7175 *
7176 * Callback from MII layer when media changes.
7177 */
7178 static void
7179 wm_gmii_statchg(struct ifnet *ifp)
7180 {
7181 struct wm_softc *sc = ifp->if_softc;
7182 struct mii_data *mii = &sc->sc_mii;
7183
7184 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7185 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7186 sc->sc_fcrtl &= ~FCRTL_XONE;
7187
7188 /*
7189 * Get flow control negotiation result.
7190 */
7191 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7192 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7193 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7194 mii->mii_media_active &= ~IFM_ETH_FMASK;
7195 }
7196
7197 if (sc->sc_flowflags & IFM_FLOW) {
7198 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7199 sc->sc_ctrl |= CTRL_TFCE;
7200 sc->sc_fcrtl |= FCRTL_XONE;
7201 }
7202 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7203 sc->sc_ctrl |= CTRL_RFCE;
7204 }
7205
7206 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7207 DPRINTF(WM_DEBUG_LINK,
7208 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7209 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7210 } else {
7211 DPRINTF(WM_DEBUG_LINK,
7212 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7213 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7214 }
7215
7216 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7217 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7218 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7219 : WMREG_FCRTL, sc->sc_fcrtl);
7220 if (sc->sc_type == WM_T_80003) {
7221 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7222 case IFM_1000_T:
7223 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7224 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7225 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7226 break;
7227 default:
7228 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7229 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7230 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7231 break;
7232 }
7233 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7234 }
7235 }
7236
7237 /*
7238 * wm_kmrn_readreg:
7239 *
7240  * Read a Kumeran register
7241 */
7242 static int
7243 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7244 {
7245 int rv;
7246
7247 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
7248 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7249 aprint_error_dev(sc->sc_dev,
7250 "%s: failed to get semaphore\n", __func__);
7251 return 0;
7252 }
7253 	} else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
7254 if (wm_get_swfwhw_semaphore(sc)) {
7255 aprint_error_dev(sc->sc_dev,
7256 "%s: failed to get semaphore\n", __func__);
7257 return 0;
7258 }
7259 }
7260
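	/*
	 * Kumeran registers are reached through the single KUMCTRLSTA
	 * window: write the register offset with KUMCTRLSTA_REN set to
	 * start a read, wait briefly, then fetch the data back from the
	 * same register.
	 */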
7261 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7262 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7263 KUMCTRLSTA_REN);
7264 CSR_WRITE_FLUSH(sc);
7265 delay(2);
7266
7267 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7268
7269 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
7270 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7271 	else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
7272 wm_put_swfwhw_semaphore(sc);
7273
7274 return rv;
7275 }
7276
7277 /*
7278 * wm_kmrn_writereg:
7279 *
7280  * Write a Kumeran register
7281 */
7282 static void
7283 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7284 {
7285
7286 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
7287 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7288 aprint_error_dev(sc->sc_dev,
7289 "%s: failed to get semaphore\n", __func__);
7290 return;
7291 }
7292 	} else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
7293 if (wm_get_swfwhw_semaphore(sc)) {
7294 aprint_error_dev(sc->sc_dev,
7295 "%s: failed to get semaphore\n", __func__);
7296 return;
7297 }
7298 }
7299
7300 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7301 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7302 (val & KUMCTRLSTA_MASK));
7303
7304 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
7305 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7306 	else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
7307 wm_put_swfwhw_semaphore(sc);
7308 }
7309
7310 /* SGMII related */
7311
7312 /*
7313 * wm_sgmii_uses_mdio
7314 *
7315 * Check whether the transaction is to the internal PHY or the external
7316 * MDIO interface. Return true if it's MDIO.
7317 */
7318 static bool
7319 wm_sgmii_uses_mdio(struct wm_softc *sc)
7320 {
7321 uint32_t reg;
7322 bool ismdio = false;
7323
7324 switch (sc->sc_type) {
7325 case WM_T_82575:
7326 case WM_T_82576:
7327 reg = CSR_READ(sc, WMREG_MDIC);
7328 ismdio = ((reg & MDIC_DEST) != 0);
7329 break;
7330 case WM_T_82580:
7331 case WM_T_I350:
7332 case WM_T_I354:
7333 case WM_T_I210:
7334 case WM_T_I211:
7335 reg = CSR_READ(sc, WMREG_MDICNFG);
7336 ismdio = ((reg & MDICNFG_DEST) != 0);
7337 break;
7338 default:
7339 break;
7340 }
7341
7342 return ismdio;
7343 }
7344
7345 /*
7346 * wm_sgmii_readreg: [mii interface function]
7347 *
7348 * Read a PHY register on the SGMII
7349 * This could be handled by the PHY layer if we didn't have to lock the
7350  * resource ...
7351 */
7352 static int
7353 wm_sgmii_readreg(device_t self, int phy, int reg)
7354 {
7355 struct wm_softc *sc = device_private(self);
7356 uint32_t i2ccmd;
7357 int i, rv;
7358
7359 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7360 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7361 __func__);
7362 return 0;
7363 }
7364
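	/*
	 * In SGMII mode the PHY sits behind an I2C interface: write the
	 * register and PHY addresses plus the read opcode to I2CCMD and
	 * poll I2CCMD_READY. The 16-bit result comes back byte-swapped,
	 * hence the swap below.
	 */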
7365 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7366 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7367 | I2CCMD_OPCODE_READ;
7368 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7369
7370 /* Poll the ready bit */
7371 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7372 delay(50);
7373 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7374 if (i2ccmd & I2CCMD_READY)
7375 break;
7376 }
7377 if ((i2ccmd & I2CCMD_READY) == 0)
7378 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7379 if ((i2ccmd & I2CCMD_ERROR) != 0)
7380 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7381
7382 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7383
7384 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7385 return rv;
7386 }
7387
7388 /*
7389 * wm_sgmii_writereg: [mii interface function]
7390 *
7391 * Write a PHY register on the SGMII.
7392 * This could be handled by the PHY layer if we didn't have to lock the
7393  * resource ...
7394 */
7395 static void
7396 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7397 {
7398 struct wm_softc *sc = device_private(self);
7399 uint32_t i2ccmd;
7400 int i;
7401 int val_swapped;
7402
7403 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7404 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7405 __func__);
7406 return;
7407 }
7408 /* Swap the data bytes for the I2C interface */
7409 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
7410 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7411 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7412 | I2CCMD_OPCODE_WRITE | val_swapped;
7413 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7414
7415 /* Poll the ready bit */
7416 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7417 delay(50);
7418 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7419 if (i2ccmd & I2CCMD_READY)
7420 break;
7421 }
7422 if ((i2ccmd & I2CCMD_READY) == 0)
7423 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7424 if ((i2ccmd & I2CCMD_ERROR) != 0)
7425 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7426
7427 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7428 }
7429
7430 /* TBI related */
7431
7432 /* XXX Currently TBI only */
7433 static int
7434 wm_check_for_link(struct wm_softc *sc)
7435 {
7436 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7437 uint32_t rxcw;
7438 uint32_t ctrl;
7439 uint32_t status;
7440 uint32_t sig;
7441
7442 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
7443 sc->sc_tbi_linkup = 1;
7444 return 0;
7445 }
7446
7447 rxcw = CSR_READ(sc, WMREG_RXCW);
7448 ctrl = CSR_READ(sc, WMREG_CTRL);
7449 status = CSR_READ(sc, WMREG_STATUS);
7450
7451 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7452
7453 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7454 device_xname(sc->sc_dev), __func__,
7455 ((ctrl & CTRL_SWDPIN(1)) == sig),
7456 ((status & STATUS_LU) != 0),
7457 ((rxcw & RXCW_C) != 0)
7458 ));
7459
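	/*
	 * RXCW_C is set while the link partner is sending /C/ (802.3z
	 * configuration) ordered sets, i.e. while it is autonegotiating.
	 */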
7460 /*
7461 * SWDPIN LU RXCW
7462 * 0 0 0
7463 * 0 0 1 (should not happen)
7464 * 0 1 0 (should not happen)
7465 * 0 1 1 (should not happen)
7466 * 1 0 0 Disable autonego and force linkup
7467 * 1 0 1 got /C/ but not linkup yet
7468 * 1 1 0 (linkup)
7469 * 1 1 1 If IFM_AUTO, back to autonego
7470 *
7471 */
7472 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7473 && ((status & STATUS_LU) == 0)
7474 && ((rxcw & RXCW_C) == 0)) {
7475 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7476 __func__));
7477 sc->sc_tbi_linkup = 0;
7478 /* Disable auto-negotiation in the TXCW register */
7479 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7480
7481 /*
7482 * Force link-up and also force full-duplex.
7483 *
7484 		 * NOTE: the hardware updated TFCE and RFCE in CTRL
7485 		 * automatically, so update sc->sc_ctrl from the value read.
7486 */
7487 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7488 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7489 } else if (((status & STATUS_LU) != 0)
7490 && ((rxcw & RXCW_C) != 0)
7491 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7492 sc->sc_tbi_linkup = 1;
7493 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7494 __func__));
7495 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7496 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7497 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7498 && ((rxcw & RXCW_C) != 0)) {
7499 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7500 } else {
7501 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7502 status));
7503 }
7504
7505 return 0;
7506 }
7507
7508 /*
7509 * wm_tbi_mediainit:
7510 *
7511 * Initialize media for use on 1000BASE-X devices.
7512 */
7513 static void
7514 wm_tbi_mediainit(struct wm_softc *sc)
7515 {
7516 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7517 const char *sep = "";
7518
7519 if (sc->sc_type < WM_T_82543)
7520 sc->sc_tipg = TIPG_WM_DFLT;
7521 else
7522 sc->sc_tipg = TIPG_LG_DFLT;
7523
7524 sc->sc_tbi_anegticks = 5;
7525
7526 /* Initialize our media structures */
7527 sc->sc_mii.mii_ifp = ifp;
7528
7529 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7530 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
7531 wm_tbi_mediastatus);
7532
7533 /*
7534 * SWD Pins:
7535 *
7536 * 0 = Link LED (output)
7537 * 1 = Loss Of Signal (input)
7538 */
7539 sc->sc_ctrl |= CTRL_SWDPIO(0);
7540 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
7541 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7542 sc->sc_ctrl &= ~CTRL_LRST;
7543
7544 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7545
7546 #define ADD(ss, mm, dd) \
7547 do { \
7548 aprint_normal("%s%s", sep, ss); \
7549 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
7550 sep = ", "; \
7551 } while (/*CONSTCOND*/0)
7552
7553 aprint_normal_dev(sc->sc_dev, "");
7554
7555 /* Only 82545 is LX */
7556 if (sc->sc_type == WM_T_82545) {
7557 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
7558 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
7559 } else {
7560 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
7561 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
7562 }
7563 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
7564 aprint_normal("\n");
7565
7566 #undef ADD
7567
7568 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
7569 }
7570
7571 /*
7572 * wm_tbi_mediastatus: [ifmedia interface function]
7573 *
7574 * Get the current interface media status on a 1000BASE-X device.
7575 */
7576 static void
7577 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7578 {
7579 struct wm_softc *sc = ifp->if_softc;
7580 uint32_t ctrl, status;
7581
7582 ifmr->ifm_status = IFM_AVALID;
7583 ifmr->ifm_active = IFM_ETHER;
7584
7585 status = CSR_READ(sc, WMREG_STATUS);
7586 if ((status & STATUS_LU) == 0) {
7587 ifmr->ifm_active |= IFM_NONE;
7588 return;
7589 }
7590
7591 ifmr->ifm_status |= IFM_ACTIVE;
7592 /* Only 82545 is LX */
7593 if (sc->sc_type == WM_T_82545)
7594 ifmr->ifm_active |= IFM_1000_LX;
7595 else
7596 ifmr->ifm_active |= IFM_1000_SX;
7597 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
7598 ifmr->ifm_active |= IFM_FDX;
7599 else
7600 ifmr->ifm_active |= IFM_HDX;
7601 ctrl = CSR_READ(sc, WMREG_CTRL);
7602 if (ctrl & CTRL_RFCE)
7603 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
7604 if (ctrl & CTRL_TFCE)
7605 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
7606 }
7607
7608 /*
7609 * wm_tbi_mediachange: [ifmedia interface function]
7610 *
7611 * Set hardware to newly-selected media on a 1000BASE-X device.
7612 */
7613 static int
7614 wm_tbi_mediachange(struct ifnet *ifp)
7615 {
7616 struct wm_softc *sc = ifp->if_softc;
7617 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7618 uint32_t status;
7619 int i;
7620
7621 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7622 return 0;
7623
7624 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
7625 || (sc->sc_type >= WM_T_82575))
7626 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
7627
7628 /* XXX power_up_serdes_link_82575() */
7629
7630 sc->sc_ctrl &= ~CTRL_LRST;
7631 sc->sc_txcw = TXCW_ANE;
7632 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7633 sc->sc_txcw |= TXCW_FD | TXCW_HD;
7634 else if (ife->ifm_media & IFM_FDX)
7635 sc->sc_txcw |= TXCW_FD;
7636 else
7637 sc->sc_txcw |= TXCW_HD;
7638
7639 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
7640 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
7641
7642 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
7643 device_xname(sc->sc_dev), sc->sc_txcw));
7644 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7645 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7646 CSR_WRITE_FLUSH(sc);
7647 delay(1000);
7648
7649 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
7650 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
7651
7652 /*
7653 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
7654 	 * optics detect a signal; on older chips a signal reads as 0.
7655 */
7656 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
7657 /* Have signal; wait for the link to come up. */
7658 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
7659 delay(10000);
7660 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
7661 break;
7662 }
7663
7664 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
7665 device_xname(sc->sc_dev),i));
7666
7667 status = CSR_READ(sc, WMREG_STATUS);
7668 DPRINTF(WM_DEBUG_LINK,
7669 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
7670 device_xname(sc->sc_dev),status, STATUS_LU));
7671 if (status & STATUS_LU) {
7672 /* Link is up. */
7673 DPRINTF(WM_DEBUG_LINK,
7674 ("%s: LINK: set media -> link up %s\n",
7675 device_xname(sc->sc_dev),
7676 (status & STATUS_FD) ? "FDX" : "HDX"));
7677
7678 /*
7679 * NOTE: CTRL will update TFCE and RFCE automatically,
7680 * so we should update sc->sc_ctrl
7681 */
7682 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7683 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7684 sc->sc_fcrtl &= ~FCRTL_XONE;
7685 if (status & STATUS_FD)
7686 sc->sc_tctl |=
7687 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7688 else
7689 sc->sc_tctl |=
7690 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7691 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
7692 sc->sc_fcrtl |= FCRTL_XONE;
7693 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7694 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7695 WMREG_OLD_FCRTL : WMREG_FCRTL,
7696 sc->sc_fcrtl);
7697 sc->sc_tbi_linkup = 1;
7698 } else {
7699 if (i == WM_LINKUP_TIMEOUT)
7700 wm_check_for_link(sc);
7701 /* Link is down. */
7702 DPRINTF(WM_DEBUG_LINK,
7703 ("%s: LINK: set media -> link down\n",
7704 device_xname(sc->sc_dev)));
7705 sc->sc_tbi_linkup = 0;
7706 }
7707 } else {
7708 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
7709 device_xname(sc->sc_dev)));
7710 sc->sc_tbi_linkup = 0;
7711 }
7712
7713 wm_tbi_set_linkled(sc);
7714
7715 return 0;
7716 }
7717
7718 /*
7719 * wm_tbi_set_linkled:
7720 *
7721 * Update the link LED on 1000BASE-X devices.
7722 */
7723 static void
7724 wm_tbi_set_linkled(struct wm_softc *sc)
7725 {
7726
7727 if (sc->sc_tbi_linkup)
7728 sc->sc_ctrl |= CTRL_SWDPIN(0);
7729 else
7730 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7731
7732 /* 82540 or newer devices are active low */
7733 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7734
7735 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7736 }
7737
7738 /*
7739 * wm_tbi_check_link:
7740 *
7741 * Check the link on 1000BASE-X devices.
7742 */
7743 static void
7744 wm_tbi_check_link(struct wm_softc *sc)
7745 {
7746 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7747 uint32_t status;
7748
7749 KASSERT(WM_TX_LOCKED(sc));
7750
7751 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
7752 sc->sc_tbi_linkup = 1;
7753 return;
7754 }
7755
7756 status = CSR_READ(sc, WMREG_STATUS);
7757
7758 /* XXX is this needed? */
7759 (void)CSR_READ(sc, WMREG_RXCW);
7760 (void)CSR_READ(sc, WMREG_CTRL);
7761
7762 /* set link status */
7763 if ((status & STATUS_LU) == 0) {
7764 DPRINTF(WM_DEBUG_LINK,
7765 ("%s: LINK: checklink -> down\n",
7766 device_xname(sc->sc_dev)));
7767 sc->sc_tbi_linkup = 0;
7768 } else if (sc->sc_tbi_linkup == 0) {
7769 DPRINTF(WM_DEBUG_LINK,
7770 ("%s: LINK: checklink -> up %s\n",
7771 device_xname(sc->sc_dev),
7772 (status & STATUS_FD) ? "FDX" : "HDX"));
7773 sc->sc_tbi_linkup = 1;
7774 }
7775
7776 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
7777 && ((status & STATUS_LU) == 0)) {
7778 sc->sc_tbi_linkup = 0;
7779 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7780 /* If the timer expired, retry autonegotiation */
7781 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
7782 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
7783 sc->sc_tbi_ticks = 0;
7784 /*
7785 * Reset the link, and let autonegotiation do
7786 * its thing
7787 */
7788 sc->sc_ctrl |= CTRL_LRST;
7789 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7790 CSR_WRITE_FLUSH(sc);
7791 delay(1000);
7792 sc->sc_ctrl &= ~CTRL_LRST;
7793 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7794 CSR_WRITE_FLUSH(sc);
7795 delay(1000);
7796 CSR_WRITE(sc, WMREG_TXCW,
7797 sc->sc_txcw & ~TXCW_ANE);
7798 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7799 }
7800 }
7801 }
7802
7803 wm_tbi_set_linkled(sc);
7804 }
7805
7806 /* SFP related */
7807
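/*
 * wm_sfp_read_data_byte:
 *
 *	Read one byte from an SFP module's EEPROM through the I2CCMD
 *	register.  Returns 0 on success, -1 on an I2C timeout or error.
 */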
7808 static int
7809 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
7810 {
7811 uint32_t i2ccmd;
7812 int i;
7813
7814 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
7815 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7816
7817 /* Poll the ready bit */
7818 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7819 delay(50);
7820 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7821 if (i2ccmd & I2CCMD_READY)
7822 break;
7823 }
7824 if ((i2ccmd & I2CCMD_READY) == 0)
7825 return -1;
7826 if ((i2ccmd & I2CCMD_ERROR) != 0)
7827 return -1;
7828
7829 *data = i2ccmd & 0x00ff;
7830
7831 return 0;
7832 }
7833
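/*
 * wm_sfp_get_media_type:
 *
 *	Identify an attached SFP module by reading its identifier and
 *	Ethernet compliance code bytes.  Returns a WM_MEDIATYPE_* value,
 *	or WM_MEDIATYPE_UNKNOWN if the module can't be read or
 *	identified.  1000BASE-T and 100BASE-FX modules are handled
 *	through SGMII.
 */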
7834 static uint32_t
7835 wm_sfp_get_media_type(struct wm_softc *sc)
7836 {
7837 uint32_t ctrl_ext;
7838 uint8_t val = 0;
7839 int timeout = 3;
7840 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
7841 int rv = -1;
7842
7843 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7844 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
7845 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
7846 CSR_WRITE_FLUSH(sc);
7847
7848 /* Read SFP module data */
7849 while (timeout) {
7850 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
7851 if (rv == 0)
7852 break;
7853 delay(100*1000); /* XXX too big */
7854 timeout--;
7855 }
7856 if (rv != 0)
7857 goto out;
7858 switch (val) {
7859 case SFF_SFP_ID_SFF:
7860 aprint_normal_dev(sc->sc_dev,
7861 "Module/Connector soldered to board\n");
7862 break;
7863 case SFF_SFP_ID_SFP:
7864 aprint_normal_dev(sc->sc_dev, "SFP\n");
7865 break;
7866 case SFF_SFP_ID_UNKNOWN:
7867 goto out;
7868 default:
7869 break;
7870 }
7871
7872 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
7873 if (rv != 0) {
7874 goto out;
7875 }
7876
7877 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
7878 mediatype = WM_MEDIATYPE_SERDES;
7879 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
7880 sc->sc_flags |= WM_F_SGMII;
7881 mediatype = WM_MEDIATYPE_COPPER;
7882 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
7883 sc->sc_flags |= WM_F_SGMII;
7884 mediatype = WM_MEDIATYPE_SERDES;
7885 }
7886
7887 out:
7888 /* Restore I2C interface setting */
7889 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7890
7891 return mediatype;
7892 }
7893 /*
7894 * NVM related.
7895 * Microwire, SPI (w/wo EERD) and Flash.
7896 */
7897
7898 /* Both SPI and Microwire */
7899
7900 /*
7901 * wm_eeprom_sendbits:
7902 *
7903 * Send a series of bits to the EEPROM.
7904 */
7905 static void
7906 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
7907 {
7908 uint32_t reg;
7909 int x;
7910
7911 reg = CSR_READ(sc, WMREG_EECD);
7912
7913 for (x = nbits; x > 0; x--) {
7914 if (bits & (1U << (x - 1)))
7915 reg |= EECD_DI;
7916 else
7917 reg &= ~EECD_DI;
7918 CSR_WRITE(sc, WMREG_EECD, reg);
7919 CSR_WRITE_FLUSH(sc);
7920 delay(2);
7921 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7922 CSR_WRITE_FLUSH(sc);
7923 delay(2);
7924 CSR_WRITE(sc, WMREG_EECD, reg);
7925 CSR_WRITE_FLUSH(sc);
7926 delay(2);
7927 }
7928 }
7929
7930 /*
7931 * wm_eeprom_recvbits:
7932 *
7933 * Receive a series of bits from the EEPROM.
7934 */
7935 static void
7936 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
7937 {
7938 uint32_t reg, val;
7939 int x;
7940
7941 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
7942
7943 val = 0;
7944 for (x = nbits; x > 0; x--) {
7945 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7946 CSR_WRITE_FLUSH(sc);
7947 delay(2);
7948 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
7949 val |= (1U << (x - 1));
7950 CSR_WRITE(sc, WMREG_EECD, reg);
7951 CSR_WRITE_FLUSH(sc);
7952 delay(2);
7953 }
7954 *valp = val;
7955 }
7956
7957 /* Microwire */
7958
7959 /*
7960 * wm_nvm_read_uwire:
7961 *
7962 * Read a word from the EEPROM using the MicroWire protocol.
7963 */
7964 static int
7965 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7966 {
7967 uint32_t reg, val;
7968 int i;
7969
7970 for (i = 0; i < wordcnt; i++) {
7971 /* Clear SK and DI. */
7972 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
7973 CSR_WRITE(sc, WMREG_EECD, reg);
7974
7975 /*
7976 * XXX: workaround for a bug in qemu-0.12.x and prior
7977 * and Xen.
7978 *
7979 		 * We use this workaround only for 82540 because qemu's
7980 		 * e1000 acts as an 82540.
7981 */
7982 if (sc->sc_type == WM_T_82540) {
7983 reg |= EECD_SK;
7984 CSR_WRITE(sc, WMREG_EECD, reg);
7985 reg &= ~EECD_SK;
7986 CSR_WRITE(sc, WMREG_EECD, reg);
7987 CSR_WRITE_FLUSH(sc);
7988 delay(2);
7989 }
7990 /* XXX: end of workaround */
7991
7992 /* Set CHIP SELECT. */
7993 reg |= EECD_CS;
7994 CSR_WRITE(sc, WMREG_EECD, reg);
7995 CSR_WRITE_FLUSH(sc);
7996 delay(2);
7997
7998 /* Shift in the READ command. */
7999 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
8000
8001 /* Shift in address. */
8002 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
8003
8004 /* Shift out the data. */
8005 wm_eeprom_recvbits(sc, &val, 16);
8006 data[i] = val & 0xffff;
8007
8008 /* Clear CHIP SELECT. */
8009 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
8010 CSR_WRITE(sc, WMREG_EECD, reg);
8011 CSR_WRITE_FLUSH(sc);
8012 delay(2);
8013 }
8014
8015 return 0;
8016 }
8017
8018 /* SPI */
8019
8020 /*
8021 * Set SPI and FLASH related information from the EECD register.
8022 * For 82541 and 82547, the word size is taken from EEPROM.
8023 */
8024 static int
8025 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
8026 {
8027 int size;
8028 uint32_t reg;
8029 uint16_t data;
8030
8031 reg = CSR_READ(sc, WMREG_EECD);
8032 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
8033
8034 /* Read the size of NVM from EECD by default */
8035 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8036 switch (sc->sc_type) {
8037 case WM_T_82541:
8038 case WM_T_82541_2:
8039 case WM_T_82547:
8040 case WM_T_82547_2:
8041 /* Set dummy value to access EEPROM */
8042 sc->sc_nvm_wordsize = 64;
8043 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
8044 reg = data;
8045 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8046 if (size == 0)
8047 			size = 6; /* 64 words */
8048 else
8049 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
8050 break;
8051 case WM_T_80003:
8052 case WM_T_82571:
8053 case WM_T_82572:
8054 case WM_T_82573: /* SPI case */
8055 case WM_T_82574: /* SPI case */
8056 case WM_T_82583: /* SPI case */
8057 size += NVM_WORD_SIZE_BASE_SHIFT;
8058 if (size > 14)
8059 size = 14;
8060 break;
8061 case WM_T_82575:
8062 case WM_T_82576:
8063 case WM_T_82580:
8064 case WM_T_I350:
8065 case WM_T_I354:
8066 case WM_T_I210:
8067 case WM_T_I211:
8068 size += NVM_WORD_SIZE_BASE_SHIFT;
8069 if (size > 15)
8070 size = 15;
8071 break;
8072 default:
8073 aprint_error_dev(sc->sc_dev,
8074 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
8075 return -1;
8076 break;
8077 }
8078
8079 sc->sc_nvm_wordsize = 1 << size;
8080
8081 return 0;
8082 }
8083
8084 /*
8085 * wm_nvm_ready_spi:
8086 *
8087 * Wait for a SPI EEPROM to be ready for commands.
8088 */
8089 static int
8090 wm_nvm_ready_spi(struct wm_softc *sc)
8091 {
8092 uint32_t val;
8093 int usec;
8094
8095 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
8096 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
8097 wm_eeprom_recvbits(sc, &val, 8);
8098 if ((val & SPI_SR_RDY) == 0)
8099 break;
8100 }
8101 if (usec >= SPI_MAX_RETRIES) {
8102 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
8103 return 1;
8104 }
8105 return 0;
8106 }
8107
8108 /*
8109 * wm_nvm_read_spi:
8110 *
8111  *	Read a word from the EEPROM using the SPI protocol.
8112 */
8113 static int
8114 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8115 {
8116 uint32_t reg, val;
8117 int i;
8118 uint8_t opc;
8119
8120 /* Clear SK and CS. */
8121 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
8122 CSR_WRITE(sc, WMREG_EECD, reg);
8123 CSR_WRITE_FLUSH(sc);
8124 delay(2);
8125
8126 if (wm_nvm_ready_spi(sc))
8127 return 1;
8128
8129 /* Toggle CS to flush commands. */
8130 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
8131 CSR_WRITE_FLUSH(sc);
8132 delay(2);
8133 CSR_WRITE(sc, WMREG_EECD, reg);
8134 CSR_WRITE_FLUSH(sc);
8135 delay(2);
8136
8137 opc = SPI_OPC_READ;
8138 if (sc->sc_nvm_addrbits == 8 && word >= 128)
8139 opc |= SPI_OPC_A8;
8140
8141 wm_eeprom_sendbits(sc, opc, 8);
8142 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
8143
8144 for (i = 0; i < wordcnt; i++) {
8145 wm_eeprom_recvbits(sc, &val, 16);
8146 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
8147 }
8148
8149 /* Raise CS and clear SK. */
8150 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
8151 CSR_WRITE(sc, WMREG_EECD, reg);
8152 CSR_WRITE_FLUSH(sc);
8153 delay(2);
8154
8155 return 0;
8156 }
8157
8158 /* Access via the EERD register */
8159
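/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll the EERD (or EEWR) register given by "rw" until the DONE
 *	bit is set.  Returns 0 on completion, -1 on timeout.
 */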
8160 static int
8161 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
8162 {
8163 uint32_t attempts = 100000;
8164 uint32_t i, reg = 0;
8165 int32_t done = -1;
8166
8167 for (i = 0; i < attempts; i++) {
8168 reg = CSR_READ(sc, rw);
8169
8170 if (reg & EERD_DONE) {
8171 done = 0;
8172 break;
8173 }
8174 delay(5);
8175 }
8176
8177 return done;
8178 }
8179
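/*
 * wm_nvm_read_eerd:
 *
 *	Read words from the EEPROM using the EERD register.
 */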
8180 static int
8181 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
8182 uint16_t *data)
8183 {
8184 int i, eerd = 0;
8185 int error = 0;
8186
8187 for (i = 0; i < wordcnt; i++) {
8188 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
8189
8190 CSR_WRITE(sc, WMREG_EERD, eerd);
8191 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
8192 if (error != 0)
8193 break;
8194
8195 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
8196 }
8197
8198 return error;
8199 }
8200
8201 /* Flash */
8202
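/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	Detect which flash bank holds the valid NVM image, either from
 *	the EECD SEC1VAL bits (ICH8/ICH9) or from the bank signature
 *	bytes.  Returns 0 and sets *bank on success, -1 if no valid
 *	bank was found.
 */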
8203 static int
8204 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
8205 {
8206 uint32_t eecd;
8207 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
8208 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
8209 uint8_t sig_byte = 0;
8210
8211 switch (sc->sc_type) {
8212 case WM_T_ICH8:
8213 case WM_T_ICH9:
8214 eecd = CSR_READ(sc, WMREG_EECD);
8215 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
8216 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
8217 return 0;
8218 }
8219 /* FALLTHROUGH */
8220 default:
8221 /* Default to 0 */
8222 *bank = 0;
8223
8224 /* Check bank 0 */
8225 wm_read_ich8_byte(sc, act_offset, &sig_byte);
8226 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8227 *bank = 0;
8228 return 0;
8229 }
8230
8231 /* Check bank 1 */
8232 wm_read_ich8_byte(sc, act_offset + bank1_offset,
8233 &sig_byte);
8234 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8235 *bank = 1;
8236 return 0;
8237 }
8238 }
8239
8240 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
8241 device_xname(sc->sc_dev)));
8242 return -1;
8243 }
8244
8245 /******************************************************************************
8246 * This function does initial flash setup so that a new read/write/erase cycle
8247 * can be started.
8248 *
8249 * sc - The pointer to the hw structure
8250 ****************************************************************************/
8251 static int32_t
8252 wm_ich8_cycle_init(struct wm_softc *sc)
8253 {
8254 uint16_t hsfsts;
8255 int32_t error = 1;
8256 int32_t i = 0;
8257
8258 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8259
8260 	/* Check the Flash Descriptor Valid bit in Hw status */
8261 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
8262 return error;
8263 }
8264
8265 /* Clear FCERR in Hw status by writing 1 */
8266 /* Clear DAEL in Hw status by writing a 1 */
8267 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
8268
8269 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8270
8271 /*
8272 	 * Either we should have a hardware SPI cycle-in-progress bit to
8273 	 * check against in order to start a new cycle, or the FDONE bit
8274 	 * should be changed in the hardware so that it is 1 after hardware
8275 	 * reset, which can then be used to tell whether a cycle is in
8276 	 * progress or has been completed.  We should also have some
8277 	 * software semaphore mechanism to guard FDONE or the cycle-in-
8278 	 * progress bit so that accesses by two threads are serialized,
8279 	 * or a way so that two threads don't start the cycle at once.
8280 */
8281
8282 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8283 /*
8284 * There is no cycle running at present, so we can start a
8285 * cycle
8286 */
8287
8288 /* Begin by setting Flash Cycle Done. */
8289 hsfsts |= HSFSTS_DONE;
8290 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8291 error = 0;
8292 } else {
8293 /*
8294 		 * otherwise poll for some time so the current cycle has a
8295 * chance to end before giving up.
8296 */
8297 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
8298 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8299 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8300 error = 0;
8301 break;
8302 }
8303 delay(1);
8304 }
8305 if (error == 0) {
8306 /*
8307 			 * Successfully waited for the previous cycle to finish;
8308 * now set the Flash Cycle Done.
8309 */
8310 hsfsts |= HSFSTS_DONE;
8311 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8312 }
8313 }
8314 return error;
8315 }
8316
8317 /******************************************************************************
8318 * This function starts a flash cycle and waits for its completion
8319 *
8320 * sc - The pointer to the hw structure
8321 ****************************************************************************/
8322 static int32_t
8323 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
8324 {
8325 uint16_t hsflctl;
8326 uint16_t hsfsts;
8327 int32_t error = 1;
8328 uint32_t i = 0;
8329
8330 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
8331 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8332 hsflctl |= HSFCTL_GO;
8333 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8334
8335 /* Wait till FDONE bit is set to 1 */
8336 do {
8337 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8338 if (hsfsts & HSFSTS_DONE)
8339 break;
8340 delay(1);
8341 i++;
8342 } while (i < timeout);
8343 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
8344 error = 0;
8345
8346 return error;
8347 }
8348
8349 /******************************************************************************
8350 * Reads a byte or word from the NVM using the ICH8 flash access registers.
8351 *
8352 * sc - The pointer to the hw structure
8353 * index - The index of the byte or word to read.
8354 * size - Size of data to read, 1=byte 2=word
8355 * data - Pointer to the word to store the value read.
8356 *****************************************************************************/
8357 static int32_t
8358 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
8359 uint32_t size, uint16_t *data)
8360 {
8361 uint16_t hsfsts;
8362 uint16_t hsflctl;
8363 uint32_t flash_linear_address;
8364 uint32_t flash_data = 0;
8365 int32_t error = 1;
8366 int32_t count = 0;
8367
8368 	if (size < 1 || size > 2 || data == NULL ||
8369 index > ICH_FLASH_LINEAR_ADDR_MASK)
8370 return error;
8371
8372 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
8373 sc->sc_ich8_flash_base;
8374
8375 do {
8376 delay(1);
8377 /* Steps */
8378 error = wm_ich8_cycle_init(sc);
8379 if (error)
8380 break;
8381
8382 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8383 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
8384 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
8385 & HSFCTL_BCOUNT_MASK;
8386 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
8387 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8388
8389 /*
8390 * Write the last 24 bits of index into Flash Linear address
8391 * field in Flash Address
8392 */
8393 /* TODO: TBD maybe check the index against the size of flash */
8394
8395 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
8396
8397 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
8398
8399 /*
8400 		 * If FCERR is set, clear it and retry the whole
8401 		 * sequence a few more times; otherwise read the data
8402 		 * out of the Flash Data0 register, least significant
8403 		 * byte first.
8404 */
8405 if (error == 0) {
8406 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
8407 if (size == 1)
8408 *data = (uint8_t)(flash_data & 0x000000FF);
8409 else if (size == 2)
8410 *data = (uint16_t)(flash_data & 0x0000FFFF);
8411 break;
8412 } else {
8413 /*
8414 * If we've gotten here, then things are probably
8415 * completely hosed, but if the error condition is
8416 * detected, it won't hurt to give it another try...
8417 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
8418 */
8419 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8420 if (hsfsts & HSFSTS_ERR) {
8421 /* Repeat for some time before giving up. */
8422 continue;
8423 } else if ((hsfsts & HSFSTS_DONE) == 0)
8424 break;
8425 }
8426 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8427
8428 return error;
8429 }
8430
8431 /******************************************************************************
8432 * Reads a single byte from the NVM using the ICH8 flash access registers.
8433 *
8434 * sc - pointer to wm_hw structure
8435 * index - The index of the byte to read.
8436 * data - Pointer to a byte to store the value read.
8437 *****************************************************************************/
8438 static int32_t
8439 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
8440 {
8441 int32_t status;
8442 uint16_t word = 0;
8443
8444 status = wm_read_ich8_data(sc, index, 1, &word);
8445 if (status == 0)
8446 *data = (uint8_t)word;
8447 else
8448 *data = 0;
8449
8450 return status;
8451 }
8452
8453 /******************************************************************************
8454 * Reads a word from the NVM using the ICH8 flash access registers.
8455 *
8456 * sc - pointer to wm_hw structure
8457 * index - The starting byte index of the word to read.
8458 * data - Pointer to a word to store the value read.
8459 *****************************************************************************/
8460 static int32_t
8461 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8462 {
8463 int32_t status;
8464
8465 status = wm_read_ich8_data(sc, index, 2, data);
8466 return status;
8467 }
8468
8469 /******************************************************************************
8470 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
8471 * register.
8472 *
8473 * sc - Struct containing variables accessed by shared code
8474 * offset - offset of word in the EEPROM to read
8475 * data - word read from the EEPROM
8476 * words - number of words to read
8477 *****************************************************************************/
8478 static int
8479 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
8480 {
8481 int32_t error = 0;
8482 uint32_t flash_bank = 0;
8483 uint32_t act_offset = 0;
8484 uint32_t bank_offset = 0;
8485 uint16_t word = 0;
8486 uint16_t i = 0;
8487
8488 /*
8489 * We need to know which is the valid flash bank. In the event
8490 * that we didn't allocate eeprom_shadow_ram, we may not be
8491 * managing flash_bank. So it cannot be trusted and needs
8492 * to be updated with each read.
8493 */
8494 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
8495 if (error) {
8496 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
8497 device_xname(sc->sc_dev)));
8498 flash_bank = 0;
8499 }
8500
8501 /*
8502 * Adjust offset appropriately if we're on bank 1 - adjust for word
8503 * size
8504 */
8505 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
8506
8507 error = wm_get_swfwhw_semaphore(sc);
8508 if (error) {
8509 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8510 __func__);
8511 return error;
8512 }
8513
8514 for (i = 0; i < words; i++) {
8515 /* The NVM part needs a byte offset, hence * 2 */
8516 act_offset = bank_offset + ((offset + i) * 2);
8517 error = wm_read_ich8_word(sc, act_offset, &word);
8518 if (error) {
8519 aprint_error_dev(sc->sc_dev,
8520 "%s: failed to read NVM\n", __func__);
8521 break;
8522 }
8523 data[i] = word;
8524 }
8525
8526 wm_put_swfwhw_semaphore(sc);
8527 return error;
8528 }
8529
8530 /* Locking, NVM type detection, checksum validation and reading */
8531
8532 /*
8533 * wm_nvm_acquire:
8534 *
8535 * Perform the EEPROM handshake required on some chips.
8536 */
8537 static int
8538 wm_nvm_acquire(struct wm_softc *sc)
8539 {
8540 uint32_t reg;
8541 int x;
8542 int ret = 0;
8543
8544 	/* Flash type needs no handshake; always succeeds */
8545 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8546 return 0;
8547
8548 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8549 ret = wm_get_swfwhw_semaphore(sc);
8550 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
8551 /* This will also do wm_get_swsm_semaphore() if needed */
8552 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
8553 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
8554 ret = wm_get_swsm_semaphore(sc);
8555 }
8556
8557 if (ret) {
8558 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8559 __func__);
8560 return 1;
8561 }
8562
8563 if (sc->sc_flags & WM_F_LOCK_EECD) {
8564 reg = CSR_READ(sc, WMREG_EECD);
8565
8566 /* Request EEPROM access. */
8567 reg |= EECD_EE_REQ;
8568 CSR_WRITE(sc, WMREG_EECD, reg);
8569
8570 /* ..and wait for it to be granted. */
8571 for (x = 0; x < 1000; x++) {
8572 reg = CSR_READ(sc, WMREG_EECD);
8573 if (reg & EECD_EE_GNT)
8574 break;
8575 delay(5);
8576 }
8577 if ((reg & EECD_EE_GNT) == 0) {
8578 aprint_error_dev(sc->sc_dev,
8579 "could not acquire EEPROM GNT\n");
8580 reg &= ~EECD_EE_REQ;
8581 CSR_WRITE(sc, WMREG_EECD, reg);
8582 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8583 wm_put_swfwhw_semaphore(sc);
8584 if (sc->sc_flags & WM_F_LOCK_SWFW)
8585 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8586 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8587 wm_put_swsm_semaphore(sc);
8588 return 1;
8589 }
8590 }
8591
8592 return 0;
8593 }
8594
8595 /*
8596 * wm_nvm_release:
8597 *
8598 * Release the EEPROM mutex.
8599 */
8600 static void
8601 wm_nvm_release(struct wm_softc *sc)
8602 {
8603 uint32_t reg;
8604
8605 	/* Flash type needs no handshake; nothing to release */
8606 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8607 return;
8608
8609 if (sc->sc_flags & WM_F_LOCK_EECD) {
8610 reg = CSR_READ(sc, WMREG_EECD);
8611 reg &= ~EECD_EE_REQ;
8612 CSR_WRITE(sc, WMREG_EECD, reg);
8613 }
8614
8615 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8616 wm_put_swfwhw_semaphore(sc);
8617 if (sc->sc_flags & WM_F_LOCK_SWFW)
8618 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8619 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8620 wm_put_swsm_semaphore(sc);
8621 }
8622
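/*
 * wm_nvm_is_onboard_eeprom:
 *
 *	Return 1 if the NVM is an on-board EEPROM; return 0 when an
 *	82573/82574/82583 uses Flash instead (EECD bits 15 and 16 both
 *	set).
 */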
8623 static int
8624 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
8625 {
8626 uint32_t eecd = 0;
8627
8628 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
8629 || sc->sc_type == WM_T_82583) {
8630 eecd = CSR_READ(sc, WMREG_EECD);
8631
8632 /* Isolate bits 15 & 16 */
8633 eecd = ((eecd >> 15) & 0x03);
8634
8635 /* If both bits are set, device is Flash type */
8636 if (eecd == 0x03)
8637 return 0;
8638 }
8639 return 1;
8640 }
8641
8642 /*
8643 * wm_nvm_validate_checksum
8644 *
8645  * The sum of the first 64 (16 bit) words should equal NVM_CHECKSUM.
8646 */
8647 static int
8648 wm_nvm_validate_checksum(struct wm_softc *sc)
8649 {
8650 uint16_t checksum;
8651 uint16_t eeprom_data;
8652 #ifdef WM_DEBUG
8653 uint16_t csum_wordaddr, valid_checksum;
8654 #endif
8655 int i;
8656
8657 checksum = 0;
8658
8659 /* Don't check for I211 */
8660 if (sc->sc_type == WM_T_I211)
8661 return 0;
8662
8663 #ifdef WM_DEBUG
8664 if (sc->sc_type == WM_T_PCH_LPT) {
8665 csum_wordaddr = NVM_OFF_COMPAT;
8666 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
8667 } else {
8668 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
8669 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
8670 }
8671
8672 /* Dump EEPROM image for debug */
8673 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8674 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8675 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
8676 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
8677 if ((eeprom_data & valid_checksum) == 0) {
8678 DPRINTF(WM_DEBUG_NVM,
8679 ("%s: NVM need to be updated (%04x != %04x)\n",
8680 device_xname(sc->sc_dev), eeprom_data,
8681 valid_checksum));
8682 }
8683 }
8684
8685 if ((wm_debug & WM_DEBUG_NVM) != 0) {
8686 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
8687 for (i = 0; i < NVM_SIZE; i++) {
8688 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8689 printf("XXXX ");
8690 else
8691 printf("%04hx ", eeprom_data);
8692 if (i % 8 == 7)
8693 printf("\n");
8694 }
8695 }
8696
8697 #endif /* WM_DEBUG */
8698
8699 for (i = 0; i < NVM_SIZE; i++) {
8700 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8701 return 1;
8702 checksum += eeprom_data;
8703 }
8704
8705 if (checksum != (uint16_t) NVM_CHECKSUM) {
8706 #ifdef WM_DEBUG
8707 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
8708 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
8709 #endif
8710 }
8711
8712 return 0;
8713 }
8714
8715 /*
8716 * wm_nvm_read:
8717 *
8718 * Read data from the serial EEPROM.
8719 */
8720 static int
8721 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8722 {
8723 int rv;
8724
8725 if (sc->sc_flags & WM_F_EEPROM_INVALID)
8726 return 1;
8727
8728 if (wm_nvm_acquire(sc))
8729 return 1;
8730
8731 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8732 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8733 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
8734 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
8735 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
8736 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
8737 else if (sc->sc_flags & WM_F_EEPROM_SPI)
8738 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
8739 else
8740 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
8741
8742 wm_nvm_release(sc);
8743 return rv;
8744 }
8745
8746 /*
8747 * Hardware semaphores.
8748  * Very complex...
8749 */
8750
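/*
 * wm_get_swsm_semaphore:
 *
 *	Acquire the software (SMBI) and software/firmware (SWESMBI)
 *	semaphore bits in the SWSM register.  Returns 0 on success,
 *	1 on timeout.
 */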
8751 static int
8752 wm_get_swsm_semaphore(struct wm_softc *sc)
8753 {
8754 int32_t timeout;
8755 uint32_t swsm;
8756
8757 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8758 /* Get the SW semaphore. */
8759 timeout = sc->sc_nvm_wordsize + 1;
8760 while (timeout) {
8761 swsm = CSR_READ(sc, WMREG_SWSM);
8762
8763 if ((swsm & SWSM_SMBI) == 0)
8764 break;
8765
8766 delay(50);
8767 timeout--;
8768 }
8769
8770 if (timeout == 0) {
8771 aprint_error_dev(sc->sc_dev,
8772 "could not acquire SWSM SMBI\n");
8773 return 1;
8774 }
8775 }
8776
8777 /* Get the FW semaphore. */
8778 timeout = sc->sc_nvm_wordsize + 1;
8779 while (timeout) {
8780 swsm = CSR_READ(sc, WMREG_SWSM);
8781 swsm |= SWSM_SWESMBI;
8782 CSR_WRITE(sc, WMREG_SWSM, swsm);
8783 /* If we managed to set the bit we got the semaphore. */
8784 swsm = CSR_READ(sc, WMREG_SWSM);
8785 if (swsm & SWSM_SWESMBI)
8786 break;
8787
8788 delay(50);
8789 timeout--;
8790 }
8791
8792 if (timeout == 0) {
8793 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
8794 /* Release semaphores */
8795 wm_put_swsm_semaphore(sc);
8796 return 1;
8797 }
8798 return 0;
8799 }
8800
8801 static void
8802 wm_put_swsm_semaphore(struct wm_softc *sc)
8803 {
8804 uint32_t swsm;
8805
8806 swsm = CSR_READ(sc, WMREG_SWSM);
8807 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
8808 CSR_WRITE(sc, WMREG_SWSM, swsm);
8809 }
8810
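/*
 * wm_get_swfw_semaphore:
 *
 *	Acquire the resources selected by "mask" in the SW_FW_SYNC
 *	register, taking the SWSM semaphore around the read-modify-write
 *	when required.  Returns 0 on success, 1 on timeout.
 */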
8811 static int
8812 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8813 {
8814 uint32_t swfw_sync;
8815 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
8816 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
8817 	int timeout;
8818
8819 for (timeout = 0; timeout < 200; timeout++) {
8820 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8821 if (wm_get_swsm_semaphore(sc)) {
8822 aprint_error_dev(sc->sc_dev,
8823 "%s: failed to get semaphore\n",
8824 __func__);
8825 return 1;
8826 }
8827 }
8828 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8829 if ((swfw_sync & (swmask | fwmask)) == 0) {
8830 swfw_sync |= swmask;
8831 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8832 if (sc->sc_flags & WM_F_LOCK_SWSM)
8833 wm_put_swsm_semaphore(sc);
8834 return 0;
8835 }
8836 if (sc->sc_flags & WM_F_LOCK_SWSM)
8837 wm_put_swsm_semaphore(sc);
8838 delay(5000);
8839 }
8840 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
8841 device_xname(sc->sc_dev), mask, swfw_sync);
8842 return 1;
8843 }
8844
8845 static void
8846 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8847 {
8848 uint32_t swfw_sync;
8849
8850 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8851 while (wm_get_swsm_semaphore(sc) != 0)
8852 continue;
8853 }
8854 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8855 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
8856 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8857 if (sc->sc_flags & WM_F_LOCK_SWSM)
8858 wm_put_swsm_semaphore(sc);
8859 }
8860
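/*
 * wm_get_swfwhw_semaphore:
 *
 *	Acquire the software flag in the EXTCNFCTR register.  Returns
 *	0 on success, 1 on timeout.
 */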
8861 static int
8862 wm_get_swfwhw_semaphore(struct wm_softc *sc)
8863 {
8864 uint32_t ext_ctrl;
8865 	int timeout;
8866
8867 for (timeout = 0; timeout < 200; timeout++) {
8868 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8869 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
8870 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8871
8872 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8873 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
8874 return 0;
8875 delay(5000);
8876 }
8877 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
8878 device_xname(sc->sc_dev), ext_ctrl);
8879 return 1;
8880 }
8881
8882 static void
8883 wm_put_swfwhw_semaphore(struct wm_softc *sc)
8884 {
8885 uint32_t ext_ctrl;
8886 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8887 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
8888 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8889 }
8890
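/*
 * wm_get_hw_semaphore_82573:
 *
 *	Acquire MDIO ownership through the EXTCNFCTR register.
 *	Returns 0 on success, -1 on timeout.
 */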
8891 static int
8892 wm_get_hw_semaphore_82573(struct wm_softc *sc)
8893 {
8894 int i = 0;
8895 uint32_t reg;
8896
8897 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8898 do {
8899 CSR_WRITE(sc, WMREG_EXTCNFCTR,
8900 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
8901 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8902 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
8903 break;
8904 delay(2*1000);
8905 i++;
8906 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
8907
8908 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
8909 wm_put_hw_semaphore_82573(sc);
8910 log(LOG_ERR, "%s: Driver can't access the PHY\n",
8911 device_xname(sc->sc_dev));
8912 return -1;
8913 }
8914
8915 return 0;
8916 }
8917
8918 static void
8919 wm_put_hw_semaphore_82573(struct wm_softc *sc)
8920 {
8921 uint32_t reg;
8922
8923 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8924 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
8925 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8926 }
8927
8928 /*
8929 * Management mode and power management related subroutines.
8930 * BMC, AMT, suspend/resume and EEE.
8931 */
8932
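/*
 * wm_check_mng_mode:
 *
 *	Return non-zero if management firmware is active; dispatches
 *	to the chip specific check.
 */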
8933 static int
8934 wm_check_mng_mode(struct wm_softc *sc)
8935 {
8936 int rv;
8937
8938 switch (sc->sc_type) {
8939 case WM_T_ICH8:
8940 case WM_T_ICH9:
8941 case WM_T_ICH10:
8942 case WM_T_PCH:
8943 case WM_T_PCH2:
8944 case WM_T_PCH_LPT:
8945 rv = wm_check_mng_mode_ich8lan(sc);
8946 break;
8947 case WM_T_82574:
8948 case WM_T_82583:
8949 rv = wm_check_mng_mode_82574(sc);
8950 break;
8951 case WM_T_82571:
8952 case WM_T_82572:
8953 case WM_T_82573:
8954 case WM_T_80003:
8955 rv = wm_check_mng_mode_generic(sc);
8956 break;
8957 default:
8958 		/* nothing to do */
8959 rv = 0;
8960 break;
8961 }
8962
8963 return rv;
8964 }
8965
8966 static int
8967 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
8968 {
8969 uint32_t fwsm;
8970
8971 fwsm = CSR_READ(sc, WMREG_FWSM);
8972
8973 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
8974 return 1;
8975
8976 return 0;
8977 }
8978
8979 static int
8980 wm_check_mng_mode_82574(struct wm_softc *sc)
8981 {
8982 uint16_t data;
8983
8984 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
8985
8986 if ((data & NVM_CFG2_MNGM_MASK) != 0)
8987 return 1;
8988
8989 return 0;
8990 }
8991
8992 static int
8993 wm_check_mng_mode_generic(struct wm_softc *sc)
8994 {
8995 uint32_t fwsm;
8996
8997 fwsm = CSR_READ(sc, WMREG_FWSM);
8998
8999 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
9000 return 1;
9001
9002 return 0;
9003 }
9004
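/*
 * wm_enable_mng_pass_thru:
 *
 *	Return 1 if management packets should be passed through to the
 *	host: ASF firmware must be present, TCO reception enabled and
 *	the firmware in a pass-through capable mode.
 */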
9005 static int
9006 wm_enable_mng_pass_thru(struct wm_softc *sc)
9007 {
9008 uint32_t manc, fwsm, factps;
9009
9010 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
9011 return 0;
9012
9013 manc = CSR_READ(sc, WMREG_MANC);
9014
9015 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
9016 device_xname(sc->sc_dev), manc));
9017 if ((manc & MANC_RECV_TCO_EN) == 0)
9018 return 0;
9019
9020 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
9021 fwsm = CSR_READ(sc, WMREG_FWSM);
9022 factps = CSR_READ(sc, WMREG_FACTPS);
9023 if (((factps & FACTPS_MNGCG) == 0)
9024 && ((fwsm & FWSM_MODE_MASK)
9025 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
9026 return 1;
9027 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
9028 uint16_t data;
9029
9030 factps = CSR_READ(sc, WMREG_FACTPS);
9031 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
9032 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
9033 device_xname(sc->sc_dev), factps, data));
9034 if (((factps & FACTPS_MNGCG) == 0)
9035 && ((data & NVM_CFG2_MNGM_MASK)
9036 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
9037 return 1;
9038 } else if (((manc & MANC_SMBUS_EN) != 0)
9039 && ((manc & MANC_ASF_EN) == 0))
9040 return 1;
9041
9042 return 0;
9043 }
9044
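/*
 * wm_check_reset_block:
 *
 *	Return 0 if a PHY reset is currently allowed, -1 if the
 *	firmware blocks it.
 */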
9045 static int
9046 wm_check_reset_block(struct wm_softc *sc)
9047 {
9048 uint32_t reg;
9049
9050 switch (sc->sc_type) {
9051 case WM_T_ICH8:
9052 case WM_T_ICH9:
9053 case WM_T_ICH10:
9054 case WM_T_PCH:
9055 case WM_T_PCH2:
9056 case WM_T_PCH_LPT:
9057 reg = CSR_READ(sc, WMREG_FWSM);
9058 if ((reg & FWSM_RSPCIPHY) != 0)
9059 return 0;
9060 else
9061 return -1;
9062 break;
9063 case WM_T_82571:
9064 case WM_T_82572:
9065 case WM_T_82573:
9066 case WM_T_82574:
9067 case WM_T_82583:
9068 case WM_T_80003:
9069 reg = CSR_READ(sc, WMREG_MANC);
9070 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
9071 return -1;
9072 else
9073 return 0;
9074 break;
9075 default:
9076 /* no problem */
9077 break;
9078 }
9079
9080 return 0;
9081 }
9082
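/*
 * wm_get_hw_control:
 *
 *	Tell the firmware that the driver has taken over the device by
 *	setting the DRV_LOAD bit in SWSM (82573) or CTRL_EXT (others).
 */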
9083 static void
9084 wm_get_hw_control(struct wm_softc *sc)
9085 {
9086 uint32_t reg;
9087
9088 switch (sc->sc_type) {
9089 case WM_T_82573:
9090 reg = CSR_READ(sc, WMREG_SWSM);
9091 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
9092 break;
9093 case WM_T_82571:
9094 case WM_T_82572:
9095 case WM_T_82574:
9096 case WM_T_82583:
9097 case WM_T_80003:
9098 case WM_T_ICH8:
9099 case WM_T_ICH9:
9100 case WM_T_ICH10:
9101 case WM_T_PCH:
9102 case WM_T_PCH2:
9103 case WM_T_PCH_LPT:
9104 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9105 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
9106 break;
9107 default:
9108 break;
9109 }
9110 }
9111
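/*
 * wm_release_hw_control:
 *
 *	Tell the firmware that the driver has released the device by
 *	clearing the DRV_LOAD bit again.
 */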
9112 static void
9113 wm_release_hw_control(struct wm_softc *sc)
9114 {
9115 uint32_t reg;
9116
9117 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
9118 return;
9119
9120 if (sc->sc_type == WM_T_82573) {
9121 reg = CSR_READ(sc, WMREG_SWSM);
9122 reg &= ~SWSM_DRV_LOAD;
9123 		CSR_WRITE(sc, WMREG_SWSM, reg);
9124 } else {
9125 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9126 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
9127 }
9128 }
9129
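/*
 * wm_gate_hw_phy_config_ich8lan:
 *
 *	Gate (on != 0) or ungate the automatic PHY configuration done
 *	by the hardware, via the EXTCNFCTR register.
 */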
9130 static void
9131 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
9132 {
9133 uint32_t reg;
9134
9135 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
9136
9137 if (on != 0)
9138 reg |= EXTCNFCTR_GATE_PHY_CFG;
9139 else
9140 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
9141
9142 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
9143 }
9144
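/*
 * wm_smbustopci:
 *
 *	Switch the PHY's interface from SMBus to PCIe by toggling the
 *	LANPHYPC value override, when no valid firmware is running and
 *	a PHY reset isn't blocked.  On the non-managed 82579 the
 *	automatic PHY configuration is gated afterwards.
 */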
9145 static void
9146 wm_smbustopci(struct wm_softc *sc)
9147 {
9148 uint32_t fwsm;
9149
9150 fwsm = CSR_READ(sc, WMREG_FWSM);
9151 if (((fwsm & FWSM_FW_VALID) == 0)
9152 && ((wm_check_reset_block(sc) == 0))) {
9153 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
9154 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
9155 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9156 CSR_WRITE_FLUSH(sc);
9157 delay(10);
9158 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
9159 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9160 CSR_WRITE_FLUSH(sc);
9161 delay(50*1000);
9162
9163 /*
9164 * Gate automatic PHY configuration by hardware on non-managed
9165 * 82579
9166 */
9167 if (sc->sc_type == WM_T_PCH2)
9168 wm_gate_hw_phy_config_ich8lan(sc, 1);
9169 }
9170 }
9171
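/*
 * wm_init_manageability:
 *
 *	Keep ARP packets visible to the host and, on 82571 and newer,
 *	forward management packets (ports 623/624) to the host.
 */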
9172 static void
9173 wm_init_manageability(struct wm_softc *sc)
9174 {
9175
9176 if (sc->sc_flags & WM_F_HAS_MANAGE) {
9177 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
9178 uint32_t manc = CSR_READ(sc, WMREG_MANC);
9179
9180 /* Disable hardware interception of ARP */
9181 manc &= ~MANC_ARP_EN;
9182
9183 /* Enable receiving management packets to the host */
9184 if (sc->sc_type >= WM_T_82571) {
9185 manc |= MANC_EN_MNG2HOST;
9186 manc2h |= MANC2H_PORT_623| MANC2H_PORT_624;
9187 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
9188
9189 }
9190
9191 CSR_WRITE(sc, WMREG_MANC, manc);
9192 }
9193 }
9194
9195 static void
9196 wm_release_manageability(struct wm_softc *sc)
9197 {
9198
9199 if (sc->sc_flags & WM_F_HAS_MANAGE) {
9200 uint32_t manc = CSR_READ(sc, WMREG_MANC);
9201
9202 manc |= MANC_ARP_EN;
9203 if (sc->sc_type >= WM_T_82571)
9204 manc &= ~MANC_EN_MNG2HOST;
9205
9206 CSR_WRITE(sc, WMREG_MANC, manc);
9207 }
9208 }
9209
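/*
 * wm_get_wakeup:
 *
 *	Set the HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES and
 *	HAS_MANAGE flags according to the chip type and firmware state.
 */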
9210 static void
9211 wm_get_wakeup(struct wm_softc *sc)
9212 {
9213
9214 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
9215 switch (sc->sc_type) {
9216 case WM_T_82573:
9217 case WM_T_82583:
9218 sc->sc_flags |= WM_F_HAS_AMT;
9219 /* FALLTHROUGH */
9220 case WM_T_80003:
9221 case WM_T_82541:
9222 case WM_T_82547:
9223 case WM_T_82571:
9224 case WM_T_82572:
9225 case WM_T_82574:
9226 case WM_T_82575:
9227 case WM_T_82576:
9228 case WM_T_82580:
9229 case WM_T_I350:
9230 case WM_T_I354:
9231 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
9232 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
9233 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9234 break;
9235 case WM_T_ICH8:
9236 case WM_T_ICH9:
9237 case WM_T_ICH10:
9238 case WM_T_PCH:
9239 case WM_T_PCH2:
9240 case WM_T_PCH_LPT:
9241 sc->sc_flags |= WM_F_HAS_AMT;
9242 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9243 break;
9244 default:
9245 break;
9246 }
9247
9248 /* 1: HAS_MANAGE */
9249 if (wm_enable_mng_pass_thru(sc) != 0)
9250 sc->sc_flags |= WM_F_HAS_MANAGE;
9251
9252 #ifdef WM_DEBUG
9253 printf("\n");
9254 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
9255 printf("HAS_AMT,");
9256 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
9257 printf("ARC_SUBSYS_VALID,");
9258 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
9259 printf("ASF_FIRMWARE_PRES,");
9260 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
9261 printf("HAS_MANAGE,");
9262 printf("\n");
9263 #endif
9264 /*
9265 	 * Note that the WOL flags are set later, after the EEPROM-related
9266 	 * setup and reset have been done.
9267 */
9268 }
9269
9270 #ifdef WM_WOL
9271 /* WOL in the newer chipset interfaces (pchlan) */
9272 static void
9273 wm_enable_phy_wakeup(struct wm_softc *sc)
9274 {
9275 #if 0
9276 uint16_t preg;
9277
9278 /* Copy MAC RARs to PHY RARs */
9279
9280 /* Copy MAC MTA to PHY MTA */
9281
9282 /* Configure PHY Rx Control register */
9283
9284 /* Enable PHY wakeup in MAC register */
9285
9286 /* Configure and enable PHY wakeup in PHY registers */
9287
9288 /* Activate PHY wakeup */
9289
9290 /* XXX */
9291 #endif
9292 }
9293
9294 /* Power down workaround on D3 */
9295 static void
9296 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
9297 {
9298 uint32_t reg;
9299 int i;
9300
9301 for (i = 0; i < 2; i++) {
9302 /* Disable link */
9303 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9304 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9305 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9306
9307 /*
9308 * Call gig speed drop workaround on Gig disable before
9309 * accessing any PHY registers
9310 */
9311 if (sc->sc_type == WM_T_ICH8)
9312 wm_gig_downshift_workaround_ich8lan(sc);
9313
9314 /* Write VR power-down enable */
9315 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9316 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9317 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
9318 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
9319
9320 /* Read it back and test */
9321 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9322 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9323 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
9324 break;
9325
9326 /* Issue PHY reset and repeat at most one more time */
9327 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9328 }
9329 }
9330
9331 static void
9332 wm_enable_wakeup(struct wm_softc *sc)
9333 {
9334 uint32_t reg, pmreg;
9335 pcireg_t pmode;
9336
9337 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
9338 &pmreg, NULL) == 0)
9339 return;
9340
9341 /* Advertise the wakeup capability */
9342 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
9343 | CTRL_SWDPIN(3));
9344 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
9345
9346 /* ICH workaround */
9347 switch (sc->sc_type) {
9348 case WM_T_ICH8:
9349 case WM_T_ICH9:
9350 case WM_T_ICH10:
9351 case WM_T_PCH:
9352 case WM_T_PCH2:
9353 case WM_T_PCH_LPT:
9354 /* Disable gig during WOL */
9355 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9356 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
9357 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9358 if (sc->sc_type == WM_T_PCH)
9359 wm_gmii_reset(sc);
9360
9361 /* Power down workaround */
9362 if (sc->sc_phytype == WMPHY_82577) {
9363 struct mii_softc *child;
9364
9365 /* Assume that the PHY is copper */
9366 child = LIST_FIRST(&sc->sc_mii.mii_phys);
9367 if (child->mii_mpd_rev <= 2)
9368 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
9369 (768 << 5) | 25, 0x0444); /* magic num */
9370 }
9371 break;
9372 default:
9373 break;
9374 }
9375
9376 /* Keep the laser running on fiber adapters */
9377 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
9378 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
9379 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9380 reg |= CTRL_EXT_SWDPIN(3);
9381 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9382 }
9383
9384 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
9385 #if 0 /* for the multicast packet */
9386 reg |= WUFC_MC;
9387 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
9388 #endif
9389
9390 if (sc->sc_type == WM_T_PCH) {
9391 wm_enable_phy_wakeup(sc);
9392 } else {
9393 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
9394 CSR_WRITE(sc, WMREG_WUFC, reg);
9395 }
9396
9397 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9398 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9399 || (sc->sc_type == WM_T_PCH2))
9400 && (sc->sc_phytype == WMPHY_IGP_3))
9401 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
9402
9403 /* Request PME */
9404 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
9405 #if 0
9406 /* Disable WOL */
9407 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
9408 #else
9409 /* For WOL */
9410 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
9411 #endif
9412 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
9413 }
9414 #endif /* WM_WOL */
9415
9416 /* EEE */
9417
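/*
 * wm_set_eee_i350:
 *
 *	Enable or disable Energy Efficient Ethernet on I350 according
 *	to the WM_F_EEE flag, via the IPCNFG and EEER registers.
 */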
9418 static void
9419 wm_set_eee_i350(struct wm_softc *sc)
9420 {
9421 uint32_t ipcnfg, eeer;
9422
9423 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
9424 eeer = CSR_READ(sc, WMREG_EEER);
9425
9426 if ((sc->sc_flags & WM_F_EEE) != 0) {
9427 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9428 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
9429 | EEER_LPI_FC);
9430 } else {
9431 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9432 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
9433 | EEER_LPI_FC);
9434 }
9435
9436 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
9437 CSR_WRITE(sc, WMREG_EEER, eeer);
9438 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
9439 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
9440 }
9441
9442 /*
9443 * Workarounds (mainly PHY related).
9444  * Basically, PHY workarounds live in the PHY drivers.
9445 */
9446
9447 /* Work-around for 82566 Kumeran PCS lock loss */
9448 static void
9449 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
9450 {
9451 int miistatus, active, i;
9452 int reg;
9453
9454 miistatus = sc->sc_mii.mii_media_status;
9455
9456 /* If the link is not up, do nothing */
9457 	if ((miistatus & IFM_ACTIVE) == 0)
9458 return;
9459
9460 active = sc->sc_mii.mii_media_active;
9461
9462 /* Nothing to do if the link is other than 1Gbps */
9463 if (IFM_SUBTYPE(active) != IFM_1000_T)
9464 return;
9465
9466 for (i = 0; i < 10; i++) {
9467 /* read twice */
9468 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9469 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9470 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
9471 goto out; /* GOOD! */
9472
9473 /* Reset the PHY */
9474 wm_gmii_reset(sc);
9475 delay(5*1000);
9476 }
9477
9478 /* Disable GigE link negotiation */
9479 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9480 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9481 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9482
9483 /*
9484 * Call gig speed drop workaround on Gig disable before accessing
9485 * any PHY registers.
9486 */
9487 wm_gig_downshift_workaround_ich8lan(sc);
9488
9489 out:
9490 return;
9491 }
9492
9493 /* WOL from S5 stops working */
9494 static void
9495 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
9496 {
9497 uint16_t kmrn_reg;
9498
9499 /* Only for igp3 */
9500 if (sc->sc_phytype == WMPHY_IGP_3) {
9501 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
9502 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
9503 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9504 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
9505 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9506 }
9507 }
9508
9509 /*
9510 * Workaround for pch's PHYs
9511 * XXX should be moved to new PHY driver?
9512 */
9513 static void
9514 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
9515 {
9516 if (sc->sc_phytype == WMPHY_82577)
9517 wm_set_mdio_slow_mode_hv(sc);
9518
9519 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
9520
9521 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
9522
9523 /* 82578 */
9524 if (sc->sc_phytype == WMPHY_82578) {
9525 /* PCH rev. < 3 */
9526 if (sc->sc_rev < 3) {
9527 /* XXX 6 bit shift? Why? Is it page2? */
9528 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
9529 0x66c0);
9530 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
9531 0xffff);
9532 }
9533
9534 /* XXX phy rev. < 2 */
9535 }
9536
9537 /* Select page 0 */
9538
9539 /* XXX acquire semaphore */
9540 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
9541 /* XXX release semaphore */
9542
9543 /*
9544 	 * Configure the K1 Si workaround during PHY reset, assuming there
9545 	 * is a link, so that K1 is disabled while the link runs at 1Gbps.
9546 */
9547 wm_k1_gig_workaround_hv(sc, 1);
9548 }
9549
9550 static void
9551 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
9552 {
9553
9554 wm_set_mdio_slow_mode_hv(sc);
9555 }
9556
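/*
 * wm_k1_gig_workaround_hv:
 *
 *	Part of the K1 workaround for 82577/82578: apply the link stall
 *	fix and keep K1 disabled while a link is up, then write the K1
 *	configuration.
 */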
9557 static void
9558 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
9559 {
9560 int k1_enable = sc->sc_nvm_k1_enabled;
9561
9562 /* XXX acquire semaphore */
9563
9564 if (link) {
9565 k1_enable = 0;
9566
9567 /* Link stall fix for link up */
9568 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
9569 } else {
9570 /* Link stall fix for link down */
9571 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
9572 }
9573
9574 wm_configure_k1_ich8lan(sc, k1_enable);
9575
9576 /* XXX release semaphore */
9577 }
9578
9579 static void
9580 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
9581 {
9582 uint32_t reg;
9583
9584 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
9585 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
9586 reg | HV_KMRN_MDIO_SLOW);
9587 }
9588
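/*
 * wm_configure_k1_ich8lan:
 *
 *	Enable or disable the K1 power state in the Kumeran interface,
 *	briefly forcing the MAC speed settings so the change takes
 *	effect.
 */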
9589 static void
9590 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
9591 {
9592 uint32_t ctrl, ctrl_ext, tmp;
9593 uint16_t kmrn_reg;
9594
9595 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
9596
9597 if (k1_enable)
9598 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
9599 else
9600 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
9601
9602 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
9603
9604 delay(20);
9605
9606 ctrl = CSR_READ(sc, WMREG_CTRL);
9607 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9608
9609 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
9610 tmp |= CTRL_FRCSPD;
9611
9612 CSR_WRITE(sc, WMREG_CTRL, tmp);
9613 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
9614 CSR_WRITE_FLUSH(sc);
9615 delay(20);
9616
9617 CSR_WRITE(sc, WMREG_CTRL, ctrl);
9618 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9619 CSR_WRITE_FLUSH(sc);
9620 delay(20);
9621 }
9622
9623 /* special case - for 82575 - need to do manual init ... */
9624 static void
9625 wm_reset_init_script_82575(struct wm_softc *sc)
9626 {
9627 /*
9628 * remark: this is untested code - we have no board without EEPROM
9629 	 * same setup as mentioned in the FreeBSD driver for the i82575
9630 */
9631
9632 /* SerDes configuration via SERDESCTRL */
9633 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
9634 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
9635 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
9636 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
9637
9638 /* CCM configuration via CCMCTL register */
9639 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
9640 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
9641
9642 /* PCIe lanes configuration */
9643 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
9644 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
9645 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
9646 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
9647
9648 /* PCIe PLL Configuration */
9649 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
9650 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
9651 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
9652 }
9653