/*	$NetBSD: if_wm.c,v 1.312 2015/02/15 21:32:33 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- EEE (Energy Efficient Ethernet)
 *	- MSI/MSI-X
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.312 2015/02/15 21:32:33 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define	WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
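
/*
 * Added commentary (not in the original source): because WM_NTXDESC(sc)
 * and WM_TXQUEUELEN(sc) are powers of two, WM_NEXTTX() and WM_NEXTTXS()
 * can wrap a ring index with a cheap AND-mask instead of a modulo.
 * For example, with WM_NTXDESC(sc) == 4096, WM_NEXTTX(sc, 4095) is
 * (4096 & 4095) == 0, returning to the start of the ring.
 */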

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define	WM_TX_LOCK(_sc)	  if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define	WM_TX_UNLOCK(_sc) if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define	WM_TX_LOCKED(_sc) (!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define	WM_RX_LOCK(_sc)	  if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define	WM_RX_UNLOCK(_sc) if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define	WM_RX_LOCKED(_sc) (!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define	WM_BOTH_LOCK(_sc)   do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define	WM_BOTH_UNLOCK(_sc) do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define	WM_BOTH_LOCKED(_sc) (WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
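
/*
 * Added commentary (an assumption drawn from the macros above, not from
 * the original source): WM_BOTH_LOCK() always acquires the Tx lock before
 * the Rx lock, and WM_BOTH_UNLOCK() releases them in the reverse order.
 * Using these macros consistently keeps the lock ordering uniform across
 * the driver, which is what avoids lock-order deadlocks between the Tx
 * and Rx paths.
 */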

#ifdef WM_MPSAFE
#define	CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define	CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
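
/*
 * Added commentary (not in the original source): sc_rxtailp is the usual
 * "pointer to the tail pointer" idiom.  WM_RXCHAIN_RESET() aims it at
 * sc_rxhead, so the first WM_RXCHAIN_LINK() stores its mbuf directly
 * into sc_rxhead; every later link stores into the previous mbuf's
 * m_next.  Appending is therefore O(1) with no special case for an
 * empty chain.
 */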

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
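
/*
 * Illustrative example (not in the original source): with
 * WM_NTXDESC(sc) == 4096, WM_CDTXSYNC(sc, 4094, 4, ops) performs two
 * bus_dmamap_sync() calls: one covering descriptors 4094-4095 at the
 * end of the ring, and a second covering descriptors 0-1 at the start.
 */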

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_mediainit(struct wm_softc *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define	WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
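
/*
 * Added commentary (not in the original source): the pair of accesses in
 * wm_io_read()/wm_io_write() implements the chip's indirect I/O window:
 * the register offset is first written to the address register at offset
 * 0 of the I/O BAR, and the data is then read from or written to the
 * data register at offset 4.
 */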

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
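
/*
 * Illustrative example (not in the original source): with a 64-bit
 * bus_addr_t, wm_set_dma_addr(wa, 0x123456789abc) stores
 * htole32(0x56789abc) in wa->wa_low and htole32(0x1234) in wa->wa_high;
 * with a 32-bit bus_addr_t the high word is simply 0.
 */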

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
			 * That's not a problem, because newer chips don't
			 * have this bug.
			 *
			 * The i8254x doesn't apparently respond when the
			 * I/O BAR is 0, which looks somewhat like it's not
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
#ifdef WM_MPSAFE
	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
#endif
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1538 /*
1539 * CSA (Communication Streaming Architecture) is about as
1540 * fast as a 32-bit 66MHz PCI bus.
1541 */
1542 sc->sc_flags |= WM_F_CSA;
1543 sc->sc_bus_speed = 66;
1544 aprint_verbose_dev(sc->sc_dev,
1545 "Communication Streaming Architecture\n");
1546 if (sc->sc_type == WM_T_82547) {
1547 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1548 callout_setfunc(&sc->sc_txfifo_ch,
1549 wm_82547_txfifo_stall, sc);
1550 aprint_verbose_dev(sc->sc_dev,
1551 "using 82547 Tx FIFO stall work-around\n");
1552 }
1553 } else if (sc->sc_type >= WM_T_82571) {
1554 sc->sc_flags |= WM_F_PCIE;
1555 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1556 && (sc->sc_type != WM_T_ICH10)
1557 && (sc->sc_type != WM_T_PCH)
1558 && (sc->sc_type != WM_T_PCH2)
1559 && (sc->sc_type != WM_T_PCH_LPT)) {
1560 /* ICH* and PCH* have no PCIe capability registers */
1561 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1562 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1563 NULL) == 0)
1564 aprint_error_dev(sc->sc_dev,
1565 "unable to find PCIe capability\n");
1566 }
1567 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1568 } else {
1569 reg = CSR_READ(sc, WMREG_STATUS);
1570 if (reg & STATUS_BUS64)
1571 sc->sc_flags |= WM_F_BUS64;
1572 if ((reg & STATUS_PCIX_MODE) != 0) {
1573 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1574
1575 sc->sc_flags |= WM_F_PCIX;
1576 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1577 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1578 aprint_error_dev(sc->sc_dev,
1579 "unable to find PCIX capability\n");
1580 else if (sc->sc_type != WM_T_82545_3 &&
1581 sc->sc_type != WM_T_82546_3) {
1582 /*
1583 * Work around a problem caused by the BIOS
1584 * setting the max memory read byte count
1585 * incorrectly.
1586 */
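/*
 * MMRBC is encoded as 512 << n bytes, so n = 0..3 maps to
 * 512, 1024, 2048 and 4096 bytes; that is what the
 * aprint_verbose() below prints.
 */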
1587 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1588 sc->sc_pcixe_capoff + PCIX_CMD);
1589 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1590 sc->sc_pcixe_capoff + PCIX_STATUS);
1591
1592 bytecnt =
1593 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1594 PCIX_CMD_BYTECNT_SHIFT;
1595 maxb =
1596 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1597 PCIX_STATUS_MAXB_SHIFT;
1598 if (bytecnt > maxb) {
1599 aprint_verbose_dev(sc->sc_dev,
1600 "resetting PCI-X MMRBC: %d -> %d\n",
1601 512 << bytecnt, 512 << maxb);
1602 pcix_cmd = (pcix_cmd &
1603 ~PCIX_CMD_BYTECNT_MASK) |
1604 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1605 pci_conf_write(pa->pa_pc, pa->pa_tag,
1606 sc->sc_pcixe_capoff + PCIX_CMD,
1607 pcix_cmd);
1608 }
1609 }
1610 }
1611 /*
1612 * The quad port adapter is special; it has a PCIX-PCIX
1613 * bridge on the board, and can run the secondary bus at
1614 * a higher speed.
1615 */
1616 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1617 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1618 : 66;
1619 } else if (sc->sc_flags & WM_F_PCIX) {
1620 switch (reg & STATUS_PCIXSPD_MASK) {
1621 case STATUS_PCIXSPD_50_66:
1622 sc->sc_bus_speed = 66;
1623 break;
1624 case STATUS_PCIXSPD_66_100:
1625 sc->sc_bus_speed = 100;
1626 break;
1627 case STATUS_PCIXSPD_100_133:
1628 sc->sc_bus_speed = 133;
1629 break;
1630 default:
1631 aprint_error_dev(sc->sc_dev,
1632 "unknown PCIXSPD %d; assuming 66MHz\n",
1633 reg & STATUS_PCIXSPD_MASK);
1634 sc->sc_bus_speed = 66;
1635 break;
1636 }
1637 } else
1638 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1639 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1640 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1641 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1642 }
1643
1644 /*
1645 * Allocate the control data structures, and create and load the
1646 * DMA map for it.
1647 *
1648 * NOTE: All Tx descriptors must be in the same 4G segment of
1649 * memory. So must Rx descriptors. We simplify by allocating
1650 * both sets within the same 4G segment.
1651 */
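/*
 * The 4G constraint is enforced by the boundary argument
 * (0x100000000) passed to bus_dmamem_alloc() below, which keeps
 * the allocated segment from crossing a 4GB address boundary.
 */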
1652 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1653 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1654 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1655 sizeof(struct wm_control_data_82542) :
1656 sizeof(struct wm_control_data_82544);
1657 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1658 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1659 &sc->sc_cd_rseg, 0)) != 0) {
1660 aprint_error_dev(sc->sc_dev,
1661 "unable to allocate control data, error = %d\n",
1662 error);
1663 goto fail_0;
1664 }
1665
1666 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1667 sc->sc_cd_rseg, sc->sc_cd_size,
1668 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1669 aprint_error_dev(sc->sc_dev,
1670 "unable to map control data, error = %d\n", error);
1671 goto fail_1;
1672 }
1673
1674 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1675 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1676 aprint_error_dev(sc->sc_dev,
1677 "unable to create control data DMA map, error = %d\n",
1678 error);
1679 goto fail_2;
1680 }
1681
1682 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1683 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1684 aprint_error_dev(sc->sc_dev,
1685 "unable to load control data DMA map, error = %d\n",
1686 error);
1687 goto fail_3;
1688 }
1689
1690 /* Create the transmit buffer DMA maps. */
1691 WM_TXQUEUELEN(sc) =
1692 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1693 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1694 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1695 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1696 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1697 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1698 aprint_error_dev(sc->sc_dev,
1699 "unable to create Tx DMA map %d, error = %d\n",
1700 i, error);
1701 goto fail_4;
1702 }
1703 }
1704
1705 /* Create the receive buffer DMA maps. */
1706 for (i = 0; i < WM_NRXDESC; i++) {
1707 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1708 MCLBYTES, 0, 0,
1709 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1710 aprint_error_dev(sc->sc_dev,
1711 "unable to create Rx DMA map %d error = %d\n",
1712 i, error);
1713 goto fail_5;
1714 }
1715 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1716 }
1717
1718 /* Clear the stat counters of interest (they are read-to-clear) */
1719 CSR_READ(sc, WMREG_COLC);
1720 CSR_READ(sc, WMREG_RXERRC);
1721
1722 /* Move PHY control from SMBus to PCIe */
1723 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1724 || (sc->sc_type == WM_T_PCH_LPT))
1725 wm_smbustopci(sc);
1726
1727 /* Reset the chip to a known state. */
1728 wm_reset(sc);
1729
1730 /* Get some information about the EEPROM. */
1731 switch (sc->sc_type) {
1732 case WM_T_82542_2_0:
1733 case WM_T_82542_2_1:
1734 case WM_T_82543:
1735 case WM_T_82544:
1736 /* Microwire */
1737 sc->sc_nvm_wordsize = 64;
1738 sc->sc_nvm_addrbits = 6;
1739 break;
1740 case WM_T_82540:
1741 case WM_T_82545:
1742 case WM_T_82545_3:
1743 case WM_T_82546:
1744 case WM_T_82546_3:
1745 /* Microwire */
1746 reg = CSR_READ(sc, WMREG_EECD);
1747 if (reg & EECD_EE_SIZE) {
1748 sc->sc_nvm_wordsize = 256;
1749 sc->sc_nvm_addrbits = 8;
1750 } else {
1751 sc->sc_nvm_wordsize = 64;
1752 sc->sc_nvm_addrbits = 6;
1753 }
1754 sc->sc_flags |= WM_F_LOCK_EECD;
1755 break;
1756 case WM_T_82541:
1757 case WM_T_82541_2:
1758 case WM_T_82547:
1759 case WM_T_82547_2:
1760 reg = CSR_READ(sc, WMREG_EECD);
1761 if (reg & EECD_EE_TYPE) {
1762 /* SPI */
1763 sc->sc_flags |= WM_F_EEPROM_SPI;
1764 wm_nvm_set_addrbits_size_eecd(sc);
1765 } else {
1766 /* Microwire */
1767 if ((reg & EECD_EE_ABITS) != 0) {
1768 sc->sc_nvm_wordsize = 256;
1769 sc->sc_nvm_addrbits = 8;
1770 } else {
1771 sc->sc_nvm_wordsize = 64;
1772 sc->sc_nvm_addrbits = 6;
1773 }
1774 }
1775 sc->sc_flags |= WM_F_LOCK_EECD;
1776 break;
1777 case WM_T_82571:
1778 case WM_T_82572:
1779 /* SPI */
1780 sc->sc_flags |= WM_F_EEPROM_SPI;
1781 wm_nvm_set_addrbits_size_eecd(sc);
1782 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1783 break;
1784 case WM_T_82573:
1785 sc->sc_flags |= WM_F_LOCK_SWSM;
1786 /* FALLTHROUGH */
1787 case WM_T_82574:
1788 case WM_T_82583:
1789 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1790 sc->sc_flags |= WM_F_EEPROM_FLASH;
1791 sc->sc_nvm_wordsize = 2048;
1792 } else {
1793 /* SPI */
1794 sc->sc_flags |= WM_F_EEPROM_SPI;
1795 wm_nvm_set_addrbits_size_eecd(sc);
1796 }
1797 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1798 break;
1799 case WM_T_82575:
1800 case WM_T_82576:
1801 case WM_T_82580:
1802 case WM_T_I350:
1803 case WM_T_I354:
1804 case WM_T_80003:
1805 /* SPI */
1806 sc->sc_flags |= WM_F_EEPROM_SPI;
1807 wm_nvm_set_addrbits_size_eecd(sc);
1808 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1809 | WM_F_LOCK_SWSM;
1810 break;
1811 case WM_T_ICH8:
1812 case WM_T_ICH9:
1813 case WM_T_ICH10:
1814 case WM_T_PCH:
1815 case WM_T_PCH2:
1816 case WM_T_PCH_LPT:
1817 /* FLASH */
1818 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1819 sc->sc_nvm_wordsize = 2048;
1820 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1821 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1822 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1823 aprint_error_dev(sc->sc_dev,
1824 "can't map FLASH registers\n");
1825 goto fail_5;
1826 }
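/*
 * The computation below assumes GFPREG holds the first sector of
 * the flash region in its low bits and the last sector + 1 in
 * bits 16 and up, in units of ICH_FLASH_SECTOR_SIZE.  The region
 * contains two NVM banks, so the byte count is divided by
 * 2 * sizeof(uint16_t) to yield one bank's size in 16-bit words.
 */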
1827 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1828 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1829 ICH_FLASH_SECTOR_SIZE;
1830 sc->sc_ich8_flash_bank_size =
1831 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1832 sc->sc_ich8_flash_bank_size -=
1833 (reg & ICH_GFPREG_BASE_MASK);
1834 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1835 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1836 break;
1837 case WM_T_I210:
1838 case WM_T_I211:
1839 wm_nvm_set_addrbits_size_eecd(sc);
1840 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1841 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1842 break;
1843 default:
1844 break;
1845 }
1846
1847 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1848 switch (sc->sc_type) {
1849 case WM_T_82571:
1850 case WM_T_82572:
1851 reg = CSR_READ(sc, WMREG_SWSM2);
1852 if ((reg & SWSM2_LOCK) == 0) {
1853 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1854 force_clear_smbi = true;
1855 } else
1856 force_clear_smbi = false;
1857 break;
1858 case WM_T_82573:
1859 case WM_T_82574:
1860 case WM_T_82583:
1861 force_clear_smbi = true;
1862 break;
1863 default:
1864 force_clear_smbi = false;
1865 break;
1866 }
1867 if (force_clear_smbi) {
1868 reg = CSR_READ(sc, WMREG_SWSM);
1869 if ((reg & SWSM_SMBI) != 0)
1870 aprint_error_dev(sc->sc_dev,
1871 "Please update the Bootagent\n");
1872 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1873 }
1874
1875 /*
1876 * Defer printing the EEPROM type until after verifying the checksum.
1877 * This allows the EEPROM type to be printed correctly in the case
1878 * that no EEPROM is attached.
1879 */
1880 /*
1881 * Validate the EEPROM checksum. If the checksum fails, flag
1882 * this for later, so we can fail future reads from the EEPROM.
1883 */
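/*
 * (For reference, the checksum is apparently the 16-bit sum of
 * the first 0x40 NVM words, which must equal 0xbaba;
 * wm_nvm_validate_checksum() returns non-zero otherwise.)
 */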
1884 if (wm_nvm_validate_checksum(sc)) {
1885 /*
1886 * Validate a second time: some PCIe parts fail the
1887 * first check because the link is in a sleep state.
1888 */
1889 if (wm_nvm_validate_checksum(sc))
1890 sc->sc_flags |= WM_F_EEPROM_INVALID;
1891 }
1892
1893 /* Set device properties (macflags) */
1894 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1895
1896 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1897 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1898 else {
1899 aprint_verbose_dev(sc->sc_dev, "%u words ",
1900 sc->sc_nvm_wordsize);
1901 if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1902 aprint_verbose("FLASH(HW)\n");
1903 } else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1904 aprint_verbose("FLASH\n");
1905 } else {
1906 if (sc->sc_flags & WM_F_EEPROM_SPI)
1907 eetype = "SPI";
1908 else
1909 eetype = "MicroWire";
1910 aprint_verbose("(%d address bits) %s EEPROM\n",
1911 sc->sc_nvm_addrbits, eetype);
1912 }
1913 }
1914
1915 switch (sc->sc_type) {
1916 case WM_T_82571:
1917 case WM_T_82572:
1918 case WM_T_82573:
1919 case WM_T_82574:
1920 case WM_T_82583:
1921 case WM_T_80003:
1922 case WM_T_ICH8:
1923 case WM_T_ICH9:
1924 case WM_T_ICH10:
1925 case WM_T_PCH:
1926 case WM_T_PCH2:
1927 case WM_T_PCH_LPT:
1928 if (wm_check_mng_mode(sc) != 0)
1929 wm_get_hw_control(sc);
1930 break;
1931 default:
1932 break;
1933 }
1934 wm_get_wakeup(sc);
1935 /*
1936 * Read the Ethernet address from the EEPROM, unless it was
1937 * already found in the device properties.
1938 */
1939 ea = prop_dictionary_get(dict, "mac-address");
1940 if (ea != NULL) {
1941 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1942 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1943 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1944 } else {
1945 if (wm_read_mac_addr(sc, enaddr) != 0) {
1946 aprint_error_dev(sc->sc_dev,
1947 "unable to read Ethernet address\n");
1948 goto fail_5;
1949 }
1950 }
1951
1952 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1953 ether_sprintf(enaddr));
1954
1955 /*
1956 * Read the config info from the EEPROM, and set up various
1957 * bits in the control registers based on their contents.
1958 */
1959 pn = prop_dictionary_get(dict, "i82543-cfg1");
1960 if (pn != NULL) {
1961 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1962 cfg1 = (uint16_t) prop_number_integer_value(pn);
1963 } else {
1964 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
1965 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1966 goto fail_5;
1967 }
1968 }
1969
1970 pn = prop_dictionary_get(dict, "i82543-cfg2");
1971 if (pn != NULL) {
1972 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1973 cfg2 = (uint16_t) prop_number_integer_value(pn);
1974 } else {
1975 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
1976 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1977 goto fail_5;
1978 }
1979 }
1980
1981 /* check for WM_F_WOL */
1982 switch (sc->sc_type) {
1983 case WM_T_82542_2_0:
1984 case WM_T_82542_2_1:
1985 case WM_T_82543:
1986 /* dummy? */
1987 eeprom_data = 0;
1988 apme_mask = NVM_CFG3_APME;
1989 break;
1990 case WM_T_82544:
1991 apme_mask = NVM_CFG2_82544_APM_EN;
1992 eeprom_data = cfg2;
1993 break;
1994 case WM_T_82546:
1995 case WM_T_82546_3:
1996 case WM_T_82571:
1997 case WM_T_82572:
1998 case WM_T_82573:
1999 case WM_T_82574:
2000 case WM_T_82583:
2001 case WM_T_80003:
2002 default:
2003 apme_mask = NVM_CFG3_APME;
2004 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2005 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2006 break;
2007 case WM_T_82575:
2008 case WM_T_82576:
2009 case WM_T_82580:
2010 case WM_T_I350:
2011 case WM_T_I354: /* XXX ok? */
2012 case WM_T_ICH8:
2013 case WM_T_ICH9:
2014 case WM_T_ICH10:
2015 case WM_T_PCH:
2016 case WM_T_PCH2:
2017 case WM_T_PCH_LPT:
2018 /* XXX The funcid should be checked on some devices */
2019 apme_mask = WUC_APME;
2020 eeprom_data = CSR_READ(sc, WMREG_WUC);
2021 break;
2022 }
2023
2024 /* Check for the WM_F_WOL flag after reading the EEPROM settings */
2025 if ((eeprom_data & apme_mask) != 0)
2026 sc->sc_flags |= WM_F_WOL;
2027 #ifdef WM_DEBUG
2028 if ((sc->sc_flags & WM_F_WOL) != 0)
2029 printf("WOL\n");
2030 #endif
2031
2032 /*
2033 * XXX need special handling for some multi-port cards
2034 * to disable a particular port.
2035 */
2036
2037 if (sc->sc_type >= WM_T_82544) {
2038 pn = prop_dictionary_get(dict, "i82543-swdpin");
2039 if (pn != NULL) {
2040 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2041 swdpin = (uint16_t) prop_number_integer_value(pn);
2042 } else {
2043 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2044 aprint_error_dev(sc->sc_dev,
2045 "unable to read SWDPIN\n");
2046 goto fail_5;
2047 }
2048 }
2049 }
2050
2051 if (cfg1 & NVM_CFG1_ILOS)
2052 sc->sc_ctrl |= CTRL_ILOS;
2053 if (sc->sc_type >= WM_T_82544) {
2054 sc->sc_ctrl |=
2055 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2056 CTRL_SWDPIO_SHIFT;
2057 sc->sc_ctrl |=
2058 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2059 CTRL_SWDPINS_SHIFT;
2060 } else {
2061 sc->sc_ctrl |=
2062 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2063 CTRL_SWDPIO_SHIFT;
2064 }
2065
2066 #if 0
2067 if (sc->sc_type >= WM_T_82544) {
2068 if (cfg1 & NVM_CFG1_IPS0)
2069 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2070 if (cfg1 & NVM_CFG1_IPS1)
2071 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2072 sc->sc_ctrl_ext |=
2073 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2074 CTRL_EXT_SWDPIO_SHIFT;
2075 sc->sc_ctrl_ext |=
2076 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2077 CTRL_EXT_SWDPINS_SHIFT;
2078 } else {
2079 sc->sc_ctrl_ext |=
2080 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2081 CTRL_EXT_SWDPIO_SHIFT;
2082 }
2083 #endif
2084
2085 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2086 #if 0
2087 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2088 #endif
2089
2090 /*
2091 * Set up some register offsets that are different between
2092 * the i82542 and the i82543 and later chips.
2093 */
2094 if (sc->sc_type < WM_T_82543) {
2095 sc->sc_rdt_reg = WMREG_OLD_RDT0;
2096 sc->sc_tdt_reg = WMREG_OLD_TDT;
2097 } else {
2098 sc->sc_rdt_reg = WMREG_RDT;
2099 sc->sc_tdt_reg = WMREG_TDT;
2100 }
2101
2102 if (sc->sc_type == WM_T_PCH) {
2103 uint16_t val;
2104
2105 /* Save the NVM K1 bit setting */
2106 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2107
2108 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2109 sc->sc_nvm_k1_enabled = 1;
2110 else
2111 sc->sc_nvm_k1_enabled = 0;
2112 }
2113
2114 /*
2115 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
2116 * media structures accordingly.
2117 */
2118 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2119 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2120 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2121 || sc->sc_type == WM_T_82573
2122 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2123 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2124 wm_gmii_mediainit(sc, wmp->wmp_product);
2125 } else if (sc->sc_type < WM_T_82543 ||
2126 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2127 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2128 aprint_error_dev(sc->sc_dev,
2129 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2130 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2131 }
2132 wm_tbi_mediainit(sc);
2133 } else {
2134 switch (sc->sc_type) {
2135 case WM_T_82575:
2136 case WM_T_82576:
2137 case WM_T_82580:
2138 case WM_T_I350:
2139 case WM_T_I354:
2140 case WM_T_I210:
2141 case WM_T_I211:
2142 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2143 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2144 switch (link_mode) {
2145 case CTRL_EXT_LINK_MODE_1000KX:
2146 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2147 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2148 break;
2149 case CTRL_EXT_LINK_MODE_SGMII:
2150 if (wm_sgmii_uses_mdio(sc)) {
2151 aprint_verbose_dev(sc->sc_dev,
2152 "SGMII(MDIO)\n");
2153 sc->sc_flags |= WM_F_SGMII;
2154 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2155 break;
2156 }
2157 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2158 /*FALLTHROUGH*/
2159 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2160 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2161 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2162 if (link_mode
2163 == CTRL_EXT_LINK_MODE_SGMII) {
2164 sc->sc_mediatype
2165 = WM_MEDIATYPE_COPPER;
2166 sc->sc_flags |= WM_F_SGMII;
2167 } else {
2168 sc->sc_mediatype
2169 = WM_MEDIATYPE_SERDES;
2170 aprint_verbose_dev(sc->sc_dev,
2171 "SERDES\n");
2172 }
2173 break;
2174 }
2175 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2176 aprint_verbose_dev(sc->sc_dev,
2177 "SERDES\n");
2178
2179 /* Change current link mode setting */
2180 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2181 switch (sc->sc_mediatype) {
2182 case WM_MEDIATYPE_COPPER:
2183 reg |= CTRL_EXT_LINK_MODE_SGMII;
2184 break;
2185 case WM_MEDIATYPE_SERDES:
2186 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2187 break;
2188 default:
2189 break;
2190 }
2191 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2192 break;
2193 case CTRL_EXT_LINK_MODE_GMII:
2194 default:
2195 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2196 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2197 break;
2198 }
2199
2201 if ((sc->sc_flags & WM_F_SGMII) != 0)
2202 reg |= CTRL_EXT_I2C_ENA;
2203 else
2204 reg &= ~CTRL_EXT_I2C_ENA;
2205 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2206
2207 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2208 wm_gmii_mediainit(sc, wmp->wmp_product);
2209 else
2210 wm_tbi_mediainit(sc);
2211 break;
2212 default:
2213 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2214 aprint_error_dev(sc->sc_dev,
2215 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2216 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2217 wm_gmii_mediainit(sc, wmp->wmp_product);
2218 }
2219 }
2220
2221 ifp = &sc->sc_ethercom.ec_if;
2222 xname = device_xname(sc->sc_dev);
2223 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2224 ifp->if_softc = sc;
2225 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2226 ifp->if_ioctl = wm_ioctl;
2227 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2228 ifp->if_start = wm_nq_start;
2229 else
2230 ifp->if_start = wm_start;
2231 ifp->if_watchdog = wm_watchdog;
2232 ifp->if_init = wm_init;
2233 ifp->if_stop = wm_stop;
2234 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2235 IFQ_SET_READY(&ifp->if_snd);
2236
2237 /* Check for jumbo frame */
2238 switch (sc->sc_type) {
2239 case WM_T_82573:
2240 /* XXX limited to 9234 if ASPM is disabled */
2241 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &io3);
2242 if ((io3 & NVM_3GIO_3_ASPM_MASK) != 0)
2243 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2244 break;
2245 case WM_T_82571:
2246 case WM_T_82572:
2247 case WM_T_82574:
2248 case WM_T_82575:
2249 case WM_T_82576:
2250 case WM_T_82580:
2251 case WM_T_I350:
2252 case WM_T_I354: /* XXXX ok? */
2253 case WM_T_I210:
2254 case WM_T_I211:
2255 case WM_T_80003:
2256 case WM_T_ICH9:
2257 case WM_T_ICH10:
2258 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2259 case WM_T_PCH_LPT:
2260 /* XXX limited to 9234 */
2261 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2262 break;
2263 case WM_T_PCH:
2264 /* XXX limited to 4096 */
2265 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2266 break;
2267 case WM_T_82542_2_0:
2268 case WM_T_82542_2_1:
2269 case WM_T_82583:
2270 case WM_T_ICH8:
2271 /* No support for jumbo frame */
2272 break;
2273 default:
2274 /* ETHER_MAX_LEN_JUMBO */
2275 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2276 break;
2277 }
2278
2279 /* If we're an i82543 or greater, we can support VLANs. */
2280 if (sc->sc_type >= WM_T_82543)
2281 sc->sc_ethercom.ec_capabilities |=
2282 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2283
2284 /*
2285 * We can perform TCPv4 and UDPv4 checksums in-bound, but
2286 * only on the i82543 and later.
2287 */
2288 if (sc->sc_type >= WM_T_82543) {
2289 ifp->if_capabilities |=
2290 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2291 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2292 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2293 IFCAP_CSUM_TCPv6_Tx |
2294 IFCAP_CSUM_UDPv6_Tx;
2295 }
2296
2297 /*
2298 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
2299 *
2300 * 82541GI (8086:1076) ... no
2301 * 82572EI (8086:10b9) ... yes
2302 */
2303 if (sc->sc_type >= WM_T_82571) {
2304 ifp->if_capabilities |=
2305 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2306 }
2307
2308 /*
2309 * If we're an i82544 or greater (except the i82547), we can do
2310 * TCP segmentation offload.
2311 */
2312 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2313 ifp->if_capabilities |= IFCAP_TSOv4;
2314 }
2315
2316 if (sc->sc_type >= WM_T_82571) {
2317 ifp->if_capabilities |= IFCAP_TSOv6;
2318 }
2319
2320 #ifdef WM_MPSAFE
2321 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2322 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2323 #else
2324 sc->sc_tx_lock = NULL;
2325 sc->sc_rx_lock = NULL;
2326 #endif
2327
2328 /* Attach the interface. */
2329 if_attach(ifp);
2330 ether_ifattach(ifp, enaddr);
2331 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2332 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2333 RND_FLAG_DEFAULT);
2334
2335 #ifdef WM_EVENT_COUNTERS
2336 /* Attach event counters. */
2337 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2338 NULL, xname, "txsstall");
2339 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2340 NULL, xname, "txdstall");
2341 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2342 NULL, xname, "txfifo_stall");
2343 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2344 NULL, xname, "txdw");
2345 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2346 NULL, xname, "txqe");
2347 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2348 NULL, xname, "rxintr");
2349 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2350 NULL, xname, "linkintr");
2351
2352 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2353 NULL, xname, "rxipsum");
2354 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2355 NULL, xname, "rxtusum");
2356 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2357 NULL, xname, "txipsum");
2358 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2359 NULL, xname, "txtusum");
2360 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2361 NULL, xname, "txtusum6");
2362
2363 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2364 NULL, xname, "txtso");
2365 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2366 NULL, xname, "txtso6");
2367 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2368 NULL, xname, "txtsopain");
2369
2370 for (i = 0; i < WM_NTXSEGS; i++) {
2371 snprintf(wm_txseg_evcnt_names[i],
2372 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2373 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2374 NULL, xname, wm_txseg_evcnt_names[i]);
2375 }
2376
2377 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2378 NULL, xname, "txdrop");
2379
2380 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2381 NULL, xname, "tu");
2382
2383 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2384 NULL, xname, "tx_xoff");
2385 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2386 NULL, xname, "tx_xon");
2387 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2388 NULL, xname, "rx_xoff");
2389 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2390 NULL, xname, "rx_xon");
2391 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2392 NULL, xname, "rx_macctl");
2393 #endif /* WM_EVENT_COUNTERS */
2394
2395 if (pmf_device_register(self, wm_suspend, wm_resume))
2396 pmf_class_network_register(self, ifp);
2397 else
2398 aprint_error_dev(self, "couldn't establish power handler\n");
2399
2400 sc->sc_flags |= WM_F_ATTACHED;
2401 return;
2402
2403 /*
2404 * Free any resources we've allocated during the failed attach
2405 * attempt. Do this in reverse order and fall through.
2406 */
2407 fail_5:
2408 for (i = 0; i < WM_NRXDESC; i++) {
2409 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2410 bus_dmamap_destroy(sc->sc_dmat,
2411 sc->sc_rxsoft[i].rxs_dmamap);
2412 }
2413 fail_4:
2414 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2415 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2416 bus_dmamap_destroy(sc->sc_dmat,
2417 sc->sc_txsoft[i].txs_dmamap);
2418 }
2419 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2420 fail_3:
2421 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2422 fail_2:
2423 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2424 sc->sc_cd_size);
2425 fail_1:
2426 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2427 fail_0:
2428 return;
2429 }
2430
2431 /* The detach function (ca_detach) */
2432 static int
2433 wm_detach(device_t self, int flags __unused)
2434 {
2435 struct wm_softc *sc = device_private(self);
2436 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2437 int i;
2438 #ifndef WM_MPSAFE
2439 int s;
2440 #endif
2441
2442 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2443 return 0;
2444
2445 #ifndef WM_MPSAFE
2446 s = splnet();
2447 #endif
2448 /* Stop the interface; callouts are stopped inside wm_stop(). */
2449 wm_stop(ifp, 1);
2450
2451 #ifndef WM_MPSAFE
2452 splx(s);
2453 #endif
2454
2455 pmf_device_deregister(self);
2456
2457 /* Tell the firmware about the release */
2458 WM_BOTH_LOCK(sc);
2459 wm_release_manageability(sc);
2460 wm_release_hw_control(sc);
2461 WM_BOTH_UNLOCK(sc);
2462
2463 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2464
2465 /* Delete all remaining media. */
2466 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2467
2468 ether_ifdetach(ifp);
2469 if_detach(ifp);
2470
2472 /* Unload RX dmamaps and free mbufs */
2473 WM_RX_LOCK(sc);
2474 wm_rxdrain(sc);
2475 WM_RX_UNLOCK(sc);
2476 /* Must unlock here */
2477
2478 /* Free the DMA maps; this mirrors the failure path at the end of wm_attach() */
2479 for (i = 0; i < WM_NRXDESC; i++) {
2480 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2481 bus_dmamap_destroy(sc->sc_dmat,
2482 sc->sc_rxsoft[i].rxs_dmamap);
2483 }
2484 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2485 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2486 bus_dmamap_destroy(sc->sc_dmat,
2487 sc->sc_txsoft[i].txs_dmamap);
2488 }
2489 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2490 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2491 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2492 sc->sc_cd_size);
2493 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2494
2495 /* Disestablish the interrupt handler */
2496 if (sc->sc_ih != NULL) {
2497 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2498 sc->sc_ih = NULL;
2499 }
2500
2501 /* Unmap the registers */
2502 if (sc->sc_ss) {
2503 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2504 sc->sc_ss = 0;
2505 }
2506
2507 if (sc->sc_ios) {
2508 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2509 sc->sc_ios = 0;
2510 }
2511
2512 if (sc->sc_tx_lock)
2513 mutex_obj_free(sc->sc_tx_lock);
2514 if (sc->sc_rx_lock)
2515 mutex_obj_free(sc->sc_rx_lock);
2516
2517 return 0;
2518 }
2519
2520 static bool
2521 wm_suspend(device_t self, const pmf_qual_t *qual)
2522 {
2523 struct wm_softc *sc = device_private(self);
2524
2525 wm_release_manageability(sc);
2526 wm_release_hw_control(sc);
2527 #ifdef WM_WOL
2528 wm_enable_wakeup(sc);
2529 #endif
2530
2531 return true;
2532 }
2533
2534 static bool
2535 wm_resume(device_t self, const pmf_qual_t *qual)
2536 {
2537 struct wm_softc *sc = device_private(self);
2538
2539 wm_init_manageability(sc);
2540
2541 return true;
2542 }
2543
2544 /*
2545 * wm_watchdog: [ifnet interface function]
2546 *
2547 * Watchdog timer handler.
2548 */
2549 static void
2550 wm_watchdog(struct ifnet *ifp)
2551 {
2552 struct wm_softc *sc = ifp->if_softc;
2553
2554 /*
2555 * Since we're using delayed interrupts, sweep up
2556 * before we report an error.
2557 */
2558 WM_TX_LOCK(sc);
2559 wm_txintr(sc);
2560 WM_TX_UNLOCK(sc);
2561
2562 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2563 #ifdef WM_DEBUG
2564 int i, j;
2565 struct wm_txsoft *txs;
2566 #endif
2567 log(LOG_ERR,
2568 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2569 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2570 sc->sc_txnext);
2571 ifp->if_oerrors++;
2572 #ifdef WM_DEBUG
2573 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2574 i = WM_NEXTTXS(sc, i)) {
2575 txs = &sc->sc_txsoft[i];
2576 printf("txs %d tx %d -> %d\n",
2577 i, txs->txs_firstdesc, txs->txs_lastdesc);
2578 for (j = txs->txs_firstdesc; ;
2579 j = WM_NEXTTX(sc, j)) {
2580 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2581 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2582 printf("\t %#08x%08x\n",
2583 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2584 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2585 if (j == txs->txs_lastdesc)
2586 break;
2587 }
2588 }
2589 #endif
2590 /* Reset the interface. */
2591 (void) wm_init(ifp);
2592 }
2593
2594 /* Try to get more packets going. */
2595 ifp->if_start(ifp);
2596 }
2597
2598 /*
2599 * wm_tick:
2600 *
2601 * One second timer, used to check link status, sweep up
2602 * completed transmit jobs, etc.
2603 */
2604 static void
2605 wm_tick(void *arg)
2606 {
2607 struct wm_softc *sc = arg;
2608 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2609 #ifndef WM_MPSAFE
2610 int s;
2611
2612 s = splnet();
2613 #endif
2614
2615 WM_TX_LOCK(sc);
2616
2617 if (sc->sc_stopping)
2618 goto out;
2619
2620 if (sc->sc_type >= WM_T_82542_2_1) {
2621 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2622 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2623 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2624 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2625 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2626 }
2627
2628 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2629 ifp->if_ierrors += 0ULL + /* ensure quad_t */
2630 + CSR_READ(sc, WMREG_CRCERRS)
2631 + CSR_READ(sc, WMREG_ALGNERRC)
2632 + CSR_READ(sc, WMREG_SYMERRC)
2633 + CSR_READ(sc, WMREG_RXERRC)
2634 + CSR_READ(sc, WMREG_SEC)
2635 + CSR_READ(sc, WMREG_CEXTERR)
2636 + CSR_READ(sc, WMREG_RLEC);
2637 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2638
2639 if (sc->sc_flags & WM_F_HAS_MII)
2640 mii_tick(&sc->sc_mii);
2641 else
2642 wm_tbi_check_link(sc);
2643
2644 out:
2645 WM_TX_UNLOCK(sc);
2646 #ifndef WM_MPSAFE
2647 splx(s);
2648 #endif
2649
2650 if (!sc->sc_stopping)
2651 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2652 }
2653
2654 static int
2655 wm_ifflags_cb(struct ethercom *ec)
2656 {
2657 struct ifnet *ifp = &ec->ec_if;
2658 struct wm_softc *sc = ifp->if_softc;
2659 int change = ifp->if_flags ^ sc->sc_if_flags;
2660 int rc = 0;
2661
2662 WM_BOTH_LOCK(sc);
2663
2664 if (change != 0)
2665 sc->sc_if_flags = ifp->if_flags;
2666
2667 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2668 rc = ENETRESET;
2669 goto out;
2670 }
2671
2672 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2673 wm_set_filter(sc);
2674
2675 wm_set_vlan(sc);
2676
2677 out:
2678 WM_BOTH_UNLOCK(sc);
2679
2680 return rc;
2681 }
2682
2683 /*
2684 * wm_ioctl: [ifnet interface function]
2685 *
2686 * Handle control requests from the operator.
2687 */
2688 static int
2689 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2690 {
2691 struct wm_softc *sc = ifp->if_softc;
2692 struct ifreq *ifr = (struct ifreq *) data;
2693 struct ifaddr *ifa = (struct ifaddr *)data;
2694 struct sockaddr_dl *sdl;
2695 int s, error;
2696
2697 #ifndef WM_MPSAFE
2698 s = splnet();
2699 #endif
2700 switch (cmd) {
2701 case SIOCSIFMEDIA:
2702 case SIOCGIFMEDIA:
2703 WM_BOTH_LOCK(sc);
2704 /* Flow control requires full-duplex mode. */
2705 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2706 (ifr->ifr_media & IFM_FDX) == 0)
2707 ifr->ifr_media &= ~IFM_ETH_FMASK;
2708 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2709 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2710 /* We can do both TXPAUSE and RXPAUSE. */
2711 ifr->ifr_media |=
2712 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2713 }
2714 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2715 }
2716 WM_BOTH_UNLOCK(sc);
2717 #ifdef WM_MPSAFE
2718 s = splnet();
2719 #endif
2720 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2721 #ifdef WM_MPSAFE
2722 splx(s);
2723 #endif
2724 break;
2725 case SIOCINITIFADDR:
2726 WM_BOTH_LOCK(sc);
2727 if (ifa->ifa_addr->sa_family == AF_LINK) {
2728 sdl = satosdl(ifp->if_dl->ifa_addr);
2729 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2730 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2731 /* unicast address is first multicast entry */
2732 wm_set_filter(sc);
2733 error = 0;
2734 WM_BOTH_UNLOCK(sc);
2735 break;
2736 }
2737 WM_BOTH_UNLOCK(sc);
2738 /*FALLTHROUGH*/
2739 default:
2740 #ifdef WM_MPSAFE
2741 s = splnet();
2742 #endif
2743 /* It may call wm_start, so unlock here */
2744 error = ether_ioctl(ifp, cmd, data);
2745 #ifdef WM_MPSAFE
2746 splx(s);
2747 #endif
2748 if (error != ENETRESET)
2749 break;
2750
2751 error = 0;
2752
2753 if (cmd == SIOCSIFCAP) {
2754 error = (*ifp->if_init)(ifp);
2755 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2756 ;
2757 else if (ifp->if_flags & IFF_RUNNING) {
2758 /*
2759 * Multicast list has changed; set the hardware filter
2760 * accordingly.
2761 */
2762 WM_BOTH_LOCK(sc);
2763 wm_set_filter(sc);
2764 WM_BOTH_UNLOCK(sc);
2765 }
2766 break;
2767 }
2768
2769 /* Try to get more packets going. */
2770 ifp->if_start(ifp);
2771
2772 #ifndef WM_MPSAFE
2773 splx(s);
2774 #endif
2775 return error;
2776 }
2777
2778 /* MAC address related */
2779
2780 /*
2781 * Get the NVM offset of the MAC address and return it.
2782 * If an error occurred, return offset 0.
2783 */
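/*
 * Layout assumed here: NVM_OFF_ALT_MAC_ADDR_PTR holds a word
 * offset to a region with one 3-word (6-byte) MAC address entry
 * per LAN function, and NVM_OFF_MACADDR_82571() selects the
 * entry for our function ID.
 */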
2784 static uint16_t
2785 wm_check_alt_mac_addr(struct wm_softc *sc)
2786 {
2787 uint16_t myea[ETHER_ADDR_LEN / 2];
2788 uint16_t offset = NVM_OFF_MACADDR;
2789
2790 /* Try to read alternative MAC address pointer */
2791 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2792 return 0;
2793
2794 /* Check whether the pointer is valid. */
2795 if ((offset == 0x0000) || (offset == 0xffff))
2796 return 0;
2797
2798 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
2799 /*
2800 * Check whether the alternative MAC address is valid.
2801 * Some cards have a non-0xffff pointer but don't actually
2802 * use an alternative MAC address.
2803 *
2804 * A valid unicast address must have the multicast (I/G) bit clear.
2805 */
2806 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2807 if (((myea[0] & 0xff) & 0x01) == 0)
2808 return offset; /* Found */
2809
2810 /* Not found */
2811 return 0;
2812 }
2813
2814 static int
2815 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2816 {
2817 uint16_t myea[ETHER_ADDR_LEN / 2];
2818 uint16_t offset = NVM_OFF_MACADDR;
2819 int do_invert = 0;
2820
2821 switch (sc->sc_type) {
2822 case WM_T_82580:
2823 case WM_T_I350:
2824 case WM_T_I354:
2825 /* EEPROM Top Level Partitioning */
2826 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
2827 break;
2828 case WM_T_82571:
2829 case WM_T_82575:
2830 case WM_T_82576:
2831 case WM_T_80003:
2832 case WM_T_I210:
2833 case WM_T_I211:
2834 offset = wm_check_alt_mac_addr(sc);
2835 if (offset == 0)
2836 if ((sc->sc_funcid & 0x01) == 1)
2837 do_invert = 1;
2838 break;
2839 default:
2840 if ((sc->sc_funcid & 0x01) == 1)
2841 do_invert = 1;
2842 break;
2843 }
2844
2845 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2846 myea) != 0)
2847 goto bad;
2848
2849 enaddr[0] = myea[0] & 0xff;
2850 enaddr[1] = myea[0] >> 8;
2851 enaddr[2] = myea[1] & 0xff;
2852 enaddr[3] = myea[1] >> 8;
2853 enaddr[4] = myea[2] & 0xff;
2854 enaddr[5] = myea[2] >> 8;
2855
2856 /*
2857 * Toggle the LSB of the MAC address on the second port
2858 * of some dual port cards.
2859 */
2860 if (do_invert != 0)
2861 enaddr[5] ^= 1;
2862
2863 return 0;
2864
2865 bad:
2866 return -1;
2867 }
2868
2869 /*
2870 * wm_set_ral:
2871 *
2872 * Set an entry in the receive address list.
2873 */
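/*
 * Packing example, derived from the code below: for
 * 00:11:22:33:44:55, ral_lo = 0x33221100 and
 * ral_hi = RAL_AV | 0x00005544.
 */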
2874 static void
2875 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2876 {
2877 uint32_t ral_lo, ral_hi;
2878
2879 if (enaddr != NULL) {
2880 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2881 (enaddr[3] << 24);
2882 ral_hi = enaddr[4] | (enaddr[5] << 8);
2883 ral_hi |= RAL_AV;
2884 } else {
2885 ral_lo = 0;
2886 ral_hi = 0;
2887 }
2888
2889 if (sc->sc_type >= WM_T_82544) {
2890 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2891 ral_lo);
2892 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2893 ral_hi);
2894 } else {
2895 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2896 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2897 }
2898 }
2899
2900 /*
2901 * wm_mchash:
2902 *
2903 * Compute the hash of the multicast address for the 4096-bit
2904 * multicast filter (1024-bit on the ICH/PCH variants).
2905 */
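/*
 * Worked example for the non-ICH case with sc_mchash_type 0
 * (lo_shift 4, hi_shift 4): for enaddr[4] = 0xab and
 * enaddr[5] = 0xcd, hash = (0xab >> 4) | (0xcd << 4) = 0xcda.
 * wm_set_filter() then uses hash >> 5 as the MTA register index
 * and hash & 0x1f as the bit within that register.
 */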
2906 static uint32_t
2907 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2908 {
2909 static const int lo_shift[4] = { 4, 3, 2, 0 };
2910 static const int hi_shift[4] = { 4, 5, 6, 8 };
2911 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2912 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
2913 uint32_t hash;
2914
2915 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2916 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2917 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
2918 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
2919 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
2920 return (hash & 0x3ff);
2921 }
2922 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2923 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2924
2925 return (hash & 0xfff);
2926 }
2927
2928 /*
2929 * wm_set_filter:
2930 *
2931 * Set up the receive filter.
2932 */
2933 static void
2934 wm_set_filter(struct wm_softc *sc)
2935 {
2936 struct ethercom *ec = &sc->sc_ethercom;
2937 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2938 struct ether_multi *enm;
2939 struct ether_multistep step;
2940 bus_addr_t mta_reg;
2941 uint32_t hash, reg, bit;
2942 int i, size;
2943
2944 if (sc->sc_type >= WM_T_82544)
2945 mta_reg = WMREG_CORDOVA_MTA;
2946 else
2947 mta_reg = WMREG_MTA;
2948
2949 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2950
2951 if (ifp->if_flags & IFF_BROADCAST)
2952 sc->sc_rctl |= RCTL_BAM;
2953 if (ifp->if_flags & IFF_PROMISC) {
2954 sc->sc_rctl |= RCTL_UPE;
2955 goto allmulti;
2956 }
2957
2958 /*
2959 * Set the station address in the first RAL slot, and
2960 * clear the remaining slots.
2961 */
2962 if (sc->sc_type == WM_T_ICH8)
2963 size = WM_RAL_TABSIZE_ICH8 - 1;
2964 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
2965 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
2966 || (sc->sc_type == WM_T_PCH_LPT))
2967 size = WM_RAL_TABSIZE_ICH8;
2968 else if (sc->sc_type == WM_T_82575)
2969 size = WM_RAL_TABSIZE_82575;
2970 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
2971 size = WM_RAL_TABSIZE_82576;
2972 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2973 size = WM_RAL_TABSIZE_I350;
2974 else
2975 size = WM_RAL_TABSIZE;
2976 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
2977 for (i = 1; i < size; i++)
2978 wm_set_ral(sc, NULL, i);
2979
2980 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2981 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2982 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
2983 size = WM_ICH8_MC_TABSIZE;
2984 else
2985 size = WM_MC_TABSIZE;
2986 /* Clear out the multicast table. */
2987 for (i = 0; i < size; i++)
2988 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2989
2990 ETHER_FIRST_MULTI(step, ec, enm);
2991 while (enm != NULL) {
2992 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2993 /*
2994 * We must listen to a range of multicast addresses.
2995 * For now, just accept all multicasts, rather than
2996 * trying to set only those filter bits needed to match
2997 * the range. (At this time, the only use of address
2998 * ranges is for IP multicast routing, for which the
2999 * range is big enough to require all bits set.)
3000 */
3001 goto allmulti;
3002 }
3003
3004 hash = wm_mchash(sc, enm->enm_addrlo);
3005
3006 reg = (hash >> 5);
3007 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3008 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3009 || (sc->sc_type == WM_T_PCH2)
3010 || (sc->sc_type == WM_T_PCH_LPT))
3011 reg &= 0x1f;
3012 else
3013 reg &= 0x7f;
3014 bit = hash & 0x1f;
3015
3016 hash = CSR_READ(sc, mta_reg + (reg << 2));
3017 hash |= 1U << bit;
3018
3019 /* XXX Hardware bug?? */
3020 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3021 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3022 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3023 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3024 } else
3025 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3026
3027 ETHER_NEXT_MULTI(step, enm);
3028 }
3029
3030 ifp->if_flags &= ~IFF_ALLMULTI;
3031 goto setit;
3032
3033 allmulti:
3034 ifp->if_flags |= IFF_ALLMULTI;
3035 sc->sc_rctl |= RCTL_MPE;
3036
3037 setit:
3038 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3039 }
3040
3041 /* Reset and init related */
3042
3043 static void
3044 wm_set_vlan(struct wm_softc *sc)
3045 {
3046 /* Deal with VLAN enables. */
3047 if (VLAN_ATTACHED(&sc->sc_ethercom))
3048 sc->sc_ctrl |= CTRL_VME;
3049 else
3050 sc->sc_ctrl &= ~CTRL_VME;
3051
3052 /* Write the control registers. */
3053 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3054 }
3055
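/*
 * wm_set_pcie_completion_timeout:
 *
 *	Set the PCIe completion timeout if it is still at its
 *	default of 0: to 10ms through GCR on parts without
 *	version 2 capabilities, otherwise to 16ms through the
 *	PCIe device control 2 register.  Completion timeout
 *	resend is disabled in either case.
 */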
3056 static void
3057 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3058 {
3059 uint32_t gcr;
3060 pcireg_t ctrl2;
3061
3062 gcr = CSR_READ(sc, WMREG_GCR);
3063
3064 /* Only take action if the timeout value is at its default of 0 */
3065 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3066 goto out;
3067
3068 if ((gcr & GCR_CAP_VER2) == 0) {
3069 gcr |= GCR_CMPL_TMOUT_10MS;
3070 goto out;
3071 }
3072
3073 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3074 sc->sc_pcixe_capoff + PCIE_DCSR2);
3075 ctrl2 |= WM_PCIE_DCSR2_16MS;
3076 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3077 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3078
3079 out:
3080 /* Disable completion timeout resend */
3081 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3082
3083 CSR_WRITE(sc, WMREG_GCR, gcr);
3084 }
3085
3086 void
3087 wm_get_auto_rd_done(struct wm_softc *sc)
3088 {
3089 int i;
3090
3091 /* wait for eeprom to reload */
3092 switch (sc->sc_type) {
3093 case WM_T_82571:
3094 case WM_T_82572:
3095 case WM_T_82573:
3096 case WM_T_82574:
3097 case WM_T_82583:
3098 case WM_T_82575:
3099 case WM_T_82576:
3100 case WM_T_82580:
3101 case WM_T_I350:
3102 case WM_T_I354:
3103 case WM_T_I210:
3104 case WM_T_I211:
3105 case WM_T_80003:
3106 case WM_T_ICH8:
3107 case WM_T_ICH9:
3108 for (i = 0; i < 10; i++) {
3109 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3110 break;
3111 delay(1000);
3112 }
3113 if (i == 10) {
3114 log(LOG_ERR, "%s: auto read from eeprom failed to "
3115 "complete\n", device_xname(sc->sc_dev));
3116 }
3117 break;
3118 default:
3119 break;
3120 }
3121 }
3122
3123 void
3124 wm_lan_init_done(struct wm_softc *sc)
3125 {
3126 uint32_t reg = 0;
3127 int i;
3128
3129 /* wait for eeprom to reload */
3130 switch (sc->sc_type) {
3131 case WM_T_ICH10:
3132 case WM_T_PCH:
3133 case WM_T_PCH2:
3134 case WM_T_PCH_LPT:
3135 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3136 reg = CSR_READ(sc, WMREG_STATUS);
3137 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3138 break;
3139 delay(100);
3140 }
3141 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3142 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3143 "complete\n", device_xname(sc->sc_dev), __func__);
3144 }
3145 break;
3146 default:
3147 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3148 __func__);
3149 break;
3150 }
3151
3152 reg &= ~STATUS_LAN_INIT_DONE;
3153 CSR_WRITE(sc, WMREG_STATUS, reg);
3154 }
3155
3156 void
3157 wm_get_cfg_done(struct wm_softc *sc)
3158 {
3159 int mask;
3160 uint32_t reg;
3161 int i;
3162
3163 /* wait for eeprom to reload */
3164 switch (sc->sc_type) {
3165 case WM_T_82542_2_0:
3166 case WM_T_82542_2_1:
3167 /* null */
3168 break;
3169 case WM_T_82543:
3170 case WM_T_82544:
3171 case WM_T_82540:
3172 case WM_T_82545:
3173 case WM_T_82545_3:
3174 case WM_T_82546:
3175 case WM_T_82546_3:
3176 case WM_T_82541:
3177 case WM_T_82541_2:
3178 case WM_T_82547:
3179 case WM_T_82547_2:
3180 case WM_T_82573:
3181 case WM_T_82574:
3182 case WM_T_82583:
3183 /* generic */
3184 delay(10*1000);
3185 break;
3186 case WM_T_80003:
3187 case WM_T_82571:
3188 case WM_T_82572:
3189 case WM_T_82575:
3190 case WM_T_82576:
3191 case WM_T_82580:
3192 case WM_T_I350:
3193 case WM_T_I354:
3194 case WM_T_I210:
3195 case WM_T_I211:
3196 if (sc->sc_type == WM_T_82571) {
3197 /* Only 82571 shares port 0 */
3198 mask = EEMNGCTL_CFGDONE_0;
3199 } else
3200 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3201 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3202 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3203 break;
3204 delay(1000);
3205 }
3206 if (i >= WM_PHY_CFG_TIMEOUT) {
3207 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3208 device_xname(sc->sc_dev), __func__));
3209 }
3210 break;
3211 case WM_T_ICH8:
3212 case WM_T_ICH9:
3213 case WM_T_ICH10:
3214 case WM_T_PCH:
3215 case WM_T_PCH2:
3216 case WM_T_PCH_LPT:
3217 delay(10*1000);
3218 if (sc->sc_type >= WM_T_ICH10)
3219 wm_lan_init_done(sc);
3220 else
3221 wm_get_auto_rd_done(sc);
3222
3223 reg = CSR_READ(sc, WMREG_STATUS);
3224 if ((reg & STATUS_PHYRA) != 0)
3225 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3226 break;
3227 default:
3228 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3229 __func__);
3230 break;
3231 }
3232 }
3233
3234 /* Init hardware bits */
3235 void
3236 wm_initialize_hardware_bits(struct wm_softc *sc)
3237 {
3238 uint32_t tarc0, tarc1, reg;
3239
3240 /* For 82571 variant, 80003 and ICHs */
3241 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3242 || (sc->sc_type >= WM_T_80003)) {
3243
3244 /* Transmit Descriptor Control 0 */
3245 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3246 reg |= TXDCTL_COUNT_DESC;
3247 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3248
3249 /* Transmit Descriptor Control 1 */
3250 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3251 reg |= TXDCTL_COUNT_DESC;
3252 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3253
3254 /* TARC0 */
3255 tarc0 = CSR_READ(sc, WMREG_TARC0);
3256 switch (sc->sc_type) {
3257 case WM_T_82571:
3258 case WM_T_82572:
3259 case WM_T_82573:
3260 case WM_T_82574:
3261 case WM_T_82583:
3262 case WM_T_80003:
3263 /* Clear bits 30..27 */
3264 tarc0 &= ~__BITS(30, 27);
3265 break;
3266 default:
3267 break;
3268 }
3269
3270 switch (sc->sc_type) {
3271 case WM_T_82571:
3272 case WM_T_82572:
3273 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3274
3275 tarc1 = CSR_READ(sc, WMREG_TARC1);
3276 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3277 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3278 /* 8257[12] Errata No.7 */
3279 tarc1 |= __BIT(22); /* TARC1 bit 22 */
3280
3281 /* TARC1 bit 28 */
3282 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3283 tarc1 &= ~__BIT(28);
3284 else
3285 tarc1 |= __BIT(28);
3286 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3287
3288 /*
3289 * 8257[12] Errata No.13
3290 * Disable Dynamic Clock Gating.
3291 */
3292 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3293 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3294 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3295 break;
3296 case WM_T_82573:
3297 case WM_T_82574:
3298 case WM_T_82583:
3299 if ((sc->sc_type == WM_T_82574)
3300 || (sc->sc_type == WM_T_82583))
3301 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3302
3303 /* Extended Device Control */
3304 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3305 reg &= ~__BIT(23); /* Clear bit 23 */
3306 reg |= __BIT(22); /* Set bit 22 */
3307 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3308
3309 /* Device Control */
3310 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3311 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3312
3313 /* PCIe Control Register */
3314 if ((sc->sc_type == WM_T_82574)
3315 || (sc->sc_type == WM_T_82583)) {
3316 /*
3317 * Document says this bit must be set for
3318 * proper operation.
3319 */
3320 reg = CSR_READ(sc, WMREG_GCR);
3321 reg |= __BIT(22);
3322 CSR_WRITE(sc, WMREG_GCR, reg);
3323
3324 /*
3325 * Apply a workaround for a hardware erratum
3326 * documented in the errata docs: it fixes an
3327 * issue where some error-prone or unreliable
3328 * PCIe completions occur, particularly with
3329 * ASPM enabled.  Without the fix, the issue
3330 * can cause Tx timeouts.
3331 */
3332 reg = CSR_READ(sc, WMREG_GCR2);
3333 reg |= __BIT(0);
3334 CSR_WRITE(sc, WMREG_GCR2, reg);
3335 }
3336 break;
3337 case WM_T_80003:
3338 /* TARC0 */
3339 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3340 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3341 tarc0 &= ~__BIT(20); /* Clear bit 20 */
3342
3343 /* TARC1 bit 28 */
3344 tarc1 = CSR_READ(sc, WMREG_TARC1);
3345 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3346 tarc1 &= ~__BIT(28);
3347 else
3348 tarc1 |= __BIT(28);
3349 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3350 break;
3351 case WM_T_ICH8:
3352 case WM_T_ICH9:
3353 case WM_T_ICH10:
3354 case WM_T_PCH:
3355 case WM_T_PCH2:
3356 case WM_T_PCH_LPT:
3357 /* TARC 0 */
3358 if (sc->sc_type == WM_T_ICH8) {
3359 /* Set TARC0 bits 29 and 28 */
3360 tarc0 |= __BITS(29, 28);
3361 }
3362 /* Set TARC0 bits 23,24,26,27 */
3363 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3364
3365 /* CTRL_EXT */
3366 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3367 reg |= __BIT(22); /* Set bit 22 */
3368 /*
3369 * Enable PHY low-power state when MAC is at D3
3370 * w/o WoL
3371 */
3372 if (sc->sc_type >= WM_T_PCH)
3373 reg |= CTRL_EXT_PHYPDEN;
3374 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3375
3376 /* TARC1 */
3377 tarc1 = CSR_READ(sc, WMREG_TARC1);
3378 /* bit 28 */
3379 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3380 tarc1 &= ~__BIT(28);
3381 else
3382 tarc1 |= __BIT(28);
3383 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3384 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3385
3386 /* Device Status */
3387 if (sc->sc_type == WM_T_ICH8) {
3388 reg = CSR_READ(sc, WMREG_STATUS);
3389 reg &= ~__BIT(31);
3390 CSR_WRITE(sc, WMREG_STATUS, reg);
3392 }
3393
3394 /*
3395 * Work around a descriptor data corruption issue during
3396 * NFSv2 UDP traffic by simply disabling the NFS filtering
3397 * capability.
3398 */
3399 reg = CSR_READ(sc, WMREG_RFCTL);
3400 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3401 CSR_WRITE(sc, WMREG_RFCTL, reg);
3402 break;
3403 default:
3404 break;
3405 }
3406 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3407
3408 /*
3409 * 8257[12] Errata No.52 and some others.
3410 * Avoid RSS Hash Value bug.
3411 */
3412 switch (sc->sc_type) {
3413 case WM_T_82571:
3414 case WM_T_82572:
3415 case WM_T_82573:
3416 case WM_T_80003:
3417 case WM_T_ICH8:
3418 reg = CSR_READ(sc, WMREG_RFCTL);
3419 reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
3420 CSR_WRITE(sc, WMREG_RFCTL, reg);
3421 break;
3422 default:
3423 break;
3424 }
3425 }
3426 }
3427
3428 /*
3429 * wm_reset:
3430 *
3431 * Reset the i82542 chip.
3432 */
3433 static void
3434 wm_reset(struct wm_softc *sc)
3435 {
3436 int phy_reset = 0;
3437 int error = 0;
3438 uint32_t reg, mask;
3439
3440 /*
3441 * Allocate on-chip memory according to the MTU size.
3442 * The Packet Buffer Allocation register must be written
3443 * before the chip is reset.
3444 */
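/*
 * The PBA value selects the Rx packet buffer allocation (in KB
 * on most parts); the rest of the on-chip buffer is left for Tx,
 * as the 82547 Tx FIFO computation below illustrates.
 */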
3445 switch (sc->sc_type) {
3446 case WM_T_82547:
3447 case WM_T_82547_2:
3448 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3449 PBA_22K : PBA_30K;
3450 sc->sc_txfifo_head = 0;
3451 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3452 sc->sc_txfifo_size =
3453 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3454 sc->sc_txfifo_stall = 0;
3455 break;
3456 case WM_T_82571:
3457 case WM_T_82572:
3458 case WM_T_82575: /* XXX need special handling for jumbo frames */
3459 case WM_T_I350:
3460 case WM_T_I354:
3461 case WM_T_80003:
3462 sc->sc_pba = PBA_32K;
3463 break;
3464 case WM_T_82580:
3465 sc->sc_pba = PBA_35K;
3466 break;
3467 case WM_T_I210:
3468 case WM_T_I211:
3469 sc->sc_pba = PBA_34K;
3470 break;
3471 case WM_T_82576:
3472 sc->sc_pba = PBA_64K;
3473 break;
3474 case WM_T_82573:
3475 sc->sc_pba = PBA_12K;
3476 break;
3477 case WM_T_82574:
3478 case WM_T_82583:
3479 sc->sc_pba = PBA_20K;
3480 break;
3481 case WM_T_ICH8:
3482 /* Workaround for a bit corruption issue in FIFO memory */
3483 sc->sc_pba = PBA_8K;
3484 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3485 break;
3486 case WM_T_ICH9:
3487 case WM_T_ICH10:
3488 sc->sc_pba = PBA_10K;
3489 break;
3490 case WM_T_PCH:
3491 case WM_T_PCH2:
3492 case WM_T_PCH_LPT:
3493 sc->sc_pba = PBA_26K;
3494 break;
3495 default:
3496 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3497 PBA_40K : PBA_48K;
3498 break;
3499 }
3500 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3501
3502 /* Prevent the PCI-E bus from sticking */
3503 if (sc->sc_flags & WM_F_PCIE) {
3504 int timeout = 800;
3505
3506 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3507 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3508
3509 while (timeout--) {
3510 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3511 == 0)
3512 break;
3513 delay(100);
3514 }
3515 }
3516
3517 /* Set the completion timeout for interface */
3518 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3519 || (sc->sc_type == WM_T_82580)
3520 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3521 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3522 wm_set_pcie_completion_timeout(sc);
3523
3524 /* Clear interrupt */
3525 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3526
3527 /* Stop the transmit and receive processes. */
3528 CSR_WRITE(sc, WMREG_RCTL, 0);
3529 sc->sc_rctl &= ~RCTL_EN;
3530 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3531 CSR_WRITE_FLUSH(sc);
3532
3533 /* XXX set_tbi_sbp_82543() */
3534
3535 delay(10*1000);
3536
3537 /* Must acquire the MDIO ownership before MAC reset */
3538 switch (sc->sc_type) {
3539 case WM_T_82573:
3540 case WM_T_82574:
3541 case WM_T_82583:
3542 error = wm_get_hw_semaphore_82573(sc);
3543 break;
3544 default:
3545 break;
3546 }
3547
3548 /*
3549 * 82541 Errata 29? & 82547 Errata 28?
3550 * See also the description of the PHY_RST bit in the CTRL
3551 * register in 8254x_GBe_SDM.pdf.
3552 */
3553 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3554 CSR_WRITE(sc, WMREG_CTRL,
3555 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3556 CSR_WRITE_FLUSH(sc);
3557 delay(5000);
3558 }
3559
3560 switch (sc->sc_type) {
3561 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3562 case WM_T_82541:
3563 case WM_T_82541_2:
3564 case WM_T_82547:
3565 case WM_T_82547_2:
3566 /*
3567 * On some chipsets, a reset through a memory-mapped write
3568 * cycle can cause the chip to reset before completing the
3569 * write cycle. This causes a major headache that can be
3570 * avoided by issuing the reset via indirect register writes
3571 * through I/O space.
3572 *
3573 * So, if we successfully mapped the I/O BAR at attach time,
3574 * use that. Otherwise, try our luck with a memory-mapped
3575 * reset.
3576 */
3577 if (sc->sc_flags & WM_F_IOH_VALID)
3578 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3579 else
3580 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3581 break;
3582 case WM_T_82545_3:
3583 case WM_T_82546_3:
3584 /* Use the shadow control register on these chips. */
3585 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3586 break;
3587 case WM_T_80003:
3588 mask = swfwphysem[sc->sc_funcid];
3589 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3590 wm_get_swfw_semaphore(sc, mask);
3591 CSR_WRITE(sc, WMREG_CTRL, reg);
3592 wm_put_swfw_semaphore(sc, mask);
3593 break;
3594 case WM_T_ICH8:
3595 case WM_T_ICH9:
3596 case WM_T_ICH10:
3597 case WM_T_PCH:
3598 case WM_T_PCH2:
3599 case WM_T_PCH_LPT:
3600 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3601 if (wm_check_reset_block(sc) == 0) {
3602 /*
3603 * Gate automatic PHY configuration by hardware on
3604 * non-managed 82579
3605 */
3606 if ((sc->sc_type == WM_T_PCH2)
3607 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3608 != 0))
3609 wm_gate_hw_phy_config_ich8lan(sc, 1);
3610
3612 reg |= CTRL_PHY_RESET;
3613 phy_reset = 1;
3614 }
3615 wm_get_swfwhw_semaphore(sc);
3616 CSR_WRITE(sc, WMREG_CTRL, reg);
3617 /* Don't insert a completion barrier during reset */
3618 delay(20*1000);
3619 wm_put_swfwhw_semaphore(sc);
3620 break;
3621 case WM_T_82580:
3622 case WM_T_I350:
3623 case WM_T_I354:
3624 case WM_T_I210:
3625 case WM_T_I211:
3626 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3627 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3628 CSR_WRITE_FLUSH(sc);
3629 delay(5000);
3630 break;
3631 case WM_T_82542_2_0:
3632 case WM_T_82542_2_1:
3633 case WM_T_82543:
3634 case WM_T_82540:
3635 case WM_T_82545:
3636 case WM_T_82546:
3637 case WM_T_82571:
3638 case WM_T_82572:
3639 case WM_T_82573:
3640 case WM_T_82574:
3641 case WM_T_82575:
3642 case WM_T_82576:
3643 case WM_T_82583:
3644 default:
3645 /* Everything else can safely use the documented method. */
3646 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3647 break;
3648 }
3649
3650 /* Must release the MDIO ownership after MAC reset */
3651 switch (sc->sc_type) {
3652 case WM_T_82573:
3653 case WM_T_82574:
3654 case WM_T_82583:
3655 if (error == 0)
3656 wm_put_hw_semaphore_82573(sc);
3657 break;
3658 default:
3659 break;
3660 }
3661
3662 if (phy_reset != 0)
3663 wm_get_cfg_done(sc);
3664
3665 /* reload EEPROM */
3666 switch (sc->sc_type) {
3667 case WM_T_82542_2_0:
3668 case WM_T_82542_2_1:
3669 case WM_T_82543:
3670 case WM_T_82544:
3671 delay(10);
3672 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3673 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3674 CSR_WRITE_FLUSH(sc);
3675 delay(2000);
3676 break;
3677 case WM_T_82540:
3678 case WM_T_82545:
3679 case WM_T_82545_3:
3680 case WM_T_82546:
3681 case WM_T_82546_3:
3682 delay(5*1000);
3683 /* XXX Disable HW ARPs on ASF enabled adapters */
3684 break;
3685 case WM_T_82541:
3686 case WM_T_82541_2:
3687 case WM_T_82547:
3688 case WM_T_82547_2:
3689 delay(20000);
3690 /* XXX Disable HW ARPs on ASF enabled adapters */
3691 break;
3692 case WM_T_82571:
3693 case WM_T_82572:
3694 case WM_T_82573:
3695 case WM_T_82574:
3696 case WM_T_82583:
3697 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3698 delay(10);
3699 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3700 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3701 CSR_WRITE_FLUSH(sc);
3702 }
3703 /* check EECD_EE_AUTORD */
3704 wm_get_auto_rd_done(sc);
3705 /*
3706 * PHY configuration from NVM starts only after EECD_AUTO_RD
3707 * is set.
3708 */
3709 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3710 || (sc->sc_type == WM_T_82583))
3711 delay(25*1000);
3712 break;
3713 case WM_T_82575:
3714 case WM_T_82576:
3715 case WM_T_82580:
3716 case WM_T_I350:
3717 case WM_T_I354:
3718 case WM_T_I210:
3719 case WM_T_I211:
3720 case WM_T_80003:
3721 /* check EECD_EE_AUTORD */
3722 wm_get_auto_rd_done(sc);
3723 break;
3724 case WM_T_ICH8:
3725 case WM_T_ICH9:
3726 case WM_T_ICH10:
3727 case WM_T_PCH:
3728 case WM_T_PCH2:
3729 case WM_T_PCH_LPT:
3730 break;
3731 default:
3732 panic("%s: unknown type\n", __func__);
3733 }
3734
3735 /* Check whether EEPROM is present or not */
3736 switch (sc->sc_type) {
3737 case WM_T_82575:
3738 case WM_T_82576:
3739 #if 0 /* XXX */
3740 case WM_T_82580:
3741 #endif
3742 case WM_T_I350:
3743 case WM_T_I354:
3744 case WM_T_ICH8:
3745 case WM_T_ICH9:
3746 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3747 /* Not found */
3748 sc->sc_flags |= WM_F_EEPROM_INVALID;
3749 if ((sc->sc_type == WM_T_82575)
3750 || (sc->sc_type == WM_T_82576)
3751 || (sc->sc_type == WM_T_82580)
3752 || (sc->sc_type == WM_T_I350)
3753 || (sc->sc_type == WM_T_I354))
3754 wm_reset_init_script_82575(sc);
3755 }
3756 break;
3757 default:
3758 break;
3759 }
3760
3761 if ((sc->sc_type == WM_T_82580)
3762 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3763 /* clear global device reset status bit */
3764 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3765 }
3766
3767 /* Clear any pending interrupt events. */
3768 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3769 reg = CSR_READ(sc, WMREG_ICR);
3770
3771 /* reload sc_ctrl */
3772 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3773
3774 if (sc->sc_type == WM_T_I350)
3775 wm_set_eee_i350(sc);
3776
3777 /* dummy read from WUC */
3778 if (sc->sc_type == WM_T_PCH)
3779 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3780 /*
3781 * For PCH, this write will make sure that any noise will be detected
3782 * as a CRC error and be dropped rather than show up as a bad packet
3783 * to the DMA engine
3784 */
3785 if (sc->sc_type == WM_T_PCH)
3786 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3787
3788 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3789 CSR_WRITE(sc, WMREG_WUC, 0);
3790
3791 /* XXX need special handling for 82580 */
3792 }
3793
3794 /*
3795 * wm_add_rxbuf:
3796 *
3797 * Add a receive buffer to the indicated descriptor.
3798 */
3799 static int
3800 wm_add_rxbuf(struct wm_softc *sc, int idx)
3801 {
3802 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3803 struct mbuf *m;
3804 int error;
3805
3806 KASSERT(WM_RX_LOCKED(sc));
3807
3808 MGETHDR(m, M_DONTWAIT, MT_DATA);
3809 if (m == NULL)
3810 return ENOBUFS;
3811
3812 MCLGET(m, M_DONTWAIT);
3813 if ((m->m_flags & M_EXT) == 0) {
3814 m_freem(m);
3815 return ENOBUFS;
3816 }
3817
3818 if (rxs->rxs_mbuf != NULL)
3819 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3820
3821 rxs->rxs_mbuf = m;
3822
3823 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3824 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3825 BUS_DMA_READ|BUS_DMA_NOWAIT);
3826 if (error) {
3827 /* XXX XXX XXX */
3828 aprint_error_dev(sc->sc_dev,
3829 "unable to load rx DMA map %d, error = %d\n",
3830 idx, error);
3831 panic("wm_add_rxbuf");
3832 }
3833
3834 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3835 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3836
3837 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3838 if ((sc->sc_rctl & RCTL_EN) != 0)
3839 WM_INIT_RXDESC(sc, idx);
3840 } else
3841 WM_INIT_RXDESC(sc, idx);
3842
3843 return 0;
3844 }
3845
3846 /*
3847 * wm_rxdrain:
3848 *
3849 * Drain the receive queue.
3850 */
3851 static void
3852 wm_rxdrain(struct wm_softc *sc)
3853 {
3854 struct wm_rxsoft *rxs;
3855 int i;
3856
3857 KASSERT(WM_RX_LOCKED(sc));
3858
3859 for (i = 0; i < WM_NRXDESC; i++) {
3860 rxs = &sc->sc_rxsoft[i];
3861 if (rxs->rxs_mbuf != NULL) {
3862 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3863 m_freem(rxs->rxs_mbuf);
3864 rxs->rxs_mbuf = NULL;
3865 }
3866 }
3867 }
3868
3869 /*
3870 * wm_init: [ifnet interface function]
3871 *
3872 * Initialize the interface.
3873 */
3874 static int
3875 wm_init(struct ifnet *ifp)
3876 {
3877 struct wm_softc *sc = ifp->if_softc;
3878 int ret;
3879
3880 WM_BOTH_LOCK(sc);
3881 ret = wm_init_locked(ifp);
3882 WM_BOTH_UNLOCK(sc);
3883
3884 return ret;
3885 }
3886
3887 static int
3888 wm_init_locked(struct ifnet *ifp)
3889 {
3890 struct wm_softc *sc = ifp->if_softc;
3891 struct wm_rxsoft *rxs;
3892 int i, j, trynum, error = 0;
3893 uint32_t reg;
3894
3895 KASSERT(WM_BOTH_LOCKED(sc));
3896 /*
3897 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3898 * There is a small but measurable benefit to avoiding the adjustment
3899 * of the descriptor so that the headers are aligned, for normal MTU,
3900 * on such platforms. One possibility is that the DMA itself is
3901 * slightly more efficient if the front of the entire packet (instead
3902 * of the front of the headers) is aligned.
3903 *
3904 * Note we must always set align_tweak to 0 if we are using
3905 * jumbo frames.
3906 */
3907 #ifdef __NO_STRICT_ALIGNMENT
3908 sc->sc_align_tweak = 0;
3909 #else
3910 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3911 sc->sc_align_tweak = 0;
3912 else
3913 sc->sc_align_tweak = 2;
3914 #endif /* __NO_STRICT_ALIGNMENT */
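/*
 * Worked example of the tweak above: the 14-byte Ethernet header
 * leaves the IP header 2-byte aligned in a buffer that starts on a
 * 4-byte boundary; offsetting the buffer by 2 puts the IP header on
 * a 4-byte boundary.  With the standard MTU of 1500 and the usual
 * MCLBYTES of 2048, 1500 + 14 + 4 = 1518 <= 2046, so sc_align_tweak
 * is 2; only frames too large for the shifted cluster force it
 * back to 0.
 */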
3915
3916 /* Cancel any pending I/O. */
3917 wm_stop_locked(ifp, 0);
3918
3919 /* update statistics before reset */
3920 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3921 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3922
3923 /* Reset the chip to a known state. */
3924 wm_reset(sc);
3925
3926 switch (sc->sc_type) {
3927 case WM_T_82571:
3928 case WM_T_82572:
3929 case WM_T_82573:
3930 case WM_T_82574:
3931 case WM_T_82583:
3932 case WM_T_80003:
3933 case WM_T_ICH8:
3934 case WM_T_ICH9:
3935 case WM_T_ICH10:
3936 case WM_T_PCH:
3937 case WM_T_PCH2:
3938 case WM_T_PCH_LPT:
3939 if (wm_check_mng_mode(sc) != 0)
3940 wm_get_hw_control(sc);
3941 break;
3942 default:
3943 break;
3944 }
3945
3946 /* Init hardware bits */
3947 wm_initialize_hardware_bits(sc);
3948
3949 /* Reset the PHY. */
3950 if (sc->sc_flags & WM_F_HAS_MII)
3951 wm_gmii_reset(sc);
3952
3953 /* Initialize the transmit descriptor ring. */
3954 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3955 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3956 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3957 sc->sc_txfree = WM_NTXDESC(sc);
3958 sc->sc_txnext = 0;
3959
3960 if (sc->sc_type < WM_T_82543) {
3961 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
3962 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
3963 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3964 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3965 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3966 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3967 } else {
3968 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
3969 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
3970 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3971 CSR_WRITE(sc, WMREG_TDH, 0);
3972 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3973 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3974
3975 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3976 /*
3977 * Don't write TDT before TCTL.EN is set.
3978 * See the document.
3979 */
3980 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_QUEUE_ENABLE
3981 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3982 | TXDCTL_WTHRESH(0));
3983 else {
3984 CSR_WRITE(sc, WMREG_TDT, 0);
3985 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) |
3986 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3987 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3988 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3989 }
3990 }
3991 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3992 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3993
3994 /* Initialize the transmit job descriptors. */
3995 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3996 sc->sc_txsoft[i].txs_mbuf = NULL;
3997 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3998 sc->sc_txsnext = 0;
3999 sc->sc_txsdirty = 0;
4000
4001 /*
4002 * Initialize the receive descriptor and receive job
4003 * descriptor rings.
4004 */
4005 if (sc->sc_type < WM_T_82543) {
4006 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4007 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4008 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4009 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4010 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4011 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4012
4013 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4014 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4015 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4016 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4017 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4018 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4019 } else {
4020 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4021 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4022 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4023 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4024 CSR_WRITE(sc, WMREG_EITR(0), 450);
4025 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4026 panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
4027 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4028 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4029 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4030 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4031 | RXDCTL_WTHRESH(1));
4032 } else {
4033 CSR_WRITE(sc, WMREG_RDH, 0);
4034 CSR_WRITE(sc, WMREG_RDT, 0);
4035 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4036 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
4037 }
4038 }
4039 for (i = 0; i < WM_NRXDESC; i++) {
4040 rxs = &sc->sc_rxsoft[i];
4041 if (rxs->rxs_mbuf == NULL) {
4042 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4043 log(LOG_ERR, "%s: unable to allocate or map "
4044 "rx buffer %d, error = %d\n",
4045 device_xname(sc->sc_dev), i, error);
4046 /*
4047 * XXX Should attempt to run with fewer receive
4048 * XXX buffers instead of just failing.
4049 */
4050 wm_rxdrain(sc);
4051 goto out;
4052 }
4053 } else {
4054 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4055 WM_INIT_RXDESC(sc, i);
4056 /*
4057 * For 82575 and newer devices, the RX descriptors
4058 * must be initialized after RCTL.EN is set in
4059 * wm_set_filter().
4060 */
4061 }
4062 }
4063 sc->sc_rxptr = 0;
4064 sc->sc_rxdiscard = 0;
4065 WM_RXCHAIN_RESET(sc);
4066
4067 /*
4068 * Clear out the VLAN table -- we don't use it (yet).
4069 */
4070 CSR_WRITE(sc, WMREG_VET, 0);
4071 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4072 trynum = 10; /* Due to hw errata */
4073 else
4074 trynum = 1;
4075 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4076 for (j = 0; j < trynum; j++)
4077 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4078
4079 /*
4080 * Set up flow-control parameters.
4081 *
4082 * XXX Values could probably stand some tuning.
4083 */
4084 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4085 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4086 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4087 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4088 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4089 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4090 }
4091
4092 sc->sc_fcrtl = FCRTL_DFLT;
4093 if (sc->sc_type < WM_T_82543) {
4094 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4095 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4096 } else {
4097 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4098 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4099 }
4100
4101 if (sc->sc_type == WM_T_80003)
4102 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4103 else
4104 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4105
4106 /* Writes the control register. */
4107 wm_set_vlan(sc);
4108
4109 if (sc->sc_flags & WM_F_HAS_MII) {
4110 int val;
4111
4112 switch (sc->sc_type) {
4113 case WM_T_80003:
4114 case WM_T_ICH8:
4115 case WM_T_ICH9:
4116 case WM_T_ICH10:
4117 case WM_T_PCH:
4118 case WM_T_PCH2:
4119 case WM_T_PCH_LPT:
4120 /*
4121 * Set the mac to wait the maximum time between each
4122 * iteration and increase the max iterations when
4123 * polling the phy; this fixes erroneous timeouts at
4124 * 10Mbps.
4125 */
4126 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4127 0xFFFF);
4128 val = wm_kmrn_readreg(sc,
4129 KUMCTRLSTA_OFFSET_INB_PARAM);
4130 val |= 0x3F;
4131 wm_kmrn_writereg(sc,
4132 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4133 break;
4134 default:
4135 break;
4136 }
4137
4138 if (sc->sc_type == WM_T_80003) {
4139 val = CSR_READ(sc, WMREG_CTRL_EXT);
4140 val &= ~CTRL_EXT_LINK_MODE_MASK;
4141 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4142
4143 /* Bypass RX and TX FIFO's */
4144 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4145 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4146 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4147 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4148 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4149 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4150 }
4151 }
4152 #if 0
4153 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4154 #endif
4155
4156 /* Set up checksum offload parameters. */
4157 reg = CSR_READ(sc, WMREG_RXCSUM);
4158 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4159 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4160 reg |= RXCSUM_IPOFL;
4161 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4162 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4163 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4164 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4165 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4166
4167 /* Set up the interrupt registers. */
4168 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4169 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4170 ICR_RXO | ICR_RXT0;
4171 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4172
4173 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4174 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4175 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4176 reg = CSR_READ(sc, WMREG_KABGTXD);
4177 reg |= KABGTXD_BGSQLBIAS;
4178 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4179 }
4180
4181 /* Set up the inter-packet gap. */
4182 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4183
4184 if (sc->sc_type >= WM_T_82543) {
4185 /*
4186 * Set up the interrupt throttling register (units of 256ns)
4187 * Note that a footnote in Intel's documentation says this
4188 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4189 * or 10Mbit mode. Empirically, the same appears to be
4190 * true for the 1024ns units of the other
4191 * interrupt-related timer registers -- so, really, we ought
4192 * to divide this value by 4 when the link speed is low.
4193 *
4194 * XXX implement this division at link speed change!
4195 */
4196
4197 /*
4198 * For N interrupts/sec, set this value to:
4199 * 1000000000 / (N * 256). Note that we set the
4200 * absolute and packet timer values to this value
4201 * divided by 4 to get "simple timer" behavior.
4202 */
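/*
 * Checking the arithmetic on the value below:
 * 1000000000 / (2604 * 256) ~= 1500, so sc_itr = 1500 is an
 * interrupt interval of 1500 * 256ns = 384us, i.e. about 2604
 * interrupts/sec.
 */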
4203
4204 sc->sc_itr = 1500; /* 2604 ints/sec */
4205 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4206 }
4207
4208 /* Set the VLAN ethernetype. */
4209 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4210
4211 /*
4212 * Set up the transmit control register; we start out with
4213 * a collision distance suitable for FDX, but update it when
4214 * we resolve the media type.
4215 */
4216 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4217 | TCTL_CT(TX_COLLISION_THRESHOLD)
4218 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4219 if (sc->sc_type >= WM_T_82571)
4220 sc->sc_tctl |= TCTL_MULR;
4221 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4222
4223 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4224 /* Write TDT after TCTL.EN is set. See the document. */
4225 CSR_WRITE(sc, WMREG_TDT, 0);
4226 }
4227
4228 if (sc->sc_type == WM_T_80003) {
4229 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4230 reg &= ~TCTL_EXT_GCEX_MASK;
4231 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4232 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4233 }
4234
4235 /* Set the media. */
4236 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4237 goto out;
4238
4239 /* Configure for OS presence */
4240 wm_init_manageability(sc);
4241
4242 /*
4243 * Set up the receive control register; we actually program
4244 * the register when we set the receive filter. Use multicast
4245 * address offset type 0.
4246 *
4247 * Only the i82544 has the ability to strip the incoming
4248 * CRC, so we don't enable that feature.
4249 */
4250 sc->sc_mchash_type = 0;
4251 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4252 | RCTL_MO(sc->sc_mchash_type);
4253
4254 /*
4255 * The I350 has a bug where it always strips the CRC whether
4256 * asked to or not. So ask for stripped CRC here and cope in rxeof
4257 */
4258 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4259 || (sc->sc_type == WM_T_I210))
4260 sc->sc_rctl |= RCTL_SECRC;
4261
4262 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4263 && (ifp->if_mtu > ETHERMTU)) {
4264 sc->sc_rctl |= RCTL_LPE;
4265 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4266 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4267 }
4268
4269 if (MCLBYTES == 2048) {
4270 sc->sc_rctl |= RCTL_2k;
4271 } else {
4272 if (sc->sc_type >= WM_T_82543) {
4273 switch (MCLBYTES) {
4274 case 4096:
4275 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4276 break;
4277 case 8192:
4278 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4279 break;
4280 case 16384:
4281 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4282 break;
4283 default:
4284 panic("wm_init: MCLBYTES %d unsupported",
4285 MCLBYTES);
4286 break;
4287 }
4288 } else
panic("wm_init: i82542 requires MCLBYTES = 2048");
4289 }
4290
4291 /* Set the receive filter. */
4292 wm_set_filter(sc);
4293
4294 /* Enable ECC */
4295 switch (sc->sc_type) {
4296 case WM_T_82571:
4297 reg = CSR_READ(sc, WMREG_PBA_ECC);
4298 reg |= PBA_ECC_CORR_EN;
4299 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4300 break;
4301 case WM_T_PCH_LPT:
4302 reg = CSR_READ(sc, WMREG_PBECCSTS);
4303 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4304 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4305
4306 reg = CSR_READ(sc, WMREG_CTRL);
4307 reg |= CTRL_MEHE;
4308 CSR_WRITE(sc, WMREG_CTRL, reg);
4309 break;
4310 default:
4311 break;
4312 }
4313
4314 /* On 575 and later set RDT only if RX enabled */
4315 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4316 for (i = 0; i < WM_NRXDESC; i++)
4317 WM_INIT_RXDESC(sc, i);
4318
4319 sc->sc_stopping = false;
4320
4321 /* Start the one second link check clock. */
4322 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4323
4324 /* ...all done! */
4325 ifp->if_flags |= IFF_RUNNING;
4326 ifp->if_flags &= ~IFF_OACTIVE;
4327
4328 out:
4329 sc->sc_if_flags = ifp->if_flags;
4330 if (error)
4331 log(LOG_ERR, "%s: interface not running\n",
4332 device_xname(sc->sc_dev));
4333 return error;
4334 }
4335
4336 /*
4337 * wm_stop: [ifnet interface function]
4338 *
4339 * Stop transmission on the interface.
4340 */
4341 static void
4342 wm_stop(struct ifnet *ifp, int disable)
4343 {
4344 struct wm_softc *sc = ifp->if_softc;
4345
4346 WM_BOTH_LOCK(sc);
4347 wm_stop_locked(ifp, disable);
4348 WM_BOTH_UNLOCK(sc);
4349 }
4350
4351 static void
4352 wm_stop_locked(struct ifnet *ifp, int disable)
4353 {
4354 struct wm_softc *sc = ifp->if_softc;
4355 struct wm_txsoft *txs;
4356 int i;
4357
4358 KASSERT(WM_BOTH_LOCKED(sc));
4359
4360 sc->sc_stopping = true;
4361
4362 /* Stop the one second clock. */
4363 callout_stop(&sc->sc_tick_ch);
4364
4365 /* Stop the 82547 Tx FIFO stall check timer. */
4366 if (sc->sc_type == WM_T_82547)
4367 callout_stop(&sc->sc_txfifo_ch);
4368
4369 if (sc->sc_flags & WM_F_HAS_MII) {
4370 /* Down the MII. */
4371 mii_down(&sc->sc_mii);
4372 } else {
4373 #if 0
4374 /* Should we clear PHY's status properly? */
4375 wm_reset(sc);
4376 #endif
4377 }
4378
4379 /* Stop the transmit and receive processes. */
4380 CSR_WRITE(sc, WMREG_TCTL, 0);
4381 CSR_WRITE(sc, WMREG_RCTL, 0);
4382 sc->sc_rctl &= ~RCTL_EN;
4383
4384 /*
4385 * Clear the interrupt mask to ensure the device cannot assert its
4386 * interrupt line.
4387 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4388 * any currently pending or shared interrupt.
4389 */
4390 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4391 sc->sc_icr = 0;
4392
4393 /* Release any queued transmit buffers. */
4394 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4395 txs = &sc->sc_txsoft[i];
4396 if (txs->txs_mbuf != NULL) {
4397 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4398 m_freem(txs->txs_mbuf);
4399 txs->txs_mbuf = NULL;
4400 }
4401 }
4402
4403 /* Mark the interface as down and cancel the watchdog timer. */
4404 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4405 ifp->if_timer = 0;
4406
4407 if (disable)
4408 wm_rxdrain(sc);
4409
4410 #if 0 /* notyet */
4411 if (sc->sc_type >= WM_T_82544)
4412 CSR_WRITE(sc, WMREG_WUC, 0);
4413 #endif
4414 }
4415
4416 /*
4417 * wm_tx_offload:
4418 *
4419 * Set up TCP/IP checksumming parameters for the
4420 * specified packet.
4421 */
4422 static int
4423 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4424 uint8_t *fieldsp)
4425 {
4426 struct mbuf *m0 = txs->txs_mbuf;
4427 struct livengood_tcpip_ctxdesc *t;
4428 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4429 uint32_t ipcse;
4430 struct ether_header *eh;
4431 int offset, iphl;
4432 uint8_t fields;
4433
4434 /*
4435 * XXX It would be nice if the mbuf pkthdr had offset
4436 * fields for the protocol headers.
4437 */
4438
4439 eh = mtod(m0, struct ether_header *);
4440 switch (htons(eh->ether_type)) {
4441 case ETHERTYPE_IP:
4442 case ETHERTYPE_IPV6:
4443 offset = ETHER_HDR_LEN;
4444 break;
4445
4446 case ETHERTYPE_VLAN:
4447 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4448 break;
4449
4450 default:
4451 /*
4452 * Don't support this protocol or encapsulation.
4453 */
4454 *fieldsp = 0;
4455 *cmdp = 0;
4456 return 0;
4457 }
4458
4459 if ((m0->m_pkthdr.csum_flags &
4460 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4461 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4462 } else {
4463 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4464 }
4465 ipcse = offset + iphl - 1;
4466
4467 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4468 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4469 seg = 0;
4470 fields = 0;
4471
4472 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4473 int hlen = offset + iphl;
4474 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4475
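/*
 * For TSO the hardware regenerates the IP total length and the
 * TCP checksum for each segment it emits, so the template header
 * must carry a zero ip_len/ip6_plen and a th_sum seeded with only
 * the pseudo-header checksum (addresses and protocol, no length).
 * The two branches below do exactly that, differing only in
 * whether the headers are contiguous in the first mbuf.
 */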
4476 if (__predict_false(m0->m_len <
4477 (hlen + sizeof(struct tcphdr)))) {
4478 /*
4479 * TCP/IP headers are not in the first mbuf; we need
4480 * to do this the slow and painful way. Let's just
4481 * hope this doesn't happen very often.
4482 */
4483 struct tcphdr th;
4484
4485 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4486
4487 m_copydata(m0, hlen, sizeof(th), &th);
4488 if (v4) {
4489 struct ip ip;
4490
4491 m_copydata(m0, offset, sizeof(ip), &ip);
4492 ip.ip_len = 0;
4493 m_copyback(m0,
4494 offset + offsetof(struct ip, ip_len),
4495 sizeof(ip.ip_len), &ip.ip_len);
4496 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4497 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4498 } else {
4499 struct ip6_hdr ip6;
4500
4501 m_copydata(m0, offset, sizeof(ip6), &ip6);
4502 ip6.ip6_plen = 0;
4503 m_copyback(m0,
4504 offset + offsetof(struct ip6_hdr, ip6_plen),
4505 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4506 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4507 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4508 }
4509 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4510 sizeof(th.th_sum), &th.th_sum);
4511
4512 hlen += th.th_off << 2;
4513 } else {
4514 /*
4515 * TCP/IP headers are in the first mbuf; we can do
4516 * this the easy way.
4517 */
4518 struct tcphdr *th;
4519
4520 if (v4) {
4521 struct ip *ip =
4522 (void *)(mtod(m0, char *) + offset);
4523 th = (void *)(mtod(m0, char *) + hlen);
4524
4525 ip->ip_len = 0;
4526 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4527 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4528 } else {
4529 struct ip6_hdr *ip6 =
4530 (void *)(mtod(m0, char *) + offset);
4531 th = (void *)(mtod(m0, char *) + hlen);
4532
4533 ip6->ip6_plen = 0;
4534 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4535 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4536 }
4537 hlen += th->th_off << 2;
4538 }
4539
4540 if (v4) {
4541 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4542 cmdlen |= WTX_TCPIP_CMD_IP;
4543 } else {
4544 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4545 ipcse = 0;
4546 }
4547 cmd |= WTX_TCPIP_CMD_TSE;
4548 cmdlen |= WTX_TCPIP_CMD_TSE |
4549 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4550 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4551 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4552 }
4553
4554 /*
4555 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4556 * offload feature, if we load the context descriptor, we
4557 * MUST provide valid values for IPCSS and TUCSS fields.
4558 */
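/*
 * Worked example (field values derived from the structures, not
 * from the manual): for an untagged IPv4/TCP frame, offset = 14
 * and iphl = 20, so IPCSS = 14 (start of the IP header),
 * IPCSO = 14 + offsetof(struct ip, ip_sum) = 24 (where the
 * hardware deposits the IP checksum), and IPCSE = 33 (the last
 * byte the IP checksum covers).
 */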
4559
4560 ipcs = WTX_TCPIP_IPCSS(offset) |
4561 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4562 WTX_TCPIP_IPCSE(ipcse);
4563 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
4564 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
4565 fields |= WTX_IXSM;
4566 }
4567
4568 offset += iphl;
4569
4570 if (m0->m_pkthdr.csum_flags &
4571 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
4572 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4573 fields |= WTX_TXSM;
4574 tucs = WTX_TCPIP_TUCSS(offset) |
4575 WTX_TCPIP_TUCSO(offset +
4576 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
4577 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4578 } else if ((m0->m_pkthdr.csum_flags &
4579 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
4580 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4581 fields |= WTX_TXSM;
4582 tucs = WTX_TCPIP_TUCSS(offset) |
4583 WTX_TCPIP_TUCSO(offset +
4584 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
4585 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4586 } else {
4587 /* Just initialize it to a valid TCP context. */
4588 tucs = WTX_TCPIP_TUCSS(offset) |
4589 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
4590 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4591 }
4592
4593 /* Fill in the context descriptor. */
4594 t = (struct livengood_tcpip_ctxdesc *)
4595 &sc->sc_txdescs[sc->sc_txnext];
4596 t->tcpip_ipcs = htole32(ipcs);
4597 t->tcpip_tucs = htole32(tucs);
4598 t->tcpip_cmdlen = htole32(cmdlen);
4599 t->tcpip_seg = htole32(seg);
4600 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4601
4602 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4603 txs->txs_ndesc++;
4604
4605 *cmdp = cmd;
4606 *fieldsp = fields;
4607
4608 return 0;
4609 }
4610
4611 static void
4612 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
4613 {
4614 struct mbuf *m;
4615 int i;
4616
4617 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
4618 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
4619 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
4620 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
4621 m->m_data, m->m_len, m->m_flags);
4622 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
4623 i, i == 1 ? "" : "s");
4624 }
4625
4626 /*
4627 * wm_82547_txfifo_stall:
4628 *
4629 * Callout used to wait for the 82547 Tx FIFO to drain,
4630 * reset the FIFO pointers, and restart packet transmission.
4631 */
4632 static void
4633 wm_82547_txfifo_stall(void *arg)
4634 {
4635 struct wm_softc *sc = arg;
4636 #ifndef WM_MPSAFE
4637 int s;
4638
4639 s = splnet();
4640 #endif
4641 WM_TX_LOCK(sc);
4642
4643 if (sc->sc_stopping)
4644 goto out;
4645
4646 if (sc->sc_txfifo_stall) {
4647 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
4648 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
4649 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
4650 /*
4651 * Packets have drained. Stop transmitter, reset
4652 * FIFO pointers, restart transmitter, and kick
4653 * the packet queue.
4654 */
4655 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
4656 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
4657 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
4658 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
4659 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
4660 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
4661 CSR_WRITE(sc, WMREG_TCTL, tctl);
4662 CSR_WRITE_FLUSH(sc);
4663
4664 sc->sc_txfifo_head = 0;
4665 sc->sc_txfifo_stall = 0;
4666 wm_start_locked(&sc->sc_ethercom.ec_if);
4667 } else {
4668 /*
4669 * Still waiting for packets to drain; try again in
4670 * another tick.
4671 */
4672 callout_schedule(&sc->sc_txfifo_ch, 1);
4673 }
4674 }
4675
4676 out:
4677 WM_TX_UNLOCK(sc);
4678 #ifndef WM_MPSAFE
4679 splx(s);
4680 #endif
4681 }
4682
4683 /*
4684 * wm_82547_txfifo_bugchk:
4685 *
4686 * Check for bug condition in the 82547 Tx FIFO. We need to
4687 * prevent enqueueing a packet that would wrap around the end
4688 * of the Tx FIFO ring buffer, otherwise the chip will croak.
4689 *
4690 * We do this by checking the amount of space before the end
4691 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
4692 * the Tx FIFO, wait for all remaining packets to drain, reset
4693 * the internal FIFO pointers to the beginning, and restart
4694 * transmission on the interface.
4695 */
4696 #define WM_FIFO_HDR 0x10
4697 #define WM_82547_PAD_LEN 0x3e0
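/*
 * Illustration of the accounting below: each packet occupies its
 * own length plus a WM_FIFO_HDR (16-byte) header, rounded up to a
 * 16-byte boundary, so a 1514-byte frame consumes
 * roundup(1514 + 0x10, 0x10) = 1536 bytes of FIFO.  The packet is
 * allowed through unless it exceeds the space left before the wrap
 * point by at least WM_82547_PAD_LEN bytes, in which case we stall
 * and wait for the FIFO to drain.
 */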
4698 static int
4699 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
4700 {
4701 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
4702 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
4703
4704 /* Just return if already stalled. */
4705 if (sc->sc_txfifo_stall)
4706 return 1;
4707
4708 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4709 /* Stall only occurs in half-duplex mode. */
4710 goto send_packet;
4711 }
4712
4713 if (len >= WM_82547_PAD_LEN + space) {
4714 sc->sc_txfifo_stall = 1;
4715 callout_schedule(&sc->sc_txfifo_ch, 1);
4716 return 1;
4717 }
4718
4719 send_packet:
4720 sc->sc_txfifo_head += len;
4721 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
4722 sc->sc_txfifo_head -= sc->sc_txfifo_size;
4723
4724 return 0;
4725 }
4726
4727 /*
4728 * wm_start: [ifnet interface function]
4729 *
4730 * Start packet transmission on the interface.
4731 */
4732 static void
4733 wm_start(struct ifnet *ifp)
4734 {
4735 struct wm_softc *sc = ifp->if_softc;
4736
4737 WM_TX_LOCK(sc);
4738 if (!sc->sc_stopping)
4739 wm_start_locked(ifp);
4740 WM_TX_UNLOCK(sc);
4741 }
4742
4743 static void
4744 wm_start_locked(struct ifnet *ifp)
4745 {
4746 struct wm_softc *sc = ifp->if_softc;
4747 struct mbuf *m0;
4748 struct m_tag *mtag;
4749 struct wm_txsoft *txs;
4750 bus_dmamap_t dmamap;
4751 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
4752 bus_addr_t curaddr;
4753 bus_size_t seglen, curlen;
4754 uint32_t cksumcmd;
4755 uint8_t cksumfields;
4756
4757 KASSERT(WM_TX_LOCKED(sc));
4758
4759 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4760 return;
4761
4762 /* Remember the previous number of free descriptors. */
4763 ofree = sc->sc_txfree;
4764
4765 /*
4766 * Loop through the send queue, setting up transmit descriptors
4767 * until we drain the queue, or use up all available transmit
4768 * descriptors.
4769 */
4770 for (;;) {
4771 m0 = NULL;
4772
4773 /* Get a work queue entry. */
4774 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
4775 wm_txintr(sc);
4776 if (sc->sc_txsfree == 0) {
4777 DPRINTF(WM_DEBUG_TX,
4778 ("%s: TX: no free job descriptors\n",
4779 device_xname(sc->sc_dev)));
4780 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
4781 break;
4782 }
4783 }
4784
4785 /* Grab a packet off the queue. */
4786 IFQ_DEQUEUE(&ifp->if_snd, m0);
4787 if (m0 == NULL)
4788 break;
4789
4790 DPRINTF(WM_DEBUG_TX,
4791 ("%s: TX: have packet to transmit: %p\n",
4792 device_xname(sc->sc_dev), m0));
4793
4794 txs = &sc->sc_txsoft[sc->sc_txsnext];
4795 dmamap = txs->txs_dmamap;
4796
4797 use_tso = (m0->m_pkthdr.csum_flags &
4798 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
4799
4800 /*
4801 * So says the Linux driver:
4802 * The controller does a simple calculation to make sure
4803 * there is enough room in the FIFO before initiating the
4804 * DMA for each buffer. The calc is:
4805 * 4 = ceil(buffer len / MSS)
4806 * To make sure we don't overrun the FIFO, adjust the max
4807 * buffer len if the MSS drops.
4808 */
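/*
 * E.g. with a typical MSS of 1460 this caps each DMA segment at
 * 1460 << 2 = 5840 bytes, comfortably under WTX_MAX_LEN; only an
 * MSS so large that four times it reaches WTX_MAX_LEN leaves the
 * cap at WTX_MAX_LEN itself (the non-TSO default).
 */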
4809 dmamap->dm_maxsegsz =
4810 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
4811 ? m0->m_pkthdr.segsz << 2
4812 : WTX_MAX_LEN;
4813
4814 /*
4815 * Load the DMA map. If this fails, the packet either
4816 * didn't fit in the allotted number of segments, or we
4817 * were short on resources. For the too-many-segments
4818 * case, we simply report an error and drop the packet,
4819 * since we can't sanely copy a jumbo packet to a single
4820 * buffer.
4821 */
4822 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
4823 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
4824 if (error) {
4825 if (error == EFBIG) {
4826 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4827 log(LOG_ERR, "%s: Tx packet consumes too many "
4828 "DMA segments, dropping...\n",
4829 device_xname(sc->sc_dev));
4830 wm_dump_mbuf_chain(sc, m0);
4831 m_freem(m0);
4832 continue;
4833 }
4834 /* Short on resources, just stop for now. */
4835 DPRINTF(WM_DEBUG_TX,
4836 ("%s: TX: dmamap load failed: %d\n",
4837 device_xname(sc->sc_dev), error));
4838 break;
4839 }
4840
4841 segs_needed = dmamap->dm_nsegs;
4842 if (use_tso) {
4843 /* For sentinel descriptor; see below. */
4844 segs_needed++;
4845 }
4846
4847 /*
4848 * Ensure we have enough descriptors free to describe
4849 * the packet. Note, we always reserve one descriptor
4850 * at the end of the ring due to the semantics of the
4851 * TDT register, plus one more in the event we need
4852 * to load offload context.
4853 */
4854 if (segs_needed > sc->sc_txfree - 2) {
4855 /*
4856 * Not enough free descriptors to transmit this
4857 * packet. We haven't committed anything yet,
4858 * so just unload the DMA map, put the packet
4859 * back on the queue, and punt. Notify the upper
4860 * layer that there are no more slots left.
4861 */
4862 DPRINTF(WM_DEBUG_TX,
4863 ("%s: TX: need %d (%d) descriptors, have %d\n",
4864 device_xname(sc->sc_dev), dmamap->dm_nsegs,
4865 segs_needed, sc->sc_txfree - 1));
4866 ifp->if_flags |= IFF_OACTIVE;
4867 bus_dmamap_unload(sc->sc_dmat, dmamap);
4868 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
4869 break;
4870 }
4871
4872 /*
4873 * Check for 82547 Tx FIFO bug. We need to do this
4874 * once we know we can transmit the packet, since we
4875 * do some internal FIFO space accounting here.
4876 */
4877 if (sc->sc_type == WM_T_82547 &&
4878 wm_82547_txfifo_bugchk(sc, m0)) {
4879 DPRINTF(WM_DEBUG_TX,
4880 ("%s: TX: 82547 Tx FIFO bug detected\n",
4881 device_xname(sc->sc_dev)));
4882 ifp->if_flags |= IFF_OACTIVE;
4883 bus_dmamap_unload(sc->sc_dmat, dmamap);
4884 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
4885 break;
4886 }
4887
4888 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
4889
4890 DPRINTF(WM_DEBUG_TX,
4891 ("%s: TX: packet has %d (%d) DMA segments\n",
4892 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
4893
4894 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
4895
4896 /*
4897 * Store a pointer to the packet so that we can free it
4898 * later.
4899 *
4900 * Initially, we consider the number of descriptors the
4901 * packet uses to be the number of DMA segments. This may be
4902 * incremented by 1 if we do checksum offload (a descriptor
4903 * is used to set the checksum context).
4904 */
4905 txs->txs_mbuf = m0;
4906 txs->txs_firstdesc = sc->sc_txnext;
4907 txs->txs_ndesc = segs_needed;
4908
4909 /* Set up offload parameters for this packet. */
4910 if (m0->m_pkthdr.csum_flags &
4911 (M_CSUM_TSOv4|M_CSUM_TSOv6|
4912 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
4913 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
4914 if (wm_tx_offload(sc, txs, &cksumcmd,
4915 &cksumfields) != 0) {
4916 /* Error message already displayed. */
4917 bus_dmamap_unload(sc->sc_dmat, dmamap);
4918 continue;
4919 }
4920 } else {
4921 cksumcmd = 0;
4922 cksumfields = 0;
4923 }
4924
4925 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
4926
4927 /* Sync the DMA map. */
4928 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
4929 BUS_DMASYNC_PREWRITE);
4930
4931 /* Initialize the transmit descriptor. */
4932 for (nexttx = sc->sc_txnext, seg = 0;
4933 seg < dmamap->dm_nsegs; seg++) {
4934 for (seglen = dmamap->dm_segs[seg].ds_len,
4935 curaddr = dmamap->dm_segs[seg].ds_addr;
4936 seglen != 0;
4937 curaddr += curlen, seglen -= curlen,
4938 nexttx = WM_NEXTTX(sc, nexttx)) {
4939 curlen = seglen;
4940
4941 /*
4942 * So says the Linux driver:
4943 * Work around for premature descriptor
4944 * write-backs in TSO mode. Append a
4945 * 4-byte sentinel descriptor.
4946 */
4947 if (use_tso &&
4948 seg == dmamap->dm_nsegs - 1 &&
4949 curlen > 8)
4950 curlen -= 4;
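/*
 * The 4 bytes trimmed here are not dropped: seglen is
 * decremented by curlen, so the loop iterates once more
 * and emits them as the separate sentinel descriptor
 * accounted for in segs_needed above.
 */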
4951
4952 wm_set_dma_addr(
4953 &sc->sc_txdescs[nexttx].wtx_addr,
4954 curaddr);
4955 sc->sc_txdescs[nexttx].wtx_cmdlen =
4956 htole32(cksumcmd | curlen);
4957 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
4958 0;
4959 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
4960 cksumfields;
4961 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
4962 lasttx = nexttx;
4963
4964 DPRINTF(WM_DEBUG_TX,
4965 ("%s: TX: desc %d: low %#" PRIx64 ", "
4966 "len %#04zx\n",
4967 device_xname(sc->sc_dev), nexttx,
4968 (uint64_t)curaddr, curlen));
4969 }
4970 }
4971
4972 KASSERT(lasttx != -1);
4973
4974 /*
4975 * Set up the command byte on the last descriptor of
4976 * the packet. If we're in the interrupt delay window,
4977 * delay the interrupt.
4978 */
4979 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4980 htole32(WTX_CMD_EOP | WTX_CMD_RS);
4981
4982 /*
4983 * If VLANs are enabled and the packet has a VLAN tag, set
4984 * up the descriptor to encapsulate the packet for us.
4985 *
4986 * This is only valid on the last descriptor of the packet.
4987 */
4988 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4989 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4990 htole32(WTX_CMD_VLE);
4991 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
4992 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
4993 }
4994
4995 txs->txs_lastdesc = lasttx;
4996
4997 DPRINTF(WM_DEBUG_TX,
4998 ("%s: TX: desc %d: cmdlen 0x%08x\n",
4999 device_xname(sc->sc_dev),
5000 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5001
5002 /* Sync the descriptors we're using. */
5003 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5004 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5005
5006 /* Give the packet to the chip. */
5007 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5008
5009 DPRINTF(WM_DEBUG_TX,
5010 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5011
5012 DPRINTF(WM_DEBUG_TX,
5013 ("%s: TX: finished transmitting packet, job %d\n",
5014 device_xname(sc->sc_dev), sc->sc_txsnext));
5015
5016 /* Advance the tx pointer. */
5017 sc->sc_txfree -= txs->txs_ndesc;
5018 sc->sc_txnext = nexttx;
5019
5020 sc->sc_txsfree--;
5021 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5022
5023 /* Pass the packet to any BPF listeners. */
5024 bpf_mtap(ifp, m0);
5025 }
5026
5027 if (m0 != NULL) {
5028 ifp->if_flags |= IFF_OACTIVE;
5029 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5030 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5031 m_freem(m0);
5032 }
5033
5034 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5035 /* No more slots; notify upper layer. */
5036 ifp->if_flags |= IFF_OACTIVE;
5037 }
5038
5039 if (sc->sc_txfree != ofree) {
5040 /* Set a watchdog timer in case the chip flakes out. */
5041 ifp->if_timer = 5;
5042 }
5043 }
5044
5045 /*
5046 * wm_nq_tx_offload:
5047 *
5048 * Set up TCP/IP checksumming parameters for the
5049 * specified packet, for NEWQUEUE devices
5050 */
5051 static int
5052 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
5053 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
5054 {
5055 struct mbuf *m0 = txs->txs_mbuf;
5056 struct m_tag *mtag;
5057 uint32_t vl_len, mssidx, cmdc;
5058 struct ether_header *eh;
5059 int offset, iphl;
5060
5061 /*
5062 * XXX It would be nice if the mbuf pkthdr had offset
5063 * fields for the protocol headers.
5064 */
5065 *cmdlenp = 0;
5066 *fieldsp = 0;
5067
5068 eh = mtod(m0, struct ether_header *);
5069 switch (htons(eh->ether_type)) {
5070 case ETHERTYPE_IP:
5071 case ETHERTYPE_IPV6:
5072 offset = ETHER_HDR_LEN;
5073 break;
5074
5075 case ETHERTYPE_VLAN:
5076 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5077 break;
5078
5079 default:
5080 /* Don't support this protocol or encapsulation. */
5081 *do_csum = false;
5082 return 0;
5083 }
5084 *do_csum = true;
5085 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
5086 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
5087
5088 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
5089 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
5090
5091 if ((m0->m_pkthdr.csum_flags &
5092 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
5093 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5094 } else {
5095 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5096 }
5097 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
5098 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
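/*
 * Illustration of the packing so far (layout per the
 * NQTXC_VLLEN_* macros): an untagged IPv4/TCP frame has
 * MACLEN = 14 and IPLEN = 20, so vl_len holds
 * (14 << NQTXC_VLLEN_MACLEN_SHIFT) | (20 << NQTXC_VLLEN_IPLEN_SHIFT);
 * the VLAN tag, when present, is merged in just below.
 */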
5099
5100 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5101 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
5102 << NQTXC_VLLEN_VLAN_SHIFT);
5103 *cmdlenp |= NQTX_CMD_VLE;
5104 }
5105
5106 mssidx = 0;
5107
5108 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5109 int hlen = offset + iphl;
5110 int tcp_hlen;
5111 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5112
5113 if (__predict_false(m0->m_len <
5114 (hlen + sizeof(struct tcphdr)))) {
5115 /*
5116 * TCP/IP headers are not in the first mbuf; we need
5117 * to do this the slow and painful way. Let's just
5118 * hope this doesn't happen very often.
5119 */
5120 struct tcphdr th;
5121
5122 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5123
5124 m_copydata(m0, hlen, sizeof(th), &th);
5125 if (v4) {
5126 struct ip ip;
5127
5128 m_copydata(m0, offset, sizeof(ip), &ip);
5129 ip.ip_len = 0;
5130 m_copyback(m0,
5131 offset + offsetof(struct ip, ip_len),
5132 sizeof(ip.ip_len), &ip.ip_len);
5133 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5134 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5135 } else {
5136 struct ip6_hdr ip6;
5137
5138 m_copydata(m0, offset, sizeof(ip6), &ip6);
5139 ip6.ip6_plen = 0;
5140 m_copyback(m0,
5141 offset + offsetof(struct ip6_hdr, ip6_plen),
5142 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5143 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5144 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5145 }
5146 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5147 sizeof(th.th_sum), &th.th_sum);
5148
5149 tcp_hlen = th.th_off << 2;
5150 } else {
5151 /*
5152 * TCP/IP headers are in the first mbuf; we can do
5153 * this the easy way.
5154 */
5155 struct tcphdr *th;
5156
5157 if (v4) {
5158 struct ip *ip =
5159 (void *)(mtod(m0, char *) + offset);
5160 th = (void *)(mtod(m0, char *) + hlen);
5161
5162 ip->ip_len = 0;
5163 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5164 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5165 } else {
5166 struct ip6_hdr *ip6 =
5167 (void *)(mtod(m0, char *) + offset);
5168 th = (void *)(mtod(m0, char *) + hlen);
5169
5170 ip6->ip6_plen = 0;
5171 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5172 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5173 }
5174 tcp_hlen = th->th_off << 2;
5175 }
5176 hlen += tcp_hlen;
5177 *cmdlenp |= NQTX_CMD_TSE;
5178
5179 if (v4) {
5180 WM_EVCNT_INCR(&sc->sc_ev_txtso);
5181 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
5182 } else {
5183 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5184 *fieldsp |= NQTXD_FIELDS_TUXSM;
5185 }
5186 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
5187 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5188 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
5189 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
5190 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
5191 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
5192 } else {
5193 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
5194 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5195 }
5196
5197 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
5198 *fieldsp |= NQTXD_FIELDS_IXSM;
5199 cmdc |= NQTXC_CMD_IP4;
5200 }
5201
5202 if (m0->m_pkthdr.csum_flags &
5203 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5204 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5205 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5206 cmdc |= NQTXC_CMD_TCP;
5207 } else {
5208 cmdc |= NQTXC_CMD_UDP;
5209 }
5210 cmdc |= NQTXC_CMD_IP4;
5211 *fieldsp |= NQTXD_FIELDS_TUXSM;
5212 }
5213 if (m0->m_pkthdr.csum_flags &
5214 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5215 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5216 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5217 cmdc |= NQTXC_CMD_TCP;
5218 } else {
5219 cmdc |= NQTXC_CMD_UDP;
5220 }
5221 cmdc |= NQTXC_CMD_IP6;
5222 *fieldsp |= NQTXD_FIELDS_TUXSM;
5223 }
5224
5225 /* Fill in the context descriptor. */
5226 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5227 htole32(vl_len);
5228 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5229 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5230 htole32(cmdc);
5231 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5232 htole32(mssidx);
5233 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5234 DPRINTF(WM_DEBUG_TX,
5235 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5236 sc->sc_txnext, 0, vl_len));
5237 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5238 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5239 txs->txs_ndesc++;
5240 return 0;
5241 }
5242
5243 /*
5244 * wm_nq_start: [ifnet interface function]
5245 *
5246 * Start packet transmission on the interface for NEWQUEUE devices
5247 */
5248 static void
5249 wm_nq_start(struct ifnet *ifp)
5250 {
5251 struct wm_softc *sc = ifp->if_softc;
5252
5253 WM_TX_LOCK(sc);
5254 if (!sc->sc_stopping)
5255 wm_nq_start_locked(ifp);
5256 WM_TX_UNLOCK(sc);
5257 }
5258
5259 static void
5260 wm_nq_start_locked(struct ifnet *ifp)
5261 {
5262 struct wm_softc *sc = ifp->if_softc;
5263 struct mbuf *m0;
5264 struct m_tag *mtag;
5265 struct wm_txsoft *txs;
5266 bus_dmamap_t dmamap;
5267 int error, nexttx, lasttx = -1, seg, segs_needed;
5268 bool do_csum, sent;
5269
5270 KASSERT(WM_TX_LOCKED(sc));
5271
5272 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5273 return;
5274
5275 sent = false;
5276
5277 /*
5278 * Loop through the send queue, setting up transmit descriptors
5279 * until we drain the queue, or use up all available transmit
5280 * descriptors.
5281 */
5282 for (;;) {
5283 m0 = NULL;
5284
5285 /* Get a work queue entry. */
5286 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5287 wm_txintr(sc);
5288 if (sc->sc_txsfree == 0) {
5289 DPRINTF(WM_DEBUG_TX,
5290 ("%s: TX: no free job descriptors\n",
5291 device_xname(sc->sc_dev)));
5292 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5293 break;
5294 }
5295 }
5296
5297 /* Grab a packet off the queue. */
5298 IFQ_DEQUEUE(&ifp->if_snd, m0);
5299 if (m0 == NULL)
5300 break;
5301
5302 DPRINTF(WM_DEBUG_TX,
5303 ("%s: TX: have packet to transmit: %p\n",
5304 device_xname(sc->sc_dev), m0));
5305
5306 txs = &sc->sc_txsoft[sc->sc_txsnext];
5307 dmamap = txs->txs_dmamap;
5308
5309 /*
5310 * Load the DMA map. If this fails, the packet either
5311 * didn't fit in the allotted number of segments, or we
5312 * were short on resources. For the too-many-segments
5313 * case, we simply report an error and drop the packet,
5314 * since we can't sanely copy a jumbo packet to a single
5315 * buffer.
5316 */
5317 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5318 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5319 if (error) {
5320 if (error == EFBIG) {
5321 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5322 log(LOG_ERR, "%s: Tx packet consumes too many "
5323 "DMA segments, dropping...\n",
5324 device_xname(sc->sc_dev));
5325 wm_dump_mbuf_chain(sc, m0);
5326 m_freem(m0);
5327 continue;
5328 }
5329 /* Short on resources, just stop for now. */
5330 DPRINTF(WM_DEBUG_TX,
5331 ("%s: TX: dmamap load failed: %d\n",
5332 device_xname(sc->sc_dev), error));
5333 break;
5334 }
5335
5336 segs_needed = dmamap->dm_nsegs;
5337
5338 /*
5339 * Ensure we have enough descriptors free to describe
5340 * the packet. Note, we always reserve one descriptor
5341 * at the end of the ring due to the semantics of the
5342 * TDT register, plus one more in the event we need
5343 * to load offload context.
5344 */
5345 if (segs_needed > sc->sc_txfree - 2) {
5346 /*
5347 * Not enough free descriptors to transmit this
5348 * packet. We haven't committed anything yet,
5349 * so just unload the DMA map, put the packet
5350 * back on the queue, and punt. Notify the upper
5351 * layer that there are no more slots left.
5352 */
5353 DPRINTF(WM_DEBUG_TX,
5354 ("%s: TX: need %d (%d) descriptors, have %d\n",
5355 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5356 segs_needed, sc->sc_txfree - 1));
5357 ifp->if_flags |= IFF_OACTIVE;
5358 bus_dmamap_unload(sc->sc_dmat, dmamap);
5359 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5360 break;
5361 }
5362
5363 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5364
5365 DPRINTF(WM_DEBUG_TX,
5366 ("%s: TX: packet has %d (%d) DMA segments\n",
5367 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5368
5369 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5370
5371 /*
5372 * Store a pointer to the packet so that we can free it
5373 * later.
5374 *
5375 * Initially, we consider the number of descriptors the
5376 * packet uses to be the number of DMA segments. This may be
5377 * incremented by 1 if we do checksum offload (a descriptor
5378 * is used to set the checksum context).
5379 */
5380 txs->txs_mbuf = m0;
5381 txs->txs_firstdesc = sc->sc_txnext;
5382 txs->txs_ndesc = segs_needed;
5383
5384 /* Set up offload parameters for this packet. */
5385 uint32_t cmdlen, fields, dcmdlen;
5386 if (m0->m_pkthdr.csum_flags &
5387 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5388 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5389 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5390 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5391 &do_csum) != 0) {
5392 /* Error message already displayed. */
5393 bus_dmamap_unload(sc->sc_dmat, dmamap);
5394 continue;
5395 }
5396 } else {
5397 do_csum = false;
5398 cmdlen = 0;
5399 fields = 0;
5400 }
5401
5402 /* Sync the DMA map. */
5403 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5404 BUS_DMASYNC_PREWRITE);
5405
5406 /* Initialize the first transmit descriptor. */
5407 nexttx = sc->sc_txnext;
5408 if (!do_csum) {
5409 /* setup a legacy descriptor */
5410 wm_set_dma_addr(
5411 &sc->sc_txdescs[nexttx].wtx_addr,
5412 dmamap->dm_segs[0].ds_addr);
5413 sc->sc_txdescs[nexttx].wtx_cmdlen =
5414 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5415 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5416 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5417 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5418 NULL) {
5419 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5420 htole32(WTX_CMD_VLE);
5421 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5422 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5423 } else {
5424 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5425 }
5426 dcmdlen = 0;
5427 } else {
5428 /* setup an advanced data descriptor */
5429 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5430 htole64(dmamap->dm_segs[0].ds_addr);
5431 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5432 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5433 htole32(dmamap->dm_segs[0].ds_len | cmdlen );
5434 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5435 htole32(fields);
5436 DPRINTF(WM_DEBUG_TX,
5437 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5438 device_xname(sc->sc_dev), nexttx,
5439 (uint64_t)dmamap->dm_segs[0].ds_addr));
5440 DPRINTF(WM_DEBUG_TX,
5441 ("\t 0x%08x%08x\n", fields,
5442 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5443 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5444 }
5445
5446 lasttx = nexttx;
5447 nexttx = WM_NEXTTX(sc, nexttx);
5448 /*
5449 * Fill in the remaining descriptors; the legacy and advanced
5450 * formats are identical from here on.
5451 */
5452 for (seg = 1; seg < dmamap->dm_nsegs;
5453 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5454 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5455 htole64(dmamap->dm_segs[seg].ds_addr);
5456 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5457 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5458 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5459 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5460 lasttx = nexttx;
5461
5462 DPRINTF(WM_DEBUG_TX,
5463 ("%s: TX: desc %d: %#" PRIx64 ", "
5464 "len %#04zx\n",
5465 device_xname(sc->sc_dev), nexttx,
5466 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5467 dmamap->dm_segs[seg].ds_len));
5468 }
5469
5470 KASSERT(lasttx != -1);
5471
5472 /*
5473 * Set up the command byte on the last descriptor of
5474 * the packet. If we're in the interrupt delay window,
5475 * delay the interrupt.
5476 */
5477 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5478 (NQTX_CMD_EOP | NQTX_CMD_RS));
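/*
 * The legacy and advanced descriptor layouts both keep the
 * command/length word at the same offset, so it is safe to OR
 * EOP/RS in through the legacy view either way; the KASSERT
 * above checks that the command bit values match, too.
 */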
5479 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5480 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5481
5482 txs->txs_lastdesc = lasttx;
5483
5484 DPRINTF(WM_DEBUG_TX,
5485 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5486 device_xname(sc->sc_dev),
5487 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5488
5489 /* Sync the descriptors we're using. */
5490 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5491 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5492
5493 /* Give the packet to the chip. */
5494 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5495 sent = true;
5496
5497 DPRINTF(WM_DEBUG_TX,
5498 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5499
5500 DPRINTF(WM_DEBUG_TX,
5501 ("%s: TX: finished transmitting packet, job %d\n",
5502 device_xname(sc->sc_dev), sc->sc_txsnext));
5503
5504 /* Advance the tx pointer. */
5505 sc->sc_txfree -= txs->txs_ndesc;
5506 sc->sc_txnext = nexttx;
5507
5508 sc->sc_txsfree--;
5509 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5510
5511 /* Pass the packet to any BPF listeners. */
5512 bpf_mtap(ifp, m0);
5513 }
5514
5515 if (m0 != NULL) {
5516 ifp->if_flags |= IFF_OACTIVE;
5517 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5518 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5519 m_freem(m0);
5520 }
5521
5522 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5523 /* No more slots; notify upper layer. */
5524 ifp->if_flags |= IFF_OACTIVE;
5525 }
5526
5527 if (sent) {
5528 /* Set a watchdog timer in case the chip flakes out. */
5529 ifp->if_timer = 5;
5530 }
5531 }
5532
5533 /* Interrupt */
5534
5535 /*
5536 * wm_txintr:
5537 *
5538 * Helper; handle transmit interrupts.
5539 */
5540 static void
5541 wm_txintr(struct wm_softc *sc)
5542 {
5543 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5544 struct wm_txsoft *txs;
5545 uint8_t status;
5546 int i;
5547
5548 if (sc->sc_stopping)
5549 return;
5550
5551 ifp->if_flags &= ~IFF_OACTIVE;
5552
5553 /*
5554 * Go through the Tx list and free mbufs for those
5555 * frames which have been transmitted.
5556 */
5557 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5558 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
5559 txs = &sc->sc_txsoft[i];
5560
5561 DPRINTF(WM_DEBUG_TX,
5562 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
5563
5564 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
5565 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5566
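/*
 * Look at the descriptor-done (DD) bit in the last descriptor
 * of the job; if the chip hasn't set it, the job is still in
 * flight, so hand the descriptor back to the chip (PREREAD)
 * and stop scanning.
 */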
5567 status =
5568 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
5569 if ((status & WTX_ST_DD) == 0) {
5570 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
5571 BUS_DMASYNC_PREREAD);
5572 break;
5573 }
5574
5575 DPRINTF(WM_DEBUG_TX,
5576 ("%s: TX: job %d done: descs %d..%d\n",
5577 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
5578 txs->txs_lastdesc));
5579
5580 /*
5581 * XXX We should probably be using the statistics
5582 * XXX registers, but I don't know if they exist
5583 * XXX on chips before the i82544.
5584 */
5585
5586 #ifdef WM_EVENT_COUNTERS
5587 if (status & WTX_ST_TU)
5588 WM_EVCNT_INCR(&sc->sc_ev_tu);
5589 #endif /* WM_EVENT_COUNTERS */
5590
5591 if (status & (WTX_ST_EC|WTX_ST_LC)) {
5592 ifp->if_oerrors++;
5593 if (status & WTX_ST_LC)
5594 log(LOG_WARNING, "%s: late collision\n",
5595 device_xname(sc->sc_dev));
5596 else if (status & WTX_ST_EC) {
5597 ifp->if_collisions += 16;
5598 log(LOG_WARNING, "%s: excessive collisions\n",
5599 device_xname(sc->sc_dev));
5600 }
5601 } else
5602 ifp->if_opackets++;
5603
5604 sc->sc_txfree += txs->txs_ndesc;
5605 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
5606 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5607 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5608 m_freem(txs->txs_mbuf);
5609 txs->txs_mbuf = NULL;
5610 }
5611
5612 /* Update the dirty transmit buffer pointer. */
5613 sc->sc_txsdirty = i;
5614 DPRINTF(WM_DEBUG_TX,
5615 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
5616
5617 /*
5618 * If there are no more pending transmissions, cancel the watchdog
5619 * timer.
5620 */
5621 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
5622 ifp->if_timer = 0;
5623 }
5624
5625 /*
5626 * wm_rxintr:
5627 *
5628 * Helper; handle receive interrupts.
5629 */
5630 static void
5631 wm_rxintr(struct wm_softc *sc)
5632 {
5633 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5634 struct wm_rxsoft *rxs;
5635 struct mbuf *m;
5636 int i, len;
5637 uint8_t status, errors;
5638 uint16_t vlantag;
5639
5640 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
5641 rxs = &sc->sc_rxsoft[i];
5642
5643 DPRINTF(WM_DEBUG_RX,
5644 ("%s: RX: checking descriptor %d\n",
5645 device_xname(sc->sc_dev), i));
5646
5647 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5648
5649 status = sc->sc_rxdescs[i].wrx_status;
5650 errors = sc->sc_rxdescs[i].wrx_errors;
5651 len = le16toh(sc->sc_rxdescs[i].wrx_len);
5652 vlantag = sc->sc_rxdescs[i].wrx_special;
5653
5654 if ((status & WRX_ST_DD) == 0) {
5655 /* We have processed all of the receive descriptors. */
5656 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
5657 break;
5658 }
5659
5660 if (__predict_false(sc->sc_rxdiscard)) {
5661 DPRINTF(WM_DEBUG_RX,
5662 ("%s: RX: discarding contents of descriptor %d\n",
5663 device_xname(sc->sc_dev), i));
5664 WM_INIT_RXDESC(sc, i);
5665 if (status & WRX_ST_EOP) {
5666 /* Reset our state. */
5667 DPRINTF(WM_DEBUG_RX,
5668 ("%s: RX: resetting rxdiscard -> 0\n",
5669 device_xname(sc->sc_dev)));
5670 sc->sc_rxdiscard = 0;
5671 }
5672 continue;
5673 }
5674
5675 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5676 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
5677
5678 m = rxs->rxs_mbuf;
5679
5680 /*
5681 * Add a new receive buffer to the ring, unless of
5682 * course the length is zero. Treat the latter as a
5683 * failed mapping.
5684 */
5685 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
5686 /*
5687 * Failed, throw away what we've done so
5688 * far, and discard the rest of the packet.
5689 */
5690 ifp->if_ierrors++;
5691 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5692 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5693 WM_INIT_RXDESC(sc, i);
5694 if ((status & WRX_ST_EOP) == 0)
5695 sc->sc_rxdiscard = 1;
5696 if (sc->sc_rxhead != NULL)
5697 m_freem(sc->sc_rxhead);
5698 WM_RXCHAIN_RESET(sc);
5699 DPRINTF(WM_DEBUG_RX,
5700 ("%s: RX: Rx buffer allocation failed, "
5701 "dropping packet%s\n", device_xname(sc->sc_dev),
5702 sc->sc_rxdiscard ? " (discard)" : ""));
5703 continue;
5704 }
5705
5706 m->m_len = len;
5707 sc->sc_rxlen += len;
5708 DPRINTF(WM_DEBUG_RX,
5709 ("%s: RX: buffer at %p len %d\n",
5710 device_xname(sc->sc_dev), m->m_data, len));
5711
5712 /* If this is not the end of the packet, keep looking. */
5713 if ((status & WRX_ST_EOP) == 0) {
5714 WM_RXCHAIN_LINK(sc, m);
5715 DPRINTF(WM_DEBUG_RX,
5716 ("%s: RX: not yet EOP, rxlen -> %d\n",
5717 device_xname(sc->sc_dev), sc->sc_rxlen));
5718 continue;
5719 }
5720
5721 /*
5722 * Okay, we have the entire packet now. The chip includes
5723 * the FCS except on the I350/I354 and I21[01] (not all
5724 * chips can strip it), so trim it off here; the previous
5725 * mbuf in the chain may also need shortening if the last
5726 * mbuf is too short to hold the whole FCS.
5727 * Due to an erratum, the RCTL_SECRC bit in the RCTL
5728 * register is always set on the I350, which already
5729 * strips the FCS, so we must not trim it again.
5730 */
5731 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
5732 && (sc->sc_type != WM_T_I210)
5733 && (sc->sc_type != WM_T_I211)) {
5734 if (m->m_len < ETHER_CRC_LEN) {
5735 sc->sc_rxtail->m_len
5736 -= (ETHER_CRC_LEN - m->m_len);
5737 m->m_len = 0;
5738 } else
5739 m->m_len -= ETHER_CRC_LEN;
5740 len = sc->sc_rxlen - ETHER_CRC_LEN;
5741 } else
5742 len = sc->sc_rxlen;
5743
5744 WM_RXCHAIN_LINK(sc, m);
5745
5746 *sc->sc_rxtailp = NULL;
5747 m = sc->sc_rxhead;
5748
5749 WM_RXCHAIN_RESET(sc);
5750
5751 DPRINTF(WM_DEBUG_RX,
5752 ("%s: RX: have entire packet, len -> %d\n",
5753 device_xname(sc->sc_dev), len));
5754
5755 /* If an error occurred, update stats and drop the packet. */
5756 if (errors &
5757 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
5758 if (errors & WRX_ER_SE)
5759 log(LOG_WARNING, "%s: symbol error\n",
5760 device_xname(sc->sc_dev));
5761 else if (errors & WRX_ER_SEQ)
5762 log(LOG_WARNING, "%s: receive sequence error\n",
5763 device_xname(sc->sc_dev));
5764 else if (errors & WRX_ER_CE)
5765 log(LOG_WARNING, "%s: CRC error\n",
5766 device_xname(sc->sc_dev));
5767 m_freem(m);
5768 continue;
5769 }
5770
5771 /* No errors. Receive the packet. */
5772 m->m_pkthdr.rcvif = ifp;
5773 m->m_pkthdr.len = len;
5774
5775 /*
5776 * If VLANs are enabled, VLAN packets have been unwrapped
5777 * for us. Associate the tag with the packet.
5778 */
5779 /* XXX should check for i350 and i354 */
5780 if ((status & WRX_ST_VP) != 0) {
5781 VLAN_INPUT_TAG(ifp, m,
5782 le16toh(vlantag),
5783 continue);
5784 }
5785
5786 /* Set up checksum info for this packet. */
5787 if ((status & WRX_ST_IXSM) == 0) {
5788 if (status & WRX_ST_IPCS) {
5789 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
5790 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
5791 if (errors & WRX_ER_IPE)
5792 m->m_pkthdr.csum_flags |=
5793 M_CSUM_IPv4_BAD;
5794 }
5795 if (status & WRX_ST_TCPCS) {
5796 /*
5797 * Note: we don't know if this was TCP or UDP,
5798 * so we just set both bits, and expect the
5799 * upper layers to deal.
5800 */
5801 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
5802 m->m_pkthdr.csum_flags |=
5803 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
5804 M_CSUM_TCPv6 | M_CSUM_UDPv6;
5805 if (errors & WRX_ER_TCPE)
5806 m->m_pkthdr.csum_flags |=
5807 M_CSUM_TCP_UDP_BAD;
5808 }
5809 }
5810
5811 ifp->if_ipackets++;
5812
5813 WM_RX_UNLOCK(sc);
5814
5815 /* Pass this up to any BPF listeners. */
5816 bpf_mtap(ifp, m);
5817
5818 /* Pass it on. */
5819 (*ifp->if_input)(ifp, m);
5820
5821 WM_RX_LOCK(sc);
5822
5823 if (sc->sc_stopping)
5824 break;
5825 }
5826
5827 /* Update the receive pointer. */
5828 sc->sc_rxptr = i;
5829
5830 DPRINTF(WM_DEBUG_RX,
5831 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
5832 }
5833
5834 /*
5835 * wm_linkintr_gmii:
5836 *
5837 * Helper; handle link interrupts for GMII.
5838 */
5839 static void
5840 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
5841 {
5842
5843 KASSERT(WM_TX_LOCKED(sc));
5844
5845 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5846 __func__));
5847
5848 if (icr & ICR_LSC) {
5849 DPRINTF(WM_DEBUG_LINK,
5850 ("%s: LINK: LSC -> mii_pollstat\n",
5851 device_xname(sc->sc_dev)));
5852 mii_pollstat(&sc->sc_mii);
5853 if (sc->sc_type == WM_T_82543) {
5854 int miistatus, active;
5855
5856 /*
5857 * With the 82543, we need to force the MAC's
5858 * speed and duplex to match the PHY's speed
5859 * and duplex configuration.
5860 */
5861 miistatus = sc->sc_mii.mii_media_status;
5862
5863 if (miistatus & IFM_ACTIVE) {
5864 active = sc->sc_mii.mii_media_active;
5865 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5866 switch (IFM_SUBTYPE(active)) {
5867 case IFM_10_T:
5868 sc->sc_ctrl |= CTRL_SPEED_10;
5869 break;
5870 case IFM_100_TX:
5871 sc->sc_ctrl |= CTRL_SPEED_100;
5872 break;
5873 case IFM_1000_T:
5874 sc->sc_ctrl |= CTRL_SPEED_1000;
5875 break;
5876 default:
5877 /*
5878 * Fiber?
5879 * Should not enter here.
5880 */
5881 printf("unknown media (%x)\n",
5882 active);
5883 break;
5884 }
5885 if (active & IFM_FDX)
5886 sc->sc_ctrl |= CTRL_FD;
5887 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5888 }
5889 } else if ((sc->sc_type == WM_T_ICH8)
5890 && (sc->sc_phytype == WMPHY_IGP_3)) {
5891 wm_kmrn_lock_loss_workaround_ich8lan(sc);
5892 } else if (sc->sc_type == WM_T_PCH) {
5893 wm_k1_gig_workaround_hv(sc,
5894 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
5895 }
5896
5897 if ((sc->sc_phytype == WMPHY_82578)
5898 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
5899 == IFM_1000_T)) {
5900
5901 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
5902 delay(200*1000); /* XXX too big */
5903
5904 /* Link stall fix for link up */
5905 wm_gmii_hv_writereg(sc->sc_dev, 1,
5906 HV_MUX_DATA_CTRL,
5907 HV_MUX_DATA_CTRL_GEN_TO_MAC
5908 | HV_MUX_DATA_CTRL_FORCE_SPEED);
5909 wm_gmii_hv_writereg(sc->sc_dev, 1,
5910 HV_MUX_DATA_CTRL,
5911 HV_MUX_DATA_CTRL_GEN_TO_MAC);
5912 }
5913 }
5914 } else if (icr & ICR_RXSEQ) {
5915 DPRINTF(WM_DEBUG_LINK,
5916 ("%s: LINK Receive sequence error\n",
5917 device_xname(sc->sc_dev)));
5918 }
5919 }
5920
5921 /*
5922 * wm_linkintr_tbi:
5923 *
5924 * Helper; handle link interrupts for TBI mode.
5925 */
5926 static void
5927 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
5928 {
5929 uint32_t status;
5930
5931 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5932 __func__));
5933
5934 status = CSR_READ(sc, WMREG_STATUS);
5935 if (icr & ICR_LSC) {
5936 if (status & STATUS_LU) {
5937 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
5938 device_xname(sc->sc_dev),
5939 (status & STATUS_FD) ? "FDX" : "HDX"));
5940 /*
5941 * NOTE: the hardware updates TFCE and RFCE in CTRL
5942 * automatically, so we must re-read it into sc->sc_ctrl.
5943 */
5944
5945 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5946 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5947 sc->sc_fcrtl &= ~FCRTL_XONE;
5948 if (status & STATUS_FD)
5949 sc->sc_tctl |=
5950 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5951 else
5952 sc->sc_tctl |=
5953 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5954 if (sc->sc_ctrl & CTRL_TFCE)
5955 sc->sc_fcrtl |= FCRTL_XONE;
5956 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5957 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5958 WMREG_OLD_FCRTL : WMREG_FCRTL,
5959 sc->sc_fcrtl);
5960 sc->sc_tbi_linkup = 1;
5961 } else {
5962 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
5963 device_xname(sc->sc_dev)));
5964 sc->sc_tbi_linkup = 0;
5965 }
5966 wm_tbi_set_linkled(sc);
5967 } else if (icr & ICR_RXSEQ) {
5968 DPRINTF(WM_DEBUG_LINK,
5969 ("%s: LINK: Receive sequence error\n",
5970 device_xname(sc->sc_dev)));
5971 }
5972 }
5973
5974 /*
5975 * wm_linkintr:
5976 *
5977 * Helper; handle link interrupts.
5978 */
5979 static void
5980 wm_linkintr(struct wm_softc *sc, uint32_t icr)
5981 {
5982
5983 if (sc->sc_flags & WM_F_HAS_MII)
5984 wm_linkintr_gmii(sc, icr);
5985 else
5986 wm_linkintr_tbi(sc, icr);
5987 }
5988
5989 /*
5990 * wm_intr:
5991 *
5992 * Interrupt service routine.
5993 */
5994 static int
5995 wm_intr(void *arg)
5996 {
5997 struct wm_softc *sc = arg;
5998 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5999 uint32_t icr;
6000 int handled = 0;
6001
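/*
 * Reading ICR acknowledges (clears) the pending interrupt
 * causes, so keep reading and dispatching until none of the
 * causes we enabled (sc_icr) remain set.
 */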
6002 while (1 /* CONSTCOND */) {
6003 icr = CSR_READ(sc, WMREG_ICR);
6004 if ((icr & sc->sc_icr) == 0)
6005 break;
6006 rnd_add_uint32(&sc->rnd_source, icr);
6007
6008 WM_RX_LOCK(sc);
6009
6010 if (sc->sc_stopping) {
6011 WM_RX_UNLOCK(sc);
6012 break;
6013 }
6014
6015 handled = 1;
6016
6017 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6018 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
6019 DPRINTF(WM_DEBUG_RX,
6020 ("%s: RX: got Rx intr 0x%08x\n",
6021 device_xname(sc->sc_dev),
6022 icr & (ICR_RXDMT0|ICR_RXT0)));
6023 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
6024 }
6025 #endif
6026 wm_rxintr(sc);
6027
6028 WM_RX_UNLOCK(sc);
6029 WM_TX_LOCK(sc);
6030
6031 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6032 if (icr & ICR_TXDW) {
6033 DPRINTF(WM_DEBUG_TX,
6034 ("%s: TX: got TXDW interrupt\n",
6035 device_xname(sc->sc_dev)));
6036 WM_EVCNT_INCR(&sc->sc_ev_txdw);
6037 }
6038 #endif
6039 wm_txintr(sc);
6040
6041 if (icr & (ICR_LSC|ICR_RXSEQ)) {
6042 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
6043 wm_linkintr(sc, icr);
6044 }
6045
6046 WM_TX_UNLOCK(sc);
6047
6048 if (icr & ICR_RXO) {
6049 #if defined(WM_DEBUG)
6050 log(LOG_WARNING, "%s: Receive overrun\n",
6051 device_xname(sc->sc_dev));
6052 #endif /* defined(WM_DEBUG) */
6053 }
6054 }
6055
6056 if (handled) {
6057 /* Try to get more packets going. */
6058 ifp->if_start(ifp);
6059 }
6060
6061 return handled;
6062 }
6063
6064 /*
6065 * Media related.
6066 * GMII, SGMII, TBI (and SERDES)
6067 */
6068
6069 /* GMII related */
6070
6071 /*
6072 * wm_gmii_reset:
6073 *
6074 * Reset the PHY.
6075 */
6076 static void
6077 wm_gmii_reset(struct wm_softc *sc)
6078 {
6079 uint32_t reg;
6080 int rv;
6081
6082 /* get phy semaphore */
6083 switch (sc->sc_type) {
6084 case WM_T_82571:
6085 case WM_T_82572:
6086 case WM_T_82573:
6087 case WM_T_82574:
6088 case WM_T_82583:
6089 /* XXX should get sw semaphore, too */
6090 rv = wm_get_swsm_semaphore(sc);
6091 break;
6092 case WM_T_82575:
6093 case WM_T_82576:
6094 case WM_T_82580:
6095 case WM_T_I350:
6096 case WM_T_I354:
6097 case WM_T_I210:
6098 case WM_T_I211:
6099 case WM_T_80003:
6100 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6101 break;
6102 case WM_T_ICH8:
6103 case WM_T_ICH9:
6104 case WM_T_ICH10:
6105 case WM_T_PCH:
6106 case WM_T_PCH2:
6107 case WM_T_PCH_LPT:
6108 rv = wm_get_swfwhw_semaphore(sc);
6109 break;
6110 default:
6111 /* nothing to do */
6112 rv = 0;
6113 break;
6114 }
6115 if (rv != 0) {
6116 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6117 __func__);
6118 return;
6119 }
6120
6121 switch (sc->sc_type) {
6122 case WM_T_82542_2_0:
6123 case WM_T_82542_2_1:
6124 /* null */
6125 break;
6126 case WM_T_82543:
6127 /*
6128 * With the 82543, we need to force the MAC's speed and duplex
6129 * to match the PHY's speed and duplex configuration.
6130 * In addition, we need to perform a hardware reset on the PHY
6131 * to take it out of reset.
6132 */
6133 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6134 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6135
6136 /* The PHY reset pin is active-low. */
6137 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6138 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6139 CTRL_EXT_SWDPIN(4));
6140 reg |= CTRL_EXT_SWDPIO(4);
6141
6142 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6143 CSR_WRITE_FLUSH(sc);
6144 delay(10*1000);
6145
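/* Drive the (active-low) reset pin high again to release the PHY. */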
6146 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6147 CSR_WRITE_FLUSH(sc);
6148 delay(150);
6149 #if 0
6150 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6151 #endif
6152 delay(20*1000); /* XXX extra delay to get PHY ID? */
6153 break;
6154 case WM_T_82544: /* reset 10000us */
6155 case WM_T_82540:
6156 case WM_T_82545:
6157 case WM_T_82545_3:
6158 case WM_T_82546:
6159 case WM_T_82546_3:
6160 case WM_T_82541:
6161 case WM_T_82541_2:
6162 case WM_T_82547:
6163 case WM_T_82547_2:
6164 case WM_T_82571: /* reset 100us */
6165 case WM_T_82572:
6166 case WM_T_82573:
6167 case WM_T_82574:
6168 case WM_T_82575:
6169 case WM_T_82576:
6170 case WM_T_82580:
6171 case WM_T_I350:
6172 case WM_T_I354:
6173 case WM_T_I210:
6174 case WM_T_I211:
6175 case WM_T_82583:
6176 case WM_T_80003:
6177 /* generic reset */
6178 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6179 CSR_WRITE_FLUSH(sc);
6180 delay(20000);
6181 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6182 CSR_WRITE_FLUSH(sc);
6183 delay(20000);
6184
6185 if ((sc->sc_type == WM_T_82541)
6186 || (sc->sc_type == WM_T_82541_2)
6187 || (sc->sc_type == WM_T_82547)
6188 || (sc->sc_type == WM_T_82547_2)) {
6189 /* workarounds for igp are done in igp_reset() */
6190 /* XXX add code to set LED after phy reset */
6191 }
6192 break;
6193 case WM_T_ICH8:
6194 case WM_T_ICH9:
6195 case WM_T_ICH10:
6196 case WM_T_PCH:
6197 case WM_T_PCH2:
6198 case WM_T_PCH_LPT:
6199 /* generic reset */
6200 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6201 CSR_WRITE_FLUSH(sc);
6202 delay(100);
6203 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6204 CSR_WRITE_FLUSH(sc);
6205 delay(150);
6206 break;
6207 default:
6208 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6209 __func__);
6210 break;
6211 }
6212
6213 /* release PHY semaphore */
6214 switch (sc->sc_type) {
6215 case WM_T_82571:
6216 case WM_T_82572:
6217 case WM_T_82573:
6218 case WM_T_82574:
6219 case WM_T_82583:
6220 /* XXX should put sw semaphore, too */
6221 wm_put_swsm_semaphore(sc);
6222 break;
6223 case WM_T_82575:
6224 case WM_T_82576:
6225 case WM_T_82580:
6226 case WM_T_I350:
6227 case WM_T_I354:
6228 case WM_T_I210:
6229 case WM_T_I211:
6230 case WM_T_80003:
6231 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6232 break;
6233 case WM_T_ICH8:
6234 case WM_T_ICH9:
6235 case WM_T_ICH10:
6236 case WM_T_PCH:
6237 case WM_T_PCH2:
6238 case WM_T_PCH_LPT:
6239 wm_put_swfwhw_semaphore(sc);
6240 break;
6241 default:
6242 /* nothing to do */
6244 break;
6245 }
6246
6247 /* get_cfg_done */
6248 wm_get_cfg_done(sc);
6249
6250 /* extra setup */
6251 switch (sc->sc_type) {
6252 case WM_T_82542_2_0:
6253 case WM_T_82542_2_1:
6254 case WM_T_82543:
6255 case WM_T_82544:
6256 case WM_T_82540:
6257 case WM_T_82545:
6258 case WM_T_82545_3:
6259 case WM_T_82546:
6260 case WM_T_82546_3:
6261 case WM_T_82541_2:
6262 case WM_T_82547_2:
6263 case WM_T_82571:
6264 case WM_T_82572:
6265 case WM_T_82573:
6266 case WM_T_82574:
6267 case WM_T_82575:
6268 case WM_T_82576:
6269 case WM_T_82580:
6270 case WM_T_I350:
6271 case WM_T_I354:
6272 case WM_T_I210:
6273 case WM_T_I211:
6274 case WM_T_82583:
6275 case WM_T_80003:
6276 /* null */
6277 break;
6278 case WM_T_82541:
6279 case WM_T_82547:
6280 /* XXX Configure the activity LED after PHY reset */
6281 break;
6282 case WM_T_ICH8:
6283 case WM_T_ICH9:
6284 case WM_T_ICH10:
6285 case WM_T_PCH:
6286 case WM_T_PCH2:
6287 case WM_T_PCH_LPT:
6288 /* Allow time for h/w to get to a quiescent state after reset */
6289 delay(10*1000);
6290
6291 if (sc->sc_type == WM_T_PCH)
6292 wm_hv_phy_workaround_ich8lan(sc);
6293
6294 if (sc->sc_type == WM_T_PCH2)
6295 wm_lv_phy_workaround_ich8lan(sc);
6296
6297 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6298 /*
6299 * dummy read to clear the phy wakeup bit after lcd
6300 * reset
6301 */
6302 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6303 }
6304
6305 /*
6306 * XXX Configure the LCD with the extended configuration region
6307 * in NVM
6308 */
6309
6310 /* Configure the LCD with the OEM bits in NVM */
6311 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6312 || (sc->sc_type == WM_T_PCH_LPT)) {
6313 /*
6314 * Disable LPLU.
6315 * XXX It seems that 82567 has LPLU, too.
6316 */
6317 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6318 reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6319 reg |= HV_OEM_BITS_ANEGNOW;
6320 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6321 }
6322 break;
6323 default:
6324 panic("%s: unknown type\n", __func__);
6325 break;
6326 }
6327 }
6328
6329 /*
6330 * wm_get_phy_id_82575:
6331 *
6332 * Return PHY ID. Return -1 if it failed.
6333 */
6334 static int
6335 wm_get_phy_id_82575(struct wm_softc *sc)
6336 {
6337 uint32_t reg;
6338 int phyid = -1;
6339
6340 /* XXX */
6341 if ((sc->sc_flags & WM_F_SGMII) == 0)
6342 return -1;
6343
6344 if (wm_sgmii_uses_mdio(sc)) {
6345 switch (sc->sc_type) {
6346 case WM_T_82575:
6347 case WM_T_82576:
6348 reg = CSR_READ(sc, WMREG_MDIC);
6349 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6350 break;
6351 case WM_T_82580:
6352 case WM_T_I350:
6353 case WM_T_I354:
6354 case WM_T_I210:
6355 case WM_T_I211:
6356 reg = CSR_READ(sc, WMREG_MDICNFG);
6357 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6358 break;
6359 default:
6360 return -1;
6361 }
6362 }
6363
6364 return phyid;
6365 }
6366
6367
6368 /*
6369 * wm_gmii_mediainit:
6370 *
6371 * Initialize media for use on 1000BASE-T devices.
6372 */
6373 static void
6374 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6375 {
6376 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6377 struct mii_data *mii = &sc->sc_mii;
6378 uint32_t reg;
6379
6380 /* We have GMII. */
6381 sc->sc_flags |= WM_F_HAS_MII;
6382
6383 if (sc->sc_type == WM_T_80003)
6384 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6385 else
6386 sc->sc_tipg = TIPG_1000T_DFLT;
6387
6388 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
6389 if ((sc->sc_type == WM_T_82580)
6390 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6391 || (sc->sc_type == WM_T_I211)) {
6392 reg = CSR_READ(sc, WMREG_PHPM);
6393 reg &= ~PHPM_GO_LINK_D;
6394 CSR_WRITE(sc, WMREG_PHPM, reg);
6395 }
6396
6397 /*
6398 * Let the chip set speed/duplex on its own based on
6399 * signals from the PHY.
6400 * XXXbouyer - I'm not sure this is right for the 80003,
6401 * the em driver only sets CTRL_SLU here - but it seems to work.
6402 */
6403 sc->sc_ctrl |= CTRL_SLU;
6404 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6405
6406 /* Initialize our media structures and probe the GMII. */
6407 mii->mii_ifp = ifp;
6408
6409 /*
6410 * Determine the PHY access method.
6411 *
6412 * For SGMII, use SGMII specific method.
6413 *
6414 * For some devices, we can determine the PHY access method
6415 * from sc_type.
6416 *
6417 * For ICH8 variants, it's difficult to determine the PHY access
6418 * method by sc_type, so use the PCI product ID for some devices.
6419 * For other ICH8 variants, try igp's method first; if no PHY is
6420 * detected that way, fall back to bm's method.
6421 */
6422 switch (prodid) {
6423 case PCI_PRODUCT_INTEL_PCH_M_LM:
6424 case PCI_PRODUCT_INTEL_PCH_M_LC:
6425 /* 82577 */
6426 sc->sc_phytype = WMPHY_82577;
6427 mii->mii_readreg = wm_gmii_hv_readreg;
6428 mii->mii_writereg = wm_gmii_hv_writereg;
6429 break;
6430 case PCI_PRODUCT_INTEL_PCH_D_DM:
6431 case PCI_PRODUCT_INTEL_PCH_D_DC:
6432 /* 82578 */
6433 sc->sc_phytype = WMPHY_82578;
6434 mii->mii_readreg = wm_gmii_hv_readreg;
6435 mii->mii_writereg = wm_gmii_hv_writereg;
6436 break;
6437 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6438 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6439 /* 82579 */
6440 sc->sc_phytype = WMPHY_82579;
6441 mii->mii_readreg = wm_gmii_hv_readreg;
6442 mii->mii_writereg = wm_gmii_hv_writereg;
6443 break;
6444 case PCI_PRODUCT_INTEL_I217_LM:
6445 case PCI_PRODUCT_INTEL_I217_V:
6446 case PCI_PRODUCT_INTEL_I218_LM:
6447 case PCI_PRODUCT_INTEL_I218_V:
6448 /* I21[78] */
6449 mii->mii_readreg = wm_gmii_hv_readreg;
6450 mii->mii_writereg = wm_gmii_hv_writereg;
6451 break;
6452 case PCI_PRODUCT_INTEL_82801I_BM:
6453 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6454 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6455 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6456 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6457 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6458 /* 82567 */
6459 sc->sc_phytype = WMPHY_BM;
6460 mii->mii_readreg = wm_gmii_bm_readreg;
6461 mii->mii_writereg = wm_gmii_bm_writereg;
6462 break;
6463 default:
6464 if (((sc->sc_flags & WM_F_SGMII) != 0)
6465 && !wm_sgmii_uses_mdio(sc)) {
6466 mii->mii_readreg = wm_sgmii_readreg;
6467 mii->mii_writereg = wm_sgmii_writereg;
6468 } else if (sc->sc_type >= WM_T_80003) {
6469 mii->mii_readreg = wm_gmii_i80003_readreg;
6470 mii->mii_writereg = wm_gmii_i80003_writereg;
6471 } else if (sc->sc_type >= WM_T_I210) {
6472 mii->mii_readreg = wm_gmii_i82544_readreg;
6473 mii->mii_writereg = wm_gmii_i82544_writereg;
6474 } else if (sc->sc_type >= WM_T_82580) {
6475 sc->sc_phytype = WMPHY_82580;
6476 mii->mii_readreg = wm_gmii_82580_readreg;
6477 mii->mii_writereg = wm_gmii_82580_writereg;
6478 } else if (sc->sc_type >= WM_T_82544) {
6479 mii->mii_readreg = wm_gmii_i82544_readreg;
6480 mii->mii_writereg = wm_gmii_i82544_writereg;
6481 } else {
6482 mii->mii_readreg = wm_gmii_i82543_readreg;
6483 mii->mii_writereg = wm_gmii_i82543_writereg;
6484 }
6485 break;
6486 }
6487 mii->mii_statchg = wm_gmii_statchg;
6488
6489 wm_gmii_reset(sc);
6490
6491 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6492 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6493 wm_gmii_mediastatus);
6494
6495 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6496 || (sc->sc_type == WM_T_82580)
6497 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6498 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6499 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6500 /* Attach only one port */
6501 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6502 MII_OFFSET_ANY, MIIF_DOPAUSE);
6503 } else {
6504 int i, id;
6505 uint32_t ctrl_ext;
6506
6507 id = wm_get_phy_id_82575(sc);
6508 if (id != -1) {
6509 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6510 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6511 }
6512 if ((id == -1)
6513 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6514 /* Power on sgmii phy if it is disabled */
6515 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6516 CSR_WRITE(sc, WMREG_CTRL_EXT,
6517 ctrl_ext & ~CTRL_EXT_SWDPIN(3));
6518 CSR_WRITE_FLUSH(sc);
6519 delay(300*1000); /* XXX too long */
6520
6521 /* try PHY addresses 1 through 7 */
6522 for (i = 1; i < 8; i++)
6523 mii_attach(sc->sc_dev, &sc->sc_mii,
6524 0xffffffff, i, MII_OFFSET_ANY,
6525 MIIF_DOPAUSE);
6526
6527 /* restore previous sfp cage power state */
6528 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6529 }
6530 }
6531 } else {
6532 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6533 MII_OFFSET_ANY, MIIF_DOPAUSE);
6534 }
6535
6536 /*
6537 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6538 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6539 */
6540 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6541 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6542 wm_set_mdio_slow_mode_hv(sc);
6543 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6544 MII_OFFSET_ANY, MIIF_DOPAUSE);
6545 }
6546
6547 /*
6548 * (For ICH8 variants)
6549 * If PHY detection failed, use BM's r/w function and retry.
6550 */
6551 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6552 /* if failed, retry with *_bm_* */
6553 mii->mii_readreg = wm_gmii_bm_readreg;
6554 mii->mii_writereg = wm_gmii_bm_writereg;
6555
6556 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6557 MII_OFFSET_ANY, MIIF_DOPAUSE);
6558 }
6559
6560 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6561 /* No PHY was found */
6562 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6563 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6564 sc->sc_phytype = WMPHY_NONE;
6565 } else {
6566 /*
6567 * PHY Found!
6568 * Check PHY type.
6569 */
6570 uint32_t model;
6571 struct mii_softc *child;
6572
6573 child = LIST_FIRST(&mii->mii_phys);
6574 if (device_is_a(child->mii_dev, "igphy")) {
6575 struct igphy_softc *isc = (struct igphy_softc *)child;
6576
6577 model = isc->sc_mii.mii_mpd_model;
6578 if (model == MII_MODEL_yyINTEL_I82566)
6579 sc->sc_phytype = WMPHY_IGP_3;
6580 }
6581
6582 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6583 }
6584 }
6585
6586 /*
6587 * wm_gmii_mediastatus: [ifmedia interface function]
6588 *
6589 * Get the current interface media status on a 1000BASE-T device.
6590 */
6591 static void
6592 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6593 {
6594 struct wm_softc *sc = ifp->if_softc;
6595
6596 ether_mediastatus(ifp, ifmr);
6597 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6598 | sc->sc_flowflags;
6599 }
6600
6601 /*
6602 * wm_gmii_mediachange: [ifmedia interface function]
6603 *
6604 * Set hardware to newly-selected media on a 1000BASE-T device.
6605 */
6606 static int
6607 wm_gmii_mediachange(struct ifnet *ifp)
6608 {
6609 struct wm_softc *sc = ifp->if_softc;
6610 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6611 int rc;
6612
6613 if ((ifp->if_flags & IFF_UP) == 0)
6614 return 0;
6615
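/*
 * For autoselected media, or on chips newer than the 82543, clear
 * the force-speed/duplex bits and let the MAC follow the PHY; only
 * a manually selected media on the 82543 needs the MAC's speed and
 * duplex forced to match.
 */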
6616 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6617 sc->sc_ctrl |= CTRL_SLU;
6618 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6619 || (sc->sc_type > WM_T_82543)) {
6620 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6621 } else {
6622 sc->sc_ctrl &= ~CTRL_ASDE;
6623 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6624 if (ife->ifm_media & IFM_FDX)
6625 sc->sc_ctrl |= CTRL_FD;
6626 switch (IFM_SUBTYPE(ife->ifm_media)) {
6627 case IFM_10_T:
6628 sc->sc_ctrl |= CTRL_SPEED_10;
6629 break;
6630 case IFM_100_TX:
6631 sc->sc_ctrl |= CTRL_SPEED_100;
6632 break;
6633 case IFM_1000_T:
6634 sc->sc_ctrl |= CTRL_SPEED_1000;
6635 break;
6636 default:
6637 panic("wm_gmii_mediachange: bad media 0x%x",
6638 ife->ifm_media);
6639 }
6640 }
6641 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6642 if (sc->sc_type <= WM_T_82543)
6643 wm_gmii_reset(sc);
6644
6645 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6646 return 0;
6647 return rc;
6648 }
6649
6650 #define MDI_IO CTRL_SWDPIN(2)
6651 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6652 #define MDI_CLK CTRL_SWDPIN(3)
6653
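/*
 * wm_i82543_mii_sendbits:
 *
 *	Bit-bang bits out over the MDIO management interface: each bit
 *	is presented on MDI_IO and latched by pulsing MDI_CLK
 *	low-high-low, with roughly 10us of settle time per phase.
 */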
6654 static void
6655 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6656 {
6657 uint32_t i, v;
6658
6659 v = CSR_READ(sc, WMREG_CTRL);
6660 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6661 v |= MDI_DIR | CTRL_SWDPIO(3);
6662
6663 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6664 if (data & i)
6665 v |= MDI_IO;
6666 else
6667 v &= ~MDI_IO;
6668 CSR_WRITE(sc, WMREG_CTRL, v);
6669 CSR_WRITE_FLUSH(sc);
6670 delay(10);
6671 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6672 CSR_WRITE_FLUSH(sc);
6673 delay(10);
6674 CSR_WRITE(sc, WMREG_CTRL, v);
6675 CSR_WRITE_FLUSH(sc);
6676 delay(10);
6677 }
6678 }
6679
6680 static uint32_t
6681 wm_i82543_mii_recvbits(struct wm_softc *sc)
6682 {
6683 uint32_t v, i, data = 0;
6684
6685 v = CSR_READ(sc, WMREG_CTRL);
6686 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6687 v |= CTRL_SWDPIO(3);
6688
6689 CSR_WRITE(sc, WMREG_CTRL, v);
6690 CSR_WRITE_FLUSH(sc);
6691 delay(10);
6692 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6693 CSR_WRITE_FLUSH(sc);
6694 delay(10);
6695 CSR_WRITE(sc, WMREG_CTRL, v);
6696 CSR_WRITE_FLUSH(sc);
6697 delay(10);
6698
6699 for (i = 0; i < 16; i++) {
6700 data <<= 1;
6701 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6702 CSR_WRITE_FLUSH(sc);
6703 delay(10);
6704 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6705 data |= 1;
6706 CSR_WRITE(sc, WMREG_CTRL, v);
6707 CSR_WRITE_FLUSH(sc);
6708 delay(10);
6709 }
6710
6711 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6712 CSR_WRITE_FLUSH(sc);
6713 delay(10);
6714 CSR_WRITE(sc, WMREG_CTRL, v);
6715 CSR_WRITE_FLUSH(sc);
6716 delay(10);
6717
6718 return data;
6719 }
6720
6721 #undef MDI_IO
6722 #undef MDI_DIR
6723 #undef MDI_CLK
6724
6725 /*
6726 * wm_gmii_i82543_readreg: [mii interface function]
6727 *
6728 * Read a PHY register on the GMII (i82543 version).
6729 */
6730 static int
6731 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6732 {
6733 struct wm_softc *sc = device_private(self);
6734 int rv;
6735
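/*
 * IEEE 802.3 clause 22 management frame: a 32-bit preamble of all
 * ones, then the 14 header bits (start, read opcode, PHY address,
 * register address); the PHY then drives the turnaround and the
 * 16 data bits, which wm_i82543_mii_recvbits() samples.
 */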
6736 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6737 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
6738 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6739 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
6740
6741 DPRINTF(WM_DEBUG_GMII,
6742 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6743 device_xname(sc->sc_dev), phy, reg, rv));
6744
6745 return rv;
6746 }
6747
6748 /*
6749 * wm_gmii_i82543_writereg: [mii interface function]
6750 *
6751 * Write a PHY register on the GMII (i82543 version).
6752 */
6753 static void
6754 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6755 {
6756 struct wm_softc *sc = device_private(self);
6757
6758 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6759 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6760 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6761 (MII_COMMAND_START << 30), 32);
6762 }
6763
6764 /*
6765 * wm_gmii_i82544_readreg: [mii interface function]
6766 *
6767 * Read a PHY register on the GMII.
6768 */
6769 static int
6770 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6771 {
6772 struct wm_softc *sc = device_private(self);
6773 uint32_t mdic = 0;
6774 int i, rv;
6775
6776 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6777 MDIC_REGADD(reg));
6778
6779 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6780 mdic = CSR_READ(sc, WMREG_MDIC);
6781 if (mdic & MDIC_READY)
6782 break;
6783 delay(50);
6784 }
6785
6786 if ((mdic & MDIC_READY) == 0) {
6787 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6788 device_xname(sc->sc_dev), phy, reg);
6789 rv = 0;
6790 } else if (mdic & MDIC_E) {
6791 #if 0 /* This is normal if no PHY is present. */
6792 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6793 device_xname(sc->sc_dev), phy, reg);
6794 #endif
6795 rv = 0;
6796 } else {
6797 rv = MDIC_DATA(mdic);
6798 if (rv == 0xffff)
6799 rv = 0;
6800 }
6801
6802 return rv;
6803 }
6804
6805 /*
6806 * wm_gmii_i82544_writereg: [mii interface function]
6807 *
6808 * Write a PHY register on the GMII.
6809 */
6810 static void
6811 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6812 {
6813 struct wm_softc *sc = device_private(self);
6814 uint32_t mdic = 0;
6815 int i;
6816
6817 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6818 MDIC_REGADD(reg) | MDIC_DATA(val));
6819
6820 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6821 mdic = CSR_READ(sc, WMREG_MDIC);
6822 if (mdic & MDIC_READY)
6823 break;
6824 delay(50);
6825 }
6826
6827 if ((mdic & MDIC_READY) == 0)
6828 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6829 device_xname(sc->sc_dev), phy, reg);
6830 else if (mdic & MDIC_E)
6831 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6832 device_xname(sc->sc_dev), phy, reg);
6833 }
6834
6835 /*
6836 * wm_gmii_i80003_readreg: [mii interface function]
6837 *
6838 * Read a PHY register on the Kumeran interface (80003).
6839 * This could be handled by the PHY layer if we didn't have to lock
6840 * the resource ...
6841 */
6842 static int
6843 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6844 {
6845 struct wm_softc *sc = device_private(self);
6846 int sem;
6847 int rv;
6848
6849 if (phy != 1) /* only one PHY on the Kumeran bus */
6850 return 0;
6851
6852 sem = swfwphysem[sc->sc_funcid];
6853 if (wm_get_swfw_semaphore(sc, sem)) {
6854 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6855 __func__);
6856 return 0;
6857 }
6858
6859 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6860 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6861 reg >> GG82563_PAGE_SHIFT);
6862 } else {
6863 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6864 reg >> GG82563_PAGE_SHIFT);
6865 }
6866 /* Wait 200us more to work around a bug in the MDIC register's ready bit */
6867 delay(200);
6868 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6869 delay(200);
6870
6871 wm_put_swfw_semaphore(sc, sem);
6872 return rv;
6873 }
6874
6875 /*
6876 * wm_gmii_i80003_writereg: [mii interface function]
6877 *
6878 * Write a PHY register on the Kumeran interface (80003).
6879 * This could be handled by the PHY layer if we didn't have to lock
6880 * the resource ...
6881 */
6882 static void
6883 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6884 {
6885 struct wm_softc *sc = device_private(self);
6886 int sem;
6887
6888 if (phy != 1) /* only one PHY on the Kumeran bus */
6889 return;
6890
6891 sem = swfwphysem[sc->sc_funcid];
6892 if (wm_get_swfw_semaphore(sc, sem)) {
6893 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6894 __func__);
6895 return;
6896 }
6897
6898 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6899 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6900 reg >> GG82563_PAGE_SHIFT);
6901 } else {
6902 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6903 reg >> GG82563_PAGE_SHIFT);
6904 }
6905 /* Wait 200us more to work around a bug in the MDIC register's ready bit */
6906 delay(200);
6907 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6908 delay(200);
6909
6910 wm_put_swfw_semaphore(sc, sem);
6911 }
6912
6913 /*
6914 * wm_gmii_bm_readreg: [mii interface function]
6915 *
6916 * Read a PHY register on the BM PHY.
6917 * This could be handled by the PHY layer if we didn't have to lock
6918 * the resource ...
6919 */
6920 static int
6921 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6922 {
6923 struct wm_softc *sc = device_private(self);
6924 int sem;
6925 int rv;
6926
6927 sem = swfwphysem[sc->sc_funcid];
6928 if (wm_get_swfw_semaphore(sc, sem)) {
6929 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6930 __func__);
6931 return 0;
6932 }
6933
6934 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6935 if (phy == 1)
6936 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6937 reg);
6938 else
6939 wm_gmii_i82544_writereg(self, phy,
6940 GG82563_PHY_PAGE_SELECT,
6941 reg >> GG82563_PAGE_SHIFT);
6942 }
6943
6944 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6945 wm_put_swfw_semaphore(sc, sem);
6946 return rv;
6947 }
6948
6949 /*
6950 * wm_gmii_bm_writereg: [mii interface function]
6951 *
6952 * Write a PHY register on the BM PHY.
6953 * This could be handled by the PHY layer if we didn't have to lock
6954 * the resource ...
6955 */
6956 static void
6957 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6958 {
6959 struct wm_softc *sc = device_private(self);
6960 int sem;
6961
6962 sem = swfwphysem[sc->sc_funcid];
6963 if (wm_get_swfw_semaphore(sc, sem)) {
6964 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6965 __func__);
6966 return;
6967 }
6968
6969 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6970 if (phy == 1)
6971 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6972 reg);
6973 else
6974 wm_gmii_i82544_writereg(self, phy,
6975 GG82563_PHY_PAGE_SELECT,
6976 reg >> GG82563_PAGE_SHIFT);
6977 }
6978
6979 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6980 wm_put_swfw_semaphore(sc, sem);
6981 }
6982
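/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register (page 800). The sequence
 *	is: select page 769 and set the WUC enable bit, select page 800,
 *	write the register number to the address opcode register, read
 *	or write the data opcode register, then restore the original
 *	page 769 enable state.
 */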
6983 static void
6984 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6985 {
6986 struct wm_softc *sc = device_private(self);
6987 uint16_t regnum = BM_PHY_REG_NUM(offset);
6988 uint16_t wuce;
6989
6990 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6991 if (sc->sc_type == WM_T_PCH) {
6992 /* XXX the e1000 driver does nothing here... why? */
6993 }
6994
6995 /* Set page 769 */
6996 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6997 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6998
6999 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
7000
7001 wuce &= ~BM_WUC_HOST_WU_BIT;
7002 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
7003 wuce | BM_WUC_ENABLE_BIT);
7004
7005 /* Select page 800 */
7006 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7007 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
7008
7009 /* Write page 800 */
7010 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
7011
7012 if (rd)
7013 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7014 else
7015 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7016
7017 /* Set page 769 */
7018 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7019 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7020
7021 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7022 }
7023
7024 /*
7025 * wm_gmii_hv_readreg: [mii interface function]
7026 *
7027 * Read a PHY register on the HV PHY (8257[789] and I21[78]).
7028 * This could be handled by the PHY layer if we didn't have to lock
7029 * the resource ...
7030 */
7031 static int
7032 wm_gmii_hv_readreg(device_t self, int phy, int reg)
7033 {
7034 struct wm_softc *sc = device_private(self);
7035 uint16_t page = BM_PHY_REG_PAGE(reg);
7036 uint16_t regnum = BM_PHY_REG_NUM(reg);
7037 uint16_t val;
7038 int rv;
7039
7040 if (wm_get_swfwhw_semaphore(sc)) {
7041 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7042 __func__);
7043 return 0;
7044 }
7045
7046 /* XXX Workaround failure in MDIO access while cable is disconnected */
7047 if (sc->sc_phytype == WMPHY_82577) {
7048 /* XXX must write */
7049 }
7050
7051 /* Page 800 works differently from the rest, so it has its own function */
7052 if (page == BM_WUC_PAGE) {
7053 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
7054 return val;
7055 }
7056
7057 /*
7058 * Pages below 768 (other than page 0) also work differently;
7059 * that path is not implemented here yet, so just complain.
7060 */
7061 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7062 printf("gmii_hv_readreg!!!\n");
7063 return 0;
7064 }
7065
7066 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7067 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7068 page << BME1000_PAGE_SHIFT);
7069 }
7070
7071 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7072 wm_put_swfwhw_semaphore(sc);
7073 return rv;
7074 }
7075
7076 /*
7077 * wm_gmii_hv_writereg: [mii interface function]
7078 *
7079 * Write a PHY register on the HV PHY (8257[789] and I21[78]).
7080 * This could be handled by the PHY layer if we didn't have to lock
7081 * the resource ...
7082 */
7083 static void
7084 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7085 {
7086 struct wm_softc *sc = device_private(self);
7087 uint16_t page = BM_PHY_REG_PAGE(reg);
7088 uint16_t regnum = BM_PHY_REG_NUM(reg);
7089
7090 if (wm_get_swfwhw_semaphore(sc)) {
7091 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7092 __func__);
7093 return;
7094 }
7095
7096 /* XXX Workaround failure in MDIO access while cable is disconnected */
7097
7098 /* Page 800 works differently from the rest, so it has its own function */
7099 if (page == BM_WUC_PAGE) {
7100 uint16_t tmp;
7101
7102 tmp = val;
7103 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
7104 return;
7105 }
7106
7107 /*
7108 * Pages below 768 (other than page 0) also work differently;
7109 * that path is not implemented here yet, so just complain.
7110 */
7111 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7112 printf("gmii_hv_writereg!!!\n");
7113 return;
7114 }
7115
7116 /*
7117 * XXX Workaround MDIO accesses being disabled after entering IEEE
7118 * Power Down (whenever bit 11 of the PHY control register is set)
7119 */
7120
7121 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7122 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7123 page << BME1000_PAGE_SHIFT);
7124 }
7125
7126 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7127 wm_put_swfwhw_semaphore(sc);
7128 }
7129
7130 /*
7131 * wm_gmii_82580_readreg: [mii interface function]
7132 *
7133 * Read a PHY register on the 82580 and I350.
7134 * This could be handled by the PHY layer if we didn't have to lock the
7135 * resource ...
7136 */
7137 static int
7138 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7139 {
7140 struct wm_softc *sc = device_private(self);
7141 int sem;
7142 int rv;
7143
7144 sem = swfwphysem[sc->sc_funcid];
7145 if (wm_get_swfw_semaphore(sc, sem)) {
7146 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7147 __func__);
7148 return 0;
7149 }
7150
7151 rv = wm_gmii_i82544_readreg(self, phy, reg);
7152
7153 wm_put_swfw_semaphore(sc, sem);
7154 return rv;
7155 }
7156
7157 /*
7158 * wm_gmii_82580_writereg: [mii interface function]
7159 *
7160 * Write a PHY register on the 82580 and I350.
7161 * This could be handled by the PHY layer if we didn't have to lock the
7162 * resource ...
7163 */
7164 static void
7165 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7166 {
7167 struct wm_softc *sc = device_private(self);
7168 int sem;
7169
7170 sem = swfwphysem[sc->sc_funcid];
7171 if (wm_get_swfw_semaphore(sc, sem)) {
7172 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7173 __func__);
7174 return;
7175 }
7176
7177 wm_gmii_i82544_writereg(self, phy, reg, val);
7178
7179 wm_put_swfw_semaphore(sc, sem);
7180 }
7181
7182 /*
7183 * wm_gmii_statchg: [mii interface function]
7184 *
7185 * Callback from MII layer when media changes.
7186 */
7187 static void
7188 wm_gmii_statchg(struct ifnet *ifp)
7189 {
7190 struct wm_softc *sc = ifp->if_softc;
7191 struct mii_data *mii = &sc->sc_mii;
7192
7193 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7194 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7195 sc->sc_fcrtl &= ~FCRTL_XONE;
7196
7197 /*
7198 * Get flow control negotiation result.
7199 */
7200 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7201 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7202 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7203 mii->mii_media_active &= ~IFM_ETH_FMASK;
7204 }
7205
7206 if (sc->sc_flowflags & IFM_FLOW) {
7207 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7208 sc->sc_ctrl |= CTRL_TFCE;
7209 sc->sc_fcrtl |= FCRTL_XONE;
7210 }
7211 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7212 sc->sc_ctrl |= CTRL_RFCE;
7213 }
7214
7215 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7216 DPRINTF(WM_DEBUG_LINK,
7217 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7218 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7219 } else {
7220 DPRINTF(WM_DEBUG_LINK,
7221 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7222 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7223 }
7224
7225 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7226 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7227 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7228 : WMREG_FCRTL, sc->sc_fcrtl);
7229 if (sc->sc_type == WM_T_80003) {
7230 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7231 case IFM_1000_T:
7232 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7233 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7234 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7235 break;
7236 default:
7237 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7238 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7239 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7240 break;
7241 }
7242 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7243 }
7244 }
7245
7246 /*
7247 * wm_kmrn_readreg:
7248 *
7249 * Read a kumeran register
7250 */
7251 static int
7252 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7253 {
7254 int rv;
7255
7256 if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
7257 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7258 aprint_error_dev(sc->sc_dev,
7259 "%s: failed to get semaphore\n", __func__);
7260 return 0;
7261 }
7262 } else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
7263 if (wm_get_swfwhw_semaphore(sc)) {
7264 aprint_error_dev(sc->sc_dev,
7265 "%s: failed to get semaphore\n", __func__);
7266 return 0;
7267 }
7268 }
7269
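/*
 * Start the read: the register offset goes in the OFFSET field
 * and REN requests a read cycle; the data appears in the low 16
 * bits of KUMCTRLSTA shortly afterwards.
 */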
7270 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7271 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7272 KUMCTRLSTA_REN);
7273 CSR_WRITE_FLUSH(sc);
7274 delay(2);
7275
7276 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7277
7278 if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
7279 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7280 else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
7281 wm_put_swfwhw_semaphore(sc);
7282
7283 return rv;
7284 }
7285
7286 /*
7287 * wm_kmrn_writereg:
7288 *
7289 * Write a kumeran register
7290 */
7291 static void
7292 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7293 {
7294
7295 if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
7296 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7297 aprint_error_dev(sc->sc_dev,
7298 "%s: failed to get semaphore\n", __func__);
7299 return;
7300 }
7301 } else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
7302 if (wm_get_swfwhw_semaphore(sc)) {
7303 aprint_error_dev(sc->sc_dev,
7304 "%s: failed to get semaphore\n", __func__);
7305 return;
7306 }
7307 }
7308
7309 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7310 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7311 (val & KUMCTRLSTA_MASK));
7312
7313 if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
7314 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7315 else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
7316 wm_put_swfwhw_semaphore(sc);
7317 }
7318
7319 /* SGMII related */
7320
7321 /*
7322 * wm_sgmii_uses_mdio:
7323 *
7324 * Check whether the transaction is to the internal PHY or the external
7325 * MDIO interface. Return true if it's MDIO.
7326 */
7327 static bool
7328 wm_sgmii_uses_mdio(struct wm_softc *sc)
7329 {
7330 uint32_t reg;
7331 bool ismdio = false;
7332
7333 switch (sc->sc_type) {
7334 case WM_T_82575:
7335 case WM_T_82576:
7336 reg = CSR_READ(sc, WMREG_MDIC);
7337 ismdio = ((reg & MDIC_DEST) != 0);
7338 break;
7339 case WM_T_82580:
7340 case WM_T_I350:
7341 case WM_T_I354:
7342 case WM_T_I210:
7343 case WM_T_I211:
7344 reg = CSR_READ(sc, WMREG_MDICNFG);
7345 ismdio = ((reg & MDICNFG_DEST) != 0);
7346 break;
7347 default:
7348 break;
7349 }
7350
7351 return ismdio;
7352 }
7353
7354 /*
7355 * wm_sgmii_readreg: [mii interface function]
7356 *
7357 * Read a PHY register on the SGMII
7358 * This could be handled by the PHY layer if we didn't have to lock the
7359 * resource ...
7360 */
7361 static int
7362 wm_sgmii_readreg(device_t self, int phy, int reg)
7363 {
7364 struct wm_softc *sc = device_private(self);
7365 uint32_t i2ccmd;
7366 int i, rv;
7367
7368 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7369 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7370 __func__);
7371 return 0;
7372 }
7373
7374 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7375 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7376 | I2CCMD_OPCODE_READ;
7377 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7378
7379 /* Poll the ready bit */
7380 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7381 delay(50);
7382 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7383 if (i2ccmd & I2CCMD_READY)
7384 break;
7385 }
7386 if ((i2ccmd & I2CCMD_READY) == 0)
7387 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7388 if ((i2ccmd & I2CCMD_ERROR) != 0)
7389 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7390
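/*
 * The I2CCMD data field is byte-swapped relative to the PHY
 * register, so swap the two bytes back.
 */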
7391 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7392
7393 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7394 return rv;
7395 }
7396
7397 /*
7398 * wm_sgmii_writereg: [mii interface function]
7399 *
7400 * Write a PHY register on the SGMII.
7401 * This could be handled by the PHY layer if we didn't have to lock the
7402 * resource ...
7403 */
7404 static void
7405 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7406 {
7407 struct wm_softc *sc = device_private(self);
7408 uint32_t i2ccmd;
7409 int i;
7410
7411 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7412 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7413 __func__);
7414 return;
7415 }
7416
7417 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7418 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7419 | I2CCMD_OPCODE_WRITE;
7420 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7421
7422 /* Poll the ready bit */
7423 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7424 delay(50);
7425 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7426 if (i2ccmd & I2CCMD_READY)
7427 break;
7428 }
7429 if ((i2ccmd & I2CCMD_READY) == 0)
7430 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7431 if ((i2ccmd & I2CCMD_ERROR) != 0)
7432 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7433
7434 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7435 }
7436
7437 /* TBI related */
7438
7439 /* XXX Currently TBI only */
7440 static int
7441 wm_check_for_link(struct wm_softc *sc)
7442 {
7443 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7444 uint32_t rxcw;
7445 uint32_t ctrl;
7446 uint32_t status;
7447 uint32_t sig;
7448
7449 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
7450 sc->sc_tbi_linkup = 1;
7451 return 0;
7452 }
7453
7454 rxcw = CSR_READ(sc, WMREG_RXCW);
7455 ctrl = CSR_READ(sc, WMREG_CTRL);
7456 status = CSR_READ(sc, WMREG_STATUS);
7457
7458 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7459
7460 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7461 device_xname(sc->sc_dev), __func__,
7462 ((ctrl & CTRL_SWDPIN(1)) == sig),
7463 ((status & STATUS_LU) != 0),
7464 ((rxcw & RXCW_C) != 0)
7465 ));
7466
7467 /*
7468 * SWDPIN LU RXCW
7469 * 0 0 0
7470 * 0 0 1 (should not happen)
7471 * 0 1 0 (should not happen)
7472 * 0 1 1 (should not happen)
7473 * 1 0 0 Disable autonego and force linkup
7474 * 1 0 1 got /C/ but not linkup yet
7475 * 1 1 0 (linkup)
7476 * 1 1 1 If IFM_AUTO, back to autonego
7477 *
7478 */
7479 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7480 && ((status & STATUS_LU) == 0)
7481 && ((rxcw & RXCW_C) == 0)) {
7482 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7483 __func__));
7484 sc->sc_tbi_linkup = 0;
7485 /* Disable auto-negotiation in the TXCW register */
7486 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7487
7488 /*
7489 * Force link-up and also force full-duplex.
7490 *
7491 * NOTE: the hardware updates TFCE and RFCE in CTRL
7492 * automatically, so base sc->sc_ctrl on the value just read.
7493 */
7494 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7495 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7496 } else if (((status & STATUS_LU) != 0)
7497 && ((rxcw & RXCW_C) != 0)
7498 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7499 sc->sc_tbi_linkup = 1;
7500 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7501 __func__));
7502 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7503 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7504 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7505 && ((rxcw & RXCW_C) != 0)) {
7506 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7507 } else {
7508 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7509 status));
7510 }
7511
7512 return 0;
7513 }
7514
7515 /*
7516 * wm_tbi_mediainit:
7517 *
7518 * Initialize media for use on 1000BASE-X devices.
7519 */
7520 static void
7521 wm_tbi_mediainit(struct wm_softc *sc)
7522 {
7523 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7524 const char *sep = "";
7525
7526 if (sc->sc_type < WM_T_82543)
7527 sc->sc_tipg = TIPG_WM_DFLT;
7528 else
7529 sc->sc_tipg = TIPG_LG_DFLT;
7530
7531 sc->sc_tbi_anegticks = 5;
7532
7533 /* Initialize our media structures */
7534 sc->sc_mii.mii_ifp = ifp;
7535
7536 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7537 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
7538 wm_tbi_mediastatus);
7539
7540 /*
7541 * SWD Pins:
7542 *
7543 * 0 = Link LED (output)
7544 * 1 = Loss Of Signal (input)
7545 */
7546 sc->sc_ctrl |= CTRL_SWDPIO(0);
7547 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
7548 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7549 sc->sc_ctrl &= ~CTRL_LRST;
7550
7551 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7552
7553 #define ADD(ss, mm, dd) \
7554 do { \
7555 aprint_normal("%s%s", sep, ss); \
7556 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
7557 sep = ", "; \
7558 } while (/*CONSTCOND*/0)
7559
7560 aprint_normal_dev(sc->sc_dev, "");
7561
7562 /* Only 82545 is LX */
7563 if (sc->sc_type == WM_T_82545) {
7564 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
7565 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
7566 } else {
7567 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
7568 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
7569 }
7570 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
7571 aprint_normal("\n");
7572
7573 #undef ADD
7574
7575 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
7576 }
7577
7578 /*
7579 * wm_tbi_mediastatus: [ifmedia interface function]
7580 *
7581 * Get the current interface media status on a 1000BASE-X device.
7582 */
7583 static void
7584 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7585 {
7586 struct wm_softc *sc = ifp->if_softc;
7587 uint32_t ctrl, status;
7588
7589 ifmr->ifm_status = IFM_AVALID;
7590 ifmr->ifm_active = IFM_ETHER;
7591
7592 status = CSR_READ(sc, WMREG_STATUS);
7593 if ((status & STATUS_LU) == 0) {
7594 ifmr->ifm_active |= IFM_NONE;
7595 return;
7596 }
7597
7598 ifmr->ifm_status |= IFM_ACTIVE;
7599 /* Only 82545 is LX */
7600 if (sc->sc_type == WM_T_82545)
7601 ifmr->ifm_active |= IFM_1000_LX;
7602 else
7603 ifmr->ifm_active |= IFM_1000_SX;
7604 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
7605 ifmr->ifm_active |= IFM_FDX;
7606 else
7607 ifmr->ifm_active |= IFM_HDX;
7608 ctrl = CSR_READ(sc, WMREG_CTRL);
7609 if (ctrl & CTRL_RFCE)
7610 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
7611 if (ctrl & CTRL_TFCE)
7612 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
7613 }
7614
7615 /*
7616 * wm_tbi_mediachange: [ifmedia interface function]
7617 *
7618 * Set hardware to newly-selected media on a 1000BASE-X device.
7619 */
7620 static int
7621 wm_tbi_mediachange(struct ifnet *ifp)
7622 {
7623 struct wm_softc *sc = ifp->if_softc;
7624 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7625 uint32_t status;
7626 int i;
7627
7628 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7629 return 0;
7630
7631 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
7632 || (sc->sc_type >= WM_T_82575))
7633 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
7634
7635 /* XXX power_up_serdes_link_82575() */
7636
7637 sc->sc_ctrl &= ~CTRL_LRST;
7638 sc->sc_txcw = TXCW_ANE;
7639 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7640 sc->sc_txcw |= TXCW_FD | TXCW_HD;
7641 else if (ife->ifm_media & IFM_FDX)
7642 sc->sc_txcw |= TXCW_FD;
7643 else
7644 sc->sc_txcw |= TXCW_HD;
7645
7646 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
7647 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
7648
7649 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
7650 device_xname(sc->sc_dev), sc->sc_txcw));
7651 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7652 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7653 CSR_WRITE_FLUSH(sc);
7654 delay(1000);
7655
7656 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
7657 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
7658
	/*
	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be
	 * set if the optics detect a signal; on older chips it is cleared
	 * when a signal is present.
	 */
7663 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
7664 /* Have signal; wait for the link to come up. */
7665 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
7666 delay(10000);
7667 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
7668 break;
7669 }
7670
7671 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
7672 device_xname(sc->sc_dev),i));
7673
7674 status = CSR_READ(sc, WMREG_STATUS);
7675 DPRINTF(WM_DEBUG_LINK,
7676 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
7677 device_xname(sc->sc_dev),status, STATUS_LU));
7678 if (status & STATUS_LU) {
7679 /* Link is up. */
7680 DPRINTF(WM_DEBUG_LINK,
7681 ("%s: LINK: set media -> link up %s\n",
7682 device_xname(sc->sc_dev),
7683 (status & STATUS_FD) ? "FDX" : "HDX"));
7684
7685 /*
7686 * NOTE: CTRL will update TFCE and RFCE automatically,
7687 * so we should update sc->sc_ctrl
7688 */
7689 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7690 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7691 sc->sc_fcrtl &= ~FCRTL_XONE;
7692 if (status & STATUS_FD)
7693 sc->sc_tctl |=
7694 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7695 else
7696 sc->sc_tctl |=
7697 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7698 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
7699 sc->sc_fcrtl |= FCRTL_XONE;
7700 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7701 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7702 WMREG_OLD_FCRTL : WMREG_FCRTL,
7703 sc->sc_fcrtl);
7704 sc->sc_tbi_linkup = 1;
7705 } else {
7706 if (i == WM_LINKUP_TIMEOUT)
7707 wm_check_for_link(sc);
7708 /* Link is down. */
7709 DPRINTF(WM_DEBUG_LINK,
7710 ("%s: LINK: set media -> link down\n",
7711 device_xname(sc->sc_dev)));
7712 sc->sc_tbi_linkup = 0;
7713 }
7714 } else {
7715 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
7716 device_xname(sc->sc_dev)));
7717 sc->sc_tbi_linkup = 0;
7718 }
7719
7720 wm_tbi_set_linkled(sc);
7721
7722 return 0;
7723 }
7724
7725 /*
7726 * wm_tbi_set_linkled:
7727 *
7728 * Update the link LED on 1000BASE-X devices.
7729 */
7730 static void
7731 wm_tbi_set_linkled(struct wm_softc *sc)
7732 {
7733
7734 if (sc->sc_tbi_linkup)
7735 sc->sc_ctrl |= CTRL_SWDPIN(0);
7736 else
7737 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7738
	/* On 82540 and newer devices the link LED is active low */
7740 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7741
7742 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7743 }
7744
7745 /*
7746 * wm_tbi_check_link:
7747 *
7748 * Check the link on 1000BASE-X devices.
7749 */
7750 static void
7751 wm_tbi_check_link(struct wm_softc *sc)
7752 {
7753 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7754 uint32_t status;
7755
7756 KASSERT(WM_TX_LOCKED(sc));
7757
7758 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
7759 sc->sc_tbi_linkup = 1;
7760 return;
7761 }
7762
7763 status = CSR_READ(sc, WMREG_STATUS);
7764
7765 /* XXX is this needed? */
7766 (void)CSR_READ(sc, WMREG_RXCW);
7767 (void)CSR_READ(sc, WMREG_CTRL);
7768
7769 /* set link status */
7770 if ((status & STATUS_LU) == 0) {
7771 DPRINTF(WM_DEBUG_LINK,
7772 ("%s: LINK: checklink -> down\n",
7773 device_xname(sc->sc_dev)));
7774 sc->sc_tbi_linkup = 0;
7775 } else if (sc->sc_tbi_linkup == 0) {
7776 DPRINTF(WM_DEBUG_LINK,
7777 ("%s: LINK: checklink -> up %s\n",
7778 device_xname(sc->sc_dev),
7779 (status & STATUS_FD) ? "FDX" : "HDX"));
7780 sc->sc_tbi_linkup = 1;
7781 }
7782
7783 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
7784 && ((status & STATUS_LU) == 0)) {
7785 sc->sc_tbi_linkup = 0;
7786 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7787 /* If the timer expired, retry autonegotiation */
7788 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
7789 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
7790 sc->sc_tbi_ticks = 0;
7791 /*
7792 * Reset the link, and let autonegotiation do
7793 * its thing
7794 */
7795 sc->sc_ctrl |= CTRL_LRST;
7796 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7797 CSR_WRITE_FLUSH(sc);
7798 delay(1000);
7799 sc->sc_ctrl &= ~CTRL_LRST;
7800 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7801 CSR_WRITE_FLUSH(sc);
7802 delay(1000);
7803 CSR_WRITE(sc, WMREG_TXCW,
7804 sc->sc_txcw & ~TXCW_ANE);
7805 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7806 }
7807 }
7808 }
7809
7810 wm_tbi_set_linkled(sc);
7811 }
7812
7813 /* SFP related */
7814
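/*
 * wm_sfp_read_data_byte:
 *
 *	Read one byte from the SFP module's ID EEPROM through the I2CCMD
 *	register.  Returns 0 on success and -1 on timeout or error.
 */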
7815 static int
7816 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
7817 {
7818 uint32_t i2ccmd;
7819 int i;
7820
7821 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
7822 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7823
7824 /* Poll the ready bit */
7825 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7826 delay(50);
7827 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7828 if (i2ccmd & I2CCMD_READY)
7829 break;
7830 }
7831 if ((i2ccmd & I2CCMD_READY) == 0)
7832 return -1;
7833 if ((i2ccmd & I2CCMD_ERROR) != 0)
7834 return -1;
7835
7836 *data = i2ccmd & 0x00ff;
7837
7838 return 0;
7839 }
7840
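/*
 * wm_sfp_get_media_type:
 *
 *	Identify the attached SFP module and map its Ethernet compliance
 *	flags to a WM_MEDIATYPE_* value: 1000BASE-SX/LX modules are
 *	handled as SERDES, while 1000BASE-T and 100BASE-FX modules are
 *	reached over SGMII.
 */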
7841 static uint32_t
7842 wm_sfp_get_media_type(struct wm_softc *sc)
7843 {
7844 uint32_t ctrl_ext;
7845 uint8_t val = 0;
7846 int timeout = 3;
7847 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
7848 int rv = -1;
7849
7850 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7851 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
7852 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
7853 CSR_WRITE_FLUSH(sc);
7854
7855 /* Read SFP module data */
7856 while (timeout) {
7857 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
7858 if (rv == 0)
7859 break;
7860 delay(100*1000); /* XXX too big */
7861 timeout--;
7862 }
7863 if (rv != 0)
7864 goto out;
7865 switch (val) {
7866 case SFF_SFP_ID_SFF:
7867 aprint_normal_dev(sc->sc_dev,
7868 "Module/Connector soldered to board\n");
7869 break;
7870 case SFF_SFP_ID_SFP:
7871 aprint_normal_dev(sc->sc_dev, "SFP\n");
7872 break;
7873 case SFF_SFP_ID_UNKNOWN:
7874 goto out;
7875 default:
7876 break;
7877 }
7878
7879 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
7880 if (rv != 0) {
7881 goto out;
7882 }
7883
7884 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
7885 mediatype = WM_MEDIATYPE_SERDES;
	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
7887 sc->sc_flags |= WM_F_SGMII;
7888 mediatype = WM_MEDIATYPE_COPPER;
	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
7890 sc->sc_flags |= WM_F_SGMII;
7891 mediatype = WM_MEDIATYPE_SERDES;
7892 }
7893
7894 out:
7895 /* Restore I2C interface setting */
7896 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7897
7898 return mediatype;
7899 }

/*
7901 * NVM related.
7902 * Microwire, SPI (w/wo EERD) and Flash.
7903 */
7904
/* Both SPI and Microwire */
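
/*
 * Both flavours are bit-banged through the EECD register: a data bit is
 * placed on EECD_DI (or sampled from EECD_DO) around a pulse on the
 * EECD_SK clock line.  Roughly, per bit:
 *
 *	CSR_WRITE(sc, WMREG_EECD, reg);			(data valid)
 *	CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);	(clock high)
 *	CSR_WRITE(sc, WMREG_EECD, reg);			(clock low)
 */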
7906
7907 /*
7908 * wm_eeprom_sendbits:
7909 *
7910 * Send a series of bits to the EEPROM.
7911 */
7912 static void
7913 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
7914 {
7915 uint32_t reg;
7916 int x;
7917
7918 reg = CSR_READ(sc, WMREG_EECD);
7919
7920 for (x = nbits; x > 0; x--) {
7921 if (bits & (1U << (x - 1)))
7922 reg |= EECD_DI;
7923 else
7924 reg &= ~EECD_DI;
7925 CSR_WRITE(sc, WMREG_EECD, reg);
7926 CSR_WRITE_FLUSH(sc);
7927 delay(2);
7928 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7929 CSR_WRITE_FLUSH(sc);
7930 delay(2);
7931 CSR_WRITE(sc, WMREG_EECD, reg);
7932 CSR_WRITE_FLUSH(sc);
7933 delay(2);
7934 }
7935 }
7936
7937 /*
7938 * wm_eeprom_recvbits:
7939 *
7940 * Receive a series of bits from the EEPROM.
7941 */
7942 static void
7943 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
7944 {
7945 uint32_t reg, val;
7946 int x;
7947
7948 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
7949
7950 val = 0;
7951 for (x = nbits; x > 0; x--) {
7952 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7953 CSR_WRITE_FLUSH(sc);
7954 delay(2);
7955 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
7956 val |= (1U << (x - 1));
7957 CSR_WRITE(sc, WMREG_EECD, reg);
7958 CSR_WRITE_FLUSH(sc);
7959 delay(2);
7960 }
7961 *valp = val;
7962 }
7963
7964 /* Microwire */
7965
7966 /*
7967 * wm_nvm_read_uwire:
7968 *
7969 * Read a word from the EEPROM using the MicroWire protocol.
7970 */
7971 static int
7972 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7973 {
7974 uint32_t reg, val;
7975 int i;
7976
7977 for (i = 0; i < wordcnt; i++) {
7978 /* Clear SK and DI. */
7979 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
7980 CSR_WRITE(sc, WMREG_EECD, reg);
7981
7982 /*
7983 * XXX: workaround for a bug in qemu-0.12.x and prior
7984 * and Xen.
7985 *
		 * We use this workaround only for the 82540 because qemu's
		 * e1000 acts as an 82540.
7988 */
7989 if (sc->sc_type == WM_T_82540) {
7990 reg |= EECD_SK;
7991 CSR_WRITE(sc, WMREG_EECD, reg);
7992 reg &= ~EECD_SK;
7993 CSR_WRITE(sc, WMREG_EECD, reg);
7994 CSR_WRITE_FLUSH(sc);
7995 delay(2);
7996 }
7997 /* XXX: end of workaround */
7998
7999 /* Set CHIP SELECT. */
8000 reg |= EECD_CS;
8001 CSR_WRITE(sc, WMREG_EECD, reg);
8002 CSR_WRITE_FLUSH(sc);
8003 delay(2);
8004
8005 /* Shift in the READ command. */
8006 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
8007
8008 /* Shift in address. */
8009 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
8010
8011 /* Shift out the data. */
8012 wm_eeprom_recvbits(sc, &val, 16);
8013 data[i] = val & 0xffff;
8014
8015 /* Clear CHIP SELECT. */
8016 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
8017 CSR_WRITE(sc, WMREG_EECD, reg);
8018 CSR_WRITE_FLUSH(sc);
8019 delay(2);
8020 }
8021
8022 return 0;
8023 }
8024
8025 /* SPI */
8026
8027 /*
8028 * Set SPI and FLASH related information from the EECD register.
 * For the 82541 and 82547 families, the word size is taken from EEPROM.
8030 */
8031 static int
8032 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
8033 {
8034 int size;
8035 uint32_t reg;
8036 uint16_t data;
8037
8038 reg = CSR_READ(sc, WMREG_EECD);
8039 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
8040
8041 /* Read the size of NVM from EECD by default */
8042 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8043 switch (sc->sc_type) {
8044 case WM_T_82541:
8045 case WM_T_82541_2:
8046 case WM_T_82547:
8047 case WM_T_82547_2:
8048 /* Set dummy value to access EEPROM */
8049 sc->sc_nvm_wordsize = 64;
8050 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
8051 reg = data;
8052 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8053 if (size == 0)
8054 size = 6; /* 64 word size */
8055 else
8056 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
8057 break;
8058 case WM_T_80003:
8059 case WM_T_82571:
8060 case WM_T_82572:
8061 case WM_T_82573: /* SPI case */
8062 case WM_T_82574: /* SPI case */
8063 case WM_T_82583: /* SPI case */
8064 size += NVM_WORD_SIZE_BASE_SHIFT;
8065 if (size > 14)
8066 size = 14;
8067 break;
8068 case WM_T_82575:
8069 case WM_T_82576:
8070 case WM_T_82580:
8071 case WM_T_I350:
8072 case WM_T_I354:
8073 case WM_T_I210:
8074 case WM_T_I211:
8075 size += NVM_WORD_SIZE_BASE_SHIFT;
8076 if (size > 15)
8077 size = 15;
8078 break;
8079 default:
8080 aprint_error_dev(sc->sc_dev,
8081 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
8082 return -1;
8084 }
8085
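	/* The size field is log2-encoded: a value of n means 2^n words */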
8086 sc->sc_nvm_wordsize = 1 << size;
8087
8088 return 0;
8089 }
8090
8091 /*
8092 * wm_nvm_ready_spi:
8093 *
8094 * Wait for a SPI EEPROM to be ready for commands.
8095 */
8096 static int
8097 wm_nvm_ready_spi(struct wm_softc *sc)
8098 {
8099 uint32_t val;
8100 int usec;
8101
8102 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
8103 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
8104 wm_eeprom_recvbits(sc, &val, 8);
8105 if ((val & SPI_SR_RDY) == 0)
8106 break;
8107 }
8108 if (usec >= SPI_MAX_RETRIES) {
8109 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
8110 return 1;
8111 }
8112 return 0;
8113 }
8114
8115 /*
8116 * wm_nvm_read_spi:
8117 *
 * Read a word from the EEPROM using the SPI protocol.
8119 */
8120 static int
8121 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8122 {
8123 uint32_t reg, val;
8124 int i;
8125 uint8_t opc;
8126
8127 /* Clear SK and CS. */
8128 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
8129 CSR_WRITE(sc, WMREG_EECD, reg);
8130 CSR_WRITE_FLUSH(sc);
8131 delay(2);
8132
8133 if (wm_nvm_ready_spi(sc))
8134 return 1;
8135
8136 /* Toggle CS to flush commands. */
8137 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
8138 CSR_WRITE_FLUSH(sc);
8139 delay(2);
8140 CSR_WRITE(sc, WMREG_EECD, reg);
8141 CSR_WRITE_FLUSH(sc);
8142 delay(2);
8143
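	/*
	 * Small parts use 8 address bits and encode the 9th address bit
	 * (A8) in the opcode; the device takes a byte address, hence the
	 * "word << 1" below.
	 */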
8144 opc = SPI_OPC_READ;
8145 if (sc->sc_nvm_addrbits == 8 && word >= 128)
8146 opc |= SPI_OPC_A8;
8147
8148 wm_eeprom_sendbits(sc, opc, 8);
8149 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
8150
8151 for (i = 0; i < wordcnt; i++) {
8152 wm_eeprom_recvbits(sc, &val, 16);
8153 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
8154 }
8155
8156 /* Raise CS and clear SK. */
8157 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
8158 CSR_WRITE(sc, WMREG_EECD, reg);
8159 CSR_WRITE_FLUSH(sc);
8160 delay(2);
8161
8162 return 0;
8163 }
8164
/* Access via the EERD (EEPROM Read) register */
8166
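/*
 * Newer chips expose the NVM through the EERD register instead of the
 * bit-banged EECD interface: write the word address together with
 * EERD_START, poll for EERD_DONE, then pull the result out of the data
 * field (see wm_nvm_read_eerd() below).
 */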
8167 static int
8168 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
8169 {
8170 uint32_t attempts = 100000;
8171 uint32_t i, reg = 0;
8172 int32_t done = -1;
8173
8174 for (i = 0; i < attempts; i++) {
8175 reg = CSR_READ(sc, rw);
8176
8177 if (reg & EERD_DONE) {
8178 done = 0;
8179 break;
8180 }
8181 delay(5);
8182 }
8183
8184 return done;
8185 }
8186
8187 static int
8188 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
8189 uint16_t *data)
8190 {
8191 int i, eerd = 0;
8192 int error = 0;
8193
8194 for (i = 0; i < wordcnt; i++) {
8195 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
8196
8197 CSR_WRITE(sc, WMREG_EERD, eerd);
8198 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
8199 if (error != 0)
8200 break;
8201
8202 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
8203 }
8204
8205 return error;
8206 }
8207
8208 /* Flash */
8209
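/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	Determine which of the two flash banks currently holds the valid
 *	NVM image, either from the EECD_SEC1VAL bit or by checking the
 *	signature word in each bank.
 */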
8210 static int
8211 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
8212 {
8213 uint32_t eecd;
8214 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
8215 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
8216 uint8_t sig_byte = 0;
8217
8218 switch (sc->sc_type) {
8219 case WM_T_ICH8:
8220 case WM_T_ICH9:
8221 eecd = CSR_READ(sc, WMREG_EECD);
8222 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
8223 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
8224 return 0;
8225 }
8226 /* FALLTHROUGH */
8227 default:
8228 /* Default to 0 */
8229 *bank = 0;
8230
8231 /* Check bank 0 */
8232 wm_read_ich8_byte(sc, act_offset, &sig_byte);
8233 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8234 *bank = 0;
8235 return 0;
8236 }
8237
8238 /* Check bank 1 */
8239 wm_read_ich8_byte(sc, act_offset + bank1_offset,
8240 &sig_byte);
8241 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8242 *bank = 1;
8243 return 0;
8244 }
8245 }
8246
8247 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
8248 device_xname(sc->sc_dev)));
8249 return -1;
8250 }
8251
8252 /******************************************************************************
8253 * This function does initial flash setup so that a new read/write/erase cycle
8254 * can be started.
8255 *
8256 * sc - The pointer to the hw structure
8257 ****************************************************************************/
8258 static int32_t
8259 wm_ich8_cycle_init(struct wm_softc *sc)
8260 {
8261 uint16_t hsfsts;
8262 int32_t error = 1;
8263 int32_t i = 0;
8264
8265 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8266
	/* Check the Flash Descriptor Valid bit in the HW status register */
8268 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
8269 return error;
8270 }
8271
	/* Clear FCERR and DAEL in the HW status register by writing 1s */
	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
8275
8276 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8277
	/*
	 * Either there should be a hardware SPI cycle-in-progress bit to
	 * check against before starting a new cycle, or the FDONE bit
	 * should come out of hardware reset set to 1 so that it can be
	 * used to tell whether a cycle is in progress or has completed.
	 * We should also have a software semaphore mechanism guarding
	 * FDONE (or the in-progress bit) so that two threads cannot start
	 * a cycle at the same time.
	 */
8288
8289 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8290 /*
8291 * There is no cycle running at present, so we can start a
8292 * cycle
8293 */
8294
8295 /* Begin by setting Flash Cycle Done. */
8296 hsfsts |= HSFSTS_DONE;
8297 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8298 error = 0;
8299 } else {
		/*
		 * Otherwise poll for some time so the current cycle has a
		 * chance to end before giving up.
		 */
8304 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
8305 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8306 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8307 error = 0;
8308 break;
8309 }
8310 delay(1);
8311 }
8312 if (error == 0) {
			/*
			 * The previous cycle ended before the timeout;
			 * now set the Flash Cycle Done.
			 */
8317 hsfsts |= HSFSTS_DONE;
8318 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8319 }
8320 }
8321 return error;
8322 }
8323
8324 /******************************************************************************
8325 * This function starts a flash cycle and waits for its completion
8326 *
8327 * sc - The pointer to the hw structure
8328 ****************************************************************************/
8329 static int32_t
8330 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
8331 {
8332 uint16_t hsflctl;
8333 uint16_t hsfsts;
8334 int32_t error = 1;
8335 uint32_t i = 0;
8336
8337 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
8338 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8339 hsflctl |= HSFCTL_GO;
8340 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8341
8342 /* Wait till FDONE bit is set to 1 */
8343 do {
8344 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8345 if (hsfsts & HSFSTS_DONE)
8346 break;
8347 delay(1);
8348 i++;
8349 } while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
8351 error = 0;
8352
8353 return error;
8354 }
8355
8356 /******************************************************************************
8357 * Reads a byte or word from the NVM using the ICH8 flash access registers.
8358 *
8359 * sc - The pointer to the hw structure
8360 * index - The index of the byte or word to read.
8361 * size - Size of data to read, 1=byte 2=word
8362 * data - Pointer to the word to store the value read.
8363 *****************************************************************************/
8364 static int32_t
8365 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
8366 uint32_t size, uint16_t *data)
8367 {
8368 uint16_t hsfsts;
8369 uint16_t hsflctl;
8370 uint32_t flash_linear_address;
8371 uint32_t flash_data = 0;
8372 int32_t error = 1;
8373 int32_t count = 0;
8374
	if (size < 1 || size > 2 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
8377 return error;
8378
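	/*
	 * The NVM region starts at sc_ich8_flash_base within the chipset
	 * flash part; mask the index down to the linear address field and
	 * add the base to form the flash linear address.
	 */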
8379 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
8380 sc->sc_ich8_flash_base;
8381
8382 do {
8383 delay(1);
8384 /* Steps */
8385 error = wm_ich8_cycle_init(sc);
8386 if (error)
8387 break;
8388
8389 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8390 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
8391 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
8392 & HSFCTL_BCOUNT_MASK;
8393 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
8394 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8395
8396 /*
8397 * Write the last 24 bits of index into Flash Linear address
8398 * field in Flash Address
8399 */
8400 /* TODO: TBD maybe check the index against the size of flash */
8401
8402 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
8403
8404 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
8405
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * up to ICH_FLASH_CYCLE_REPEAT_COUNT times; otherwise read
		 * the result out of Flash Data0, least significant byte
		 * first.
		 */
8412 if (error == 0) {
8413 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
8414 if (size == 1)
8415 *data = (uint8_t)(flash_data & 0x000000FF);
8416 else if (size == 2)
8417 *data = (uint16_t)(flash_data & 0x0000FFFF);
8418 break;
8419 } else {
8420 /*
8421 * If we've gotten here, then things are probably
8422 * completely hosed, but if the error condition is
8423 * detected, it won't hurt to give it another try...
8424 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
8425 */
8426 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8427 if (hsfsts & HSFSTS_ERR) {
8428 /* Repeat for some time before giving up. */
8429 continue;
8430 } else if ((hsfsts & HSFSTS_DONE) == 0)
8431 break;
8432 }
8433 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8434
8435 return error;
8436 }
8437
8438 /******************************************************************************
8439 * Reads a single byte from the NVM using the ICH8 flash access registers.
8440 *
8441 * sc - pointer to wm_hw structure
8442 * index - The index of the byte to read.
8443 * data - Pointer to a byte to store the value read.
8444 *****************************************************************************/
8445 static int32_t
8446 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
8447 {
8448 int32_t status;
8449 uint16_t word = 0;
8450
8451 status = wm_read_ich8_data(sc, index, 1, &word);
8452 if (status == 0)
8453 *data = (uint8_t)word;
8454 else
8455 *data = 0;
8456
8457 return status;
8458 }
8459
8460 /******************************************************************************
8461 * Reads a word from the NVM using the ICH8 flash access registers.
8462 *
8463 * sc - pointer to wm_hw structure
8464 * index - The starting byte index of the word to read.
8465 * data - Pointer to a word to store the value read.
8466 *****************************************************************************/
8467 static int32_t
8468 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8469 {
8470 int32_t status;
8471
8472 status = wm_read_ich8_data(sc, index, 2, data);
8473 return status;
8474 }
8475
8476 /******************************************************************************
8477 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
8478 * register.
8479 *
8480 * sc - Struct containing variables accessed by shared code
8481 * offset - offset of word in the EEPROM to read
8482 * data - word read from the EEPROM
8483 * words - number of words to read
8484 *****************************************************************************/
8485 static int
8486 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
8487 {
8488 int32_t error = 0;
8489 uint32_t flash_bank = 0;
8490 uint32_t act_offset = 0;
8491 uint32_t bank_offset = 0;
8492 uint16_t word = 0;
8493 uint16_t i = 0;
8494
8495 /*
8496 * We need to know which is the valid flash bank. In the event
8497 * that we didn't allocate eeprom_shadow_ram, we may not be
8498 * managing flash_bank. So it cannot be trusted and needs
8499 * to be updated with each read.
8500 */
8501 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
8502 if (error) {
8503 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
8504 device_xname(sc->sc_dev)));
8505 flash_bank = 0;
8506 }
8507
8508 /*
8509 * Adjust offset appropriately if we're on bank 1 - adjust for word
8510 * size
8511 */
8512 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
8513
8514 error = wm_get_swfwhw_semaphore(sc);
8515 if (error) {
8516 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8517 __func__);
8518 return error;
8519 }
8520
8521 for (i = 0; i < words; i++) {
8522 /* The NVM part needs a byte offset, hence * 2 */
8523 act_offset = bank_offset + ((offset + i) * 2);
8524 error = wm_read_ich8_word(sc, act_offset, &word);
8525 if (error) {
8526 aprint_error_dev(sc->sc_dev,
8527 "%s: failed to read NVM\n", __func__);
8528 break;
8529 }
8530 data[i] = word;
8531 }
8532
8533 wm_put_swfwhw_semaphore(sc);
8534 return error;
8535 }
8536
8537 /* Lock, detecting NVM type, validate checksum and read */
8538
8539 /*
8540 * wm_nvm_acquire:
8541 *
8542 * Perform the EEPROM handshake required on some chips.
8543 */
8544 static int
8545 wm_nvm_acquire(struct wm_softc *sc)
8546 {
8547 uint32_t reg;
8548 int x;
8549 int ret = 0;
8550
	/* Always succeeds for flash-type NVM */
8552 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8553 return 0;
8554
8555 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8556 ret = wm_get_swfwhw_semaphore(sc);
8557 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
8558 /* This will also do wm_get_swsm_semaphore() if needed */
8559 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
8560 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
8561 ret = wm_get_swsm_semaphore(sc);
8562 }
8563
8564 if (ret) {
8565 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8566 __func__);
8567 return 1;
8568 }
8569
8570 if (sc->sc_flags & WM_F_LOCK_EECD) {
8571 reg = CSR_READ(sc, WMREG_EECD);
8572
8573 /* Request EEPROM access. */
8574 reg |= EECD_EE_REQ;
8575 CSR_WRITE(sc, WMREG_EECD, reg);
8576
8577 /* ..and wait for it to be granted. */
8578 for (x = 0; x < 1000; x++) {
8579 reg = CSR_READ(sc, WMREG_EECD);
8580 if (reg & EECD_EE_GNT)
8581 break;
8582 delay(5);
8583 }
8584 if ((reg & EECD_EE_GNT) == 0) {
8585 aprint_error_dev(sc->sc_dev,
8586 "could not acquire EEPROM GNT\n");
8587 reg &= ~EECD_EE_REQ;
8588 CSR_WRITE(sc, WMREG_EECD, reg);
8589 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8590 wm_put_swfwhw_semaphore(sc);
8591 if (sc->sc_flags & WM_F_LOCK_SWFW)
8592 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8593 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8594 wm_put_swsm_semaphore(sc);
8595 return 1;
8596 }
8597 }
8598
8599 return 0;
8600 }
8601
8602 /*
8603 * wm_nvm_release:
8604 *
8605 * Release the EEPROM mutex.
8606 */
8607 static void
8608 wm_nvm_release(struct wm_softc *sc)
8609 {
8610 uint32_t reg;
8611
	/* Nothing to release for flash-type NVM */
8613 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8614 return;
8615
8616 if (sc->sc_flags & WM_F_LOCK_EECD) {
8617 reg = CSR_READ(sc, WMREG_EECD);
8618 reg &= ~EECD_EE_REQ;
8619 CSR_WRITE(sc, WMREG_EECD, reg);
8620 }
8621
8622 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8623 wm_put_swfwhw_semaphore(sc);
8624 if (sc->sc_flags & WM_F_LOCK_SWFW)
8625 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8626 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8627 wm_put_swsm_semaphore(sc);
8628 }
8629
8630 static int
8631 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
8632 {
8633 uint32_t eecd = 0;
8634
8635 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
8636 || sc->sc_type == WM_T_82583) {
8637 eecd = CSR_READ(sc, WMREG_EECD);
8638
8639 /* Isolate bits 15 & 16 */
8640 eecd = ((eecd >> 15) & 0x03);
8641
8642 /* If both bits are set, device is Flash type */
8643 if (eecd == 0x03)
8644 return 0;
8645 }
8646 return 1;
8647 }
8648
8649 /*
8650 * wm_nvm_validate_checksum
8651 *
8652 * The checksum is defined as the sum of the first 64 (16 bit) words.
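 * A valid image sums (modulo 2^16) to the NVM_CHECKSUM constant.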
8653 */
8654 static int
8655 wm_nvm_validate_checksum(struct wm_softc *sc)
8656 {
8657 uint16_t checksum;
8658 uint16_t eeprom_data;
8659 #ifdef WM_DEBUG
8660 uint16_t csum_wordaddr, valid_checksum;
8661 #endif
8662 int i;
8663
8664 checksum = 0;
8665
8666 /* Don't check for I211 */
8667 if (sc->sc_type == WM_T_I211)
8668 return 0;
8669
8670 #ifdef WM_DEBUG
8671 if (sc->sc_type == WM_T_PCH_LPT) {
8672 csum_wordaddr = NVM_OFF_COMPAT;
8673 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
8674 } else {
8675 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
8676 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
8677 }
8678
8679 /* Dump EEPROM image for debug */
8680 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8681 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8682 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
8683 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
8684 if ((eeprom_data & valid_checksum) == 0) {
8685 DPRINTF(WM_DEBUG_NVM,
8686 ("%s: NVM need to be updated (%04x != %04x)\n",
8687 device_xname(sc->sc_dev), eeprom_data,
8688 valid_checksum));
8689 }
8690 }
8691
8692 if ((wm_debug & WM_DEBUG_NVM) != 0) {
8693 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
8694 for (i = 0; i < NVM_SIZE; i++) {
8695 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8696 printf("XXXX ");
8697 else
8698 printf("%04hx ", eeprom_data);
8699 if (i % 8 == 7)
8700 printf("\n");
8701 }
8702 }
8703
8704 #endif /* WM_DEBUG */
8705
8706 for (i = 0; i < NVM_SIZE; i++) {
8707 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8708 return 1;
8709 checksum += eeprom_data;
8710 }
8711
8712 if (checksum != (uint16_t) NVM_CHECKSUM) {
8713 #ifdef WM_DEBUG
8714 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
8715 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
8716 #endif
8717 }
8718
8719 return 0;
8720 }
8721
8722 /*
8723 * wm_nvm_read:
8724 *
8725 * Read data from the serial EEPROM.
8726 */
8727 static int
8728 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8729 {
8730 int rv;
8731
8732 if (sc->sc_flags & WM_F_EEPROM_INVALID)
8733 return 1;
8734
8735 if (wm_nvm_acquire(sc))
8736 return 1;
8737
8738 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8739 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8740 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
8741 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
8742 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
8743 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
8744 else if (sc->sc_flags & WM_F_EEPROM_SPI)
8745 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
8746 else
8747 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
8748
8749 wm_nvm_release(sc);
8750 return rv;
8751 }
8752
8753 /*
8754 * Hardware semaphores.
 * Very complex...
8756 */
8757
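/*
 * Three locking schemes are used depending on the chip (see the
 * WM_F_LOCK_* flags): the SMBI/SWESMBI bits in the SWSM register, the
 * SW_FW_SYNC register with per-resource software/firmware bit pairs,
 * and the software flag in EXTCNFCTR used by the ICH/PCH family.
 */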
8758 static int
8759 wm_get_swsm_semaphore(struct wm_softc *sc)
8760 {
8761 int32_t timeout;
8762 uint32_t swsm;
8763
8764 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8765 /* Get the SW semaphore. */
8766 timeout = sc->sc_nvm_wordsize + 1;
8767 while (timeout) {
8768 swsm = CSR_READ(sc, WMREG_SWSM);
8769
8770 if ((swsm & SWSM_SMBI) == 0)
8771 break;
8772
8773 delay(50);
8774 timeout--;
8775 }
8776
8777 if (timeout == 0) {
8778 aprint_error_dev(sc->sc_dev,
8779 "could not acquire SWSM SMBI\n");
8780 return 1;
8781 }
8782 }
8783
8784 /* Get the FW semaphore. */
8785 timeout = sc->sc_nvm_wordsize + 1;
8786 while (timeout) {
8787 swsm = CSR_READ(sc, WMREG_SWSM);
8788 swsm |= SWSM_SWESMBI;
8789 CSR_WRITE(sc, WMREG_SWSM, swsm);
8790 /* If we managed to set the bit we got the semaphore. */
8791 swsm = CSR_READ(sc, WMREG_SWSM);
8792 if (swsm & SWSM_SWESMBI)
8793 break;
8794
8795 delay(50);
8796 timeout--;
8797 }
8798
8799 if (timeout == 0) {
8800 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
8801 /* Release semaphores */
8802 wm_put_swsm_semaphore(sc);
8803 return 1;
8804 }
8805 return 0;
8806 }
8807
8808 static void
8809 wm_put_swsm_semaphore(struct wm_softc *sc)
8810 {
8811 uint32_t swsm;
8812
8813 swsm = CSR_READ(sc, WMREG_SWSM);
8814 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
8815 CSR_WRITE(sc, WMREG_SWSM, swsm);
8816 }
8817
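/*
 * wm_get_swfw_semaphore:
 *
 *	Acquire the software half of one SW_FW_SYNC resource (e.g.
 *	SWFW_EEP_SM or SWFW_PHY0_SM); the resource is busy while either
 *	the software or the firmware bit for it is set.
 */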
8818 static int
8819 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8820 {
8821 uint32_t swfw_sync;
8822 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
8823 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
8825
8826 for (timeout = 0; timeout < 200; timeout++) {
8827 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8828 if (wm_get_swsm_semaphore(sc)) {
8829 aprint_error_dev(sc->sc_dev,
8830 "%s: failed to get semaphore\n",
8831 __func__);
8832 return 1;
8833 }
8834 }
8835 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8836 if ((swfw_sync & (swmask | fwmask)) == 0) {
8837 swfw_sync |= swmask;
8838 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8839 if (sc->sc_flags & WM_F_LOCK_SWSM)
8840 wm_put_swsm_semaphore(sc);
8841 return 0;
8842 }
8843 if (sc->sc_flags & WM_F_LOCK_SWSM)
8844 wm_put_swsm_semaphore(sc);
8845 delay(5000);
8846 }
8847 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
8848 device_xname(sc->sc_dev), mask, swfw_sync);
8849 return 1;
8850 }
8851
8852 static void
8853 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8854 {
8855 uint32_t swfw_sync;
8856
8857 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8858 while (wm_get_swsm_semaphore(sc) != 0)
8859 continue;
8860 }
8861 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8862 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
8863 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8864 if (sc->sc_flags & WM_F_LOCK_SWSM)
8865 wm_put_swsm_semaphore(sc);
8866 }
8867
8868 static int
8869 wm_get_swfwhw_semaphore(struct wm_softc *sc)
8870 {
8871 uint32_t ext_ctrl;
	int timeout;
8873
8874 for (timeout = 0; timeout < 200; timeout++) {
8875 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8876 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
8877 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8878
8879 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8880 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
8881 return 0;
8882 delay(5000);
8883 }
8884 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
8885 device_xname(sc->sc_dev), ext_ctrl);
8886 return 1;
8887 }
8888
8889 static void
8890 wm_put_swfwhw_semaphore(struct wm_softc *sc)
8891 {
8892 uint32_t ext_ctrl;
8893 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8894 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
8895 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8896 }
8897
8898 static int
8899 wm_get_hw_semaphore_82573(struct wm_softc *sc)
8900 {
8901 int i = 0;
8902 uint32_t reg;
8903
8904 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8905 do {
8906 CSR_WRITE(sc, WMREG_EXTCNFCTR,
8907 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
8908 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8909 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
8910 break;
8911 delay(2*1000);
8912 i++;
8913 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
8914
8915 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
8916 wm_put_hw_semaphore_82573(sc);
8917 log(LOG_ERR, "%s: Driver can't access the PHY\n",
8918 device_xname(sc->sc_dev));
8919 return -1;
8920 }
8921
8922 return 0;
8923 }
8924
8925 static void
8926 wm_put_hw_semaphore_82573(struct wm_softc *sc)
8927 {
8928 uint32_t reg;
8929
8930 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8931 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
8932 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8933 }
8934
8935 /*
8936 * Management mode and power management related subroutines.
8937 * BMC, AMT, suspend/resume and EEE.
8938 */
8939
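/*
 * wm_check_mng_mode:
 *
 *	Return nonzero if the manageability firmware (BMC/AMT) is active,
 *	using the chip-specific check below.
 */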
8940 static int
8941 wm_check_mng_mode(struct wm_softc *sc)
8942 {
8943 int rv;
8944
8945 switch (sc->sc_type) {
8946 case WM_T_ICH8:
8947 case WM_T_ICH9:
8948 case WM_T_ICH10:
8949 case WM_T_PCH:
8950 case WM_T_PCH2:
8951 case WM_T_PCH_LPT:
8952 rv = wm_check_mng_mode_ich8lan(sc);
8953 break;
8954 case WM_T_82574:
8955 case WM_T_82583:
8956 rv = wm_check_mng_mode_82574(sc);
8957 break;
8958 case WM_T_82571:
8959 case WM_T_82572:
8960 case WM_T_82573:
8961 case WM_T_80003:
8962 rv = wm_check_mng_mode_generic(sc);
8963 break;
8964 default:
		/* nothing to do */
8966 rv = 0;
8967 break;
8968 }
8969
8970 return rv;
8971 }
8972
8973 static int
8974 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
8975 {
8976 uint32_t fwsm;
8977
8978 fwsm = CSR_READ(sc, WMREG_FWSM);
8979
8980 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
8981 return 1;
8982
8983 return 0;
8984 }
8985
8986 static int
8987 wm_check_mng_mode_82574(struct wm_softc *sc)
8988 {
8989 uint16_t data;
8990
8991 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
8992
8993 if ((data & NVM_CFG2_MNGM_MASK) != 0)
8994 return 1;
8995
8996 return 0;
8997 }
8998
8999 static int
9000 wm_check_mng_mode_generic(struct wm_softc *sc)
9001 {
9002 uint32_t fwsm;
9003
9004 fwsm = CSR_READ(sc, WMREG_FWSM);
9005
9006 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
9007 return 1;
9008
9009 return 0;
9010 }
9011
9012 static int
9013 wm_enable_mng_pass_thru(struct wm_softc *sc)
9014 {
9015 uint32_t manc, fwsm, factps;
9016
9017 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
9018 return 0;
9019
9020 manc = CSR_READ(sc, WMREG_MANC);
9021
9022 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
9023 device_xname(sc->sc_dev), manc));
9024 if ((manc & MANC_RECV_TCO_EN) == 0)
9025 return 0;
9026
9027 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
9028 fwsm = CSR_READ(sc, WMREG_FWSM);
9029 factps = CSR_READ(sc, WMREG_FACTPS);
9030 if (((factps & FACTPS_MNGCG) == 0)
9031 && ((fwsm & FWSM_MODE_MASK)
9032 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
9033 return 1;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
9035 uint16_t data;
9036
9037 factps = CSR_READ(sc, WMREG_FACTPS);
9038 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
9039 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
9040 device_xname(sc->sc_dev), factps, data));
9041 if (((factps & FACTPS_MNGCG) == 0)
9042 && ((data & NVM_CFG2_MNGM_MASK)
9043 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
9044 return 1;
9045 } else if (((manc & MANC_SMBUS_EN) != 0)
9046 && ((manc & MANC_ASF_EN) == 0))
9047 return 1;
9048
9049 return 0;
9050 }
9051
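/*
 * wm_check_reset_block:
 *
 *	Return -1 if the PHY may not be reset at the moment (e.g. because
 *	the firmware owns it), 0 otherwise.
 */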
9052 static int
9053 wm_check_reset_block(struct wm_softc *sc)
9054 {
9055 uint32_t reg;
9056
9057 switch (sc->sc_type) {
9058 case WM_T_ICH8:
9059 case WM_T_ICH9:
9060 case WM_T_ICH10:
9061 case WM_T_PCH:
9062 case WM_T_PCH2:
9063 case WM_T_PCH_LPT:
9064 reg = CSR_READ(sc, WMREG_FWSM);
9065 if ((reg & FWSM_RSPCIPHY) != 0)
9066 return 0;
9067 else
9068 return -1;
9069 break;
9070 case WM_T_82571:
9071 case WM_T_82572:
9072 case WM_T_82573:
9073 case WM_T_82574:
9074 case WM_T_82583:
9075 case WM_T_80003:
9076 reg = CSR_READ(sc, WMREG_MANC);
9077 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
9078 return -1;
9079 else
9080 return 0;
9081 break;
9082 default:
9083 /* no problem */
9084 break;
9085 }
9086
9087 return 0;
9088 }
9089
9090 static void
9091 wm_get_hw_control(struct wm_softc *sc)
9092 {
9093 uint32_t reg;
9094
9095 switch (sc->sc_type) {
9096 case WM_T_82573:
9097 reg = CSR_READ(sc, WMREG_SWSM);
9098 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
9099 break;
9100 case WM_T_82571:
9101 case WM_T_82572:
9102 case WM_T_82574:
9103 case WM_T_82583:
9104 case WM_T_80003:
9105 case WM_T_ICH8:
9106 case WM_T_ICH9:
9107 case WM_T_ICH10:
9108 case WM_T_PCH:
9109 case WM_T_PCH2:
9110 case WM_T_PCH_LPT:
9111 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9112 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
9113 break;
9114 default:
9115 break;
9116 }
9117 }
9118
9119 static void
9120 wm_release_hw_control(struct wm_softc *sc)
9121 {
9122 uint32_t reg;
9123
9124 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
9125 return;
9126
	if (sc->sc_type == WM_T_82573) {
		reg = CSR_READ(sc, WMREG_SWSM);
		reg &= ~SWSM_DRV_LOAD;
		CSR_WRITE(sc, WMREG_SWSM, reg);
9131 } else {
9132 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9133 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
9134 }
9135 }
9136
9137 static void
9138 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
9139 {
9140 uint32_t reg;
9141
9142 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
9143
9144 if (on != 0)
9145 reg |= EXTCNFCTR_GATE_PHY_CFG;
9146 else
9147 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
9148
9149 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
9150 }
9151
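/*
 * wm_smbustopci:
 *
 *	Switch the PHY's management interface from SMBus back to PCIe/MDIO
 *	by pulsing the LANPHYPC override/value bits, but only when no
 *	valid firmware owns the PHY and PHY resets are not blocked.
 */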
9152 static void
9153 wm_smbustopci(struct wm_softc *sc)
9154 {
9155 uint32_t fwsm;
9156
9157 fwsm = CSR_READ(sc, WMREG_FWSM);
9158 if (((fwsm & FWSM_FW_VALID) == 0)
9159 && ((wm_check_reset_block(sc) == 0))) {
9160 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
9161 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
9162 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9163 CSR_WRITE_FLUSH(sc);
9164 delay(10);
9165 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
9166 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9167 CSR_WRITE_FLUSH(sc);
9168 delay(50*1000);
9169
9170 /*
9171 * Gate automatic PHY configuration by hardware on non-managed
9172 * 82579
9173 */
9174 if (sc->sc_type == WM_T_PCH2)
9175 wm_gate_hw_phy_config_ich8lan(sc, 1);
9176 }
9177 }
9178
9179 static void
9180 wm_init_manageability(struct wm_softc *sc)
9181 {
9182
9183 if (sc->sc_flags & WM_F_HAS_MANAGE) {
9184 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
9185 uint32_t manc = CSR_READ(sc, WMREG_MANC);
9186
9187 /* Disable hardware interception of ARP */
9188 manc &= ~MANC_ARP_EN;
9189
9190 /* Enable receiving management packets to the host */
9191 if (sc->sc_type >= WM_T_82571) {
9192 manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
		}
9197
9198 CSR_WRITE(sc, WMREG_MANC, manc);
9199 }
9200 }
9201
9202 static void
9203 wm_release_manageability(struct wm_softc *sc)
9204 {
9205
9206 if (sc->sc_flags & WM_F_HAS_MANAGE) {
9207 uint32_t manc = CSR_READ(sc, WMREG_MANC);
9208
9209 manc |= MANC_ARP_EN;
9210 if (sc->sc_type >= WM_T_82571)
9211 manc &= ~MANC_EN_MNG2HOST;
9212
9213 CSR_WRITE(sc, WMREG_MANC, manc);
9214 }
9215 }
9216
9217 static void
9218 wm_get_wakeup(struct wm_softc *sc)
9219 {
9220
9221 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
9222 switch (sc->sc_type) {
9223 case WM_T_82573:
9224 case WM_T_82583:
9225 sc->sc_flags |= WM_F_HAS_AMT;
9226 /* FALLTHROUGH */
9227 case WM_T_80003:
9228 case WM_T_82541:
9229 case WM_T_82547:
9230 case WM_T_82571:
9231 case WM_T_82572:
9232 case WM_T_82574:
9233 case WM_T_82575:
9234 case WM_T_82576:
9235 case WM_T_82580:
9236 case WM_T_I350:
9237 case WM_T_I354:
9238 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
9239 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
9240 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9241 break;
9242 case WM_T_ICH8:
9243 case WM_T_ICH9:
9244 case WM_T_ICH10:
9245 case WM_T_PCH:
9246 case WM_T_PCH2:
9247 case WM_T_PCH_LPT:
9248 sc->sc_flags |= WM_F_HAS_AMT;
9249 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9250 break;
9251 default:
9252 break;
9253 }
9254
9255 /* 1: HAS_MANAGE */
9256 if (wm_enable_mng_pass_thru(sc) != 0)
9257 sc->sc_flags |= WM_F_HAS_MANAGE;
9258
9259 #ifdef WM_DEBUG
9260 printf("\n");
9261 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
9262 printf("HAS_AMT,");
9263 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
9264 printf("ARC_SUBSYS_VALID,");
9265 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
9266 printf("ASF_FIRMWARE_PRES,");
9267 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
9268 printf("HAS_MANAGE,");
9269 printf("\n");
9270 #endif
	/*
	 * Note that the WOL flag is set only later, after the EEPROM
	 * related setup has been done.
	 */
9275 }
9276
9277 #ifdef WM_WOL
9278 /* WOL in the newer chipset interfaces (pchlan) */
9279 static void
9280 wm_enable_phy_wakeup(struct wm_softc *sc)
9281 {
9282 #if 0
9283 uint16_t preg;
9284
9285 /* Copy MAC RARs to PHY RARs */
9286
9287 /* Copy MAC MTA to PHY MTA */
9288
9289 /* Configure PHY Rx Control register */
9290
9291 /* Enable PHY wakeup in MAC register */
9292
9293 /* Configure and enable PHY wakeup in PHY registers */
9294
9295 /* Activate PHY wakeup */
9296
9297 /* XXX */
9298 #endif
9299 }
9300
9301 /* Power down workaround on D3 */
9302 static void
9303 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
9304 {
9305 uint32_t reg;
9306 int i;
9307
9308 for (i = 0; i < 2; i++) {
9309 /* Disable link */
9310 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9311 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9312 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9313
9314 /*
9315 * Call gig speed drop workaround on Gig disable before
9316 * accessing any PHY registers
9317 */
9318 if (sc->sc_type == WM_T_ICH8)
9319 wm_gig_downshift_workaround_ich8lan(sc);
9320
9321 /* Write VR power-down enable */
9322 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9323 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9324 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
9325 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
9326
9327 /* Read it back and test */
9328 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9329 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9330 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
9331 break;
9332
9333 /* Issue PHY reset and repeat at most one more time */
9334 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9335 }
9336 }
9337
9338 static void
9339 wm_enable_wakeup(struct wm_softc *sc)
9340 {
9341 uint32_t reg, pmreg;
9342 pcireg_t pmode;
9343
9344 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
9345 &pmreg, NULL) == 0)
9346 return;
9347
9348 /* Advertise the wakeup capability */
9349 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
9350 | CTRL_SWDPIN(3));
9351 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
9352
9353 /* ICH workaround */
9354 switch (sc->sc_type) {
9355 case WM_T_ICH8:
9356 case WM_T_ICH9:
9357 case WM_T_ICH10:
9358 case WM_T_PCH:
9359 case WM_T_PCH2:
9360 case WM_T_PCH_LPT:
9361 /* Disable gig during WOL */
9362 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9363 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
9364 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9365 if (sc->sc_type == WM_T_PCH)
9366 wm_gmii_reset(sc);
9367
9368 /* Power down workaround */
9369 if (sc->sc_phytype == WMPHY_82577) {
9370 struct mii_softc *child;
9371
9372 /* Assume that the PHY is copper */
9373 child = LIST_FIRST(&sc->sc_mii.mii_phys);
9374 if (child->mii_mpd_rev <= 2)
9375 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
9376 (768 << 5) | 25, 0x0444); /* magic num */
9377 }
9378 break;
9379 default:
9380 break;
9381 }
9382
9383 /* Keep the laser running on fiber adapters */
9384 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
9385 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
9386 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9387 reg |= CTRL_EXT_SWDPIN(3);
9388 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9389 }
9390
9391 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
9392 #if 0 /* for the multicast packet */
9393 reg |= WUFC_MC;
9394 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
9395 #endif
9396
9397 if (sc->sc_type == WM_T_PCH) {
9398 wm_enable_phy_wakeup(sc);
9399 } else {
9400 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
9401 CSR_WRITE(sc, WMREG_WUFC, reg);
9402 }
9403
9404 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9405 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9406 || (sc->sc_type == WM_T_PCH2))
9407 && (sc->sc_phytype == WMPHY_IGP_3))
9408 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
9409
9410 /* Request PME */
9411 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
9412 #if 0
9413 /* Disable WOL */
9414 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
9415 #else
9416 /* For WOL */
9417 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
9418 #endif
9419 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
9420 }
9421 #endif /* WM_WOL */
9422
9423 /* EEE */
9424
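/*
 * wm_set_eee_i350:
 *
 *	Enable or disable Energy Efficient Ethernet (IEEE 802.3az) on
 *	I350 class chips according to the WM_F_EEE flag, through the
 *	IPCNFG and EEER registers.
 */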
9425 static void
9426 wm_set_eee_i350(struct wm_softc *sc)
9427 {
9428 uint32_t ipcnfg, eeer;
9429
9430 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
9431 eeer = CSR_READ(sc, WMREG_EEER);
9432
9433 if ((sc->sc_flags & WM_F_EEE) != 0) {
9434 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9435 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
9436 | EEER_LPI_FC);
9437 } else {
9438 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9439 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
9440 | EEER_LPI_FC);
9441 }
9442
9443 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
9444 CSR_WRITE(sc, WMREG_EEER, eeer);
9445 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
9446 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
9447 }
9448
9449 /*
9450 * Workarounds (mainly PHY related).
9451 * Basically, PHY's workarounds are in the PHY drivers.
9452 */
9453
9454 /* Work-around for 82566 Kumeran PCS lock loss */
9455 static void
9456 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
9457 {
9458 int miistatus, active, i;
9459 int reg;
9460
9461 miistatus = sc->sc_mii.mii_media_status;
9462
9463 /* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
9465 return;
9466
9467 active = sc->sc_mii.mii_media_active;
9468
9469 /* Nothing to do if the link is other than 1Gbps */
9470 if (IFM_SUBTYPE(active) != IFM_1000_T)
9471 return;
9472
9473 for (i = 0; i < 10; i++) {
9474 /* read twice */
9475 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9476 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
			goto out;	/* GOOD! PCS lock is intact */
9479
9480 /* Reset the PHY */
9481 wm_gmii_reset(sc);
9482 delay(5*1000);
9483 }
9484
9485 /* Disable GigE link negotiation */
9486 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9487 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9488 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9489
9490 /*
9491 * Call gig speed drop workaround on Gig disable before accessing
9492 * any PHY registers.
9493 */
9494 wm_gig_downshift_workaround_ich8lan(sc);
9495
9496 out:
9497 return;
9498 }
9499
9500 /* WOL from S5 stops working */
9501 static void
9502 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
9503 {
9504 uint16_t kmrn_reg;
9505
9506 /* Only for igp3 */
9507 if (sc->sc_phytype == WMPHY_IGP_3) {
9508 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
9509 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
9510 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9511 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
9512 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9513 }
9514 }
9515
9516 /*
9517 * Workaround for pch's PHYs
9518 * XXX should be moved to new PHY driver?
9519 */
9520 static void
9521 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
9522 {
9523 if (sc->sc_phytype == WMPHY_82577)
9524 wm_set_mdio_slow_mode_hv(sc);
9525
9526 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
9527
9528 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
9529
9530 /* 82578 */
9531 if (sc->sc_phytype == WMPHY_82578) {
9532 /* PCH rev. < 3 */
9533 if (sc->sc_rev < 3) {
9534 /* XXX 6 bit shift? Why? Is it page2? */
9535 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
9536 0x66c0);
9537 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
9538 0xffff);
9539 }
9540
9541 /* XXX phy rev. < 2 */
9542 }
9543
9544 /* Select page 0 */
9545
9546 /* XXX acquire semaphore */
9547 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
9548 /* XXX release semaphore */
9549
9550 /*
9551 * Configure the K1 Si workaround during phy reset assuming there is
9552 * link so that it disables K1 if link is in 1Gbps.
9553 */
9554 wm_k1_gig_workaround_hv(sc, 1);
9555 }
9556
9557 static void
9558 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
9559 {
9560
9561 wm_set_mdio_slow_mode_hv(sc);
9562 }
9563
9564 static void
9565 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
9566 {
9567 int k1_enable = sc->sc_nvm_k1_enabled;
9568
9569 /* XXX acquire semaphore */
9570
9571 if (link) {
9572 k1_enable = 0;
9573
9574 /* Link stall fix for link up */
9575 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
9576 } else {
9577 /* Link stall fix for link down */
9578 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
9579 }
9580
9581 wm_configure_k1_ich8lan(sc, k1_enable);
9582
9583 /* XXX release semaphore */
9584 }
9585
9586 static void
9587 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
9588 {
9589 uint32_t reg;
9590
9591 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
9592 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
9593 reg | HV_KMRN_MDIO_SLOW);
9594 }
9595
9596 static void
9597 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
9598 {
9599 uint32_t ctrl, ctrl_ext, tmp;
9600 uint16_t kmrn_reg;
9601
9602 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
9603
9604 if (k1_enable)
9605 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
9606 else
9607 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
9608
9609 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
9610
9611 delay(20);
9612
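	/*
	 * Briefly force the MAC speed (CTRL_FRCSPD together with
	 * CTRL_EXT_SPD_BYPS) and then restore the original CTRL/CTRL_EXT
	 * values; this appears to be what makes the new K1 setting take
	 * effect.
	 */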
9613 ctrl = CSR_READ(sc, WMREG_CTRL);
9614 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9615
9616 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
9617 tmp |= CTRL_FRCSPD;
9618
9619 CSR_WRITE(sc, WMREG_CTRL, tmp);
9620 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
9621 CSR_WRITE_FLUSH(sc);
9622 delay(20);
9623
9624 CSR_WRITE(sc, WMREG_CTRL, ctrl);
9625 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9626 CSR_WRITE_FLUSH(sc);
9627 delay(20);
9628 }
9629
/* Special case - the 82575 needs manual init ... */
9631 static void
9632 wm_reset_init_script_82575(struct wm_softc *sc)
9633 {
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */
9638
9639 /* SerDes configuration via SERDESCTRL */
9640 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
9641 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
9642 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
9643 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
9644
9645 /* CCM configuration via CCMCTL register */
9646 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
9647 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
9648
9649 /* PCIe lanes configuration */
9650 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
9651 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
9652 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
9653 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
9654
9655 /* PCIe PLL Configuration */
9656 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
9657 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
9658 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
9659 }
9660