/* $NetBSD: if_wm.c,v 1.319 2015/05/04 08:46:09 msaitoh Exp $ */

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed for the NetBSD Project by
 *    Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- EEE (Energy Efficient Ethernet)
 *	- MSI/MSI-X
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.319 2015/05/04 08:46:09 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>		/* XXX for struct ip */
#include <netinet/in_systm.h>	/* XXX for struct ip */
#include <netinet/ip.h>		/* XXX for struct ip */
#include <netinet/ip6.h>	/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>	/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define WM_DEBUG_LINK	0x01
#define WM_DEBUG_TX	0x02
#define WM_DEBUG_RX	0x04
#define WM_DEBUG_GMII	0x08
#define WM_DEBUG_MANAGE	0x10
#define WM_DEBUG_NVM	0x20
int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define DPRINTF(x, y) if (wm_debug & (x)) printf y
#else
#define DPRINTF(x, y) /* nothing */
#endif /* WM_DEBUG */
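/*
 * Usage example: DPRINTF(WM_DEBUG_LINK, ("%s: link up\n",
 * device_xname(sc->sc_dev))) prints only when the WM_DEBUG_LINK bit
 * is set in wm_debug; note the doubled parentheses around the
 * printf-style argument list.
 */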

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define WM_NTXSEGS		256
#define WM_IFQUEUELEN		256
#define WM_TXQUEUELEN_MAX	64
#define WM_TXQUEUELEN_MAX_82547	16
#define WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define WM_NTXDESC_82542	256
#define WM_NTXDESC_82544	4096
#define WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */
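/*
 * Both ring sizes are powers of two, so the AND with the mask in
 * WM_NEXTTX()/WM_NEXTTXS() wraps the index without a modulo; e.g.
 * with WM_NTXDESC(sc) == 4096, WM_NEXTTX(sc, 4095) == 0.
 */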

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define WM_NRXDESC		256
#define WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
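/*
 * WM_NRXDESC must likewise stay a power of two for these masks to
 * work; WM_PREVRX(0) wraps around to WM_NRXDESC - 1 (255).
 */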

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
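/*
 * These expand to byte offsets within the control-data clump;
 * WM_CDRXOFF(0) is 0 and WM_CDTXOFF(0) equals the size of the Rx
 * descriptor array, since the Rx descriptors come first in
 * struct wm_control_data_82544.
 */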

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
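/*
 * One SW/FW semaphore bit per PHY; the table is indexed by the
 * chip's function ID (sc_funcid, 0 to 3).
 */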

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define WM_MEDIATYPE_UNKNOWN	0x00
#define WM_MEDIATYPE_FIBER	0x01
#define WM_MEDIATYPE_COPPER	0x02
#define WM_MEDIATYPE_SERDES	0x03	/* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall; /* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define WM_TX_LOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define WM_RX_LOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
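/*
 * Typical pattern: WM_TX_LOCK(sc); ...modify Tx ring state...;
 * WM_TX_UNLOCK(sc).  Without WM_MPSAFE the lock pointers are left
 * NULL, so all of these collapse to no-ops and the driver keeps its
 * old single-threaded behavior.
 */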

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define WM_RXCHAIN_RESET(sc) \
do { \
	(sc)->sc_rxtailp = &(sc)->sc_rxhead; \
	*(sc)->sc_rxtailp = NULL; \
	(sc)->sc_rxlen = 0; \
} while (/*CONSTCOND*/0)

#define WM_RXCHAIN_LINK(sc, m) \
do { \
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
	(sc)->sc_rxtailp = &(m)->m_next; \
} while (/*CONSTCOND*/0)
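/*
 * sc_rxtailp always points at the m_next field of the last mbuf in
 * the chain being assembled (or at sc_rxhead when it is empty), so
 * WM_RXCHAIN_LINK() appends in O(1) without walking the chain.
 */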

#ifdef WM_EVENT_COUNTERS
#define WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define WM_EVCNT_INCR(ev)	/* nothing */
#define WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define CSR_READ(sc, reg) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define CSR_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define CSR_WRITE_FLUSH(sc) \
	(void) CSR_READ((sc), WMREG_STATUS)
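/*
 * Reading any register back (STATUS here) forces posted writes out
 * to the chip; CSR_WRITE_FLUSH() is used where a write must reach
 * the hardware before the driver continues.
 */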

#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define WM_CDTXADDR_HI(sc, x) \
	(sizeof(bus_addr_t) == 8 ? \
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define WM_CDRXADDR_HI(sc, x) \
	(sizeof(bus_addr_t) == 8 ? \
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
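/*
 * The _LO/_HI pairs split a descriptor ring address into the two
 * 32-bit halves that the descriptor base registers (e.g. TDBAL/TDBAH)
 * expect; with a 32-bit bus_addr_t the high half is simply 0.
 */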

#define WM_CDTXSYNC(sc, x, n, ops) \
do { \
	int __x, __n; \
 \
	__x = (x); \
	__n = (n); \
 \
	/* If it will wrap around, sync to the end of the ring. */ \
	if ((__x + __n) > WM_NTXDESC(sc)) { \
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \
		    (WM_NTXDESC(sc) - __x), (ops)); \
		__n -= (WM_NTXDESC(sc) - __x); \
		__x = 0; \
	} \
 \
	/* Now sync whatever is left. */ \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \
} while (/*CONSTCOND*/0)
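/*
 * Example: syncing 4 descriptors starting at WM_NTXDESC(sc) - 2 is
 * split into two bus_dmamap_sync() calls, two entries at the tail of
 * the ring and two at the head, because the byte range would
 * otherwise run past the end of the descriptor array.
 */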

#define WM_CDRXSYNC(sc, x, ops) \
do { \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \
} while (/*CONSTCOND*/0)

#define WM_INIT_RXDESC(sc, x) \
do { \
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \
	struct mbuf *__m = __rxs->rxs_mbuf; \
 \
	/* \
	 * Note: We scoot the packet forward 2 bytes in the buffer \
	 * so that the payload after the Ethernet header is aligned \
	 * to a 4-byte boundary. \
	 * \
	 * XXX BRAINDAMAGE ALERT! \
	 * The stupid chip uses the same size for every buffer, which \
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this \
	 * reason, we can't "scoot" packets longer than the standard \
	 * Ethernet MTU.  On strict-alignment platforms, if the total \
	 * size exceeds (2K - 2) we set align_tweak to 0 and let \
	 * the upper layer copy the headers. \
	 */ \
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak; \
 \
	wm_set_dma_addr(&__rxd->wrx_addr, \
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0; \
	__rxd->wrx_cksum = 0; \
	__rxd->wrx_status = 0; \
	__rxd->wrx_errors = 0; \
	__rxd->wrx_special = 0; \
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
 \
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \
} while (/*CONSTCOND*/0)
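/*
 * With sc_align_tweak == 2, the 14-byte Ethernet header ends on a
 * 4-byte boundary, so the IP header that follows it is 4-byte
 * aligned as strict-alignment platforms require.
 */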

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_mediainit(struct wm_softc *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Generally, the PHY workarounds live in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t wmp_vendor;
	pci_product_id_t wmp_product;
	const char *wmp_name;
	wm_chip_type wmp_type;
	uint32_t wmp_flags;
#define WMP_F_UNKNOWN	WM_MEDIATYPE_UNKNOWN
#define WMP_F_FIBER	WM_MEDIATYPE_FIBER
#define WMP_F_COPPER	WM_MEDIATYPE_COPPER
#define WMP_F_SERDES	WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571, WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571, WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571, WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571, WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571, WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ 0, 0,
	  NULL,
	  0, 0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
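/*
 * Both helpers go through the I/O BAR's two-register window: the
 * dword at offset 0 selects a CSR by its memory-space offset and the
 * dword at offset 4 moves the data (IOADDR/IODATA in Intel's
 * documentation).
 */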

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
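/*
 * The sizeof(bus_addr_t) test is resolved at compile time, so on
 * 32-bit configurations the 64-bit shift is dead code and wa_high is
 * simply cleared.
 */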

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
			 * that's not a problem, since those newer chips
			 * don't have this bug in the first place.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
#ifdef WM_MPSAFE
	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
#endif
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1538 		/*
1539 		 * CSA (Communication Streaming Architecture) is about as
1540 		 * fast as a 32-bit 66MHz PCI bus.
1541 		 */
1542 sc->sc_flags |= WM_F_CSA;
1543 sc->sc_bus_speed = 66;
1544 aprint_verbose_dev(sc->sc_dev,
1545 "Communication Streaming Architecture\n");
1546 if (sc->sc_type == WM_T_82547) {
1547 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1548 callout_setfunc(&sc->sc_txfifo_ch,
1549 wm_82547_txfifo_stall, sc);
1550 aprint_verbose_dev(sc->sc_dev,
1551 "using 82547 Tx FIFO stall work-around\n");
1552 }
1553 } else if (sc->sc_type >= WM_T_82571) {
1554 sc->sc_flags |= WM_F_PCIE;
1555 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1556 && (sc->sc_type != WM_T_ICH10)
1557 && (sc->sc_type != WM_T_PCH)
1558 && (sc->sc_type != WM_T_PCH2)
1559 && (sc->sc_type != WM_T_PCH_LPT)) {
1560 /* ICH* and PCH* have no PCIe capability registers */
1561 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1562 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1563 NULL) == 0)
1564 aprint_error_dev(sc->sc_dev,
1565 "unable to find PCIe capability\n");
1566 }
1567 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1568 } else {
1569 reg = CSR_READ(sc, WMREG_STATUS);
1570 if (reg & STATUS_BUS64)
1571 sc->sc_flags |= WM_F_BUS64;
1572 if ((reg & STATUS_PCIX_MODE) != 0) {
1573 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1574
1575 sc->sc_flags |= WM_F_PCIX;
1576 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1577 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1578 aprint_error_dev(sc->sc_dev,
1579 "unable to find PCIX capability\n");
1580 else if (sc->sc_type != WM_T_82545_3 &&
1581 sc->sc_type != WM_T_82546_3) {
1582 /*
1583 * Work around a problem caused by the BIOS
1584 * setting the max memory read byte count
1585 * incorrectly.
1586 */
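				/*
				 * The 2-bit byte count field encodes the
				 * maximum memory read byte count as
				 * 512 << n, i.e. 0 -> 512, 1 -> 1024,
				 * 2 -> 2048, 3 -> 4096 bytes (see the
				 * aprint below); the fix simply clamps the
				 * commanded value to the advertised maximum.
				 */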
1587 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1588 sc->sc_pcixe_capoff + PCIX_CMD);
1589 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1590 sc->sc_pcixe_capoff + PCIX_STATUS);
1591
1592 bytecnt =
1593 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1594 PCIX_CMD_BYTECNT_SHIFT;
1595 maxb =
1596 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1597 PCIX_STATUS_MAXB_SHIFT;
1598 if (bytecnt > maxb) {
1599 aprint_verbose_dev(sc->sc_dev,
1600 "resetting PCI-X MMRBC: %d -> %d\n",
1601 512 << bytecnt, 512 << maxb);
1602 pcix_cmd = (pcix_cmd &
1603 ~PCIX_CMD_BYTECNT_MASK) |
1604 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1605 pci_conf_write(pa->pa_pc, pa->pa_tag,
1606 sc->sc_pcixe_capoff + PCIX_CMD,
1607 pcix_cmd);
1608 }
1609 }
1610 }
1611 /*
1612 * The quad port adapter is special; it has a PCIX-PCIX
1613 * bridge on the board, and can run the secondary bus at
1614 * a higher speed.
1615 */
1616 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1617 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1618 : 66;
1619 } else if (sc->sc_flags & WM_F_PCIX) {
1620 switch (reg & STATUS_PCIXSPD_MASK) {
1621 case STATUS_PCIXSPD_50_66:
1622 sc->sc_bus_speed = 66;
1623 break;
1624 case STATUS_PCIXSPD_66_100:
1625 sc->sc_bus_speed = 100;
1626 break;
1627 case STATUS_PCIXSPD_100_133:
1628 sc->sc_bus_speed = 133;
1629 break;
1630 default:
1631 aprint_error_dev(sc->sc_dev,
1632 "unknown PCIXSPD %d; assuming 66MHz\n",
1633 reg & STATUS_PCIXSPD_MASK);
1634 sc->sc_bus_speed = 66;
1635 break;
1636 }
1637 } else
1638 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1639 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1640 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1641 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1642 }
1643
1644 /*
1645 * Allocate the control data structures, and create and load the
1646 * DMA map for it.
1647 *
1648 * NOTE: All Tx descriptors must be in the same 4G segment of
1649 * memory. So must Rx descriptors. We simplify by allocating
1650 * both sets within the same 4G segment.
1651 */
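	/*
	 * The 4G constraint is enforced through the boundary argument
	 * of bus_dmamem_alloc() below: a boundary of 0x100000000 means
	 * no allocated segment may cross a 4GB address boundary.
	 */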
1652 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1653 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1654 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1655 sizeof(struct wm_control_data_82542) :
1656 sizeof(struct wm_control_data_82544);
1657 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1658 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1659 &sc->sc_cd_rseg, 0)) != 0) {
1660 aprint_error_dev(sc->sc_dev,
1661 "unable to allocate control data, error = %d\n",
1662 error);
1663 goto fail_0;
1664 }
1665
1666 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1667 sc->sc_cd_rseg, sc->sc_cd_size,
1668 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1669 aprint_error_dev(sc->sc_dev,
1670 "unable to map control data, error = %d\n", error);
1671 goto fail_1;
1672 }
1673
1674 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1675 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1676 aprint_error_dev(sc->sc_dev,
1677 "unable to create control data DMA map, error = %d\n",
1678 error);
1679 goto fail_2;
1680 }
1681
1682 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1683 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1684 aprint_error_dev(sc->sc_dev,
1685 "unable to load control data DMA map, error = %d\n",
1686 error);
1687 goto fail_3;
1688 }
1689
1690 /* Create the transmit buffer DMA maps. */
1691 WM_TXQUEUELEN(sc) =
1692 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1693 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1694 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1695 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1696 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1697 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1698 aprint_error_dev(sc->sc_dev,
1699 "unable to create Tx DMA map %d, error = %d\n",
1700 i, error);
1701 goto fail_4;
1702 }
1703 }
1704
1705 /* Create the receive buffer DMA maps. */
1706 for (i = 0; i < WM_NRXDESC; i++) {
1707 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1708 MCLBYTES, 0, 0,
1709 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1710 aprint_error_dev(sc->sc_dev,
1711 "unable to create Rx DMA map %d error = %d\n",
1712 i, error);
1713 goto fail_5;
1714 }
1715 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1716 }
1717
1718 /* clear interesting stat counters */
1719 CSR_READ(sc, WMREG_COLC);
1720 CSR_READ(sc, WMREG_RXERRC);
1721
1722 /* get PHY control from SMBus to PCIe */
1723 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1724 || (sc->sc_type == WM_T_PCH_LPT))
1725 wm_smbustopci(sc);
1726
1727 /* Reset the chip to a known state. */
1728 wm_reset(sc);
1729
1730 /* Get some information about the EEPROM. */
1731 switch (sc->sc_type) {
1732 case WM_T_82542_2_0:
1733 case WM_T_82542_2_1:
1734 case WM_T_82543:
1735 case WM_T_82544:
1736 /* Microwire */
1737 sc->sc_nvm_wordsize = 64;
1738 sc->sc_nvm_addrbits = 6;
1739 break;
1740 case WM_T_82540:
1741 case WM_T_82545:
1742 case WM_T_82545_3:
1743 case WM_T_82546:
1744 case WM_T_82546_3:
1745 /* Microwire */
1746 reg = CSR_READ(sc, WMREG_EECD);
1747 if (reg & EECD_EE_SIZE) {
1748 sc->sc_nvm_wordsize = 256;
1749 sc->sc_nvm_addrbits = 8;
1750 } else {
1751 sc->sc_nvm_wordsize = 64;
1752 sc->sc_nvm_addrbits = 6;
1753 }
1754 sc->sc_flags |= WM_F_LOCK_EECD;
1755 break;
1756 case WM_T_82541:
1757 case WM_T_82541_2:
1758 case WM_T_82547:
1759 case WM_T_82547_2:
1760 sc->sc_flags |= WM_F_LOCK_EECD;
1761 reg = CSR_READ(sc, WMREG_EECD);
1762 if (reg & EECD_EE_TYPE) {
1763 /* SPI */
1764 sc->sc_flags |= WM_F_EEPROM_SPI;
1765 wm_nvm_set_addrbits_size_eecd(sc);
1766 } else {
1767 /* Microwire */
1768 if ((reg & EECD_EE_ABITS) != 0) {
1769 sc->sc_nvm_wordsize = 256;
1770 sc->sc_nvm_addrbits = 8;
1771 } else {
1772 sc->sc_nvm_wordsize = 64;
1773 sc->sc_nvm_addrbits = 6;
1774 }
1775 }
1776 break;
1777 case WM_T_82571:
1778 case WM_T_82572:
1779 /* SPI */
1780 sc->sc_flags |= WM_F_EEPROM_SPI;
1781 wm_nvm_set_addrbits_size_eecd(sc);
1782 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1783 break;
1784 case WM_T_82573:
1785 sc->sc_flags |= WM_F_LOCK_SWSM;
1786 /* FALLTHROUGH */
1787 case WM_T_82574:
1788 case WM_T_82583:
1789 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1790 sc->sc_flags |= WM_F_EEPROM_FLASH;
1791 sc->sc_nvm_wordsize = 2048;
1792 } else {
1793 /* SPI */
1794 sc->sc_flags |= WM_F_EEPROM_SPI;
1795 wm_nvm_set_addrbits_size_eecd(sc);
1796 }
1797 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1798 break;
1799 case WM_T_82575:
1800 case WM_T_82576:
1801 case WM_T_82580:
1802 case WM_T_I350:
1803 case WM_T_I354:
1804 case WM_T_80003:
1805 /* SPI */
1806 sc->sc_flags |= WM_F_EEPROM_SPI;
1807 wm_nvm_set_addrbits_size_eecd(sc);
1808 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1809 | WM_F_LOCK_SWSM;
1810 break;
1811 case WM_T_ICH8:
1812 case WM_T_ICH9:
1813 case WM_T_ICH10:
1814 case WM_T_PCH:
1815 case WM_T_PCH2:
1816 case WM_T_PCH_LPT:
1817 /* FLASH */
1818 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1819 sc->sc_nvm_wordsize = 2048;
1820 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1821 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1822 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1823 aprint_error_dev(sc->sc_dev,
1824 "can't map FLASH registers\n");
1825 goto fail_5;
1826 }
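		/*
		 * GFPREG sizing, illustrated: the low field holds the
		 * first sector of the NVM region and the high field the
		 * last one, in ICH_FLASH_SECTOR_SIZE units, and the
		 * region contains two banks.  Assuming a 4KB sector
		 * size, base = 1 and limit = 8 would give
		 *
		 *	(8 + 1 - 1) * 4096 / (2 * sizeof(uint16_t))
		 *	    = 8192 16-bit words per bank.
		 */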
1827 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1828 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1829 ICH_FLASH_SECTOR_SIZE;
1830 sc->sc_ich8_flash_bank_size =
1831 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1832 sc->sc_ich8_flash_bank_size -=
1833 (reg & ICH_GFPREG_BASE_MASK);
1834 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1835 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1836 break;
1837 case WM_T_I210:
1838 case WM_T_I211:
1839 wm_nvm_set_addrbits_size_eecd(sc);
1840 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1841 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1842 break;
1843 default:
1844 break;
1845 }
1846
1847 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1848 switch (sc->sc_type) {
1849 case WM_T_82571:
1850 case WM_T_82572:
1851 reg = CSR_READ(sc, WMREG_SWSM2);
1852 if ((reg & SWSM2_LOCK) == 0) {
1853 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1854 force_clear_smbi = true;
1855 } else
1856 force_clear_smbi = false;
1857 break;
1858 case WM_T_82573:
1859 case WM_T_82574:
1860 case WM_T_82583:
1861 force_clear_smbi = true;
1862 break;
1863 default:
1864 force_clear_smbi = false;
1865 break;
1866 }
1867 if (force_clear_smbi) {
1868 reg = CSR_READ(sc, WMREG_SWSM);
1869 if ((reg & SWSM_SMBI) != 0)
1870 aprint_error_dev(sc->sc_dev,
1871 "Please update the Bootagent\n");
1872 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1873 }
1874
1875 	/*
1876 	 * Defer printing the EEPROM type until after verifying the checksum.
1877 	 * This allows the EEPROM type to be printed correctly in the case
1878 	 * that no EEPROM is attached.
1879 	 */
1880 /*
1881 * Validate the EEPROM checksum. If the checksum fails, flag
1882 * this for later, so we can fail future reads from the EEPROM.
1883 */
1884 if (wm_nvm_validate_checksum(sc)) {
1885 		/*
1886 		 * Read it a second time, because some PCI-e parts fail the
1887 		 * first check due to the link being in a sleep state.
1888 		 */
1889 if (wm_nvm_validate_checksum(sc))
1890 sc->sc_flags |= WM_F_EEPROM_INVALID;
1891 }
1892
1893 /* Set device properties (macflags) */
1894 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1895
1896 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1897 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1898 else {
1899 aprint_verbose_dev(sc->sc_dev, "%u words ",
1900 sc->sc_nvm_wordsize);
1901 if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1902 aprint_verbose("FLASH(HW)\n");
1903 } else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1904 aprint_verbose("FLASH\n");
1905 } else {
1906 if (sc->sc_flags & WM_F_EEPROM_SPI)
1907 eetype = "SPI";
1908 else
1909 eetype = "MicroWire";
1910 aprint_verbose("(%d address bits) %s EEPROM\n",
1911 sc->sc_nvm_addrbits, eetype);
1912 }
1913 }
1914
1915 switch (sc->sc_type) {
1916 case WM_T_82571:
1917 case WM_T_82572:
1918 case WM_T_82573:
1919 case WM_T_82574:
1920 case WM_T_82583:
1921 case WM_T_80003:
1922 case WM_T_ICH8:
1923 case WM_T_ICH9:
1924 case WM_T_ICH10:
1925 case WM_T_PCH:
1926 case WM_T_PCH2:
1927 case WM_T_PCH_LPT:
1928 if (wm_check_mng_mode(sc) != 0)
1929 wm_get_hw_control(sc);
1930 break;
1931 default:
1932 break;
1933 }
1934 wm_get_wakeup(sc);
1935 	/*
1936 	 * Read the Ethernet address from the EEPROM, unless it was
1937 	 * already found in the device properties.
1938 	 */
1939 ea = prop_dictionary_get(dict, "mac-address");
1940 if (ea != NULL) {
1941 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1942 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1943 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1944 } else {
1945 if (wm_read_mac_addr(sc, enaddr) != 0) {
1946 aprint_error_dev(sc->sc_dev,
1947 "unable to read Ethernet address\n");
1948 goto fail_5;
1949 }
1950 }
1951
1952 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1953 ether_sprintf(enaddr));
1954
1955 /*
1956 * Read the config info from the EEPROM, and set up various
1957 * bits in the control registers based on their contents.
1958 */
1959 pn = prop_dictionary_get(dict, "i82543-cfg1");
1960 if (pn != NULL) {
1961 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1962 cfg1 = (uint16_t) prop_number_integer_value(pn);
1963 } else {
1964 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
1965 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1966 goto fail_5;
1967 }
1968 }
1969
1970 pn = prop_dictionary_get(dict, "i82543-cfg2");
1971 if (pn != NULL) {
1972 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1973 cfg2 = (uint16_t) prop_number_integer_value(pn);
1974 } else {
1975 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
1976 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1977 goto fail_5;
1978 }
1979 }
1980
1981 /* check for WM_F_WOL */
1982 switch (sc->sc_type) {
1983 case WM_T_82542_2_0:
1984 case WM_T_82542_2_1:
1985 case WM_T_82543:
1986 /* dummy? */
1987 eeprom_data = 0;
1988 apme_mask = NVM_CFG3_APME;
1989 break;
1990 case WM_T_82544:
1991 apme_mask = NVM_CFG2_82544_APM_EN;
1992 eeprom_data = cfg2;
1993 break;
1994 case WM_T_82546:
1995 case WM_T_82546_3:
1996 case WM_T_82571:
1997 case WM_T_82572:
1998 case WM_T_82573:
1999 case WM_T_82574:
2000 case WM_T_82583:
2001 case WM_T_80003:
2002 default:
2003 apme_mask = NVM_CFG3_APME;
2004 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2005 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2006 break;
2007 case WM_T_82575:
2008 case WM_T_82576:
2009 case WM_T_82580:
2010 case WM_T_I350:
2011 case WM_T_I354: /* XXX ok? */
2012 case WM_T_ICH8:
2013 case WM_T_ICH9:
2014 case WM_T_ICH10:
2015 case WM_T_PCH:
2016 case WM_T_PCH2:
2017 case WM_T_PCH_LPT:
2018 /* XXX The funcid should be checked on some devices */
2019 apme_mask = WUC_APME;
2020 eeprom_data = CSR_READ(sc, WMREG_WUC);
2021 break;
2022 }
2023
2024 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2025 if ((eeprom_data & apme_mask) != 0)
2026 sc->sc_flags |= WM_F_WOL;
2027 #ifdef WM_DEBUG
2028 if ((sc->sc_flags & WM_F_WOL) != 0)
2029 printf("WOL\n");
2030 #endif
2031
2032 	/*
2033 	 * XXX need special handling for some multiple port cards
2034 	 * to disable a particular port.
2035 	 */
2036
2037 if (sc->sc_type >= WM_T_82544) {
2038 pn = prop_dictionary_get(dict, "i82543-swdpin");
2039 if (pn != NULL) {
2040 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2041 swdpin = (uint16_t) prop_number_integer_value(pn);
2042 } else {
2043 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2044 aprint_error_dev(sc->sc_dev,
2045 "unable to read SWDPIN\n");
2046 goto fail_5;
2047 }
2048 }
2049 }
2050
2051 if (cfg1 & NVM_CFG1_ILOS)
2052 sc->sc_ctrl |= CTRL_ILOS;
2053 if (sc->sc_type >= WM_T_82544) {
2054 sc->sc_ctrl |=
2055 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2056 CTRL_SWDPIO_SHIFT;
2057 sc->sc_ctrl |=
2058 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2059 CTRL_SWDPINS_SHIFT;
2060 } else {
2061 sc->sc_ctrl |=
2062 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2063 CTRL_SWDPIO_SHIFT;
2064 }
2065
2066 #if 0
2067 if (sc->sc_type >= WM_T_82544) {
2068 if (cfg1 & NVM_CFG1_IPS0)
2069 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2070 if (cfg1 & NVM_CFG1_IPS1)
2071 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2072 sc->sc_ctrl_ext |=
2073 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2074 CTRL_EXT_SWDPIO_SHIFT;
2075 sc->sc_ctrl_ext |=
2076 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2077 CTRL_EXT_SWDPINS_SHIFT;
2078 } else {
2079 sc->sc_ctrl_ext |=
2080 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2081 CTRL_EXT_SWDPIO_SHIFT;
2082 }
2083 #endif
2084
2085 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2086 #if 0
2087 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2088 #endif
2089
2090 /*
2091 * Set up some register offsets that are different between
2092 * the i82542 and the i82543 and later chips.
2093 */
2094 if (sc->sc_type < WM_T_82543) {
2095 sc->sc_rdt_reg = WMREG_OLD_RDT0;
2096 sc->sc_tdt_reg = WMREG_OLD_TDT;
2097 } else {
2098 sc->sc_rdt_reg = WMREG_RDT;
2099 sc->sc_tdt_reg = WMREG_TDT;
2100 }
2101
2102 if (sc->sc_type == WM_T_PCH) {
2103 uint16_t val;
2104
2105 /* Save the NVM K1 bit setting */
2106 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2107
2108 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2109 sc->sc_nvm_k1_enabled = 1;
2110 else
2111 sc->sc_nvm_k1_enabled = 0;
2112 }
2113
2114 	/*
2115 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize
2116 	 * the media structures accordingly.
2117 	 */
2118 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2119 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2120 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2121 || sc->sc_type == WM_T_82573
2122 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2123 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2124 wm_gmii_mediainit(sc, wmp->wmp_product);
2125 } else if (sc->sc_type < WM_T_82543 ||
2126 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2127 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2128 aprint_error_dev(sc->sc_dev,
2129 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2130 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2131 }
2132 wm_tbi_mediainit(sc);
2133 } else {
2134 switch (sc->sc_type) {
2135 case WM_T_82575:
2136 case WM_T_82576:
2137 case WM_T_82580:
2138 case WM_T_I350:
2139 case WM_T_I354:
2140 case WM_T_I210:
2141 case WM_T_I211:
2142 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2143 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2144 switch (link_mode) {
2145 case CTRL_EXT_LINK_MODE_1000KX:
2146 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2147 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2148 break;
2149 case CTRL_EXT_LINK_MODE_SGMII:
2150 if (wm_sgmii_uses_mdio(sc)) {
2151 aprint_verbose_dev(sc->sc_dev,
2152 "SGMII(MDIO)\n");
2153 sc->sc_flags |= WM_F_SGMII;
2154 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2155 break;
2156 }
2157 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2158 /*FALLTHROUGH*/
2159 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2160 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2161 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2162 if (link_mode
2163 == CTRL_EXT_LINK_MODE_SGMII) {
2164 sc->sc_mediatype
2165 = WM_MEDIATYPE_COPPER;
2166 sc->sc_flags |= WM_F_SGMII;
2167 } else {
2168 sc->sc_mediatype
2169 = WM_MEDIATYPE_SERDES;
2170 aprint_verbose_dev(sc->sc_dev,
2171 "SERDES\n");
2172 }
2173 break;
2174 }
2175 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2176 aprint_verbose_dev(sc->sc_dev,
2177 "SERDES\n");
2178
2179 /* Change current link mode setting */
2180 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2181 switch (sc->sc_mediatype) {
2182 case WM_MEDIATYPE_COPPER:
2183 reg |= CTRL_EXT_LINK_MODE_SGMII;
2184 break;
2185 case WM_MEDIATYPE_SERDES:
2186 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2187 break;
2188 default:
2189 break;
2190 }
2191 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2192 break;
2193 case CTRL_EXT_LINK_MODE_GMII:
2194 default:
2195 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2196 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2197 break;
2198 }
2199
2201 			if ((sc->sc_flags & WM_F_SGMII) != 0)
2202 				reg |= CTRL_EXT_I2C_ENA;
2203 			else
2204 				reg &= ~CTRL_EXT_I2C_ENA;
2205 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2206
2207 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2208 wm_gmii_mediainit(sc, wmp->wmp_product);
2209 else
2210 wm_tbi_mediainit(sc);
2211 break;
2212 default:
2213 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2214 aprint_error_dev(sc->sc_dev,
2215 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2216 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2217 wm_gmii_mediainit(sc, wmp->wmp_product);
2218 }
2219 }
2220
2221 ifp = &sc->sc_ethercom.ec_if;
2222 xname = device_xname(sc->sc_dev);
2223 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2224 ifp->if_softc = sc;
2225 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2226 ifp->if_ioctl = wm_ioctl;
2227 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2228 ifp->if_start = wm_nq_start;
2229 else
2230 ifp->if_start = wm_start;
2231 ifp->if_watchdog = wm_watchdog;
2232 ifp->if_init = wm_init;
2233 ifp->if_stop = wm_stop;
2234 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2235 IFQ_SET_READY(&ifp->if_snd);
2236
2237 /* Check for jumbo frame */
2238 switch (sc->sc_type) {
2239 case WM_T_82573:
2240 /* XXX limited to 9234 if ASPM is disabled */
2241 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &io3);
2242 if ((io3 & NVM_3GIO_3_ASPM_MASK) != 0)
2243 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2244 break;
2245 case WM_T_82571:
2246 case WM_T_82572:
2247 case WM_T_82574:
2248 case WM_T_82575:
2249 case WM_T_82576:
2250 case WM_T_82580:
2251 case WM_T_I350:
2252 case WM_T_I354: /* XXXX ok? */
2253 case WM_T_I210:
2254 case WM_T_I211:
2255 case WM_T_80003:
2256 case WM_T_ICH9:
2257 case WM_T_ICH10:
2258 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2259 case WM_T_PCH_LPT:
2260 /* XXX limited to 9234 */
2261 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2262 break;
2263 case WM_T_PCH:
2264 /* XXX limited to 4096 */
2265 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2266 break;
2267 case WM_T_82542_2_0:
2268 case WM_T_82542_2_1:
2269 case WM_T_82583:
2270 case WM_T_ICH8:
2271 /* No support for jumbo frame */
2272 break;
2273 default:
2274 /* ETHER_MAX_LEN_JUMBO */
2275 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2276 break;
2277 }
2278
2279 /* If we're a i82543 or greater, we can support VLANs. */
2280 if (sc->sc_type >= WM_T_82543)
2281 sc->sc_ethercom.ec_capabilities |=
2282 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2283
2284 	/*
2285 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
2286 	 * on the i82543 and later.
2287 	 */
2288 if (sc->sc_type >= WM_T_82543) {
2289 ifp->if_capabilities |=
2290 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2291 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2292 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2293 IFCAP_CSUM_TCPv6_Tx |
2294 IFCAP_CSUM_UDPv6_Tx;
2295 }
2296
2297 /*
2298 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2299 *
2300 * 82541GI (8086:1076) ... no
2301 * 82572EI (8086:10b9) ... yes
2302 */
2303 if (sc->sc_type >= WM_T_82571) {
2304 ifp->if_capabilities |=
2305 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2306 }
2307
2308 /*
2309 * If we're a i82544 or greater (except i82547), we can do
2310 * TCP segmentation offload.
2311 */
2312 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2313 ifp->if_capabilities |= IFCAP_TSOv4;
2314 }
2315
2316 if (sc->sc_type >= WM_T_82571) {
2317 ifp->if_capabilities |= IFCAP_TSOv6;
2318 }
2319
2320 #ifdef WM_MPSAFE
2321 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2322 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2323 #else
2324 sc->sc_tx_lock = NULL;
2325 sc->sc_rx_lock = NULL;
2326 #endif
2327
2328 /* Attach the interface. */
2329 if_attach(ifp);
2330 ether_ifattach(ifp, enaddr);
2331 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2332 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2333 RND_FLAG_DEFAULT);
2334
2335 #ifdef WM_EVENT_COUNTERS
2336 /* Attach event counters. */
2337 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2338 NULL, xname, "txsstall");
2339 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2340 NULL, xname, "txdstall");
2341 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2342 NULL, xname, "txfifo_stall");
2343 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2344 NULL, xname, "txdw");
2345 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2346 NULL, xname, "txqe");
2347 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2348 NULL, xname, "rxintr");
2349 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2350 NULL, xname, "linkintr");
2351
2352 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2353 NULL, xname, "rxipsum");
2354 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2355 NULL, xname, "rxtusum");
2356 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2357 NULL, xname, "txipsum");
2358 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2359 NULL, xname, "txtusum");
2360 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2361 NULL, xname, "txtusum6");
2362
2363 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2364 NULL, xname, "txtso");
2365 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2366 NULL, xname, "txtso6");
2367 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2368 NULL, xname, "txtsopain");
2369
2370 for (i = 0; i < WM_NTXSEGS; i++) {
2371 snprintf(wm_txseg_evcnt_names[i],
2372 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2373 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2374 NULL, xname, wm_txseg_evcnt_names[i]);
2375 }
2376
2377 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2378 NULL, xname, "txdrop");
2379
2380 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2381 NULL, xname, "tu");
2382
2383 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2384 NULL, xname, "tx_xoff");
2385 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2386 NULL, xname, "tx_xon");
2387 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2388 NULL, xname, "rx_xoff");
2389 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2390 NULL, xname, "rx_xon");
2391 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2392 NULL, xname, "rx_macctl");
2393 #endif /* WM_EVENT_COUNTERS */
2394
2395 if (pmf_device_register(self, wm_suspend, wm_resume))
2396 pmf_class_network_register(self, ifp);
2397 else
2398 aprint_error_dev(self, "couldn't establish power handler\n");
2399
2400 sc->sc_flags |= WM_F_ATTACHED;
2401 return;
2402
2403 /*
2404 * Free any resources we've allocated during the failed attach
2405 * attempt. Do this in reverse order and fall through.
2406 */
2407 fail_5:
2408 for (i = 0; i < WM_NRXDESC; i++) {
2409 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2410 bus_dmamap_destroy(sc->sc_dmat,
2411 sc->sc_rxsoft[i].rxs_dmamap);
2412 }
2413 fail_4:
2414 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2415 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2416 bus_dmamap_destroy(sc->sc_dmat,
2417 sc->sc_txsoft[i].txs_dmamap);
2418 }
2419 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2420 fail_3:
2421 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2422 fail_2:
2423 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2424 sc->sc_cd_size);
2425 fail_1:
2426 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2427 fail_0:
2428 return;
2429 }
2430
2431 /* The detach function (ca_detach) */
2432 static int
2433 wm_detach(device_t self, int flags __unused)
2434 {
2435 struct wm_softc *sc = device_private(self);
2436 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2437 int i;
2438 #ifndef WM_MPSAFE
2439 int s;
2440 #endif
2441
2442 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2443 return 0;
2444
2445 #ifndef WM_MPSAFE
2446 s = splnet();
2447 #endif
2448 	/* Stop the interface.  Callouts are stopped inside wm_stop(). */
2449 wm_stop(ifp, 1);
2450
2451 #ifndef WM_MPSAFE
2452 splx(s);
2453 #endif
2454
2455 pmf_device_deregister(self);
2456
2457 /* Tell the firmware about the release */
2458 WM_BOTH_LOCK(sc);
2459 wm_release_manageability(sc);
2460 wm_release_hw_control(sc);
2461 WM_BOTH_UNLOCK(sc);
2462
2463 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2464
2465 /* Delete all remaining media. */
2466 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2467
2468 ether_ifdetach(ifp);
2469 if_detach(ifp);
2470 
2472 /* Unload RX dmamaps and free mbufs */
2473 WM_RX_LOCK(sc);
2474 wm_rxdrain(sc);
2475 WM_RX_UNLOCK(sc);
2476 /* Must unlock here */
2477
2478 	/* Free DMA maps.  This mirrors the cleanup at the end of wm_attach(). */
2479 for (i = 0; i < WM_NRXDESC; i++) {
2480 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2481 bus_dmamap_destroy(sc->sc_dmat,
2482 sc->sc_rxsoft[i].rxs_dmamap);
2483 }
2484 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2485 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2486 bus_dmamap_destroy(sc->sc_dmat,
2487 sc->sc_txsoft[i].txs_dmamap);
2488 }
2489 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2490 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2491 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2492 sc->sc_cd_size);
2493 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2494
2495 /* Disestablish the interrupt handler */
2496 if (sc->sc_ih != NULL) {
2497 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2498 sc->sc_ih = NULL;
2499 }
2500
2501 /* Unmap the registers */
2502 if (sc->sc_ss) {
2503 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2504 sc->sc_ss = 0;
2505 }
2506
2507 if (sc->sc_ios) {
2508 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2509 sc->sc_ios = 0;
2510 }
2511
2512 if (sc->sc_tx_lock)
2513 mutex_obj_free(sc->sc_tx_lock);
2514 if (sc->sc_rx_lock)
2515 mutex_obj_free(sc->sc_rx_lock);
2516
2517 return 0;
2518 }
2519
2520 static bool
2521 wm_suspend(device_t self, const pmf_qual_t *qual)
2522 {
2523 struct wm_softc *sc = device_private(self);
2524
2525 wm_release_manageability(sc);
2526 wm_release_hw_control(sc);
2527 #ifdef WM_WOL
2528 wm_enable_wakeup(sc);
2529 #endif
2530
2531 return true;
2532 }
2533
2534 static bool
2535 wm_resume(device_t self, const pmf_qual_t *qual)
2536 {
2537 struct wm_softc *sc = device_private(self);
2538
2539 wm_init_manageability(sc);
2540
2541 return true;
2542 }
2543
2544 /*
2545 * wm_watchdog: [ifnet interface function]
2546 *
2547 * Watchdog timer handler.
2548 */
2549 static void
2550 wm_watchdog(struct ifnet *ifp)
2551 {
2552 struct wm_softc *sc = ifp->if_softc;
2553
2554 /*
2555 * Since we're using delayed interrupts, sweep up
2556 * before we report an error.
2557 */
2558 WM_TX_LOCK(sc);
2559 wm_txintr(sc);
2560 WM_TX_UNLOCK(sc);
2561
2562 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2563 #ifdef WM_DEBUG
2564 int i, j;
2565 struct wm_txsoft *txs;
2566 #endif
2567 log(LOG_ERR,
2568 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2569 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2570 sc->sc_txnext);
2571 ifp->if_oerrors++;
2572 #ifdef WM_DEBUG
2573 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2574 i = WM_NEXTTXS(sc, i)) {
2575 txs = &sc->sc_txsoft[i];
2576 printf("txs %d tx %d -> %d\n",
2577 i, txs->txs_firstdesc, txs->txs_lastdesc);
2578 for (j = txs->txs_firstdesc; ;
2579 j = WM_NEXTTX(sc, j)) {
2580 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2581 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2582 printf("\t %#08x%08x\n",
2583 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2584 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2585 if (j == txs->txs_lastdesc)
2586 break;
2587 }
2588 }
2589 #endif
2590 /* Reset the interface. */
2591 (void) wm_init(ifp);
2592 }
2593
2594 /* Try to get more packets going. */
2595 ifp->if_start(ifp);
2596 }
2597
2598 /*
2599 * wm_tick:
2600 *
2601 * One second timer, used to check link status, sweep up
2602 * completed transmit jobs, etc.
2603 */
2604 static void
2605 wm_tick(void *arg)
2606 {
2607 struct wm_softc *sc = arg;
2608 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2609 #ifndef WM_MPSAFE
2610 int s;
2611
2612 s = splnet();
2613 #endif
2614
2615 WM_TX_LOCK(sc);
2616
2617 if (sc->sc_stopping)
2618 goto out;
2619
2620 if (sc->sc_type >= WM_T_82542_2_1) {
2621 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2622 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2623 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2624 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2625 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2626 }
2627
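	/*
	 * The statistics registers read here are assumed to clear on
	 * read, so each pass accumulates the delta since the last tick.
	 */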
2628 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2629 ifp->if_ierrors += 0ULL + /* ensure quad_t */
2630 + CSR_READ(sc, WMREG_CRCERRS)
2631 + CSR_READ(sc, WMREG_ALGNERRC)
2632 + CSR_READ(sc, WMREG_SYMERRC)
2633 + CSR_READ(sc, WMREG_RXERRC)
2634 + CSR_READ(sc, WMREG_SEC)
2635 + CSR_READ(sc, WMREG_CEXTERR)
2636 + CSR_READ(sc, WMREG_RLEC);
2637 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2638
2639 if (sc->sc_flags & WM_F_HAS_MII)
2640 mii_tick(&sc->sc_mii);
2641 else
2642 wm_tbi_check_link(sc);
2643
2644 out:
2645 WM_TX_UNLOCK(sc);
2646 #ifndef WM_MPSAFE
2647 splx(s);
2648 #endif
2649
2650 if (!sc->sc_stopping)
2651 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2652 }
2653
2654 static int
2655 wm_ifflags_cb(struct ethercom *ec)
2656 {
2657 struct ifnet *ifp = &ec->ec_if;
2658 struct wm_softc *sc = ifp->if_softc;
2659 int change = ifp->if_flags ^ sc->sc_if_flags;
2660 int rc = 0;
2661
2662 WM_BOTH_LOCK(sc);
2663
2664 if (change != 0)
2665 sc->sc_if_flags = ifp->if_flags;
2666
2667 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2668 rc = ENETRESET;
2669 goto out;
2670 }
2671
2672 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2673 wm_set_filter(sc);
2674
2675 wm_set_vlan(sc);
2676
2677 out:
2678 WM_BOTH_UNLOCK(sc);
2679
2680 return rc;
2681 }
2682
2683 /*
2684 * wm_ioctl: [ifnet interface function]
2685 *
2686 * Handle control requests from the operator.
2687 */
2688 static int
2689 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2690 {
2691 struct wm_softc *sc = ifp->if_softc;
2692 struct ifreq *ifr = (struct ifreq *) data;
2693 struct ifaddr *ifa = (struct ifaddr *)data;
2694 struct sockaddr_dl *sdl;
2695 int s, error;
2696
2697 #ifndef WM_MPSAFE
2698 s = splnet();
2699 #endif
2700 switch (cmd) {
2701 case SIOCSIFMEDIA:
2702 case SIOCGIFMEDIA:
2703 WM_BOTH_LOCK(sc);
2704 /* Flow control requires full-duplex mode. */
2705 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2706 (ifr->ifr_media & IFM_FDX) == 0)
2707 ifr->ifr_media &= ~IFM_ETH_FMASK;
2708 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2709 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2710 /* We can do both TXPAUSE and RXPAUSE. */
2711 ifr->ifr_media |=
2712 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2713 }
2714 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2715 }
2716 WM_BOTH_UNLOCK(sc);
2717 #ifdef WM_MPSAFE
2718 s = splnet();
2719 #endif
2720 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2721 #ifdef WM_MPSAFE
2722 splx(s);
2723 #endif
2724 break;
2725 case SIOCINITIFADDR:
2726 WM_BOTH_LOCK(sc);
2727 if (ifa->ifa_addr->sa_family == AF_LINK) {
2728 sdl = satosdl(ifp->if_dl->ifa_addr);
2729 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2730 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2731 /* unicast address is first multicast entry */
2732 wm_set_filter(sc);
2733 error = 0;
2734 WM_BOTH_UNLOCK(sc);
2735 break;
2736 }
2737 WM_BOTH_UNLOCK(sc);
2738 /*FALLTHROUGH*/
2739 default:
2740 #ifdef WM_MPSAFE
2741 s = splnet();
2742 #endif
2743 /* It may call wm_start, so unlock here */
2744 error = ether_ioctl(ifp, cmd, data);
2745 #ifdef WM_MPSAFE
2746 splx(s);
2747 #endif
2748 if (error != ENETRESET)
2749 break;
2750
2751 error = 0;
2752
2753 if (cmd == SIOCSIFCAP) {
2754 error = (*ifp->if_init)(ifp);
2755 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2756 ;
2757 else if (ifp->if_flags & IFF_RUNNING) {
2758 /*
2759 * Multicast list has changed; set the hardware filter
2760 * accordingly.
2761 */
2762 WM_BOTH_LOCK(sc);
2763 wm_set_filter(sc);
2764 WM_BOTH_UNLOCK(sc);
2765 }
2766 break;
2767 }
2768
2769 /* Try to get more packets going. */
2770 ifp->if_start(ifp);
2771
2772 #ifndef WM_MPSAFE
2773 splx(s);
2774 #endif
2775 return error;
2776 }
2777
2778 /* MAC address related */
2779
2780 /*
2781  * Get the offset of the MAC address and return it.
2782  * If an error occurs, use offset 0.
2783  */
2784 static uint16_t
2785 wm_check_alt_mac_addr(struct wm_softc *sc)
2786 {
2787 uint16_t myea[ETHER_ADDR_LEN / 2];
2788 uint16_t offset = NVM_OFF_MACADDR;
2789
2790 /* Try to read alternative MAC address pointer */
2791 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2792 return 0;
2793
2794 	/* Check whether the pointer is valid. */
2795 if ((offset == 0x0000) || (offset == 0xffff))
2796 return 0;
2797
2798 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
2799 	/*
2800 	 * Check whether the alternative MAC address is valid.  Some
2801 	 * cards have a non-0xffff pointer but don't actually use an
2802 	 * alternative MAC address.
2803 	 *
2804 	 * Check that the multicast (group) bit of the first octet is clear.
2805 	 */
2806 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2807 if (((myea[0] & 0xff) & 0x01) == 0)
2808 return offset; /* Found */
2809
2810 /* Not found */
2811 return 0;
2812 }
2813
2814 static int
2815 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2816 {
2817 uint16_t myea[ETHER_ADDR_LEN / 2];
2818 uint16_t offset = NVM_OFF_MACADDR;
2819 int do_invert = 0;
2820
2821 switch (sc->sc_type) {
2822 case WM_T_82580:
2823 case WM_T_I350:
2824 case WM_T_I354:
2825 /* EEPROM Top Level Partitioning */
2826 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
2827 break;
2828 case WM_T_82571:
2829 case WM_T_82575:
2830 case WM_T_82576:
2831 case WM_T_80003:
2832 case WM_T_I210:
2833 case WM_T_I211:
2834 offset = wm_check_alt_mac_addr(sc);
2835 if (offset == 0)
2836 if ((sc->sc_funcid & 0x01) == 1)
2837 do_invert = 1;
2838 break;
2839 default:
2840 if ((sc->sc_funcid & 0x01) == 1)
2841 do_invert = 1;
2842 break;
2843 }
2844
2845 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2846 myea) != 0)
2847 goto bad;
2848
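	/*
	 * Each 16-bit NVM word holds two MAC octets, low byte first;
	 * e.g. 00:11:22:33:44:55 is stored as 0x1100, 0x3322, 0x5544.
	 */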
2849 enaddr[0] = myea[0] & 0xff;
2850 enaddr[1] = myea[0] >> 8;
2851 enaddr[2] = myea[1] & 0xff;
2852 enaddr[3] = myea[1] >> 8;
2853 enaddr[4] = myea[2] & 0xff;
2854 enaddr[5] = myea[2] >> 8;
2855
2856 /*
2857 * Toggle the LSB of the MAC address on the second port
2858 * of some dual port cards.
2859 */
2860 if (do_invert != 0)
2861 enaddr[5] ^= 1;
2862
2863 return 0;
2864
2865 bad:
2866 return -1;
2867 }
2868
2869 /*
2870 * wm_set_ral:
2871 *
 2872  *	Set an entry in the receive address list.
2873 */
2874 static void
2875 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2876 {
2877 uint32_t ral_lo, ral_hi;
2878
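	/*
	 * Layout of one receive-address entry: RAL_LO carries octets
	 * 0-3 (octet 0 in the least significant byte) and RAL_HI
	 * carries octets 4-5 plus the Address Valid bit, so e.g.
	 * 00:11:22:33:44:55 becomes RAL_LO = 0x33221100 and
	 * RAL_HI = 0x5544 | RAL_AV.
	 */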
2879 if (enaddr != NULL) {
2880 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2881 (enaddr[3] << 24);
2882 ral_hi = enaddr[4] | (enaddr[5] << 8);
2883 ral_hi |= RAL_AV;
2884 } else {
2885 ral_lo = 0;
2886 ral_hi = 0;
2887 }
2888
2889 if (sc->sc_type >= WM_T_82544) {
2890 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2891 ral_lo);
2892 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2893 ral_hi);
2894 } else {
2895 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2896 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2897 }
2898 }
2899
2900 /*
2901 * wm_mchash:
2902 *
 2903  *	Compute the hash of the multicast address for the 4096-bit
 2904  *	multicast filter (1024-bit on the ICH/PCH variants).
2905 */
2906 static uint32_t
2907 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2908 {
2909 static const int lo_shift[4] = { 4, 3, 2, 0 };
2910 static const int hi_shift[4] = { 4, 5, 6, 8 };
2911 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2912 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
2913 uint32_t hash;
2914
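	/*
	 * Worked example for the default mchash_type 0:
	 *
	 *	hash = (enaddr[4] >> 4) | ((uint16_t)enaddr[5] << 4);
	 *
	 * i.e. the top 4 bits of octet 4 concatenated with octet 5,
	 * a 12-bit index into the 4096-bit filter; the ICH/PCH
	 * variant keeps only 10 bits for its smaller table.
	 */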
2915 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2916 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2917 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
2918 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
2919 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
2920 return (hash & 0x3ff);
2921 }
2922 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2923 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2924
2925 return (hash & 0xfff);
2926 }
2927
2928 /*
2929 * wm_set_filter:
2930 *
2931 * Set up the receive filter.
2932 */
2933 static void
2934 wm_set_filter(struct wm_softc *sc)
2935 {
2936 struct ethercom *ec = &sc->sc_ethercom;
2937 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2938 struct ether_multi *enm;
2939 struct ether_multistep step;
2940 bus_addr_t mta_reg;
2941 uint32_t hash, reg, bit;
2942 int i, size;
2943
2944 if (sc->sc_type >= WM_T_82544)
2945 mta_reg = WMREG_CORDOVA_MTA;
2946 else
2947 mta_reg = WMREG_MTA;
2948
2949 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2950
2951 if (ifp->if_flags & IFF_BROADCAST)
2952 sc->sc_rctl |= RCTL_BAM;
2953 if (ifp->if_flags & IFF_PROMISC) {
2954 sc->sc_rctl |= RCTL_UPE;
2955 goto allmulti;
2956 }
2957
2958 /*
2959 * Set the station address in the first RAL slot, and
2960 * clear the remaining slots.
2961 */
2962 if (sc->sc_type == WM_T_ICH8)
2963 		size = WM_RAL_TABSIZE_ICH8 - 1;
2964 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
2965 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
2966 || (sc->sc_type == WM_T_PCH_LPT))
2967 size = WM_RAL_TABSIZE_ICH8;
2968 else if (sc->sc_type == WM_T_82575)
2969 size = WM_RAL_TABSIZE_82575;
2970 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
2971 size = WM_RAL_TABSIZE_82576;
2972 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2973 size = WM_RAL_TABSIZE_I350;
2974 else
2975 size = WM_RAL_TABSIZE;
2976 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
2977 for (i = 1; i < size; i++)
2978 wm_set_ral(sc, NULL, i);
2979
2980 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2981 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2982 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
2983 size = WM_ICH8_MC_TABSIZE;
2984 else
2985 size = WM_MC_TABSIZE;
2986 /* Clear out the multicast table. */
2987 for (i = 0; i < size; i++)
2988 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2989
2990 ETHER_FIRST_MULTI(step, ec, enm);
2991 while (enm != NULL) {
2992 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2993 /*
2994 * We must listen to a range of multicast addresses.
2995 * For now, just accept all multicasts, rather than
2996 * trying to set only those filter bits needed to match
2997 * the range. (At this time, the only use of address
2998 * ranges is for IP multicast routing, for which the
2999 * range is big enough to require all bits set.)
3000 */
3001 goto allmulti;
3002 }
3003
3004 hash = wm_mchash(sc, enm->enm_addrlo);
3005
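		/*
		 * The hash picks one bit of the multicast table: the
		 * upper bits select one of the 32-bit MTA registers
		 * (128 of them, or 32 on ICH/PCH) and the low 5 bits
		 * select the bit within that register.
		 */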
3006 reg = (hash >> 5);
3007 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3008 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3009 || (sc->sc_type == WM_T_PCH2)
3010 || (sc->sc_type == WM_T_PCH_LPT))
3011 reg &= 0x1f;
3012 else
3013 reg &= 0x7f;
3014 bit = hash & 0x1f;
3015
3016 hash = CSR_READ(sc, mta_reg + (reg << 2));
3017 hash |= 1U << bit;
3018
3019 /* XXX Hardware bug?? */
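		/*
		 * (The apparent intent, though this reading is an
		 * assumption: on the 82544, writing one of these MTA
		 * offsets can clobber the previous register, hence the
		 * save-and-rewrite below.)
		 */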
3020 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3021 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3022 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3023 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3024 } else
3025 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3026
3027 ETHER_NEXT_MULTI(step, enm);
3028 }
3029
3030 ifp->if_flags &= ~IFF_ALLMULTI;
3031 goto setit;
3032
3033 allmulti:
3034 ifp->if_flags |= IFF_ALLMULTI;
3035 sc->sc_rctl |= RCTL_MPE;
3036
3037 setit:
3038 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3039 }
3040
3041 /* Reset and init related */
3042
3043 static void
3044 wm_set_vlan(struct wm_softc *sc)
3045 {
3046 /* Deal with VLAN enables. */
3047 if (VLAN_ATTACHED(&sc->sc_ethercom))
3048 sc->sc_ctrl |= CTRL_VME;
3049 else
3050 sc->sc_ctrl &= ~CTRL_VME;
3051
3052 /* Write the control registers. */
3053 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3054 }
3055
3056 static void
3057 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3058 {
3059 uint32_t gcr;
3060 pcireg_t ctrl2;
3061
3062 gcr = CSR_READ(sc, WMREG_GCR);
3063
3064 /* Only take action if timeout value is defaulted to 0 */
3065 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3066 goto out;
3067
3068 if ((gcr & GCR_CAP_VER2) == 0) {
3069 gcr |= GCR_CMPL_TMOUT_10MS;
3070 goto out;
3071 }
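	/*
	 * For PCIe capability version 2 parts the timeout is instead
	 * programmed through the standard Device Control 2 register,
	 * here to a 16ms completion timeout.
	 */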
3072
3073 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3074 sc->sc_pcixe_capoff + PCIE_DCSR2);
3075 ctrl2 |= WM_PCIE_DCSR2_16MS;
3076 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3077 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3078
3079 out:
3080 /* Disable completion timeout resend */
3081 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3082
3083 CSR_WRITE(sc, WMREG_GCR, gcr);
3084 }
3085
3086 void
3087 wm_get_auto_rd_done(struct wm_softc *sc)
3088 {
3089 int i;
3090
3091 /* wait for eeprom to reload */
3092 switch (sc->sc_type) {
3093 case WM_T_82571:
3094 case WM_T_82572:
3095 case WM_T_82573:
3096 case WM_T_82574:
3097 case WM_T_82583:
3098 case WM_T_82575:
3099 case WM_T_82576:
3100 case WM_T_82580:
3101 case WM_T_I350:
3102 case WM_T_I354:
3103 case WM_T_I210:
3104 case WM_T_I211:
3105 case WM_T_80003:
3106 case WM_T_ICH8:
3107 case WM_T_ICH9:
3108 for (i = 0; i < 10; i++) {
3109 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3110 break;
3111 delay(1000);
3112 }
3113 if (i == 10) {
3114 log(LOG_ERR, "%s: auto read from eeprom failed to "
3115 "complete\n", device_xname(sc->sc_dev));
3116 }
3117 break;
3118 default:
3119 break;
3120 }
3121 }
3122
3123 void
3124 wm_lan_init_done(struct wm_softc *sc)
3125 {
3126 uint32_t reg = 0;
3127 int i;
3128
3129 /* wait for eeprom to reload */
3130 switch (sc->sc_type) {
3131 case WM_T_ICH10:
3132 case WM_T_PCH:
3133 case WM_T_PCH2:
3134 case WM_T_PCH_LPT:
3135 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3136 reg = CSR_READ(sc, WMREG_STATUS);
3137 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3138 break;
3139 delay(100);
3140 }
3141 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3142 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3143 "complete\n", device_xname(sc->sc_dev), __func__);
3144 }
3145 break;
3146 default:
3147 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3148 __func__);
3149 break;
3150 }
3151
3152 reg &= ~STATUS_LAN_INIT_DONE;
3153 CSR_WRITE(sc, WMREG_STATUS, reg);
3154 }
3155
3156 void
3157 wm_get_cfg_done(struct wm_softc *sc)
3158 {
3159 int mask;
3160 uint32_t reg;
3161 int i;
3162
3163 /* wait for eeprom to reload */
3164 switch (sc->sc_type) {
3165 case WM_T_82542_2_0:
3166 case WM_T_82542_2_1:
3167 /* null */
3168 break;
3169 case WM_T_82543:
3170 case WM_T_82544:
3171 case WM_T_82540:
3172 case WM_T_82545:
3173 case WM_T_82545_3:
3174 case WM_T_82546:
3175 case WM_T_82546_3:
3176 case WM_T_82541:
3177 case WM_T_82541_2:
3178 case WM_T_82547:
3179 case WM_T_82547_2:
3180 case WM_T_82573:
3181 case WM_T_82574:
3182 case WM_T_82583:
3183 /* generic */
3184 delay(10*1000);
3185 break;
3186 case WM_T_80003:
3187 case WM_T_82571:
3188 case WM_T_82572:
3189 case WM_T_82575:
3190 case WM_T_82576:
3191 case WM_T_82580:
3192 case WM_T_I350:
3193 case WM_T_I354:
3194 case WM_T_I210:
3195 case WM_T_I211:
3196 if (sc->sc_type == WM_T_82571) {
3197 /* Only 82571 shares port 0 */
3198 mask = EEMNGCTL_CFGDONE_0;
3199 } else
3200 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3201 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3202 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3203 break;
3204 delay(1000);
3205 }
3206 if (i >= WM_PHY_CFG_TIMEOUT) {
3207 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3208 device_xname(sc->sc_dev), __func__));
3209 }
3210 break;
3211 case WM_T_ICH8:
3212 case WM_T_ICH9:
3213 case WM_T_ICH10:
3214 case WM_T_PCH:
3215 case WM_T_PCH2:
3216 case WM_T_PCH_LPT:
3217 delay(10*1000);
3218 if (sc->sc_type >= WM_T_ICH10)
3219 wm_lan_init_done(sc);
3220 else
3221 wm_get_auto_rd_done(sc);
3222
3223 reg = CSR_READ(sc, WMREG_STATUS);
3224 if ((reg & STATUS_PHYRA) != 0)
3225 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3226 break;
3227 default:
3228 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3229 __func__);
3230 break;
3231 }
3232 }
3233
3234 /* Init hardware bits */
3235 void
3236 wm_initialize_hardware_bits(struct wm_softc *sc)
3237 {
3238 uint32_t tarc0, tarc1, reg;
3239
3240 /* For 82571 variant, 80003 and ICHs */
3241 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3242 || (sc->sc_type >= WM_T_80003)) {
3243
3244 /* Transmit Descriptor Control 0 */
3245 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3246 reg |= TXDCTL_COUNT_DESC;
3247 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3248
3249 /* Transmit Descriptor Control 1 */
3250 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3251 reg |= TXDCTL_COUNT_DESC;
3252 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3253
3254 /* TARC0 */
3255 tarc0 = CSR_READ(sc, WMREG_TARC0);
3256 switch (sc->sc_type) {
3257 case WM_T_82571:
3258 case WM_T_82572:
3259 case WM_T_82573:
3260 case WM_T_82574:
3261 case WM_T_82583:
3262 case WM_T_80003:
3263 /* Clear bits 30..27 */
3264 tarc0 &= ~__BITS(30, 27);
3265 break;
3266 default:
3267 break;
3268 }
3269
3270 switch (sc->sc_type) {
3271 case WM_T_82571:
3272 case WM_T_82572:
3273 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3274
3275 tarc1 = CSR_READ(sc, WMREG_TARC1);
3276 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3277 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3278 /* 8257[12] Errata No.7 */
3279 tarc1 |= __BIT(22); /* TARC1 bits 22 */
3280
3281 /* TARC1 bit 28 */
3282 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3283 tarc1 &= ~__BIT(28);
3284 else
3285 tarc1 |= __BIT(28);
3286 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3287
3288 /*
3289 * 8257[12] Errata No.13
3290 			 * Disable Dynamic Clock Gating.
3291 */
3292 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3293 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3294 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3295 break;
3296 case WM_T_82573:
3297 case WM_T_82574:
3298 case WM_T_82583:
3299 if ((sc->sc_type == WM_T_82574)
3300 || (sc->sc_type == WM_T_82583))
3301 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3302
3303 /* Extended Device Control */
3304 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3305 reg &= ~__BIT(23); /* Clear bit 23 */
3306 reg |= __BIT(22); /* Set bit 22 */
3307 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3308
3309 /* Device Control */
3310 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3311 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3312
3313 /* PCIe Control Register */
3314 if ((sc->sc_type == WM_T_82574)
3315 || (sc->sc_type == WM_T_82583)) {
3316 /*
3317 * Document says this bit must be set for
3318 * proper operation.
3319 */
3320 reg = CSR_READ(sc, WMREG_GCR);
3321 reg |= __BIT(22);
3322 CSR_WRITE(sc, WMREG_GCR, reg);
3323
3324 				/*
3325 				 * Apply a workaround for a hardware erratum
3326 				 * documented in the errata docs.  It fixes an
3327 				 * issue where some error-prone or unreliable
3328 				 * PCIe completions occur, particularly with
3329 				 * ASPM enabled.  Without the fix, the issue
3330 				 * can cause Tx timeouts.
3331 				 */
3332 reg = CSR_READ(sc, WMREG_GCR2);
3333 reg |= __BIT(0);
3334 CSR_WRITE(sc, WMREG_GCR2, reg);
3335 }
3336 break;
3337 case WM_T_80003:
3338 /* TARC0 */
3339 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3340 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3341 tarc0 &= ~__BIT(20); /* Clear bits 20 */
3342
3343 /* TARC1 bit 28 */
3344 tarc1 = CSR_READ(sc, WMREG_TARC1);
3345 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3346 tarc1 &= ~__BIT(28);
3347 else
3348 tarc1 |= __BIT(28);
3349 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3350 break;
3351 case WM_T_ICH8:
3352 case WM_T_ICH9:
3353 case WM_T_ICH10:
3354 case WM_T_PCH:
3355 case WM_T_PCH2:
3356 case WM_T_PCH_LPT:
3357 /* TARC 0 */
3358 if (sc->sc_type == WM_T_ICH8) {
3359 /* Set TARC0 bits 29 and 28 */
3360 tarc0 |= __BITS(29, 28);
3361 }
3362 /* Set TARC0 bits 23,24,26,27 */
3363 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3364
3365 /* CTRL_EXT */
3366 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3367 reg |= __BIT(22); /* Set bit 22 */
3368 /*
3369 * Enable PHY low-power state when MAC is at D3
3370 * w/o WoL
3371 */
3372 if (sc->sc_type >= WM_T_PCH)
3373 reg |= CTRL_EXT_PHYPDEN;
3374 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3375
3376 /* TARC1 */
3377 tarc1 = CSR_READ(sc, WMREG_TARC1);
3378 /* bit 28 */
3379 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3380 tarc1 &= ~__BIT(28);
3381 else
3382 tarc1 |= __BIT(28);
3383 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3384 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3385
3386 /* Device Status */
3387 if (sc->sc_type == WM_T_ICH8) {
3388 reg = CSR_READ(sc, WMREG_STATUS);
3389 reg &= ~__BIT(31);
3390 CSR_WRITE(sc, WMREG_STATUS, reg);
3392 			}
3393
3394 /*
3395 * Work-around descriptor data corruption issue during
3396 * NFS v2 UDP traffic, just disable the NFS filtering
3397 * capability.
3398 */
3399 reg = CSR_READ(sc, WMREG_RFCTL);
3400 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3401 CSR_WRITE(sc, WMREG_RFCTL, reg);
3402 break;
3403 default:
3404 break;
3405 }
3406 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3407
3408 /*
3409 * 8257[12] Errata No.52 and some others.
3410 * Avoid RSS Hash Value bug.
3411 */
3412 switch (sc->sc_type) {
3413 case WM_T_82571:
3414 case WM_T_82572:
3415 case WM_T_82573:
3416 case WM_T_80003:
3417 case WM_T_ICH8:
3418 reg = CSR_READ(sc, WMREG_RFCTL);
3419 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
3420 CSR_WRITE(sc, WMREG_RFCTL, reg);
3421 break;
3422 default:
3423 break;
3424 }
3425 }
3426 }
3427
3428 /*
3429 * wm_reset:
3430 *
 3431  *	Reset the chip to a known state.
3432 */
3433 static void
3434 wm_reset(struct wm_softc *sc)
3435 {
3436 int phy_reset = 0;
3437 int error = 0;
3438 uint32_t reg, mask;
3439
3440 /*
3441 * Allocate on-chip memory according to the MTU size.
3442 * The Packet Buffer Allocation register must be written
3443 * before the chip is reset.
3444 */
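	/*
	 * The PBA value is the receive share of the on-chip packet
	 * buffer (in KB units, which is an assumption here); the rest
	 * is left for transmit.  E.g. on the 82547 below, PBA_30K out
	 * of a 40K total leaves a 10KB Tx FIFO, which is what the
	 * sc_txfifo_size computation expresses.
	 */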
3445 switch (sc->sc_type) {
3446 case WM_T_82547:
3447 case WM_T_82547_2:
3448 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3449 PBA_22K : PBA_30K;
3450 sc->sc_txfifo_head = 0;
3451 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3452 sc->sc_txfifo_size =
3453 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3454 sc->sc_txfifo_stall = 0;
3455 break;
3456 case WM_T_82571:
3457 case WM_T_82572:
3458 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
3459 case WM_T_I350:
3460 case WM_T_I354:
3461 case WM_T_80003:
3462 sc->sc_pba = PBA_32K;
3463 break;
3464 case WM_T_82580:
3465 sc->sc_pba = PBA_35K;
3466 break;
3467 case WM_T_I210:
3468 case WM_T_I211:
3469 sc->sc_pba = PBA_34K;
3470 break;
3471 case WM_T_82576:
3472 sc->sc_pba = PBA_64K;
3473 break;
3474 case WM_T_82573:
3475 sc->sc_pba = PBA_12K;
3476 break;
3477 case WM_T_82574:
3478 case WM_T_82583:
3479 sc->sc_pba = PBA_20K;
3480 break;
3481 case WM_T_ICH8:
3482 /* Workaround for a bit corruption issue in FIFO memory */
3483 sc->sc_pba = PBA_8K;
3484 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3485 break;
3486 case WM_T_ICH9:
3487 case WM_T_ICH10:
3488 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3489 PBA_14K : PBA_10K;
3490 break;
3491 case WM_T_PCH:
3492 case WM_T_PCH2:
3493 case WM_T_PCH_LPT:
3494 sc->sc_pba = PBA_26K;
3495 break;
3496 default:
3497 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3498 PBA_40K : PBA_48K;
3499 break;
3500 }
3501 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3502
3503 /* Prevent the PCI-E bus from sticking */
3504 if (sc->sc_flags & WM_F_PCIE) {
3505 int timeout = 800;
3506
3507 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3508 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3509
3510 while (timeout--) {
3511 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3512 == 0)
3513 break;
3514 delay(100);
3515 }
3516 }
3517
3518 /* Set the completion timeout for interface */
3519 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3520 || (sc->sc_type == WM_T_82580)
3521 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3522 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3523 wm_set_pcie_completion_timeout(sc);
3524
3525 /* Clear interrupt */
3526 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3527
3528 /* Stop the transmit and receive processes. */
3529 CSR_WRITE(sc, WMREG_RCTL, 0);
3530 sc->sc_rctl &= ~RCTL_EN;
3531 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3532 CSR_WRITE_FLUSH(sc);
3533
3534 /* XXX set_tbi_sbp_82543() */
3535
3536 delay(10*1000);
3537
3538 /* Must acquire the MDIO ownership before MAC reset */
3539 switch (sc->sc_type) {
3540 case WM_T_82573:
3541 case WM_T_82574:
3542 case WM_T_82583:
3543 error = wm_get_hw_semaphore_82573(sc);
3544 break;
3545 default:
3546 break;
3547 }
3548
3549 /*
3550 * 82541 Errata 29? & 82547 Errata 28?
3551 * See also the description about PHY_RST bit in CTRL register
3552 * in 8254x_GBe_SDM.pdf.
3553 */
3554 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3555 CSR_WRITE(sc, WMREG_CTRL,
3556 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3557 CSR_WRITE_FLUSH(sc);
3558 delay(5000);
3559 }
3560
3561 switch (sc->sc_type) {
3562 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3563 case WM_T_82541:
3564 case WM_T_82541_2:
3565 case WM_T_82547:
3566 case WM_T_82547_2:
3567 /*
3568 * On some chipsets, a reset through a memory-mapped write
3569 * cycle can cause the chip to reset before completing the
3570 		 * write cycle.  This causes a major headache that can be
3571 * avoided by issuing the reset via indirect register writes
3572 * through I/O space.
3573 *
3574 * So, if we successfully mapped the I/O BAR at attach time,
3575 * use that. Otherwise, try our luck with a memory-mapped
3576 * reset.
3577 */
3578 if (sc->sc_flags & WM_F_IOH_VALID)
3579 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3580 else
3581 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3582 break;
3583 case WM_T_82545_3:
3584 case WM_T_82546_3:
3585 /* Use the shadow control register on these chips. */
3586 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3587 break;
3588 case WM_T_80003:
3589 mask = swfwphysem[sc->sc_funcid];
3590 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3591 wm_get_swfw_semaphore(sc, mask);
3592 CSR_WRITE(sc, WMREG_CTRL, reg);
3593 wm_put_swfw_semaphore(sc, mask);
3594 break;
3595 case WM_T_ICH8:
3596 case WM_T_ICH9:
3597 case WM_T_ICH10:
3598 case WM_T_PCH:
3599 case WM_T_PCH2:
3600 case WM_T_PCH_LPT:
3601 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3602 if (wm_check_reset_block(sc) == 0) {
3603 /*
3604 * Gate automatic PHY configuration by hardware on
3605 * non-managed 82579
3606 */
3607 if ((sc->sc_type == WM_T_PCH2)
3608 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3609 != 0))
3610 wm_gate_hw_phy_config_ich8lan(sc, 1);
3611 
3613 reg |= CTRL_PHY_RESET;
3614 phy_reset = 1;
3615 }
3616 wm_get_swfwhw_semaphore(sc);
3617 CSR_WRITE(sc, WMREG_CTRL, reg);
3618 		/* Don't insert a completion barrier when resetting */
3619 delay(20*1000);
3620 wm_put_swfwhw_semaphore(sc);
3621 break;
3622 case WM_T_82580:
3623 case WM_T_I350:
3624 case WM_T_I354:
3625 case WM_T_I210:
3626 case WM_T_I211:
3627 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3628 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3629 CSR_WRITE_FLUSH(sc);
3630 delay(5000);
3631 break;
3632 case WM_T_82542_2_0:
3633 case WM_T_82542_2_1:
3634 case WM_T_82543:
3635 case WM_T_82540:
3636 case WM_T_82545:
3637 case WM_T_82546:
3638 case WM_T_82571:
3639 case WM_T_82572:
3640 case WM_T_82573:
3641 case WM_T_82574:
3642 case WM_T_82575:
3643 case WM_T_82576:
3644 case WM_T_82583:
3645 default:
3646 /* Everything else can safely use the documented method. */
3647 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3648 break;
3649 }
3650
3651 /* Must release the MDIO ownership after MAC reset */
3652 switch (sc->sc_type) {
3653 case WM_T_82573:
3654 case WM_T_82574:
3655 case WM_T_82583:
3656 if (error == 0)
3657 wm_put_hw_semaphore_82573(sc);
3658 break;
3659 default:
3660 break;
3661 }
3662
3663 if (phy_reset != 0)
3664 wm_get_cfg_done(sc);
3665
3666 /* reload EEPROM */
3667 switch (sc->sc_type) {
3668 case WM_T_82542_2_0:
3669 case WM_T_82542_2_1:
3670 case WM_T_82543:
3671 case WM_T_82544:
3672 delay(10);
3673 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3674 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3675 CSR_WRITE_FLUSH(sc);
3676 delay(2000);
3677 break;
3678 case WM_T_82540:
3679 case WM_T_82545:
3680 case WM_T_82545_3:
3681 case WM_T_82546:
3682 case WM_T_82546_3:
3683 delay(5*1000);
3684 /* XXX Disable HW ARPs on ASF enabled adapters */
3685 break;
3686 case WM_T_82541:
3687 case WM_T_82541_2:
3688 case WM_T_82547:
3689 case WM_T_82547_2:
3690 delay(20000);
3691 /* XXX Disable HW ARPs on ASF enabled adapters */
3692 break;
3693 case WM_T_82571:
3694 case WM_T_82572:
3695 case WM_T_82573:
3696 case WM_T_82574:
3697 case WM_T_82583:
3698 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3699 delay(10);
3700 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3701 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3702 CSR_WRITE_FLUSH(sc);
3703 }
3704 /* check EECD_EE_AUTORD */
3705 wm_get_auto_rd_done(sc);
3706 /*
3707 		 * PHY configuration from the NVM starts only after
3708 		 * EECD_AUTO_RD is set.
3709 */
3710 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3711 || (sc->sc_type == WM_T_82583))
3712 delay(25*1000);
3713 break;
3714 case WM_T_82575:
3715 case WM_T_82576:
3716 case WM_T_82580:
3717 case WM_T_I350:
3718 case WM_T_I354:
3719 case WM_T_I210:
3720 case WM_T_I211:
3721 case WM_T_80003:
3722 /* check EECD_EE_AUTORD */
3723 wm_get_auto_rd_done(sc);
3724 break;
3725 case WM_T_ICH8:
3726 case WM_T_ICH9:
3727 case WM_T_ICH10:
3728 case WM_T_PCH:
3729 case WM_T_PCH2:
3730 case WM_T_PCH_LPT:
3731 break;
3732 default:
3733 panic("%s: unknown type\n", __func__);
3734 }
3735
3736 	/* Check whether the EEPROM is present */
3737 switch (sc->sc_type) {
3738 case WM_T_82575:
3739 case WM_T_82576:
3740 #if 0 /* XXX */
3741 case WM_T_82580:
3742 #endif
3743 case WM_T_I350:
3744 case WM_T_I354:
3745 case WM_T_ICH8:
3746 case WM_T_ICH9:
3747 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3748 /* Not found */
3749 sc->sc_flags |= WM_F_EEPROM_INVALID;
3750 if ((sc->sc_type == WM_T_82575)
3751 || (sc->sc_type == WM_T_82576)
3752 || (sc->sc_type == WM_T_82580)
3753 || (sc->sc_type == WM_T_I350)
3754 || (sc->sc_type == WM_T_I354))
3755 wm_reset_init_script_82575(sc);
3756 }
3757 break;
3758 default:
3759 break;
3760 }
3761
3762 if ((sc->sc_type == WM_T_82580)
3763 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3764 /* clear global device reset status bit */
3765 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3766 }
3767
3768 /* Clear any pending interrupt events. */
3769 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3770 reg = CSR_READ(sc, WMREG_ICR);
3771
3772 /* reload sc_ctrl */
3773 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3774
3775 if (sc->sc_type == WM_T_I350)
3776 wm_set_eee_i350(sc);
3777
3778 /* dummy read from WUC */
3779 if (sc->sc_type == WM_T_PCH)
3780 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3781 /*
3782 * For PCH, this write will make sure that any noise will be detected
3783 * as a CRC error and be dropped rather than show up as a bad packet
3784 * to the DMA engine
3785 */
3786 if (sc->sc_type == WM_T_PCH)
3787 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3788
3789 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3790 CSR_WRITE(sc, WMREG_WUC, 0);
3791
3792 /* XXX need special handling for 82580 */
3793 }
3794
3795 /*
3796 * wm_add_rxbuf:
3797 *
3798  *	Add a receive buffer to the indicated descriptor.
3799 */
3800 static int
3801 wm_add_rxbuf(struct wm_softc *sc, int idx)
3802 {
3803 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3804 struct mbuf *m;
3805 int error;
3806
3807 KASSERT(WM_RX_LOCKED(sc));
3808
3809 MGETHDR(m, M_DONTWAIT, MT_DATA);
3810 if (m == NULL)
3811 return ENOBUFS;
3812
3813 MCLGET(m, M_DONTWAIT);
3814 if ((m->m_flags & M_EXT) == 0) {
3815 m_freem(m);
3816 return ENOBUFS;
3817 }
3818
3819 if (rxs->rxs_mbuf != NULL)
3820 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3821
3822 rxs->rxs_mbuf = m;
3823
3824 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3825 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3826 BUS_DMA_READ|BUS_DMA_NOWAIT);
3827 if (error) {
3828 /* XXX XXX XXX */
3829 aprint_error_dev(sc->sc_dev,
3830 "unable to load rx DMA map %d, error = %d\n",
3831 idx, error);
3832 panic("wm_add_rxbuf");
3833 }
3834
3835 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3836 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3837
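	/*
	 * On NEWQUEUE (82575 and later) chips the descriptor must not be
	 * handed to the hardware before RCTL.EN is set, so the hand-off
	 * below is deferred until the receiver has been enabled; see the
	 * matching note in wm_init_locked().
	 */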
3838 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3839 if ((sc->sc_rctl & RCTL_EN) != 0)
3840 WM_INIT_RXDESC(sc, idx);
3841 } else
3842 WM_INIT_RXDESC(sc, idx);
3843
3844 return 0;
3845 }
3846
3847 /*
3848 * wm_rxdrain:
3849 *
3850 * Drain the receive queue.
3851 */
3852 static void
3853 wm_rxdrain(struct wm_softc *sc)
3854 {
3855 struct wm_rxsoft *rxs;
3856 int i;
3857
3858 KASSERT(WM_RX_LOCKED(sc));
3859
3860 for (i = 0; i < WM_NRXDESC; i++) {
3861 rxs = &sc->sc_rxsoft[i];
3862 if (rxs->rxs_mbuf != NULL) {
3863 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3864 m_freem(rxs->rxs_mbuf);
3865 rxs->rxs_mbuf = NULL;
3866 }
3867 }
3868 }
3869
3870 /*
3871 * wm_init: [ifnet interface function]
3872 *
3873 * Initialize the interface.
3874 */
3875 static int
3876 wm_init(struct ifnet *ifp)
3877 {
3878 struct wm_softc *sc = ifp->if_softc;
3879 int ret;
3880
3881 WM_BOTH_LOCK(sc);
3882 ret = wm_init_locked(ifp);
3883 WM_BOTH_UNLOCK(sc);
3884
3885 return ret;
3886 }
3887
3888 static int
3889 wm_init_locked(struct ifnet *ifp)
3890 {
3891 struct wm_softc *sc = ifp->if_softc;
3892 struct wm_rxsoft *rxs;
3893 int i, j, trynum, error = 0;
3894 uint32_t reg;
3895
3896 KASSERT(WM_BOTH_LOCKED(sc));
3897 /*
3898 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3899 	 * There is a small but measurable benefit to avoiding the adjustment
3900 * of the descriptor so that the headers are aligned, for normal mtu,
3901 * on such platforms. One possibility is that the DMA itself is
3902 * slightly more efficient if the front of the entire packet (instead
3903 * of the front of the headers) is aligned.
3904 *
3905 * Note we must always set align_tweak to 0 if we are using
3906 * jumbo frames.
3907 */
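	/*
	 * Example: with a 2-byte tweak the 14-byte Ethernet header ends
	 * at offset 16, leaving the IP header that follows 4-byte
	 * aligned on hosts with strict alignment requirements.
	 */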
3908 #ifdef __NO_STRICT_ALIGNMENT
3909 sc->sc_align_tweak = 0;
3910 #else
3911 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3912 sc->sc_align_tweak = 0;
3913 else
3914 sc->sc_align_tweak = 2;
3915 #endif /* __NO_STRICT_ALIGNMENT */
3916
3917 /* Cancel any pending I/O. */
3918 wm_stop_locked(ifp, 0);
3919
3920 /* update statistics before reset */
3921 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3922 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3923
3924 /* Reset the chip to a known state. */
3925 wm_reset(sc);
3926
3927 switch (sc->sc_type) {
3928 case WM_T_82571:
3929 case WM_T_82572:
3930 case WM_T_82573:
3931 case WM_T_82574:
3932 case WM_T_82583:
3933 case WM_T_80003:
3934 case WM_T_ICH8:
3935 case WM_T_ICH9:
3936 case WM_T_ICH10:
3937 case WM_T_PCH:
3938 case WM_T_PCH2:
3939 case WM_T_PCH_LPT:
3940 if (wm_check_mng_mode(sc) != 0)
3941 wm_get_hw_control(sc);
3942 break;
3943 default:
3944 break;
3945 }
3946
3947 /* Init hardware bits */
3948 wm_initialize_hardware_bits(sc);
3949
3950 /* Reset the PHY. */
3951 if (sc->sc_flags & WM_F_HAS_MII)
3952 wm_gmii_reset(sc);
3953
3954 /* Calculate (E)ITR value */
3955 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3956 sc->sc_itr = 450; /* For EITR */
3957 } else if (sc->sc_type >= WM_T_82543) {
3958 /*
3959 * Set up the interrupt throttling register (units of 256ns)
3960 * Note that a footnote in Intel's documentation says this
3961 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3962 		 * or 10Mbit mode.  Empirically, this also appears to be
3963 		 * true for the 1024ns units of the other interrupt-related
3964 		 * timer registers -- so, really, we ought
3965 * to divide this value by 4 when the link speed is low.
3966 *
3967 * XXX implement this division at link speed change!
3968 */
3969
3970 /*
3971 * For N interrupts/sec, set this value to:
3972 * 1000000000 / (N * 256). Note that we set the
3973 * absolute and packet timer values to this value
3974 * divided by 4 to get "simple timer" behavior.
3975 */
3976
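		/*
		 * Worked example: sc_itr = 1500 yields
		 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec, and
		 * the absolute and packet timers are written as
		 * sc_itr / 4 = 375 further below.
		 */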
3977 sc->sc_itr = 1500; /* 2604 ints/sec */
3978 }
3979
3980 /* Initialize the transmit descriptor ring. */
3981 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3982 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3983 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3984 sc->sc_txfree = WM_NTXDESC(sc);
3985 sc->sc_txnext = 0;
3986
3987 if (sc->sc_type < WM_T_82543) {
3988 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
3989 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
3990 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3991 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3992 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3993 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3994 } else {
3995 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
3996 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
3997 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3998 CSR_WRITE(sc, WMREG_TDH, 0);
3999
4000 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4001 /*
4002 * Don't write TDT before TCTL.EN is set.
4003 			 * See the documentation.
4004 */
4005 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_QUEUE_ENABLE
4006 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4007 | TXDCTL_WTHRESH(0));
4008 else {
4009 /* ITR / 4 */
4010 CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
4011 if (sc->sc_type >= WM_T_82540) {
4012 /* should be same */
4013 CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
4014 }
4015
4016 CSR_WRITE(sc, WMREG_TDT, 0);
4017 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) |
4018 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4019 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4020 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4021 }
4022 }
4023
4024 /* Initialize the transmit job descriptors. */
4025 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4026 sc->sc_txsoft[i].txs_mbuf = NULL;
4027 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4028 sc->sc_txsnext = 0;
4029 sc->sc_txsdirty = 0;
4030
4031 /*
4032 * Initialize the receive descriptor and receive job
4033 * descriptor rings.
4034 */
4035 if (sc->sc_type < WM_T_82543) {
4036 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4037 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4038 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4039 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4040 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4041 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4042
4043 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4044 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4045 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4046 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4047 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4048 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4049 } else {
4050 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4051 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4052 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4053
4054 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4055 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4056 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
4057 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4058 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4059 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4060 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4061 | RXDCTL_WTHRESH(1));
4062 } else {
4063 CSR_WRITE(sc, WMREG_RDH, 0);
4064 CSR_WRITE(sc, WMREG_RDT, 0);
4065 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4066 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
4067 }
4068 }
4069 for (i = 0; i < WM_NRXDESC; i++) {
4070 rxs = &sc->sc_rxsoft[i];
4071 if (rxs->rxs_mbuf == NULL) {
4072 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4073 log(LOG_ERR, "%s: unable to allocate or map "
4074 "rx buffer %d, error = %d\n",
4075 device_xname(sc->sc_dev), i, error);
4076 /*
4077 * XXX Should attempt to run with fewer receive
4078 * XXX buffers instead of just failing.
4079 */
4080 wm_rxdrain(sc);
4081 goto out;
4082 }
4083 } else {
4084 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4085 WM_INIT_RXDESC(sc, i);
4086 /*
4087 			 * For 82575 and newer devices, the RX descriptors
4088 * must be initialized after the setting of RCTL.EN in
4089 * wm_set_filter()
4090 */
4091 }
4092 }
4093 sc->sc_rxptr = 0;
4094 sc->sc_rxdiscard = 0;
4095 WM_RXCHAIN_RESET(sc);
4096
4097 /*
4098 * Clear out the VLAN table -- we don't use it (yet).
4099 */
4100 CSR_WRITE(sc, WMREG_VET, 0);
4101 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4102 trynum = 10; /* Due to hw errata */
4103 else
4104 trynum = 1;
4105 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4106 for (j = 0; j < trynum; j++)
4107 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4108
4109 /*
4110 * Set up flow-control parameters.
4111 *
4112 * XXX Values could probably stand some tuning.
4113 */
4114 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4115 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4116 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4117 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4118 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4119 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4120 }
4121
4122 sc->sc_fcrtl = FCRTL_DFLT;
4123 if (sc->sc_type < WM_T_82543) {
4124 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4125 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4126 } else {
4127 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4128 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4129 }
4130
4131 if (sc->sc_type == WM_T_80003)
4132 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4133 else
4134 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4135
4136 /* Writes the control register. */
4137 wm_set_vlan(sc);
4138
4139 if (sc->sc_flags & WM_F_HAS_MII) {
4140 int val;
4141
4142 switch (sc->sc_type) {
4143 case WM_T_80003:
4144 case WM_T_ICH8:
4145 case WM_T_ICH9:
4146 case WM_T_ICH10:
4147 case WM_T_PCH:
4148 case WM_T_PCH2:
4149 case WM_T_PCH_LPT:
4150 /*
4151 			 * Set the MAC to wait the maximum time between each
4152 			 * iteration and increase the max iterations when
4153 			 * polling the PHY; this fixes erroneous timeouts at
4154 * 10Mbps.
4155 */
4156 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4157 0xFFFF);
4158 val = wm_kmrn_readreg(sc,
4159 KUMCTRLSTA_OFFSET_INB_PARAM);
4160 val |= 0x3F;
4161 wm_kmrn_writereg(sc,
4162 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4163 break;
4164 default:
4165 break;
4166 }
4167
4168 if (sc->sc_type == WM_T_80003) {
4169 val = CSR_READ(sc, WMREG_CTRL_EXT);
4170 val &= ~CTRL_EXT_LINK_MODE_MASK;
4171 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4172
4173 			/* Bypass RX and TX FIFOs */
4174 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4175 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4176 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4177 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4178 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4179 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4180 }
4181 }
4182 #if 0
4183 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4184 #endif
4185
4186 /* Set up checksum offload parameters. */
4187 reg = CSR_READ(sc, WMREG_RXCSUM);
4188 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4189 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4190 reg |= RXCSUM_IPOFL;
4191 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4192 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4193 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4194 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4195 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4196
4197 /* Set up the interrupt registers. */
4198 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4199 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4200 ICR_RXO | ICR_RXT0;
4201 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4202
4203 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4204 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4205 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4206 reg = CSR_READ(sc, WMREG_KABGTXD);
4207 reg |= KABGTXD_BGSQLBIAS;
4208 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4209 }
4210
4211 /* Set up the inter-packet gap. */
4212 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4213
4214 if (sc->sc_type >= WM_T_82543) {
4215 /*
4216 		 * XXX 82574 has both ITR and EITR.  Set EITR when we use
4217 		 * the multi-queue function with MSI-X.
4218 */
4219 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4220 CSR_WRITE(sc, WMREG_EITR(0), sc->sc_itr);
4221 else
4222 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4223 }
4224
4225 /* Set the VLAN ethernetype. */
4226 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4227
4228 /*
4229 * Set up the transmit control register; we start out with
4230 	 * a collision distance suitable for FDX, but update it when
4231 * we resolve the media type.
4232 */
4233 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4234 | TCTL_CT(TX_COLLISION_THRESHOLD)
4235 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4236 if (sc->sc_type >= WM_T_82571)
4237 sc->sc_tctl |= TCTL_MULR;
4238 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4239
4240 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4241 /* Write TDT after TCTL.EN is set. See the document. */
4242 CSR_WRITE(sc, WMREG_TDT, 0);
4243 }
4244
4245 if (sc->sc_type == WM_T_80003) {
4246 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4247 reg &= ~TCTL_EXT_GCEX_MASK;
4248 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4249 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4250 }
4251
4252 /* Set the media. */
4253 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4254 goto out;
4255
4256 /* Configure for OS presence */
4257 wm_init_manageability(sc);
4258
4259 /*
4260 * Set up the receive control register; we actually program
4261 * the register when we set the receive filter. Use multicast
4262 * address offset type 0.
4263 *
4264 * Only the i82544 has the ability to strip the incoming
4265 * CRC, so we don't enable that feature.
4266 */
4267 sc->sc_mchash_type = 0;
4268 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4269 | RCTL_MO(sc->sc_mchash_type);
4270
4271 /*
4272 * The I350 has a bug where it always strips the CRC whether
4273 	 * asked to or not.  So ask for stripped CRC here and cope in rxeof.
4274 */
4275 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4276 || (sc->sc_type == WM_T_I210))
4277 sc->sc_rctl |= RCTL_SECRC;
4278
4279 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4280 && (ifp->if_mtu > ETHERMTU)) {
4281 sc->sc_rctl |= RCTL_LPE;
4282 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4283 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4284 }
4285
4286 if (MCLBYTES == 2048) {
4287 sc->sc_rctl |= RCTL_2k;
4288 } else {
4289 if (sc->sc_type >= WM_T_82543) {
4290 switch (MCLBYTES) {
4291 case 4096:
4292 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4293 break;
4294 case 8192:
4295 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4296 break;
4297 case 16384:
4298 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4299 break;
4300 default:
4301 panic("wm_init: MCLBYTES %d unsupported",
4302 MCLBYTES);
4303 break;
4304 }
4305 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4306 }
4307
4308 /* Set the receive filter. */
4309 wm_set_filter(sc);
4310
4311 /* Enable ECC */
4312 switch (sc->sc_type) {
4313 case WM_T_82571:
4314 reg = CSR_READ(sc, WMREG_PBA_ECC);
4315 reg |= PBA_ECC_CORR_EN;
4316 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4317 break;
4318 case WM_T_PCH_LPT:
4319 reg = CSR_READ(sc, WMREG_PBECCSTS);
4320 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4321 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4322
4323 reg = CSR_READ(sc, WMREG_CTRL);
4324 reg |= CTRL_MEHE;
4325 CSR_WRITE(sc, WMREG_CTRL, reg);
4326 break;
4327 default:
4328 break;
4329 }
4330
4331 /* On 575 and later set RDT only if RX enabled */
4332 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4333 for (i = 0; i < WM_NRXDESC; i++)
4334 WM_INIT_RXDESC(sc, i);
4335
4336 sc->sc_stopping = false;
4337
4338 /* Start the one second link check clock. */
4339 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4340
4341 /* ...all done! */
4342 ifp->if_flags |= IFF_RUNNING;
4343 ifp->if_flags &= ~IFF_OACTIVE;
4344
4345 out:
4346 sc->sc_if_flags = ifp->if_flags;
4347 if (error)
4348 log(LOG_ERR, "%s: interface not running\n",
4349 device_xname(sc->sc_dev));
4350 return error;
4351 }
4352
4353 /*
4354 * wm_stop: [ifnet interface function]
4355 *
4356 * Stop transmission on the interface.
4357 */
4358 static void
4359 wm_stop(struct ifnet *ifp, int disable)
4360 {
4361 struct wm_softc *sc = ifp->if_softc;
4362
4363 WM_BOTH_LOCK(sc);
4364 wm_stop_locked(ifp, disable);
4365 WM_BOTH_UNLOCK(sc);
4366 }
4367
4368 static void
4369 wm_stop_locked(struct ifnet *ifp, int disable)
4370 {
4371 struct wm_softc *sc = ifp->if_softc;
4372 struct wm_txsoft *txs;
4373 int i;
4374
4375 KASSERT(WM_BOTH_LOCKED(sc));
4376
4377 sc->sc_stopping = true;
4378
4379 /* Stop the one second clock. */
4380 callout_stop(&sc->sc_tick_ch);
4381
4382 /* Stop the 82547 Tx FIFO stall check timer. */
4383 if (sc->sc_type == WM_T_82547)
4384 callout_stop(&sc->sc_txfifo_ch);
4385
4386 if (sc->sc_flags & WM_F_HAS_MII) {
4387 /* Down the MII. */
4388 mii_down(&sc->sc_mii);
4389 } else {
4390 #if 0
4391 /* Should we clear PHY's status properly? */
4392 wm_reset(sc);
4393 #endif
4394 }
4395
4396 /* Stop the transmit and receive processes. */
4397 CSR_WRITE(sc, WMREG_TCTL, 0);
4398 CSR_WRITE(sc, WMREG_RCTL, 0);
4399 sc->sc_rctl &= ~RCTL_EN;
4400
4401 /*
4402 * Clear the interrupt mask to ensure the device cannot assert its
4403 * interrupt line.
4404 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4405 * any currently pending or shared interrupt.
4406 */
4407 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4408 sc->sc_icr = 0;
4409
4410 /* Release any queued transmit buffers. */
4411 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4412 txs = &sc->sc_txsoft[i];
4413 if (txs->txs_mbuf != NULL) {
4414 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4415 m_freem(txs->txs_mbuf);
4416 txs->txs_mbuf = NULL;
4417 }
4418 }
4419
4420 /* Mark the interface as down and cancel the watchdog timer. */
4421 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4422 ifp->if_timer = 0;
4423
4424 if (disable)
4425 wm_rxdrain(sc);
4426
4427 #if 0 /* notyet */
4428 if (sc->sc_type >= WM_T_82544)
4429 CSR_WRITE(sc, WMREG_WUC, 0);
4430 #endif
4431 }
4432
4433 /*
4434 * wm_tx_offload:
4435 *
4436 * Set up TCP/IP checksumming parameters for the
4437 * specified packet.
4438 */
4439 static int
4440 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4441 uint8_t *fieldsp)
4442 {
4443 struct mbuf *m0 = txs->txs_mbuf;
4444 struct livengood_tcpip_ctxdesc *t;
4445 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4446 uint32_t ipcse;
4447 struct ether_header *eh;
4448 int offset, iphl;
4449 uint8_t fields;
4450
4451 /*
4452 * XXX It would be nice if the mbuf pkthdr had offset
4453 * fields for the protocol headers.
4454 */
4455
4456 eh = mtod(m0, struct ether_header *);
4457 switch (htons(eh->ether_type)) {
4458 case ETHERTYPE_IP:
4459 case ETHERTYPE_IPV6:
4460 offset = ETHER_HDR_LEN;
4461 break;
4462
4463 case ETHERTYPE_VLAN:
4464 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4465 break;
4466
4467 default:
4468 /*
4469 * Don't support this protocol or encapsulation.
4470 */
4471 *fieldsp = 0;
4472 *cmdp = 0;
4473 return 0;
4474 }
4475
4476 if ((m0->m_pkthdr.csum_flags &
4477 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4478 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4479 } else {
4480 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4481 }
4482 ipcse = offset + iphl - 1;
4483
4484 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4485 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4486 seg = 0;
4487 fields = 0;
4488
4489 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4490 int hlen = offset + iphl;
4491 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4492
4493 if (__predict_false(m0->m_len <
4494 (hlen + sizeof(struct tcphdr)))) {
4495 /*
4496 * TCP/IP headers are not in the first mbuf; we need
4497 * to do this the slow and painful way. Let's just
4498 * hope this doesn't happen very often.
4499 */
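			/*
			 * Note (usual TSO convention): the IP length field
			 * is zeroed and th_sum is seeded with a
			 * pseudo-header checksum that excludes the length,
			 * so the hardware can fold the per-segment lengths
			 * in as it splits the payload.
			 */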
4500 struct tcphdr th;
4501
4502 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4503
4504 m_copydata(m0, hlen, sizeof(th), &th);
4505 if (v4) {
4506 struct ip ip;
4507
4508 m_copydata(m0, offset, sizeof(ip), &ip);
4509 ip.ip_len = 0;
4510 m_copyback(m0,
4511 offset + offsetof(struct ip, ip_len),
4512 sizeof(ip.ip_len), &ip.ip_len);
4513 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4514 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4515 } else {
4516 struct ip6_hdr ip6;
4517
4518 m_copydata(m0, offset, sizeof(ip6), &ip6);
4519 ip6.ip6_plen = 0;
4520 m_copyback(m0,
4521 offset + offsetof(struct ip6_hdr, ip6_plen),
4522 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4523 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4524 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4525 }
4526 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4527 sizeof(th.th_sum), &th.th_sum);
4528
4529 hlen += th.th_off << 2;
4530 } else {
4531 /*
4532 * TCP/IP headers are in the first mbuf; we can do
4533 * this the easy way.
4534 */
4535 struct tcphdr *th;
4536
4537 if (v4) {
4538 struct ip *ip =
4539 (void *)(mtod(m0, char *) + offset);
4540 th = (void *)(mtod(m0, char *) + hlen);
4541
4542 ip->ip_len = 0;
4543 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4544 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4545 } else {
4546 struct ip6_hdr *ip6 =
4547 (void *)(mtod(m0, char *) + offset);
4548 th = (void *)(mtod(m0, char *) + hlen);
4549
4550 ip6->ip6_plen = 0;
4551 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4552 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4553 }
4554 hlen += th->th_off << 2;
4555 }
4556
4557 if (v4) {
4558 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4559 cmdlen |= WTX_TCPIP_CMD_IP;
4560 } else {
4561 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4562 ipcse = 0;
4563 }
4564 cmd |= WTX_TCPIP_CMD_TSE;
4565 cmdlen |= WTX_TCPIP_CMD_TSE |
4566 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4567 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4568 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4569 }
4570
4571 /*
4572 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4573 * offload feature, if we load the context descriptor, we
4574 * MUST provide valid values for IPCSS and TUCSS fields.
4575 */
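	/*
	 * Worked example (untagged IPv4/TCP with a 20-byte IP header):
	 * IPCSS = 14, IPCSO = 14 + 10 = 24, IPCSE = 14 + 20 - 1 = 33,
	 * and below TUCSS = 34, TUCSO = 34 + 16 = 50.
	 */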
4576
4577 ipcs = WTX_TCPIP_IPCSS(offset) |
4578 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4579 WTX_TCPIP_IPCSE(ipcse);
4580 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
4581 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
4582 fields |= WTX_IXSM;
4583 }
4584
4585 offset += iphl;
4586
4587 if (m0->m_pkthdr.csum_flags &
4588 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
4589 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4590 fields |= WTX_TXSM;
4591 tucs = WTX_TCPIP_TUCSS(offset) |
4592 WTX_TCPIP_TUCSO(offset +
4593 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
4594 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4595 } else if ((m0->m_pkthdr.csum_flags &
4596 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
4597 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4598 fields |= WTX_TXSM;
4599 tucs = WTX_TCPIP_TUCSS(offset) |
4600 WTX_TCPIP_TUCSO(offset +
4601 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
4602 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4603 } else {
4604 /* Just initialize it to a valid TCP context. */
4605 tucs = WTX_TCPIP_TUCSS(offset) |
4606 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
4607 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4608 }
4609
4610 /* Fill in the context descriptor. */
4611 t = (struct livengood_tcpip_ctxdesc *)
4612 &sc->sc_txdescs[sc->sc_txnext];
4613 t->tcpip_ipcs = htole32(ipcs);
4614 t->tcpip_tucs = htole32(tucs);
4615 t->tcpip_cmdlen = htole32(cmdlen);
4616 t->tcpip_seg = htole32(seg);
4617 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4618
4619 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4620 txs->txs_ndesc++;
4621
4622 *cmdp = cmd;
4623 *fieldsp = fields;
4624
4625 return 0;
4626 }
4627
4628 static void
4629 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
4630 {
4631 struct mbuf *m;
4632 int i;
4633
4634 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
4635 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
4636 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
4637 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
4638 m->m_data, m->m_len, m->m_flags);
4639 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
4640 i, i == 1 ? "" : "s");
4641 }
4642
4643 /*
4644 * wm_82547_txfifo_stall:
4645 *
4646 * Callout used to wait for the 82547 Tx FIFO to drain,
4647 * reset the FIFO pointers, and restart packet transmission.
4648 */
4649 static void
4650 wm_82547_txfifo_stall(void *arg)
4651 {
4652 struct wm_softc *sc = arg;
4653 #ifndef WM_MPSAFE
4654 int s;
4655
4656 s = splnet();
4657 #endif
4658 WM_TX_LOCK(sc);
4659
4660 if (sc->sc_stopping)
4661 goto out;
4662
4663 if (sc->sc_txfifo_stall) {
4664 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
4665 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
4666 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
4667 /*
4668 * Packets have drained. Stop transmitter, reset
4669 * FIFO pointers, restart transmitter, and kick
4670 * the packet queue.
4671 */
4672 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
4673 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
4674 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
4675 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
4676 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
4677 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
4678 CSR_WRITE(sc, WMREG_TCTL, tctl);
4679 CSR_WRITE_FLUSH(sc);
4680
4681 sc->sc_txfifo_head = 0;
4682 sc->sc_txfifo_stall = 0;
4683 wm_start_locked(&sc->sc_ethercom.ec_if);
4684 } else {
4685 /*
4686 * Still waiting for packets to drain; try again in
4687 * another tick.
4688 */
4689 callout_schedule(&sc->sc_txfifo_ch, 1);
4690 }
4691 }
4692
4693 out:
4694 WM_TX_UNLOCK(sc);
4695 #ifndef WM_MPSAFE
4696 splx(s);
4697 #endif
4698 }
4699
4700 /*
4701 * wm_82547_txfifo_bugchk:
4702 *
4703 * Check for bug condition in the 82547 Tx FIFO. We need to
4704 * prevent enqueueing a packet that would wrap around the end
4705  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
4706 *
4707 * We do this by checking the amount of space before the end
4708 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
4709 * the Tx FIFO, wait for all remaining packets to drain, reset
4710 * the internal FIFO pointers to the beginning, and restart
4711 * transmission on the interface.
4712 */
4713 #define WM_FIFO_HDR 0x10
4714 #define WM_82547_PAD_LEN 0x3e0
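/*
 * Worked example: a 1514-byte frame occupies
 * roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of FIFO, so
 * the stall path below triggers once the space left before the FIFO
 * wraps is 1536 - WM_82547_PAD_LEN = 544 bytes or less.
 */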
4715 static int
4716 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
4717 {
4718 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
4719 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
4720
4721 /* Just return if already stalled. */
4722 if (sc->sc_txfifo_stall)
4723 return 1;
4724
4725 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4726 /* Stall only occurs in half-duplex mode. */
4727 goto send_packet;
4728 }
4729
4730 if (len >= WM_82547_PAD_LEN + space) {
4731 sc->sc_txfifo_stall = 1;
4732 callout_schedule(&sc->sc_txfifo_ch, 1);
4733 return 1;
4734 }
4735
4736 send_packet:
4737 sc->sc_txfifo_head += len;
4738 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
4739 sc->sc_txfifo_head -= sc->sc_txfifo_size;
4740
4741 return 0;
4742 }
4743
4744 /*
4745 * wm_start: [ifnet interface function]
4746 *
4747 * Start packet transmission on the interface.
4748 */
4749 static void
4750 wm_start(struct ifnet *ifp)
4751 {
4752 struct wm_softc *sc = ifp->if_softc;
4753
4754 WM_TX_LOCK(sc);
4755 if (!sc->sc_stopping)
4756 wm_start_locked(ifp);
4757 WM_TX_UNLOCK(sc);
4758 }
4759
4760 static void
4761 wm_start_locked(struct ifnet *ifp)
4762 {
4763 struct wm_softc *sc = ifp->if_softc;
4764 struct mbuf *m0;
4765 struct m_tag *mtag;
4766 struct wm_txsoft *txs;
4767 bus_dmamap_t dmamap;
4768 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
4769 bus_addr_t curaddr;
4770 bus_size_t seglen, curlen;
4771 uint32_t cksumcmd;
4772 uint8_t cksumfields;
4773
4774 KASSERT(WM_TX_LOCKED(sc));
4775
4776 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4777 return;
4778
4779 /* Remember the previous number of free descriptors. */
4780 ofree = sc->sc_txfree;
4781
4782 /*
4783 * Loop through the send queue, setting up transmit descriptors
4784 * until we drain the queue, or use up all available transmit
4785 * descriptors.
4786 */
4787 for (;;) {
4788 m0 = NULL;
4789
4790 /* Get a work queue entry. */
4791 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
4792 wm_txintr(sc);
4793 if (sc->sc_txsfree == 0) {
4794 DPRINTF(WM_DEBUG_TX,
4795 ("%s: TX: no free job descriptors\n",
4796 device_xname(sc->sc_dev)));
4797 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
4798 break;
4799 }
4800 }
4801
4802 /* Grab a packet off the queue. */
4803 IFQ_DEQUEUE(&ifp->if_snd, m0);
4804 if (m0 == NULL)
4805 break;
4806
4807 DPRINTF(WM_DEBUG_TX,
4808 ("%s: TX: have packet to transmit: %p\n",
4809 device_xname(sc->sc_dev), m0));
4810
4811 txs = &sc->sc_txsoft[sc->sc_txsnext];
4812 dmamap = txs->txs_dmamap;
4813
4814 use_tso = (m0->m_pkthdr.csum_flags &
4815 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
4816
4817 /*
4818 * So says the Linux driver:
4819 * The controller does a simple calculation to make sure
4820 * there is enough room in the FIFO before initiating the
4821 * DMA for each buffer. The calc is:
4822 * 4 = ceil(buffer len / MSS)
4823 * To make sure we don't overrun the FIFO, adjust the max
4824 * buffer len if the MSS drops.
4825 */
4826 dmamap->dm_maxsegsz =
4827 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
4828 ? m0->m_pkthdr.segsz << 2
4829 : WTX_MAX_LEN;
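		/*
		 * I.e. each DMA segment is capped at 4 * MSS (or
		 * WTX_MAX_LEN), which keeps the FIFO headroom estimate
		 * quoted above valid.
		 */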
4830
4831 /*
4832 * Load the DMA map. If this fails, the packet either
4833 * didn't fit in the allotted number of segments, or we
4834 * were short on resources. For the too-many-segments
4835 * case, we simply report an error and drop the packet,
4836 * since we can't sanely copy a jumbo packet to a single
4837 * buffer.
4838 */
4839 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
4840 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
4841 if (error) {
4842 if (error == EFBIG) {
4843 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4844 log(LOG_ERR, "%s: Tx packet consumes too many "
4845 "DMA segments, dropping...\n",
4846 device_xname(sc->sc_dev));
4847 wm_dump_mbuf_chain(sc, m0);
4848 m_freem(m0);
4849 continue;
4850 }
4851 /* Short on resources, just stop for now. */
4852 DPRINTF(WM_DEBUG_TX,
4853 ("%s: TX: dmamap load failed: %d\n",
4854 device_xname(sc->sc_dev), error));
4855 break;
4856 }
4857
4858 segs_needed = dmamap->dm_nsegs;
4859 if (use_tso) {
4860 /* For sentinel descriptor; see below. */
4861 segs_needed++;
4862 }
4863
4864 /*
4865 * Ensure we have enough descriptors free to describe
4866 * the packet. Note, we always reserve one descriptor
4867 * at the end of the ring due to the semantics of the
4868 * TDT register, plus one more in the event we need
4869 * to load offload context.
4870 */
4871 if (segs_needed > sc->sc_txfree - 2) {
4872 /*
4873 * Not enough free descriptors to transmit this
4874 * packet. We haven't committed anything yet,
4875 * so just unload the DMA map, put the packet
4876 			 * back on the queue, and punt.  Notify the upper
4877 * layer that there are no more slots left.
4878 */
4879 DPRINTF(WM_DEBUG_TX,
4880 ("%s: TX: need %d (%d) descriptors, have %d\n",
4881 device_xname(sc->sc_dev), dmamap->dm_nsegs,
4882 segs_needed, sc->sc_txfree - 1));
4883 ifp->if_flags |= IFF_OACTIVE;
4884 bus_dmamap_unload(sc->sc_dmat, dmamap);
4885 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
4886 break;
4887 }
4888
4889 /*
4890 * Check for 82547 Tx FIFO bug. We need to do this
4891 * once we know we can transmit the packet, since we
4892 * do some internal FIFO space accounting here.
4893 */
4894 if (sc->sc_type == WM_T_82547 &&
4895 wm_82547_txfifo_bugchk(sc, m0)) {
4896 DPRINTF(WM_DEBUG_TX,
4897 ("%s: TX: 82547 Tx FIFO bug detected\n",
4898 device_xname(sc->sc_dev)));
4899 ifp->if_flags |= IFF_OACTIVE;
4900 bus_dmamap_unload(sc->sc_dmat, dmamap);
4901 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
4902 break;
4903 }
4904
4905 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
4906
4907 DPRINTF(WM_DEBUG_TX,
4908 ("%s: TX: packet has %d (%d) DMA segments\n",
4909 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
4910
4911 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
4912
4913 /*
4914 * Store a pointer to the packet so that we can free it
4915 * later.
4916 *
4917 * Initially, we consider the number of descriptors the
4918 		 * packet uses to be the number of DMA segments.  This may be
4919 * incremented by 1 if we do checksum offload (a descriptor
4920 * is used to set the checksum context).
4921 */
4922 txs->txs_mbuf = m0;
4923 txs->txs_firstdesc = sc->sc_txnext;
4924 txs->txs_ndesc = segs_needed;
4925
4926 /* Set up offload parameters for this packet. */
4927 if (m0->m_pkthdr.csum_flags &
4928 (M_CSUM_TSOv4|M_CSUM_TSOv6|
4929 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
4930 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
4931 if (wm_tx_offload(sc, txs, &cksumcmd,
4932 &cksumfields) != 0) {
4933 /* Error message already displayed. */
4934 bus_dmamap_unload(sc->sc_dmat, dmamap);
4935 continue;
4936 }
4937 } else {
4938 cksumcmd = 0;
4939 cksumfields = 0;
4940 }
4941
4942 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
4943
4944 /* Sync the DMA map. */
4945 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
4946 BUS_DMASYNC_PREWRITE);
4947
4948 /* Initialize the transmit descriptor. */
4949 for (nexttx = sc->sc_txnext, seg = 0;
4950 seg < dmamap->dm_nsegs; seg++) {
4951 for (seglen = dmamap->dm_segs[seg].ds_len,
4952 curaddr = dmamap->dm_segs[seg].ds_addr;
4953 seglen != 0;
4954 curaddr += curlen, seglen -= curlen,
4955 nexttx = WM_NEXTTX(sc, nexttx)) {
4956 curlen = seglen;
4957
4958 /*
4959 * So says the Linux driver:
4960 * Work around for premature descriptor
4961 * write-backs in TSO mode. Append a
4962 * 4-byte sentinel descriptor.
4963 */
4964 if (use_tso &&
4965 seg == dmamap->dm_nsegs - 1 &&
4966 curlen > 8)
4967 curlen -= 4;
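				/*
				 * The 4 bytes shaved off the last segment
				 * come back around the loop as one extra
				 * short descriptor -- the sentinel slot
				 * added to segs_needed above.
				 */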
4968
4969 wm_set_dma_addr(
4970 &sc->sc_txdescs[nexttx].wtx_addr,
4971 curaddr);
4972 sc->sc_txdescs[nexttx].wtx_cmdlen =
4973 htole32(cksumcmd | curlen);
4974 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
4975 0;
4976 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
4977 cksumfields;
4978 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
4979 lasttx = nexttx;
4980
4981 DPRINTF(WM_DEBUG_TX,
4982 ("%s: TX: desc %d: low %#" PRIx64 ", "
4983 "len %#04zx\n",
4984 device_xname(sc->sc_dev), nexttx,
4985 (uint64_t)curaddr, curlen));
4986 }
4987 }
4988
4989 KASSERT(lasttx != -1);
4990
4991 /*
4992 * Set up the command byte on the last descriptor of
4993 * the packet. If we're in the interrupt delay window,
4994 * delay the interrupt.
4995 */
4996 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4997 htole32(WTX_CMD_EOP | WTX_CMD_RS);
4998
4999 /*
5000 * If VLANs are enabled and the packet has a VLAN tag, set
5001 * up the descriptor to encapsulate the packet for us.
5002 *
5003 * This is only valid on the last descriptor of the packet.
5004 */
5005 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5006 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5007 htole32(WTX_CMD_VLE);
5008 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
5009 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5010 }
5011
5012 txs->txs_lastdesc = lasttx;
5013
5014 DPRINTF(WM_DEBUG_TX,
5015 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5016 device_xname(sc->sc_dev),
5017 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5018
5019 /* Sync the descriptors we're using. */
5020 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5021 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5022
5023 /* Give the packet to the chip. */
5024 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5025
5026 DPRINTF(WM_DEBUG_TX,
5027 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5028
5029 DPRINTF(WM_DEBUG_TX,
5030 ("%s: TX: finished transmitting packet, job %d\n",
5031 device_xname(sc->sc_dev), sc->sc_txsnext));
5032
5033 /* Advance the tx pointer. */
5034 sc->sc_txfree -= txs->txs_ndesc;
5035 sc->sc_txnext = nexttx;
5036
5037 sc->sc_txsfree--;
5038 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5039
5040 /* Pass the packet to any BPF listeners. */
5041 bpf_mtap(ifp, m0);
5042 }
5043
5044 if (m0 != NULL) {
5045 ifp->if_flags |= IFF_OACTIVE;
5046 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5047 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5048 m_freem(m0);
5049 }
5050
5051 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5052 /* No more slots; notify upper layer. */
5053 ifp->if_flags |= IFF_OACTIVE;
5054 }
5055
5056 if (sc->sc_txfree != ofree) {
5057 /* Set a watchdog timer in case the chip flakes out. */
5058 ifp->if_timer = 5;
5059 }
5060 }
5061
5062 /*
5063 * wm_nq_tx_offload:
5064 *
5065 * Set up TCP/IP checksumming parameters for the
5066 * specified packet, for NEWQUEUE devices
5067 */
5068 static int
5069 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
5070 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
5071 {
5072 struct mbuf *m0 = txs->txs_mbuf;
5073 struct m_tag *mtag;
5074 uint32_t vl_len, mssidx, cmdc;
5075 struct ether_header *eh;
5076 int offset, iphl;
5077
5078 /*
5079 * XXX It would be nice if the mbuf pkthdr had offset
5080 * fields for the protocol headers.
5081 */
5082 *cmdlenp = 0;
5083 *fieldsp = 0;
5084
5085 eh = mtod(m0, struct ether_header *);
5086 switch (htons(eh->ether_type)) {
5087 case ETHERTYPE_IP:
5088 case ETHERTYPE_IPV6:
5089 offset = ETHER_HDR_LEN;
5090 break;
5091
5092 case ETHERTYPE_VLAN:
5093 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5094 break;
5095
5096 default:
5097 /* Don't support this protocol or encapsulation. */
5098 *do_csum = false;
5099 return 0;
5100 }
5101 *do_csum = true;
5102 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
5103 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
5104
5105 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
5106 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
5107
5108 if ((m0->m_pkthdr.csum_flags &
5109 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
5110 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5111 } else {
5112 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5113 }
5114 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
5115 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
5116
5117 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5118 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
5119 << NQTXC_VLLEN_VLAN_SHIFT);
5120 *cmdlenp |= NQTX_CMD_VLE;
5121 }
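	/*
	 * At this point vl_len packs, e.g. for an untagged IPv4 packet,
	 * MACLEN = 14 and IPLEN = 20 into their bit-fields; a VLAN tag
	 * would additionally fill the VLAN field, with NQTX_CMD_VLE set
	 * on the data descriptor.
	 */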
5122
5123 mssidx = 0;
5124
5125 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5126 int hlen = offset + iphl;
5127 int tcp_hlen;
5128 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5129
5130 if (__predict_false(m0->m_len <
5131 (hlen + sizeof(struct tcphdr)))) {
5132 /*
5133 * TCP/IP headers are not in the first mbuf; we need
5134 * to do this the slow and painful way. Let's just
5135 * hope this doesn't happen very often.
5136 */
5137 struct tcphdr th;
5138
5139 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5140
5141 m_copydata(m0, hlen, sizeof(th), &th);
5142 if (v4) {
5143 struct ip ip;
5144
5145 m_copydata(m0, offset, sizeof(ip), &ip);
5146 ip.ip_len = 0;
5147 m_copyback(m0,
5148 offset + offsetof(struct ip, ip_len),
5149 sizeof(ip.ip_len), &ip.ip_len);
5150 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5151 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5152 } else {
5153 struct ip6_hdr ip6;
5154
5155 m_copydata(m0, offset, sizeof(ip6), &ip6);
5156 ip6.ip6_plen = 0;
5157 m_copyback(m0,
5158 offset + offsetof(struct ip6_hdr, ip6_plen),
5159 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5160 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5161 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5162 }
5163 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5164 sizeof(th.th_sum), &th.th_sum);
5165
5166 tcp_hlen = th.th_off << 2;
5167 } else {
5168 /*
5169 * TCP/IP headers are in the first mbuf; we can do
5170 * this the easy way.
5171 */
5172 struct tcphdr *th;
5173
5174 if (v4) {
5175 struct ip *ip =
5176 (void *)(mtod(m0, char *) + offset);
5177 th = (void *)(mtod(m0, char *) + hlen);
5178
5179 ip->ip_len = 0;
5180 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5181 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5182 } else {
5183 struct ip6_hdr *ip6 =
5184 (void *)(mtod(m0, char *) + offset);
5185 th = (void *)(mtod(m0, char *) + hlen);
5186
5187 ip6->ip6_plen = 0;
5188 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5189 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5190 }
5191 tcp_hlen = th->th_off << 2;
5192 }
5193 hlen += tcp_hlen;
5194 *cmdlenp |= NQTX_CMD_TSE;
5195
5196 if (v4) {
5197 WM_EVCNT_INCR(&sc->sc_ev_txtso);
5198 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
5199 } else {
5200 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5201 *fieldsp |= NQTXD_FIELDS_TUXSM;
5202 }
5203 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
5204 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5205 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
5206 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
5207 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
5208 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
5209 } else {
5210 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
5211 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5212 }
5213
5214 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
5215 *fieldsp |= NQTXD_FIELDS_IXSM;
5216 cmdc |= NQTXC_CMD_IP4;
5217 }
5218
5219 if (m0->m_pkthdr.csum_flags &
5220 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5221 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5222 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5223 cmdc |= NQTXC_CMD_TCP;
5224 } else {
5225 cmdc |= NQTXC_CMD_UDP;
5226 }
5227 cmdc |= NQTXC_CMD_IP4;
5228 *fieldsp |= NQTXD_FIELDS_TUXSM;
5229 }
5230 if (m0->m_pkthdr.csum_flags &
5231 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5232 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5233 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5234 cmdc |= NQTXC_CMD_TCP;
5235 } else {
5236 cmdc |= NQTXC_CMD_UDP;
5237 }
5238 cmdc |= NQTXC_CMD_IP6;
5239 *fieldsp |= NQTXD_FIELDS_TUXSM;
5240 }
5241
5242 /* Fill in the context descriptor. */
5243 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5244 htole32(vl_len);
5245 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5246 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5247 htole32(cmdc);
5248 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5249 htole32(mssidx);
5250 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5251 DPRINTF(WM_DEBUG_TX,
5252 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5253 sc->sc_txnext, 0, vl_len));
5254 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5255 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5256 txs->txs_ndesc++;
5257 return 0;
5258 }
5259
5260 /*
5261 * wm_nq_start: [ifnet interface function]
5262 *
5263 * Start packet transmission on the interface for NEWQUEUE devices
5264 */
5265 static void
5266 wm_nq_start(struct ifnet *ifp)
5267 {
5268 struct wm_softc *sc = ifp->if_softc;
5269
5270 WM_TX_LOCK(sc);
5271 if (!sc->sc_stopping)
5272 wm_nq_start_locked(ifp);
5273 WM_TX_UNLOCK(sc);
5274 }
5275
5276 static void
5277 wm_nq_start_locked(struct ifnet *ifp)
5278 {
5279 struct wm_softc *sc = ifp->if_softc;
5280 struct mbuf *m0;
5281 struct m_tag *mtag;
5282 struct wm_txsoft *txs;
5283 bus_dmamap_t dmamap;
5284 int error, nexttx, lasttx = -1, seg, segs_needed;
5285 bool do_csum, sent;
5286
5287 KASSERT(WM_TX_LOCKED(sc));
5288
5289 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5290 return;
5291
5292 sent = false;
5293
5294 /*
5295 * Loop through the send queue, setting up transmit descriptors
5296 * until we drain the queue, or use up all available transmit
5297 * descriptors.
5298 */
5299 for (;;) {
5300 m0 = NULL;
5301
5302 /* Get a work queue entry. */
5303 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5304 wm_txintr(sc);
5305 if (sc->sc_txsfree == 0) {
5306 DPRINTF(WM_DEBUG_TX,
5307 ("%s: TX: no free job descriptors\n",
5308 device_xname(sc->sc_dev)));
5309 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5310 break;
5311 }
5312 }
5313
5314 /* Grab a packet off the queue. */
5315 IFQ_DEQUEUE(&ifp->if_snd, m0);
5316 if (m0 == NULL)
5317 break;
5318
5319 DPRINTF(WM_DEBUG_TX,
5320 ("%s: TX: have packet to transmit: %p\n",
5321 device_xname(sc->sc_dev), m0));
5322
5323 txs = &sc->sc_txsoft[sc->sc_txsnext];
5324 dmamap = txs->txs_dmamap;
5325
5326 /*
5327 * Load the DMA map. If this fails, the packet either
5328 * didn't fit in the allotted number of segments, or we
5329 * were short on resources. For the too-many-segments
5330 * case, we simply report an error and drop the packet,
5331 * since we can't sanely copy a jumbo packet to a single
5332 * buffer.
5333 */
5334 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5335 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5336 if (error) {
5337 if (error == EFBIG) {
5338 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5339 log(LOG_ERR, "%s: Tx packet consumes too many "
5340 "DMA segments, dropping...\n",
5341 device_xname(sc->sc_dev));
5342 wm_dump_mbuf_chain(sc, m0);
5343 m_freem(m0);
5344 continue;
5345 }
5346 /* Short on resources, just stop for now. */
5347 DPRINTF(WM_DEBUG_TX,
5348 ("%s: TX: dmamap load failed: %d\n",
5349 device_xname(sc->sc_dev), error));
5350 break;
5351 }
5352
5353 segs_needed = dmamap->dm_nsegs;
5354
5355 /*
5356 * Ensure we have enough descriptors free to describe
5357 * the packet. Note, we always reserve one descriptor
5358 * at the end of the ring due to the semantics of the
5359 * TDT register, plus one more in the event we need
5360 * to load offload context.
5361 */
5362 if (segs_needed > sc->sc_txfree - 2) {
5363 /*
5364 * Not enough free descriptors to transmit this
5365 * packet. We haven't committed anything yet,
5366 * so just unload the DMA map, put the packet
5367 			 * back on the queue, and punt.  Notify the upper
5368 * layer that there are no more slots left.
5369 */
5370 DPRINTF(WM_DEBUG_TX,
5371 ("%s: TX: need %d (%d) descriptors, have %d\n",
5372 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5373 segs_needed, sc->sc_txfree - 1));
5374 ifp->if_flags |= IFF_OACTIVE;
5375 bus_dmamap_unload(sc->sc_dmat, dmamap);
5376 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5377 break;
5378 }
5379
5380 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5381
5382 DPRINTF(WM_DEBUG_TX,
5383 ("%s: TX: packet has %d (%d) DMA segments\n",
5384 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5385
5386 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5387
5388 /*
5389 * Store a pointer to the packet so that we can free it
5390 * later.
5391 *
5392 * Initially, we consider the number of descriptors the
5393 		 * packet uses to be the number of DMA segments.  This may be
5394 * incremented by 1 if we do checksum offload (a descriptor
5395 * is used to set the checksum context).
5396 */
5397 txs->txs_mbuf = m0;
5398 txs->txs_firstdesc = sc->sc_txnext;
5399 txs->txs_ndesc = segs_needed;
5400
5401 /* Set up offload parameters for this packet. */
5402 uint32_t cmdlen, fields, dcmdlen;
5403 if (m0->m_pkthdr.csum_flags &
5404 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5405 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5406 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5407 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5408 &do_csum) != 0) {
5409 /* Error message already displayed. */
5410 bus_dmamap_unload(sc->sc_dmat, dmamap);
5411 continue;
5412 }
5413 } else {
5414 do_csum = false;
5415 cmdlen = 0;
5416 fields = 0;
5417 }
5418
5419 /* Sync the DMA map. */
5420 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5421 BUS_DMASYNC_PREWRITE);
5422
5423 /* Initialize the first transmit descriptor. */
5424 nexttx = sc->sc_txnext;
5425 if (!do_csum) {
5426 /* setup a legacy descriptor */
5427 wm_set_dma_addr(
5428 &sc->sc_txdescs[nexttx].wtx_addr,
5429 dmamap->dm_segs[0].ds_addr);
5430 sc->sc_txdescs[nexttx].wtx_cmdlen =
5431 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5432 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5433 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5434 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5435 NULL) {
5436 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5437 htole32(WTX_CMD_VLE);
5438 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5439 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5440 } else {
5441 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5442 }
5443 dcmdlen = 0;
5444 } else {
5445 /* setup an advanced data descriptor */
5446 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5447 htole64(dmamap->dm_segs[0].ds_addr);
5448 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5449 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5450 htole32(dmamap->dm_segs[0].ds_len | cmdlen );
5451 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5452 htole32(fields);
5453 DPRINTF(WM_DEBUG_TX,
5454 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5455 device_xname(sc->sc_dev), nexttx,
5456 (uint64_t)dmamap->dm_segs[0].ds_addr));
5457 DPRINTF(WM_DEBUG_TX,
5458 ("\t 0x%08x%08x\n", fields,
5459 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5460 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5461 }
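		/*
		 * The dcmdlen chosen above dictates the format of the
		 * remaining segment descriptors: 0 keeps them in the
		 * legacy layout to match a legacy first descriptor, while
		 * NQTX_DTYP_D | NQTX_CMD_DEXT continues the advanced
		 * data-descriptor layout.
		 */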
5462
5463 lasttx = nexttx;
5464 nexttx = WM_NEXTTX(sc, nexttx);
5465 /*
5466 		 * Fill in the next descriptors.  The legacy and advanced
5467 		 * formats are the same from here on.
5468 */
5469 for (seg = 1; seg < dmamap->dm_nsegs;
5470 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5471 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5472 htole64(dmamap->dm_segs[seg].ds_addr);
5473 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5474 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5475 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5476 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5477 lasttx = nexttx;
5478
5479 DPRINTF(WM_DEBUG_TX,
5480 ("%s: TX: desc %d: %#" PRIx64 ", "
5481 "len %#04zx\n",
5482 device_xname(sc->sc_dev), nexttx,
5483 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5484 dmamap->dm_segs[seg].ds_len));
5485 }
5486
5487 KASSERT(lasttx != -1);
5488
5489 /*
5490 * Set up the command byte on the last descriptor of
5491 * the packet. If we're in the interrupt delay window,
5492 * delay the interrupt.
5493 */
5494 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5495 (NQTX_CMD_EOP | NQTX_CMD_RS));
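/*
 * The KASSERT above is what makes this shared code path safe:
 * EOP and RS occupy the same bit positions in the legacy and
 * advanced command words, so we can set them through the legacy
 * view regardless of which layout this descriptor actually uses.
 */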
5496 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5497 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5498
5499 txs->txs_lastdesc = lasttx;
5500
5501 DPRINTF(WM_DEBUG_TX,
5502 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5503 device_xname(sc->sc_dev),
5504 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5505
5506 /* Sync the descriptors we're using. */
5507 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5508 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
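/*
 * The sync above uses both PREREAD and PREWRITE because the
 * descriptor memory moves in both directions: we just wrote the
 * descriptors, and the chip will later write completion status
 * back into them for wm_txintr() to read.
 */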
5509
5510 /* Give the packet to the chip. */
5511 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5512 sent = true;
5513
5514 DPRINTF(WM_DEBUG_TX,
5515 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5516
5517 DPRINTF(WM_DEBUG_TX,
5518 ("%s: TX: finished transmitting packet, job %d\n",
5519 device_xname(sc->sc_dev), sc->sc_txsnext));
5520
5521 /* Advance the tx pointer. */
5522 sc->sc_txfree -= txs->txs_ndesc;
5523 sc->sc_txnext = nexttx;
5524
5525 sc->sc_txsfree--;
5526 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5527
5528 /* Pass the packet to any BPF listeners. */
5529 bpf_mtap(ifp, m0);
5530 }
5531
5532 if (m0 != NULL) {
5533 ifp->if_flags |= IFF_OACTIVE;
5534 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5535 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5536 m_freem(m0);
5537 }
5538
5539 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5540 /* No more slots; notify upper layer. */
5541 ifp->if_flags |= IFF_OACTIVE;
5542 }
5543
5544 if (sent) {
5545 /* Set a watchdog timer in case the chip flakes out. */
5546 ifp->if_timer = 5;
5547 }
5548 }
5549
5550 /* Interrupt */
5551
5552 /*
5553 * wm_txintr:
5554 *
5555 * Helper; handle transmit interrupts.
5556 */
5557 static void
5558 wm_txintr(struct wm_softc *sc)
5559 {
5560 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5561 struct wm_txsoft *txs;
5562 uint8_t status;
5563 int i;
5564
5565 if (sc->sc_stopping)
5566 return;
5567
5568 ifp->if_flags &= ~IFF_OACTIVE;
5569
5570 /*
5571 * Go through the Tx list and free mbufs for those
5572 * frames which have been transmitted.
5573 */
5574 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5575 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
5576 txs = &sc->sc_txsoft[i];
5577
5578 DPRINTF(WM_DEBUG_TX,
5579 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
5580
5581 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
5582 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5583
5584 status =
5585 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
5586 if ((status & WTX_ST_DD) == 0) {
5587 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
5588 BUS_DMASYNC_PREREAD);
5589 break;
5590 }
5591
5592 DPRINTF(WM_DEBUG_TX,
5593 ("%s: TX: job %d done: descs %d..%d\n",
5594 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
5595 txs->txs_lastdesc));
5596
5597 /*
5598 * XXX We should probably be using the statistics
5599 * XXX registers, but I don't know if they exist
5600 * XXX on chips before the i82544.
5601 */
5602
5603 #ifdef WM_EVENT_COUNTERS
5604 if (status & WTX_ST_TU)
5605 WM_EVCNT_INCR(&sc->sc_ev_tu);
5606 #endif /* WM_EVENT_COUNTERS */
5607
5608 if (status & (WTX_ST_EC|WTX_ST_LC)) {
5609 ifp->if_oerrors++;
5610 if (status & WTX_ST_LC)
5611 log(LOG_WARNING, "%s: late collision\n",
5612 device_xname(sc->sc_dev));
5613 else if (status & WTX_ST_EC) {
5614 ifp->if_collisions += 16;
5615 log(LOG_WARNING, "%s: excessive collisions\n",
5616 device_xname(sc->sc_dev));
5617 }
5618 } else
5619 ifp->if_opackets++;
5620
5621 sc->sc_txfree += txs->txs_ndesc;
5622 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
5623 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5624 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5625 m_freem(txs->txs_mbuf);
5626 txs->txs_mbuf = NULL;
5627 }
5628
5629 /* Update the dirty transmit buffer pointer. */
5630 sc->sc_txsdirty = i;
5631 DPRINTF(WM_DEBUG_TX,
5632 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
5633
5634 /*
5635 * If there are no more pending transmissions, cancel the watchdog
5636 * timer.
5637 */
5638 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
5639 ifp->if_timer = 0;
5640 }
5641
5642 /*
5643 * wm_rxintr:
5644 *
5645 * Helper; handle receive interrupts.
5646 */
5647 static void
5648 wm_rxintr(struct wm_softc *sc)
5649 {
5650 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5651 struct wm_rxsoft *rxs;
5652 struct mbuf *m;
5653 int i, len;
5654 uint8_t status, errors;
5655 uint16_t vlantag;
5656
5657 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
5658 rxs = &sc->sc_rxsoft[i];
5659
5660 DPRINTF(WM_DEBUG_RX,
5661 ("%s: RX: checking descriptor %d\n",
5662 device_xname(sc->sc_dev), i));
5663
5664 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5665
5666 status = sc->sc_rxdescs[i].wrx_status;
5667 errors = sc->sc_rxdescs[i].wrx_errors;
5668 len = le16toh(sc->sc_rxdescs[i].wrx_len);
5669 vlantag = sc->sc_rxdescs[i].wrx_special;
5670
5671 if ((status & WRX_ST_DD) == 0) {
5672 /* We have processed all of the receive descriptors. */
5673 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
5674 break;
5675 }
5676
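/*
 * sc_rxdiscard is set when a buffer could not be replaced in
 * the middle of a multi-descriptor frame: every descriptor up
 * to and including the next EOP belongs to the ruined frame,
 * so recycle those buffers without looking at their contents.
 */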
5677 if (__predict_false(sc->sc_rxdiscard)) {
5678 DPRINTF(WM_DEBUG_RX,
5679 ("%s: RX: discarding contents of descriptor %d\n",
5680 device_xname(sc->sc_dev), i));
5681 WM_INIT_RXDESC(sc, i);
5682 if (status & WRX_ST_EOP) {
5683 /* Reset our state. */
5684 DPRINTF(WM_DEBUG_RX,
5685 ("%s: RX: resetting rxdiscard -> 0\n",
5686 device_xname(sc->sc_dev)));
5687 sc->sc_rxdiscard = 0;
5688 }
5689 continue;
5690 }
5691
5692 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5693 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
5694
5695 m = rxs->rxs_mbuf;
5696
5697 /*
5698 * Add a new receive buffer to the ring, unless of
5699 * course the length is zero. Treat the latter as a
5700 * failed mapping.
5701 */
5702 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
5703 /*
5704 * Failed, throw away what we've done so
5705 * far, and discard the rest of the packet.
5706 */
5707 ifp->if_ierrors++;
5708 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5709 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5710 WM_INIT_RXDESC(sc, i);
5711 if ((status & WRX_ST_EOP) == 0)
5712 sc->sc_rxdiscard = 1;
5713 if (sc->sc_rxhead != NULL)
5714 m_freem(sc->sc_rxhead);
5715 WM_RXCHAIN_RESET(sc);
5716 DPRINTF(WM_DEBUG_RX,
5717 ("%s: RX: Rx buffer allocation failed, "
5718 "dropping packet%s\n", device_xname(sc->sc_dev),
5719 sc->sc_rxdiscard ? " (discard)" : ""));
5720 continue;
5721 }
5722
5723 m->m_len = len;
5724 sc->sc_rxlen += len;
5725 DPRINTF(WM_DEBUG_RX,
5726 ("%s: RX: buffer at %p len %d\n",
5727 device_xname(sc->sc_dev), m->m_data, len));
5728
5729 /* If this is not the end of the packet, keep looking. */
5730 if ((status & WRX_ST_EOP) == 0) {
5731 WM_RXCHAIN_LINK(sc, m);
5732 DPRINTF(WM_DEBUG_RX,
5733 ("%s: RX: not yet EOP, rxlen -> %d\n",
5734 device_xname(sc->sc_dev), sc->sc_rxlen));
5735 continue;
5736 }
5737
5738 /*
5739 * Okay, we have the entire packet now. The chip is
5740 * configured to include the FCS except on the I350 and
5741 * I21[01] (not all chips can be configured to strip it),
5742 * so we need to trim it off.
5743 * We may need to adjust the length of the previous mbuf
5744 * in the chain if the current mbuf is too short.
5745 * Due to an erratum, the RCTL_SECRC bit in the RCTL
5746 * register is always set on the I350, so we don't trim there.
5747 */
5748 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
5749 && (sc->sc_type != WM_T_I210)
5750 && (sc->sc_type != WM_T_I211)) {
5751 if (m->m_len < ETHER_CRC_LEN) {
5752 sc->sc_rxtail->m_len
5753 -= (ETHER_CRC_LEN - m->m_len);
5754 m->m_len = 0;
5755 } else
5756 m->m_len -= ETHER_CRC_LEN;
5757 len = sc->sc_rxlen - ETHER_CRC_LEN;
5758 } else
5759 len = sc->sc_rxlen;
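/*
 * A worked example of the short-mbuf case above: if the final
 * mbuf holds only 1 byte, 3 of the 4 FCS bytes sit at the end
 * of the previous mbuf, so we shave those 3 bytes off sc_rxtail
 * and zero the final mbuf's length.
 */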
5760
5761 WM_RXCHAIN_LINK(sc, m);
5762
5763 *sc->sc_rxtailp = NULL;
5764 m = sc->sc_rxhead;
5765
5766 WM_RXCHAIN_RESET(sc);
5767
5768 DPRINTF(WM_DEBUG_RX,
5769 ("%s: RX: have entire packet, len -> %d\n",
5770 device_xname(sc->sc_dev), len));
5771
5772 /* If an error occurred, update stats and drop the packet. */
5773 if (errors &
5774 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
5775 if (errors & WRX_ER_SE)
5776 log(LOG_WARNING, "%s: symbol error\n",
5777 device_xname(sc->sc_dev));
5778 else if (errors & WRX_ER_SEQ)
5779 log(LOG_WARNING, "%s: receive sequence error\n",
5780 device_xname(sc->sc_dev));
5781 else if (errors & WRX_ER_CE)
5782 log(LOG_WARNING, "%s: CRC error\n",
5783 device_xname(sc->sc_dev));
5784 m_freem(m);
5785 continue;
5786 }
5787
5788 /* No errors. Receive the packet. */
5789 m->m_pkthdr.rcvif = ifp;
5790 m->m_pkthdr.len = len;
5791
5792 /*
5793 * If VLANs are enabled, VLAN packets have been unwrapped
5794 * for us. Associate the tag with the packet.
5795 */
5796 /* XXXX should check for i350 and i354 */
5797 if ((status & WRX_ST_VP) != 0) {
5798 VLAN_INPUT_TAG(ifp, m,
5799 le16toh(vlantag),
5800 continue);
5801 }
5802
5803 /* Set up checksum info for this packet. */
5804 if ((status & WRX_ST_IXSM) == 0) {
5805 if (status & WRX_ST_IPCS) {
5806 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
5807 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
5808 if (errors & WRX_ER_IPE)
5809 m->m_pkthdr.csum_flags |=
5810 M_CSUM_IPv4_BAD;
5811 }
5812 if (status & WRX_ST_TCPCS) {
5813 /*
5814 * Note: we don't know if this was TCP or UDP,
5815 * so we just set both bits, and expect the
5816 * upper layers to deal.
5817 */
5818 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
5819 m->m_pkthdr.csum_flags |=
5820 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
5821 M_CSUM_TCPv6 | M_CSUM_UDPv6;
5822 if (errors & WRX_ER_TCPE)
5823 m->m_pkthdr.csum_flags |=
5824 M_CSUM_TCP_UDP_BAD;
5825 }
5826 }
5827
5828 ifp->if_ipackets++;
5829
5830 WM_RX_UNLOCK(sc);
5831
5832 /* Pass this up to any BPF listeners. */
5833 bpf_mtap(ifp, m);
5834
5835 /* Pass it on. */
5836 (*ifp->if_input)(ifp, m);
5837
5838 WM_RX_LOCK(sc);
5839
5840 if (sc->sc_stopping)
5841 break;
5842 }
5843
5844 /* Update the receive pointer. */
5845 sc->sc_rxptr = i;
5846
5847 DPRINTF(WM_DEBUG_RX,
5848 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
5849 }
5850
5851 /*
5852 * wm_linkintr_gmii:
5853 *
5854 * Helper; handle link interrupts for GMII.
5855 */
5856 static void
5857 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
5858 {
5859
5860 KASSERT(WM_TX_LOCKED(sc));
5861
5862 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5863 __func__));
5864
5865 if (icr & ICR_LSC) {
5866 DPRINTF(WM_DEBUG_LINK,
5867 ("%s: LINK: LSC -> mii_pollstat\n",
5868 device_xname(sc->sc_dev)));
5869 mii_pollstat(&sc->sc_mii);
5870 if (sc->sc_type == WM_T_82543) {
5871 int miistatus, active;
5872
5873 /*
5874 * With the 82543, we need to force the MAC's
5875 * speed and duplex to match the PHY's current
5876 * speed and duplex configuration.
5877 */
5878 miistatus = sc->sc_mii.mii_media_status;
5879
5880 if (miistatus & IFM_ACTIVE) {
5881 active = sc->sc_mii.mii_media_active;
5882 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5883 switch (IFM_SUBTYPE(active)) {
5884 case IFM_10_T:
5885 sc->sc_ctrl |= CTRL_SPEED_10;
5886 break;
5887 case IFM_100_TX:
5888 sc->sc_ctrl |= CTRL_SPEED_100;
5889 break;
5890 case IFM_1000_T:
5891 sc->sc_ctrl |= CTRL_SPEED_1000;
5892 break;
5893 default:
5894 /*
5895 * fiber?
5896 * Should not enter here.
5897 */
5898 printf("unknown media (%x)\n",
5899 active);
5900 break;
5901 }
5902 if (active & IFM_FDX)
5903 sc->sc_ctrl |= CTRL_FD;
5904 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5905 }
5906 } else if ((sc->sc_type == WM_T_ICH8)
5907 && (sc->sc_phytype == WMPHY_IGP_3)) {
5908 wm_kmrn_lock_loss_workaround_ich8lan(sc);
5909 } else if (sc->sc_type == WM_T_PCH) {
5910 wm_k1_gig_workaround_hv(sc,
5911 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
5912 }
5913
5914 if ((sc->sc_phytype == WMPHY_82578)
5915 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
5916 == IFM_1000_T)) {
5917
5918 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
5919 delay(200*1000); /* XXX too big */
5920
5921 /* Link stall fix for link up */
5922 wm_gmii_hv_writereg(sc->sc_dev, 1,
5923 HV_MUX_DATA_CTRL,
5924 HV_MUX_DATA_CTRL_GEN_TO_MAC
5925 | HV_MUX_DATA_CTRL_FORCE_SPEED);
5926 wm_gmii_hv_writereg(sc->sc_dev, 1,
5927 HV_MUX_DATA_CTRL,
5928 HV_MUX_DATA_CTRL_GEN_TO_MAC);
5929 }
5930 }
5931 } else if (icr & ICR_RXSEQ) {
5932 DPRINTF(WM_DEBUG_LINK,
5933 ("%s: LINK Receive sequence error\n",
5934 device_xname(sc->sc_dev)));
5935 }
5936 }
5937
5938 /*
5939 * wm_linkintr_tbi:
5940 *
5941 * Helper; handle link interrupts for TBI mode.
5942 */
5943 static void
5944 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
5945 {
5946 uint32_t status;
5947
5948 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5949 __func__));
5950
5951 status = CSR_READ(sc, WMREG_STATUS);
5952 if (icr & ICR_LSC) {
5953 if (status & STATUS_LU) {
5954 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
5955 device_xname(sc->sc_dev),
5956 (status & STATUS_FD) ? "FDX" : "HDX"));
5957 /*
5958 * NOTE: the hardware updates TFCE and RFCE in CTRL
5959 * automatically, so we must refresh sc->sc_ctrl from it.
5960 */
5961
5962 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5963 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5964 sc->sc_fcrtl &= ~FCRTL_XONE;
5965 if (status & STATUS_FD)
5966 sc->sc_tctl |=
5967 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5968 else
5969 sc->sc_tctl |=
5970 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5971 if (sc->sc_ctrl & CTRL_TFCE)
5972 sc->sc_fcrtl |= FCRTL_XONE;
5973 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5974 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5975 WMREG_OLD_FCRTL : WMREG_FCRTL,
5976 sc->sc_fcrtl);
5977 sc->sc_tbi_linkup = 1;
5978 } else {
5979 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
5980 device_xname(sc->sc_dev)));
5981 sc->sc_tbi_linkup = 0;
5982 }
5983 wm_tbi_set_linkled(sc);
5984 } else if (icr & ICR_RXSEQ) {
5985 DPRINTF(WM_DEBUG_LINK,
5986 ("%s: LINK: Receive sequence error\n",
5987 device_xname(sc->sc_dev)));
5988 }
5989 }
5990
5991 /*
5992 * wm_linkintr:
5993 *
5994 * Helper; handle link interrupts.
5995 */
5996 static void
5997 wm_linkintr(struct wm_softc *sc, uint32_t icr)
5998 {
5999
6000 if (sc->sc_flags & WM_F_HAS_MII)
6001 wm_linkintr_gmii(sc, icr);
6002 else
6003 wm_linkintr_tbi(sc, icr);
6004 }
6005
6006 /*
6007 * wm_intr:
6008 *
6009 * Interrupt service routine.
6010 */
6011 static int
6012 wm_intr(void *arg)
6013 {
6014 struct wm_softc *sc = arg;
6015 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6016 uint32_t icr;
6017 int handled = 0;
6018
6019 while (1 /* CONSTCOND */) {
6020 icr = CSR_READ(sc, WMREG_ICR);
6021 if ((icr & sc->sc_icr) == 0)
6022 break;
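/*
 * Reading ICR acknowledged (cleared) the asserted cause
 * bits, so we keep looping until a read shows none of the
 * causes we care about still pending.
 */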
6023 rnd_add_uint32(&sc->rnd_source, icr);
6024
6025 WM_RX_LOCK(sc);
6026
6027 if (sc->sc_stopping) {
6028 WM_RX_UNLOCK(sc);
6029 break;
6030 }
6031
6032 handled = 1;
6033
6034 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6035 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
6036 DPRINTF(WM_DEBUG_RX,
6037 ("%s: RX: got Rx intr 0x%08x\n",
6038 device_xname(sc->sc_dev),
6039 icr & (ICR_RXDMT0|ICR_RXT0)));
6040 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
6041 }
6042 #endif
6043 wm_rxintr(sc);
6044
6045 WM_RX_UNLOCK(sc);
6046 WM_TX_LOCK(sc);
6047
6048 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6049 if (icr & ICR_TXDW) {
6050 DPRINTF(WM_DEBUG_TX,
6051 ("%s: TX: got TXDW interrupt\n",
6052 device_xname(sc->sc_dev)));
6053 WM_EVCNT_INCR(&sc->sc_ev_txdw);
6054 }
6055 #endif
6056 wm_txintr(sc);
6057
6058 if (icr & (ICR_LSC|ICR_RXSEQ)) {
6059 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
6060 wm_linkintr(sc, icr);
6061 }
6062
6063 WM_TX_UNLOCK(sc);
6064
6065 if (icr & ICR_RXO) {
6066 #if defined(WM_DEBUG)
6067 log(LOG_WARNING, "%s: Receive overrun\n",
6068 device_xname(sc->sc_dev));
6069 #endif /* defined(WM_DEBUG) */
6070 }
6071 }
6072
6073 if (handled) {
6074 /* Try to get more packets going. */
6075 ifp->if_start(ifp);
6076 }
6077
6078 return handled;
6079 }
6080
6081 /*
6082 * Media related.
6083 * GMII, SGMII, TBI (and SERDES)
6084 */
6085
6086 /* GMII related */
6087
6088 /*
6089 * wm_gmii_reset:
6090 *
6091 * Reset the PHY.
6092 */
6093 static void
6094 wm_gmii_reset(struct wm_softc *sc)
6095 {
6096 uint32_t reg;
6097 int rv;
6098
6099 /* get phy semaphore */
6100 switch (sc->sc_type) {
6101 case WM_T_82571:
6102 case WM_T_82572:
6103 case WM_T_82573:
6104 case WM_T_82574:
6105 case WM_T_82583:
6106 /* XXX should get sw semaphore, too */
6107 rv = wm_get_swsm_semaphore(sc);
6108 break;
6109 case WM_T_82575:
6110 case WM_T_82576:
6111 case WM_T_82580:
6112 case WM_T_I350:
6113 case WM_T_I354:
6114 case WM_T_I210:
6115 case WM_T_I211:
6116 case WM_T_80003:
6117 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6118 break;
6119 case WM_T_ICH8:
6120 case WM_T_ICH9:
6121 case WM_T_ICH10:
6122 case WM_T_PCH:
6123 case WM_T_PCH2:
6124 case WM_T_PCH_LPT:
6125 rv = wm_get_swfwhw_semaphore(sc);
6126 break;
6127 default:
6128 /* nothing to do */
6129 rv = 0;
6130 break;
6131 }
6132 if (rv != 0) {
6133 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6134 __func__);
6135 return;
6136 }
6137
6138 switch (sc->sc_type) {
6139 case WM_T_82542_2_0:
6140 case WM_T_82542_2_1:
6141 /* null */
6142 break;
6143 case WM_T_82543:
6144 /*
6145 * With the 82543, we need to force the MAC's speed and duplex
6146 * to match the PHY's speed and duplex configuration.
6147 * In addition, we need to perform a hardware reset on the PHY
6148 * to take it out of reset.
6149 */
6150 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6151 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6152
6153 /* The PHY reset pin is active-low. */
6154 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6155 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6156 CTRL_EXT_SWDPIN(4));
6157 reg |= CTRL_EXT_SWDPIO(4);
6158
6159 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6160 CSR_WRITE_FLUSH(sc);
6161 delay(10*1000);
6162
6163 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6164 CSR_WRITE_FLUSH(sc);
6165 delay(150);
6166 #if 0
6167 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6168 #endif
6169 delay(20*1000); /* XXX extra delay to get PHY ID? */
6170 break;
6171 case WM_T_82544: /* reset 10000us */
6172 case WM_T_82540:
6173 case WM_T_82545:
6174 case WM_T_82545_3:
6175 case WM_T_82546:
6176 case WM_T_82546_3:
6177 case WM_T_82541:
6178 case WM_T_82541_2:
6179 case WM_T_82547:
6180 case WM_T_82547_2:
6181 case WM_T_82571: /* reset 100us */
6182 case WM_T_82572:
6183 case WM_T_82573:
6184 case WM_T_82574:
6185 case WM_T_82575:
6186 case WM_T_82576:
6187 case WM_T_82580:
6188 case WM_T_I350:
6189 case WM_T_I354:
6190 case WM_T_I210:
6191 case WM_T_I211:
6192 case WM_T_82583:
6193 case WM_T_80003:
6194 /* generic reset */
6195 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6196 CSR_WRITE_FLUSH(sc);
6197 delay(20000);
6198 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6199 CSR_WRITE_FLUSH(sc);
6200 delay(20000);
6201
6202 if ((sc->sc_type == WM_T_82541)
6203 || (sc->sc_type == WM_T_82541_2)
6204 || (sc->sc_type == WM_T_82547)
6205 || (sc->sc_type == WM_T_82547_2)) {
6206 /* workarounds for igp are done in igp_reset() */
6207 /* XXX add code to set LED after phy reset */
6208 }
6209 break;
6210 case WM_T_ICH8:
6211 case WM_T_ICH9:
6212 case WM_T_ICH10:
6213 case WM_T_PCH:
6214 case WM_T_PCH2:
6215 case WM_T_PCH_LPT:
6216 /* generic reset */
6217 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6218 CSR_WRITE_FLUSH(sc);
6219 delay(100);
6220 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6221 CSR_WRITE_FLUSH(sc);
6222 delay(150);
6223 break;
6224 default:
6225 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6226 __func__);
6227 break;
6228 }
6229
6230 /* release PHY semaphore */
6231 switch (sc->sc_type) {
6232 case WM_T_82571:
6233 case WM_T_82572:
6234 case WM_T_82573:
6235 case WM_T_82574:
6236 case WM_T_82583:
6237 /* XXX should put sw semaphore, too */
6238 wm_put_swsm_semaphore(sc);
6239 break;
6240 case WM_T_82575:
6241 case WM_T_82576:
6242 case WM_T_82580:
6243 case WM_T_I350:
6244 case WM_T_I354:
6245 case WM_T_I210:
6246 case WM_T_I211:
6247 case WM_T_80003:
6248 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6249 break;
6250 case WM_T_ICH8:
6251 case WM_T_ICH9:
6252 case WM_T_ICH10:
6253 case WM_T_PCH:
6254 case WM_T_PCH2:
6255 case WM_T_PCH_LPT:
6256 wm_put_swfwhw_semaphore(sc);
6257 break;
6258 default:
6259 /* nothing to do */
6260 rv = 0;
6261 break;
6262 }
6263
6264 /* get_cfg_done */
6265 wm_get_cfg_done(sc);
6266
6267 /* extra setup */
6268 switch (sc->sc_type) {
6269 case WM_T_82542_2_0:
6270 case WM_T_82542_2_1:
6271 case WM_T_82543:
6272 case WM_T_82544:
6273 case WM_T_82540:
6274 case WM_T_82545:
6275 case WM_T_82545_3:
6276 case WM_T_82546:
6277 case WM_T_82546_3:
6278 case WM_T_82541_2:
6279 case WM_T_82547_2:
6280 case WM_T_82571:
6281 case WM_T_82572:
6282 case WM_T_82573:
6283 case WM_T_82574:
6284 case WM_T_82575:
6285 case WM_T_82576:
6286 case WM_T_82580:
6287 case WM_T_I350:
6288 case WM_T_I354:
6289 case WM_T_I210:
6290 case WM_T_I211:
6291 case WM_T_82583:
6292 case WM_T_80003:
6293 /* null */
6294 break;
6295 case WM_T_82541:
6296 case WM_T_82547:
6297 /* XXX Configure the LED after PHY reset */
6298 break;
6299 case WM_T_ICH8:
6300 case WM_T_ICH9:
6301 case WM_T_ICH10:
6302 case WM_T_PCH:
6303 case WM_T_PCH2:
6304 case WM_T_PCH_LPT:
6305 /* Allow time for h/w to get to a quiescent state after reset */
6306 delay(10*1000);
6307
6308 if (sc->sc_type == WM_T_PCH)
6309 wm_hv_phy_workaround_ich8lan(sc);
6310
6311 if (sc->sc_type == WM_T_PCH2)
6312 wm_lv_phy_workaround_ich8lan(sc);
6313
6314 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6315 /*
6316 * Dummy read to clear the PHY wakeup bit after LCD
6317 * reset
6318 */
6319 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6320 }
6321
6322 /*
6323 * XXX Configure the LCD with the extended configuration region
6324 * in NVM
6325 */
6326
6327 /* Configure the LCD with the OEM bits in NVM */
6328 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6329 || (sc->sc_type == WM_T_PCH_LPT)) {
6330 /*
6331 * Disable LPLU.
6332 * XXX It seems that 82567 has LPLU, too.
6333 */
6334 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6335 reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6336 reg |= HV_OEM_BITS_ANEGNOW;
6337 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6338 }
6339 break;
6340 default:
6341 panic("%s: unknown type\n", __func__);
6342 break;
6343 }
6344 }
6345
6346 /*
6347 * wm_get_phy_id_82575:
6348 *
6349 * Return PHY ID. Return -1 if it failed.
6350 */
6351 static int
6352 wm_get_phy_id_82575(struct wm_softc *sc)
6353 {
6354 uint32_t reg;
6355 int phyid = -1;
6356
6357 /* XXX */
6358 if ((sc->sc_flags & WM_F_SGMII) == 0)
6359 return -1;
6360
6361 if (wm_sgmii_uses_mdio(sc)) {
6362 switch (sc->sc_type) {
6363 case WM_T_82575:
6364 case WM_T_82576:
6365 reg = CSR_READ(sc, WMREG_MDIC);
6366 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6367 break;
6368 case WM_T_82580:
6369 case WM_T_I350:
6370 case WM_T_I354:
6371 case WM_T_I210:
6372 case WM_T_I211:
6373 reg = CSR_READ(sc, WMREG_MDICNFG);
6374 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6375 break;
6376 default:
6377 return -1;
6378 }
6379 }
6380
6381 return phyid;
6382 }
6383
6384
6385 /*
6386 * wm_gmii_mediainit:
6387 *
6388 * Initialize media for use on 1000BASE-T devices.
6389 */
6390 static void
6391 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6392 {
6393 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6394 struct mii_data *mii = &sc->sc_mii;
6395 uint32_t reg;
6396
6397 /* We have GMII. */
6398 sc->sc_flags |= WM_F_HAS_MII;
6399
6400 if (sc->sc_type == WM_T_80003)
6401 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6402 else
6403 sc->sc_tipg = TIPG_1000T_DFLT;
6404
6405 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
6406 if ((sc->sc_type == WM_T_82580)
6407 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6408 || (sc->sc_type == WM_T_I211)) {
6409 reg = CSR_READ(sc, WMREG_PHPM);
6410 reg &= ~PHPM_GO_LINK_D;
6411 CSR_WRITE(sc, WMREG_PHPM, reg);
6412 }
6413
6414 /*
6415 * Let the chip set speed/duplex on its own based on
6416 * signals from the PHY.
6417 * XXXbouyer - I'm not sure this is right for the 80003,
6418 * the em driver only sets CTRL_SLU here - but it seems to work.
6419 */
6420 sc->sc_ctrl |= CTRL_SLU;
6421 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6422
6423 /* Initialize our media structures and probe the GMII. */
6424 mii->mii_ifp = ifp;
6425
6426 /*
6427 * Determine the PHY access method.
6428 *
6429 * For SGMII, use SGMII specific method.
6430 *
6431 * For some devices, we can determine the PHY access method
6432 * from sc_type.
6433 *
6434 * For ICH and PCH variants, it's difficult to determine the PHY
6435 * access method by sc_type, so use the PCI product ID for some
6436 * devices.
6437 * For other ICH8 variants, try igp's method first. If the PHY
6438 * can't be detected that way, fall back to bm's method.
6439 */
6440 switch (prodid) {
6441 case PCI_PRODUCT_INTEL_PCH_M_LM:
6442 case PCI_PRODUCT_INTEL_PCH_M_LC:
6443 /* 82577 */
6444 sc->sc_phytype = WMPHY_82577;
6445 break;
6446 case PCI_PRODUCT_INTEL_PCH_D_DM:
6447 case PCI_PRODUCT_INTEL_PCH_D_DC:
6448 /* 82578 */
6449 sc->sc_phytype = WMPHY_82578;
6450 break;
6451 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6452 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6453 /* 82579 */
6454 sc->sc_phytype = WMPHY_82579;
6455 break;
6456 case PCI_PRODUCT_INTEL_82801I_BM:
6457 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6458 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6459 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6460 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6461 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6462 /* 82567 */
6463 sc->sc_phytype = WMPHY_BM;
6464 mii->mii_readreg = wm_gmii_bm_readreg;
6465 mii->mii_writereg = wm_gmii_bm_writereg;
6466 break;
6467 default:
6468 if (((sc->sc_flags & WM_F_SGMII) != 0)
6469 && !wm_sgmii_uses_mdio(sc)) {
6470 mii->mii_readreg = wm_sgmii_readreg;
6471 mii->mii_writereg = wm_sgmii_writereg;
6472 } else if (sc->sc_type >= WM_T_80003) {
6473 mii->mii_readreg = wm_gmii_i80003_readreg;
6474 mii->mii_writereg = wm_gmii_i80003_writereg;
6475 } else if (sc->sc_type >= WM_T_I210) {
6476 mii->mii_readreg = wm_gmii_i82544_readreg;
6477 mii->mii_writereg = wm_gmii_i82544_writereg;
6478 } else if (sc->sc_type >= WM_T_82580) {
6479 sc->sc_phytype = WMPHY_82580;
6480 mii->mii_readreg = wm_gmii_82580_readreg;
6481 mii->mii_writereg = wm_gmii_82580_writereg;
6482 } else if (sc->sc_type >= WM_T_82544) {
6483 mii->mii_readreg = wm_gmii_i82544_readreg;
6484 mii->mii_writereg = wm_gmii_i82544_writereg;
6485 } else {
6486 mii->mii_readreg = wm_gmii_i82543_readreg;
6487 mii->mii_writereg = wm_gmii_i82543_writereg;
6488 }
6489 break;
6490 }
6491 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
6492 /* All PCH* use _hv_ */
6493 mii->mii_readreg = wm_gmii_hv_readreg;
6494 mii->mii_writereg = wm_gmii_hv_writereg;
6495 }
6496 mii->mii_statchg = wm_gmii_statchg;
6497
6498 wm_gmii_reset(sc);
6499
6500 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6501 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6502 wm_gmii_mediastatus);
6503
6504 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6505 || (sc->sc_type == WM_T_82580)
6506 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6507 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6508 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6509 /* Attach only one port */
6510 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6511 MII_OFFSET_ANY, MIIF_DOPAUSE);
6512 } else {
6513 int i, id;
6514 uint32_t ctrl_ext;
6515
6516 id = wm_get_phy_id_82575(sc);
6517 if (id != -1) {
6518 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6519 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6520 }
6521 if ((id == -1)
6522 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6523 /* Power on sgmii phy if it is disabled */
6524 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6525 CSR_WRITE(sc, WMREG_CTRL_EXT,
6526 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6527 CSR_WRITE_FLUSH(sc);
6528 delay(300*1000); /* XXX too long */
6529
6530 /* Probe PHY addresses 1 through 7 */
6531 for (i = 1; i < 8; i++)
6532 mii_attach(sc->sc_dev, &sc->sc_mii,
6533 0xffffffff, i, MII_OFFSET_ANY,
6534 MIIF_DOPAUSE);
6535
6536 /* restore previous sfp cage power state */
6537 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6538 }
6539 }
6540 } else {
6541 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6542 MII_OFFSET_ANY, MIIF_DOPAUSE);
6543 }
6544
6545 /*
6546 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6547 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6548 */
6549 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6550 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6551 wm_set_mdio_slow_mode_hv(sc);
6552 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6553 MII_OFFSET_ANY, MIIF_DOPAUSE);
6554 }
6555
6556 /*
6557 * (For ICH8 variants)
6558 * If PHY detection failed, use BM's r/w function and retry.
6559 */
6560 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6561 /* if failed, retry with *_bm_* */
6562 mii->mii_readreg = wm_gmii_bm_readreg;
6563 mii->mii_writereg = wm_gmii_bm_writereg;
6564
6565 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6566 MII_OFFSET_ANY, MIIF_DOPAUSE);
6567 }
6568
6569 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6570 /* No PHY was found */
6571 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6572 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6573 sc->sc_phytype = WMPHY_NONE;
6574 } else {
6575 /*
6576 * PHY Found!
6577 * Check PHY type.
6578 */
6579 uint32_t model;
6580 struct mii_softc *child;
6581
6582 child = LIST_FIRST(&mii->mii_phys);
6583 if (device_is_a(child->mii_dev, "igphy")) {
6584 struct igphy_softc *isc = (struct igphy_softc *)child;
6585
6586 model = isc->sc_mii.mii_mpd_model;
6587 if (model == MII_MODEL_yyINTEL_I82566)
6588 sc->sc_phytype = WMPHY_IGP_3;
6589 }
6590
6591 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6592 }
6593 }
6594
6595 /*
6596 * wm_gmii_mediastatus: [ifmedia interface function]
6597 *
6598 * Get the current interface media status on a 1000BASE-T device.
6599 */
6600 static void
6601 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6602 {
6603 struct wm_softc *sc = ifp->if_softc;
6604
6605 ether_mediastatus(ifp, ifmr);
6606 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6607 | sc->sc_flowflags;
6608 }
6609
6610 /*
6611 * wm_gmii_mediachange: [ifmedia interface function]
6612 *
6613 * Set hardware to newly-selected media on a 1000BASE-T device.
6614 */
6615 static int
6616 wm_gmii_mediachange(struct ifnet *ifp)
6617 {
6618 struct wm_softc *sc = ifp->if_softc;
6619 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6620 int rc;
6621
6622 if ((ifp->if_flags & IFF_UP) == 0)
6623 return 0;
6624
6625 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6626 sc->sc_ctrl |= CTRL_SLU;
6627 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6628 || (sc->sc_type > WM_T_82543)) {
6629 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6630 } else {
6631 sc->sc_ctrl &= ~CTRL_ASDE;
6632 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6633 if (ife->ifm_media & IFM_FDX)
6634 sc->sc_ctrl |= CTRL_FD;
6635 switch (IFM_SUBTYPE(ife->ifm_media)) {
6636 case IFM_10_T:
6637 sc->sc_ctrl |= CTRL_SPEED_10;
6638 break;
6639 case IFM_100_TX:
6640 sc->sc_ctrl |= CTRL_SPEED_100;
6641 break;
6642 case IFM_1000_T:
6643 sc->sc_ctrl |= CTRL_SPEED_1000;
6644 break;
6645 default:
6646 panic("wm_gmii_mediachange: bad media 0x%x",
6647 ife->ifm_media);
6648 }
6649 }
6650 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6651 if (sc->sc_type <= WM_T_82543)
6652 wm_gmii_reset(sc);
6653
6654 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6655 return 0;
6656 return rc;
6657 }
6658
6659 #define MDI_IO CTRL_SWDPIN(2)
6660 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6661 #define MDI_CLK CTRL_SWDPIN(3)
6662
6663 static void
6664 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6665 {
6666 uint32_t i, v;
6667
6668 v = CSR_READ(sc, WMREG_CTRL);
6669 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6670 v |= MDI_DIR | CTRL_SWDPIO(3);
6671
6672 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6673 if (data & i)
6674 v |= MDI_IO;
6675 else
6676 v &= ~MDI_IO;
6677 CSR_WRITE(sc, WMREG_CTRL, v);
6678 CSR_WRITE_FLUSH(sc);
6679 delay(10);
6680 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6681 CSR_WRITE_FLUSH(sc);
6682 delay(10);
6683 CSR_WRITE(sc, WMREG_CTRL, v);
6684 CSR_WRITE_FLUSH(sc);
6685 delay(10);
6686 }
6687 }
6688
6689 static uint32_t
6690 wm_i82543_mii_recvbits(struct wm_softc *sc)
6691 {
6692 uint32_t v, i, data = 0;
6693
6694 v = CSR_READ(sc, WMREG_CTRL);
6695 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6696 v |= CTRL_SWDPIO(3);
6697
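/*
 * MDIO is left tristated here (MDI_DIR is deliberately not set,
 * so the data pin is an input) while we clock out the turnaround
 * cycle below and then sample 16 data bits, one on each rising
 * edge of MDI_CLK.
 */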
6698 CSR_WRITE(sc, WMREG_CTRL, v);
6699 CSR_WRITE_FLUSH(sc);
6700 delay(10);
6701 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6702 CSR_WRITE_FLUSH(sc);
6703 delay(10);
6704 CSR_WRITE(sc, WMREG_CTRL, v);
6705 CSR_WRITE_FLUSH(sc);
6706 delay(10);
6707
6708 for (i = 0; i < 16; i++) {
6709 data <<= 1;
6710 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6711 CSR_WRITE_FLUSH(sc);
6712 delay(10);
6713 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6714 data |= 1;
6715 CSR_WRITE(sc, WMREG_CTRL, v);
6716 CSR_WRITE_FLUSH(sc);
6717 delay(10);
6718 }
6719
6720 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6721 CSR_WRITE_FLUSH(sc);
6722 delay(10);
6723 CSR_WRITE(sc, WMREG_CTRL, v);
6724 CSR_WRITE_FLUSH(sc);
6725 delay(10);
6726
6727 return data;
6728 }
6729
6730 #undef MDI_IO
6731 #undef MDI_DIR
6732 #undef MDI_CLK
6733
6734 /*
6735 * wm_gmii_i82543_readreg: [mii interface function]
6736 *
6737 * Read a PHY register on the GMII (i82543 version).
6738 */
6739 static int
6740 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6741 {
6742 struct wm_softc *sc = device_private(self);
6743 int rv;
6744
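/*
 * Bit-bang an IEEE 802.3 clause 22 read frame: a 32-bit preamble
 * of all ones, followed by the 14 bits holding start (01), the
 * read opcode (10), the 5-bit PHY address and the 5-bit register
 * address, MSB first. The turnaround and the 16 data bits are
 * handled by wm_i82543_mii_recvbits().
 */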
6745 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6746 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
6747 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6748 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
6749
6750 DPRINTF(WM_DEBUG_GMII,
6751 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6752 device_xname(sc->sc_dev), phy, reg, rv));
6753
6754 return rv;
6755 }
6756
6757 /*
6758 * wm_gmii_i82543_writereg: [mii interface function]
6759 *
6760 * Write a PHY register on the GMII (i82543 version).
6761 */
6762 static void
6763 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6764 {
6765 struct wm_softc *sc = device_private(self);
6766
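/*
 * Bit-bang a clause 22 write frame: a 32-bit preamble of all
 * ones, then all 32 remaining bits in one go: start (01), the
 * write opcode (01), the PHY address, the register address, the
 * turnaround pattern (10) and the 16 data bits.
 */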
6767 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6768 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6769 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6770 (MII_COMMAND_START << 30), 32);
6771 }
6772
6773 /*
6774 * wm_gmii_i82544_readreg: [mii interface function]
6775 *
6776 * Read a PHY register on the GMII.
6777 */
6778 static int
6779 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6780 {
6781 struct wm_softc *sc = device_private(self);
6782 uint32_t mdic = 0;
6783 int i, rv;
6784
6785 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6786 MDIC_REGADD(reg));
6787
6788 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6789 mdic = CSR_READ(sc, WMREG_MDIC);
6790 if (mdic & MDIC_READY)
6791 break;
6792 delay(50);
6793 }
6794
6795 if ((mdic & MDIC_READY) == 0) {
6796 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6797 device_xname(sc->sc_dev), phy, reg);
6798 rv = 0;
6799 } else if (mdic & MDIC_E) {
6800 #if 0 /* This is normal if no PHY is present. */
6801 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6802 device_xname(sc->sc_dev), phy, reg);
6803 #endif
6804 rv = 0;
6805 } else {
6806 rv = MDIC_DATA(mdic);
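/* All-ones data usually means that no PHY responded. */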
6807 if (rv == 0xffff)
6808 rv = 0;
6809 }
6810
6811 return rv;
6812 }
6813
6814 /*
6815 * wm_gmii_i82544_writereg: [mii interface function]
6816 *
6817 * Write a PHY register on the GMII.
6818 */
6819 static void
6820 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6821 {
6822 struct wm_softc *sc = device_private(self);
6823 uint32_t mdic = 0;
6824 int i;
6825
6826 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6827 MDIC_REGADD(reg) | MDIC_DATA(val));
6828
6829 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6830 mdic = CSR_READ(sc, WMREG_MDIC);
6831 if (mdic & MDIC_READY)
6832 break;
6833 delay(50);
6834 }
6835
6836 if ((mdic & MDIC_READY) == 0)
6837 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6838 device_xname(sc->sc_dev), phy, reg);
6839 else if (mdic & MDIC_E)
6840 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6841 device_xname(sc->sc_dev), phy, reg);
6842 }
6843
6844 /*
6845 * wm_gmii_i80003_readreg: [mii interface function]
6846 *
6847 * Read a PHY register on the kumeran
6848 * This could be handled by the PHY layer if we didn't have to lock the
6849 * resource ...
6850 */
6851 static int
6852 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6853 {
6854 struct wm_softc *sc = device_private(self);
6855 int sem;
6856 int rv;
6857
6858 if (phy != 1) /* only one PHY on kumeran bus */
6859 return 0;
6860
6861 sem = swfwphysem[sc->sc_funcid];
6862 if (wm_get_swfw_semaphore(sc, sem)) {
6863 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6864 __func__);
6865 return 0;
6866 }
6867
6868 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6869 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6870 reg >> GG82563_PAGE_SHIFT);
6871 } else {
6872 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6873 reg >> GG82563_PAGE_SHIFT);
6874 }
6875 /* Wait another 200us to work around a bug with the ready bit in the MDIC register */
6876 delay(200);
6877 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6878 delay(200);
6879
6880 wm_put_swfw_semaphore(sc, sem);
6881 return rv;
6882 }
6883
6884 /*
6885 * wm_gmii_i80003_writereg: [mii interface function]
6886 *
6887 * Write a PHY register on the kumeran.
6888 * This could be handled by the PHY layer if we didn't have to lock the
6889 * ressource ...
6890 */
6891 static void
6892 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6893 {
6894 struct wm_softc *sc = device_private(self);
6895 int sem;
6896
6897 if (phy != 1) /* only one PHY on kumeran bus */
6898 return;
6899
6900 sem = swfwphysem[sc->sc_funcid];
6901 if (wm_get_swfw_semaphore(sc, sem)) {
6902 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6903 __func__);
6904 return;
6905 }
6906
6907 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6908 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6909 reg >> GG82563_PAGE_SHIFT);
6910 } else {
6911 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6912 reg >> GG82563_PAGE_SHIFT);
6913 }
6914 /* Wait another 200us to work around a bug with the ready bit in the MDIC register */
6915 delay(200);
6916 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6917 delay(200);
6918
6919 wm_put_swfw_semaphore(sc, sem);
6920 }
6921
6922 /*
6923 * wm_gmii_bm_readreg: [mii interface function]
6924 *
6925 * Read a PHY register on the BM PHY (e.g. 82567).
6926 * This could be handled by the PHY layer if we didn't have to lock the
6927 * resource ...
6928 */
6929 static int
6930 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6931 {
6932 struct wm_softc *sc = device_private(self);
6933 int sem;
6934 int rv;
6935
6936 sem = swfwphysem[sc->sc_funcid];
6937 if (wm_get_swfw_semaphore(sc, sem)) {
6938 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6939 __func__);
6940 return 0;
6941 }
6942
6943 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6944 if (phy == 1)
6945 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6946 reg);
6947 else
6948 wm_gmii_i82544_writereg(self, phy,
6949 GG82563_PHY_PAGE_SELECT,
6950 reg >> GG82563_PAGE_SHIFT);
6951 }
6952
6953 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6954 wm_put_swfw_semaphore(sc, sem);
6955 return rv;
6956 }
6957
6958 /*
6959 * wm_gmii_bm_writereg: [mii interface function]
6960 *
6961 * Write a PHY register on the BM PHY (e.g. 82567).
6962 * This could be handled by the PHY layer if we didn't have to lock the
6963 * resource ...
6964 */
6965 static void
6966 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6967 {
6968 struct wm_softc *sc = device_private(self);
6969 int sem;
6970
6971 sem = swfwphysem[sc->sc_funcid];
6972 if (wm_get_swfw_semaphore(sc, sem)) {
6973 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6974 __func__);
6975 return;
6976 }
6977
6978 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6979 if (phy == 1)
6980 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6981 reg);
6982 else
6983 wm_gmii_i82544_writereg(self, phy,
6984 GG82563_PHY_PAGE_SELECT,
6985 reg >> GG82563_PAGE_SHIFT);
6986 }
6987
6988 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6989 wm_put_swfw_semaphore(sc, sem);
6990 }
6991
6992 static void
6993 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6994 {
6995 struct wm_softc *sc = device_private(self);
6996 uint16_t regnum = BM_PHY_REG_NUM(offset);
6997 uint16_t wuce;
6998
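/*
 * Accessing a wakeup (page 800) register takes several steps:
 * switch to page 769, enable host wakeup-register access via
 * BM_WUC_ENABLE_REG, switch to page 800, write the target
 * register number to the address opcode register, move the data
 * through the data opcode register, and finally restore the
 * original enable bits.
 */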
6999 /* XXX Gig must be disabled for MDIO accesses to page 800 */
7000 if (sc->sc_type == WM_T_PCH) {
7001 /* XXX the e1000 driver does nothing here... why? */
7002 }
7003
7004 /* Set page 769 */
7005 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7006 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7007
7008 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
7009
7010 wuce &= ~BM_WUC_HOST_WU_BIT;
7011 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
7012 wuce | BM_WUC_ENABLE_BIT);
7013
7014 /* Select page 800 */
7015 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7016 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
7017
7018 /* Write page 800 */
7019 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
7020
7021 if (rd)
7022 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7023 else
7024 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7025
7026 /* Set page 769 */
7027 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7028 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7029
7030 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7031 }
7032
7033 /*
7034 * wm_gmii_hv_readreg: [mii interface function]
7035 *
7036 * Read a PHY register on the HV (PCH family) PHY.
7037 * This could be handled by the PHY layer if we didn't have to lock the
7038 * resource ...
7039 */
7040 static int
7041 wm_gmii_hv_readreg(device_t self, int phy, int reg)
7042 {
7043 struct wm_softc *sc = device_private(self);
7044 uint16_t page = BM_PHY_REG_PAGE(reg);
7045 uint16_t regnum = BM_PHY_REG_NUM(reg);
7046 uint16_t val;
7047 int rv;
7048
7049 if (wm_get_swfwhw_semaphore(sc)) {
7050 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7051 __func__);
7052 return 0;
7053 }
7054
7055 /* XXX Workaround failure in MDIO access while cable is disconnected */
7056 if (sc->sc_phytype == WMPHY_82577) {
7057 /* XXX must write */
7058 }
7059
7060 /* Page 800 works differently than the rest so it has its own func */
7061 if (page == BM_WUC_PAGE) {
7062 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
7063 return val;
7064 }
7065
7066 /*
7067 * Pages lower than 768 work differently from the rest, so they
7068 * would need their own function; this is currently unhandled.
7069 */
7070 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7071 printf("gmii_hv_readreg!!!\n");
7072 return 0;
7073 }
7074
7075 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7076 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7077 page << BME1000_PAGE_SHIFT);
7078 }
7079
7080 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7081 wm_put_swfwhw_semaphore(sc);
7082 return rv;
7083 }
7084
7085 /*
7086 * wm_gmii_hv_writereg: [mii interface function]
7087 *
7088 * Write a PHY register on the HV (PCH family) PHY.
7089 * This could be handled by the PHY layer if we didn't have to lock the
7090 * resource ...
7091 */
7092 static void
7093 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7094 {
7095 struct wm_softc *sc = device_private(self);
7096 uint16_t page = BM_PHY_REG_PAGE(reg);
7097 uint16_t regnum = BM_PHY_REG_NUM(reg);
7098
7099 if (wm_get_swfwhw_semaphore(sc)) {
7100 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7101 __func__);
7102 return;
7103 }
7104
7105 /* XXX Workaround failure in MDIO access while cable is disconnected */
7106
7107 /* Page 800 works differently than the rest so it has its own func */
7108 if (page == BM_WUC_PAGE) {
7109 uint16_t tmp;
7110
7111 tmp = val;
7112 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
7113 return;
7114 }
7115
7116 /*
7117 * Pages lower than 768 work differently from the rest, so they
7118 * would need their own function; this is currently unhandled.
7119 */
7120 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7121 printf("gmii_hv_writereg!!!\n");
7122 return;
7123 }
7124
7125 /*
7126 * XXX Workaround MDIO accesses being disabled after entering IEEE
7127 * Power Down (whenever bit 11 of the PHY control register is set)
7128 */
7129
7130 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7131 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7132 page << BME1000_PAGE_SHIFT);
7133 }
7134
7135 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7136 wm_put_swfwhw_semaphore(sc);
7137 }
7138
7139 /*
7140 * wm_gmii_82580_readreg: [mii interface function]
7141 *
7142 * Read a PHY register on the 82580 and I350.
7143 * This could be handled by the PHY layer if we didn't have to lock the
7144 * resource ...
7145 */
7146 static int
7147 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7148 {
7149 struct wm_softc *sc = device_private(self);
7150 int sem;
7151 int rv;
7152
7153 sem = swfwphysem[sc->sc_funcid];
7154 if (wm_get_swfw_semaphore(sc, sem)) {
7155 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7156 __func__);
7157 return 0;
7158 }
7159
7160 rv = wm_gmii_i82544_readreg(self, phy, reg);
7161
7162 wm_put_swfw_semaphore(sc, sem);
7163 return rv;
7164 }
7165
7166 /*
7167 * wm_gmii_82580_writereg: [mii interface function]
7168 *
7169 * Write a PHY register on the 82580 and I350.
7170 * This could be handled by the PHY layer if we didn't have to lock the
7171 * resource ...
7172 */
7173 static void
7174 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7175 {
7176 struct wm_softc *sc = device_private(self);
7177 int sem;
7178
7179 sem = swfwphysem[sc->sc_funcid];
7180 if (wm_get_swfw_semaphore(sc, sem)) {
7181 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7182 __func__);
7183 return;
7184 }
7185
7186 wm_gmii_i82544_writereg(self, phy, reg, val);
7187
7188 wm_put_swfw_semaphore(sc, sem);
7189 }
7190
7191 /*
7192 * wm_gmii_statchg: [mii interface function]
7193 *
7194 * Callback from MII layer when media changes.
7195 */
7196 static void
7197 wm_gmii_statchg(struct ifnet *ifp)
7198 {
7199 struct wm_softc *sc = ifp->if_softc;
7200 struct mii_data *mii = &sc->sc_mii;
7201
7202 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7203 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7204 sc->sc_fcrtl &= ~FCRTL_XONE;
7205
7206 /*
7207 * Get flow control negotiation result.
7208 */
7209 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7210 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7211 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7212 mii->mii_media_active &= ~IFM_ETH_FMASK;
7213 }
7214
7215 if (sc->sc_flowflags & IFM_FLOW) {
7216 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7217 sc->sc_ctrl |= CTRL_TFCE;
7218 sc->sc_fcrtl |= FCRTL_XONE;
7219 }
7220 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7221 sc->sc_ctrl |= CTRL_RFCE;
7222 }
7223
7224 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7225 DPRINTF(WM_DEBUG_LINK,
7226 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7227 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7228 } else {
7229 DPRINTF(WM_DEBUG_LINK,
7230 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7231 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7232 }
7233
7234 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7235 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7236 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7237 : WMREG_FCRTL, sc->sc_fcrtl);
7238 if (sc->sc_type == WM_T_80003) {
7239 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7240 case IFM_1000_T:
7241 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7242 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7243 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7244 break;
7245 default:
7246 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7247 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7248 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7249 break;
7250 }
7251 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7252 }
7253 }
7254
7255 /*
7256 * wm_kmrn_readreg:
7257 *
7258 * Read a kumeran register
7259 */
7260 static int
7261 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7262 {
7263 int rv;
7264
7265 if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
7266 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7267 aprint_error_dev(sc->sc_dev,
7268 "%s: failed to get semaphore\n", __func__);
7269 return 0;
7270 }
7271 } else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
7272 if (wm_get_swfwhw_semaphore(sc)) {
7273 aprint_error_dev(sc->sc_dev,
7274 "%s: failed to get semaphore\n", __func__);
7275 return 0;
7276 }
7277 }
7278
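/*
 * A Kumeran read is indirect: write the register offset with the
 * read-enable bit (KUMCTRLSTA_REN) set, give the hardware 2us,
 * then read the data back through the same KUMCTRLSTA register.
 */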
7279 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7280 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7281 KUMCTRLSTA_REN);
7282 CSR_WRITE_FLUSH(sc);
7283 delay(2);
7284
7285 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7286
7287 if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
7288 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7289 else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
7290 wm_put_swfwhw_semaphore(sc);
7291
7292 return rv;
7293 }
7294
7295 /*
7296 * wm_kmrn_writereg:
7297 *
7298 * Write a kumeran register
7299 */
7300 static void
7301 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7302 {
7303
7304 if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
7305 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7306 aprint_error_dev(sc->sc_dev,
7307 "%s: failed to get semaphore\n", __func__);
7308 return;
7309 }
7310 } else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
7311 if (wm_get_swfwhw_semaphore(sc)) {
7312 aprint_error_dev(sc->sc_dev,
7313 "%s: failed to get semaphore\n", __func__);
7314 return;
7315 }
7316 }
7317
7318 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7319 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7320 (val & KUMCTRLSTA_MASK));
7321
7322 if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
7323 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7324 else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
7325 wm_put_swfwhw_semaphore(sc);
7326 }
7327
7328 /* SGMII related */
7329
7330 /*
7331 * wm_sgmii_uses_mdio
7332 *
7333 * Check whether the transaction is to the internal PHY or the external
7334 * MDIO interface. Return true if it's MDIO.
7335 */
7336 static bool
7337 wm_sgmii_uses_mdio(struct wm_softc *sc)
7338 {
7339 uint32_t reg;
7340 bool ismdio = false;
7341
7342 switch (sc->sc_type) {
7343 case WM_T_82575:
7344 case WM_T_82576:
7345 reg = CSR_READ(sc, WMREG_MDIC);
7346 ismdio = ((reg & MDIC_DEST) != 0);
7347 break;
7348 case WM_T_82580:
7349 case WM_T_I350:
7350 case WM_T_I354:
7351 case WM_T_I210:
7352 case WM_T_I211:
7353 reg = CSR_READ(sc, WMREG_MDICNFG);
7354 ismdio = ((reg & MDICNFG_DEST) != 0);
7355 break;
7356 default:
7357 break;
7358 }
7359
7360 return ismdio;
7361 }
7362
7363 /*
7364 * wm_sgmii_readreg: [mii interface function]
7365 *
7366 * Read a PHY register on the SGMII
7367 * This could be handled by the PHY layer if we didn't have to lock the
7368 * resource ...
7369 */
7370 static int
7371 wm_sgmii_readreg(device_t self, int phy, int reg)
7372 {
7373 struct wm_softc *sc = device_private(self);
7374 uint32_t i2ccmd;
7375 int i, rv;
7376
7377 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7378 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7379 __func__);
7380 return 0;
7381 }
7382
7383 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7384 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7385 | I2CCMD_OPCODE_READ;
7386 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7387
7388 /* Poll the ready bit */
7389 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7390 delay(50);
7391 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7392 if (i2ccmd & I2CCMD_READY)
7393 break;
7394 }
7395 if ((i2ccmd & I2CCMD_READY) == 0)
7396 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7397 if ((i2ccmd & I2CCMD_ERROR) != 0)
7398 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7399
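/*
 * The I2CCMD data field is byte-swapped relative to host order
 * (see the matching swap in wm_sgmii_writereg()), so swap the
 * two bytes back before returning the value.
 */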
7400 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7401
7402 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7403 return rv;
7404 }
7405
7406 /*
7407 * wm_sgmii_writereg: [mii interface function]
7408 *
7409 * Write a PHY register on the SGMII.
7410 * This could be handled by the PHY layer if we didn't have to lock the
7411 * resource ...
7412 */
7413 static void
7414 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7415 {
7416 struct wm_softc *sc = device_private(self);
7417 uint32_t i2ccmd;
7418 int i;
7419 int val_swapped;
7420
7421 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7422 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7423 __func__);
7424 return;
7425 }
7426 /* Swap the data bytes for the I2C interface */
7427 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
7428 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7429 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7430 | I2CCMD_OPCODE_WRITE | val_swapped;
7431 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7432
7433 /* Poll the ready bit */
7434 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7435 delay(50);
7436 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7437 if (i2ccmd & I2CCMD_READY)
7438 break;
7439 }
7440 if ((i2ccmd & I2CCMD_READY) == 0)
7441 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7442 if ((i2ccmd & I2CCMD_ERROR) != 0)
7443 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7444
7445 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7446 }
7447
7448 /* TBI related */
7449
7450 /* XXX Currently TBI only */
7451 static int
7452 wm_check_for_link(struct wm_softc *sc)
7453 {
7454 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7455 uint32_t rxcw;
7456 uint32_t ctrl;
7457 uint32_t status;
7458 uint32_t sig;
7459
7460 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
7461 sc->sc_tbi_linkup = 1;
7462 return 0;
7463 }
7464
7465 rxcw = CSR_READ(sc, WMREG_RXCW);
7466 ctrl = CSR_READ(sc, WMREG_CTRL);
7467 status = CSR_READ(sc, WMREG_STATUS);
7468
7469 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
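/*
 * The sense of the signal-detect pin (SWDPIN 1) is presumably
 * inverted on the 82544 and earlier, so compute the pin value
 * that means "signal present" for this chip.
 */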
7470
7471 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7472 device_xname(sc->sc_dev), __func__,
7473 ((ctrl & CTRL_SWDPIN(1)) == sig),
7474 ((status & STATUS_LU) != 0),
7475 ((rxcw & RXCW_C) != 0)
7476 ));
7477
7478 /*
7479 * SWDPIN LU RXCW
7480 * 0 0 0
7481 * 0 0 1 (should not happen)
7482 * 0 1 0 (should not happen)
7483 * 0 1 1 (should not happen)
7484 * 1 0 0 Disable autonego and force linkup
7485 * 1 0 1 got /C/ but not linkup yet
7486 * 1 1 0 (linkup)
7487 * 1 1 1 If IFM_AUTO, back to autonego
7488 *
7489 */
7490 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7491 && ((status & STATUS_LU) == 0)
7492 && ((rxcw & RXCW_C) == 0)) {
7493 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7494 __func__));
7495 sc->sc_tbi_linkup = 0;
7496 /* Disable auto-negotiation in the TXCW register */
7497 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7498
7499 /*
7500 * Force link-up and also force full-duplex.
7501 *
7502 * NOTE: the hardware updated TFCE and RFCE in CTRL
7503 * automatically, so base sc->sc_ctrl on the value read above.
7504 */
7505 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7506 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7507 } else if (((status & STATUS_LU) != 0)
7508 && ((rxcw & RXCW_C) != 0)
7509 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7510 sc->sc_tbi_linkup = 1;
7511 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7512 __func__));
7513 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7514 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7515 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7516 && ((rxcw & RXCW_C) != 0)) {
7517 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7518 } else {
7519 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7520 status));
7521 }
7522
7523 return 0;
7524 }
7525
7526 /*
7527 * wm_tbi_mediainit:
7528 *
7529 * Initialize media for use on 1000BASE-X devices.
7530 */
7531 static void
7532 wm_tbi_mediainit(struct wm_softc *sc)
7533 {
7534 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7535 const char *sep = "";
7536
7537 if (sc->sc_type < WM_T_82543)
7538 sc->sc_tipg = TIPG_WM_DFLT;
7539 else
7540 sc->sc_tipg = TIPG_LG_DFLT;
7541
7542 sc->sc_tbi_anegticks = 5;
7543
7544 /* Initialize our media structures */
7545 sc->sc_mii.mii_ifp = ifp;
7546
7547 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7548 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
7549 wm_tbi_mediastatus);
7550
7551 /*
7552 * SWD Pins:
7553 *
7554 * 0 = Link LED (output)
7555 * 1 = Loss Of Signal (input)
7556 */
7557 sc->sc_ctrl |= CTRL_SWDPIO(0);
7558 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
7559 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7560 sc->sc_ctrl &= ~CTRL_LRST;
7561
7562 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7563
7564 #define ADD(ss, mm, dd) \
7565 do { \
7566 aprint_normal("%s%s", sep, ss); \
7567 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
7568 sep = ", "; \
7569 } while (/*CONSTCOND*/0)
7570
7571 aprint_normal_dev(sc->sc_dev, "");
7572
7573 /* Only 82545 is LX */
7574 if (sc->sc_type == WM_T_82545) {
7575 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
7576 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
7577 } else {
7578 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
7579 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
7580 }
7581 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
7582 aprint_normal("\n");
7583
7584 #undef ADD
7585
7586 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
7587 }
7588
7589 /*
7590 * wm_tbi_mediastatus: [ifmedia interface function]
7591 *
7592 * Get the current interface media status on a 1000BASE-X device.
7593 */
7594 static void
7595 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7596 {
7597 struct wm_softc *sc = ifp->if_softc;
7598 uint32_t ctrl, status;
7599
7600 ifmr->ifm_status = IFM_AVALID;
7601 ifmr->ifm_active = IFM_ETHER;
7602
7603 status = CSR_READ(sc, WMREG_STATUS);
7604 if ((status & STATUS_LU) == 0) {
7605 ifmr->ifm_active |= IFM_NONE;
7606 return;
7607 }
7608
7609 ifmr->ifm_status |= IFM_ACTIVE;
7610 /* Only 82545 is LX */
7611 if (sc->sc_type == WM_T_82545)
7612 ifmr->ifm_active |= IFM_1000_LX;
7613 else
7614 ifmr->ifm_active |= IFM_1000_SX;
7615 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
7616 ifmr->ifm_active |= IFM_FDX;
7617 else
7618 ifmr->ifm_active |= IFM_HDX;
7619 ctrl = CSR_READ(sc, WMREG_CTRL);
7620 if (ctrl & CTRL_RFCE)
7621 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
7622 if (ctrl & CTRL_TFCE)
7623 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
7624 }
7625
7626 /*
7627 * wm_tbi_mediachange: [ifmedia interface function]
7628 *
7629 * Set hardware to newly-selected media on a 1000BASE-X device.
7630 */
7631 static int
7632 wm_tbi_mediachange(struct ifnet *ifp)
7633 {
7634 struct wm_softc *sc = ifp->if_softc;
7635 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7636 uint32_t status;
7637 int i;
7638
7639 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7640 return 0;
7641
7642 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
7643 || (sc->sc_type >= WM_T_82575))
7644 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
7645
7646 /* XXX power_up_serdes_link_82575() */
7647
7648 sc->sc_ctrl &= ~CTRL_LRST;
7649 sc->sc_txcw = TXCW_ANE;
7650 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7651 sc->sc_txcw |= TXCW_FD | TXCW_HD;
7652 else if (ife->ifm_media & IFM_FDX)
7653 sc->sc_txcw |= TXCW_FD;
7654 else
7655 sc->sc_txcw |= TXCW_HD;
7656
7657 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
7658 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
7659
7660 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
7661 device_xname(sc->sc_dev), sc->sc_txcw));
7662 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7663 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7664 CSR_WRITE_FLUSH(sc);
7665 delay(1000);
7666
7667 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
7668 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
7669
	/*
	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be
	 * set if the optics detect a signal, and 0 if they don't.  On
	 * the 82544 and earlier the pin reports Loss Of Signal, so 0
	 * means a signal is present.
	 */
7674 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
7675 /* Have signal; wait for the link to come up. */
7676 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
7677 delay(10000);
7678 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
7679 break;
7680 }
7681
7682 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
7683 device_xname(sc->sc_dev),i));
7684
7685 status = CSR_READ(sc, WMREG_STATUS);
7686 DPRINTF(WM_DEBUG_LINK,
7687 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
7688 device_xname(sc->sc_dev),status, STATUS_LU));
7689 if (status & STATUS_LU) {
7690 /* Link is up. */
7691 DPRINTF(WM_DEBUG_LINK,
7692 ("%s: LINK: set media -> link up %s\n",
7693 device_xname(sc->sc_dev),
7694 (status & STATUS_FD) ? "FDX" : "HDX"));
7695
			/*
			 * NOTE: the hardware may update TFCE and RFCE in
			 * CTRL automatically, so re-read the register to
			 * keep sc->sc_ctrl in sync.
			 */
7700 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7701 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7702 sc->sc_fcrtl &= ~FCRTL_XONE;
7703 if (status & STATUS_FD)
7704 sc->sc_tctl |=
7705 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7706 else
7707 sc->sc_tctl |=
7708 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7709 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
7710 sc->sc_fcrtl |= FCRTL_XONE;
7711 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7712 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7713 WMREG_OLD_FCRTL : WMREG_FCRTL,
7714 sc->sc_fcrtl);
7715 sc->sc_tbi_linkup = 1;
7716 } else {
7717 if (i == WM_LINKUP_TIMEOUT)
7718 wm_check_for_link(sc);
7719 /* Link is down. */
7720 DPRINTF(WM_DEBUG_LINK,
7721 ("%s: LINK: set media -> link down\n",
7722 device_xname(sc->sc_dev)));
7723 sc->sc_tbi_linkup = 0;
7724 }
7725 } else {
7726 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
7727 device_xname(sc->sc_dev)));
7728 sc->sc_tbi_linkup = 0;
7729 }
7730
7731 wm_tbi_set_linkled(sc);
7732
7733 return 0;
7734 }
7735
7736 /*
7737 * wm_tbi_set_linkled:
7738 *
7739 * Update the link LED on 1000BASE-X devices.
7740 */
7741 static void
7742 wm_tbi_set_linkled(struct wm_softc *sc)
7743 {
7744
7745 if (sc->sc_tbi_linkup)
7746 sc->sc_ctrl |= CTRL_SWDPIN(0);
7747 else
7748 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7749
7750 /* 82540 or newer devices are active low */
7751 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7752
7753 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7754 }
7755
7756 /*
7757 * wm_tbi_check_link:
7758 *
7759 * Check the link on 1000BASE-X devices.
7760 */
7761 static void
7762 wm_tbi_check_link(struct wm_softc *sc)
7763 {
7764 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7765 uint32_t status;
7766
7767 KASSERT(WM_TX_LOCKED(sc));
7768
7769 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
7770 sc->sc_tbi_linkup = 1;
7771 return;
7772 }
7773
7774 status = CSR_READ(sc, WMREG_STATUS);
7775
7776 /* XXX is this needed? */
7777 (void)CSR_READ(sc, WMREG_RXCW);
7778 (void)CSR_READ(sc, WMREG_CTRL);
7779
7780 /* set link status */
7781 if ((status & STATUS_LU) == 0) {
7782 DPRINTF(WM_DEBUG_LINK,
7783 ("%s: LINK: checklink -> down\n",
7784 device_xname(sc->sc_dev)));
7785 sc->sc_tbi_linkup = 0;
7786 } else if (sc->sc_tbi_linkup == 0) {
7787 DPRINTF(WM_DEBUG_LINK,
7788 ("%s: LINK: checklink -> up %s\n",
7789 device_xname(sc->sc_dev),
7790 (status & STATUS_FD) ? "FDX" : "HDX"));
7791 sc->sc_tbi_linkup = 1;
7792 }
7793
7794 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
7795 && ((status & STATUS_LU) == 0)) {
7796 sc->sc_tbi_linkup = 0;
7797 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7798 /* If the timer expired, retry autonegotiation */
7799 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
7800 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
7801 sc->sc_tbi_ticks = 0;
7802 /*
7803 * Reset the link, and let autonegotiation do
7804 * its thing
7805 */
7806 sc->sc_ctrl |= CTRL_LRST;
7807 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7808 CSR_WRITE_FLUSH(sc);
7809 delay(1000);
7810 sc->sc_ctrl &= ~CTRL_LRST;
7811 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7812 CSR_WRITE_FLUSH(sc);
7813 delay(1000);
7814 CSR_WRITE(sc, WMREG_TXCW,
7815 sc->sc_txcw & ~TXCW_ANE);
7816 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7817 }
7818 }
7819 }
7820
7821 wm_tbi_set_linkled(sc);
7822 }
7823
7824 /* SFP related */
7825
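/*
 * wm_sfp_read_data_byte:
 *
 *	Read one byte from an SFP module's EEPROM through the I2CCMD
 *	register, polling until the ready bit is set.
 */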
7826 static int
7827 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
7828 {
7829 uint32_t i2ccmd;
7830 int i;
7831
7832 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
7833 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7834
7835 /* Poll the ready bit */
7836 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7837 delay(50);
7838 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7839 if (i2ccmd & I2CCMD_READY)
7840 break;
7841 }
7842 if ((i2ccmd & I2CCMD_READY) == 0)
7843 return -1;
7844 if ((i2ccmd & I2CCMD_ERROR) != 0)
7845 return -1;
7846
7847 *data = i2ccmd & 0x00ff;
7848
7849 return 0;
7850 }
7851
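/*
 * wm_sfp_get_media_type:
 *
 *	Identify the attached SFP module and derive the media type
 *	from its Ethernet compliance flags.
 */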
7852 static uint32_t
7853 wm_sfp_get_media_type(struct wm_softc *sc)
7854 {
7855 uint32_t ctrl_ext;
7856 uint8_t val = 0;
7857 int timeout = 3;
7858 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
7859 int rv = -1;
7860
7861 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7862 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
7863 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
7864 CSR_WRITE_FLUSH(sc);
7865
7866 /* Read SFP module data */
7867 while (timeout) {
7868 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
7869 if (rv == 0)
7870 break;
7871 delay(100*1000); /* XXX too big */
7872 timeout--;
7873 }
7874 if (rv != 0)
7875 goto out;
7876 switch (val) {
7877 case SFF_SFP_ID_SFF:
7878 aprint_normal_dev(sc->sc_dev,
7879 "Module/Connector soldered to board\n");
7880 break;
7881 case SFF_SFP_ID_SFP:
7882 aprint_normal_dev(sc->sc_dev, "SFP\n");
7883 break;
7884 case SFF_SFP_ID_UNKNOWN:
7885 goto out;
7886 default:
7887 break;
7888 }
7889
7890 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
7891 if (rv != 0) {
7892 goto out;
7893 }
7894
7895 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
7896 mediatype = WM_MEDIATYPE_SERDES;
	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
7898 sc->sc_flags |= WM_F_SGMII;
7899 mediatype = WM_MEDIATYPE_COPPER;
	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
7901 sc->sc_flags |= WM_F_SGMII;
7902 mediatype = WM_MEDIATYPE_SERDES;
7903 }
7904
7905 out:
7906 /* Restore I2C interface setting */
7907 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7908
7909 return mediatype;
7910 }

/*
7912 * NVM related.
7913 * Microwire, SPI (w/wo EERD) and Flash.
7914 */
7915
7916 /* Both spi and uwire */
7917
7918 /*
7919 * wm_eeprom_sendbits:
7920 *
7921 * Send a series of bits to the EEPROM.
7922 */
7923 static void
7924 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
7925 {
7926 uint32_t reg;
7927 int x;
7928
7929 reg = CSR_READ(sc, WMREG_EECD);
7930
7931 for (x = nbits; x > 0; x--) {
7932 if (bits & (1U << (x - 1)))
7933 reg |= EECD_DI;
7934 else
7935 reg &= ~EECD_DI;
7936 CSR_WRITE(sc, WMREG_EECD, reg);
7937 CSR_WRITE_FLUSH(sc);
7938 delay(2);
7939 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7940 CSR_WRITE_FLUSH(sc);
7941 delay(2);
7942 CSR_WRITE(sc, WMREG_EECD, reg);
7943 CSR_WRITE_FLUSH(sc);
7944 delay(2);
7945 }
7946 }
7947
7948 /*
7949 * wm_eeprom_recvbits:
7950 *
7951 * Receive a series of bits from the EEPROM.
7952 */
7953 static void
7954 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
7955 {
7956 uint32_t reg, val;
7957 int x;
7958
7959 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
7960
7961 val = 0;
7962 for (x = nbits; x > 0; x--) {
7963 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7964 CSR_WRITE_FLUSH(sc);
7965 delay(2);
7966 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
7967 val |= (1U << (x - 1));
7968 CSR_WRITE(sc, WMREG_EECD, reg);
7969 CSR_WRITE_FLUSH(sc);
7970 delay(2);
7971 }
7972 *valp = val;
7973 }
7974
7975 /* Microwire */
7976
7977 /*
7978 * wm_nvm_read_uwire:
7979 *
7980 * Read a word from the EEPROM using the MicroWire protocol.
7981 */
7982 static int
7983 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7984 {
7985 uint32_t reg, val;
7986 int i;
7987
7988 for (i = 0; i < wordcnt; i++) {
7989 /* Clear SK and DI. */
7990 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
7991 CSR_WRITE(sc, WMREG_EECD, reg);
7992
7993 /*
7994 * XXX: workaround for a bug in qemu-0.12.x and prior
7995 * and Xen.
7996 *
7997 * We use this workaround only for 82540 because qemu's
7998 * e1000 act as 82540.
7999 */
8000 if (sc->sc_type == WM_T_82540) {
8001 reg |= EECD_SK;
8002 CSR_WRITE(sc, WMREG_EECD, reg);
8003 reg &= ~EECD_SK;
8004 CSR_WRITE(sc, WMREG_EECD, reg);
8005 CSR_WRITE_FLUSH(sc);
8006 delay(2);
8007 }
8008 /* XXX: end of workaround */
8009
8010 /* Set CHIP SELECT. */
8011 reg |= EECD_CS;
8012 CSR_WRITE(sc, WMREG_EECD, reg);
8013 CSR_WRITE_FLUSH(sc);
8014 delay(2);
8015
8016 /* Shift in the READ command. */
8017 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
8018
8019 /* Shift in address. */
8020 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
8021
8022 /* Shift out the data. */
8023 wm_eeprom_recvbits(sc, &val, 16);
8024 data[i] = val & 0xffff;
8025
8026 /* Clear CHIP SELECT. */
8027 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
8028 CSR_WRITE(sc, WMREG_EECD, reg);
8029 CSR_WRITE_FLUSH(sc);
8030 delay(2);
8031 }
8032
8033 return 0;
8034 }
8035
8036 /* SPI */
8037
8038 /*
8039 * Set SPI and FLASH related information from the EECD register.
8040 * For 82541 and 82547, the word size is taken from EEPROM.
8041 */
8042 static int
8043 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
8044 {
8045 int size;
8046 uint32_t reg;
8047 uint16_t data;
8048
8049 reg = CSR_READ(sc, WMREG_EECD);
8050 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
8051
8052 /* Read the size of NVM from EECD by default */
8053 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8054 switch (sc->sc_type) {
8055 case WM_T_82541:
8056 case WM_T_82541_2:
8057 case WM_T_82547:
8058 case WM_T_82547_2:
8059 /* Set dummy value to access EEPROM */
8060 sc->sc_nvm_wordsize = 64;
8061 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
8062 reg = data;
8063 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8064 if (size == 0)
8065 size = 6; /* 64 word size */
8066 else
8067 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
8068 break;
8069 case WM_T_80003:
8070 case WM_T_82571:
8071 case WM_T_82572:
8072 case WM_T_82573: /* SPI case */
8073 case WM_T_82574: /* SPI case */
8074 case WM_T_82583: /* SPI case */
8075 size += NVM_WORD_SIZE_BASE_SHIFT;
8076 if (size > 14)
8077 size = 14;
8078 break;
8079 case WM_T_82575:
8080 case WM_T_82576:
8081 case WM_T_82580:
8082 case WM_T_I350:
8083 case WM_T_I354:
8084 case WM_T_I210:
8085 case WM_T_I211:
8086 size += NVM_WORD_SIZE_BASE_SHIFT;
8087 if (size > 15)
8088 size = 15;
8089 break;
8090 default:
8091 aprint_error_dev(sc->sc_dev,
8092 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
		return -1;
8095 }
8096
8097 sc->sc_nvm_wordsize = 1 << size;
8098
8099 return 0;
8100 }
8101
8102 /*
8103 * wm_nvm_ready_spi:
8104 *
8105 * Wait for a SPI EEPROM to be ready for commands.
8106 */
8107 static int
8108 wm_nvm_ready_spi(struct wm_softc *sc)
8109 {
8110 uint32_t val;
8111 int usec;
8112
8113 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
8114 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
8115 wm_eeprom_recvbits(sc, &val, 8);
8116 if ((val & SPI_SR_RDY) == 0)
8117 break;
8118 }
8119 if (usec >= SPI_MAX_RETRIES) {
8120 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
8121 return 1;
8122 }
8123 return 0;
8124 }
8125
8126 /*
8127 * wm_nvm_read_spi:
8128 *
 *	Read a word from the EEPROM using the SPI protocol.
8130 */
8131 static int
8132 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8133 {
8134 uint32_t reg, val;
8135 int i;
8136 uint8_t opc;
8137
8138 /* Clear SK and CS. */
8139 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
8140 CSR_WRITE(sc, WMREG_EECD, reg);
8141 CSR_WRITE_FLUSH(sc);
8142 delay(2);
8143
8144 if (wm_nvm_ready_spi(sc))
8145 return 1;
8146
8147 /* Toggle CS to flush commands. */
8148 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
8149 CSR_WRITE_FLUSH(sc);
8150 delay(2);
8151 CSR_WRITE(sc, WMREG_EECD, reg);
8152 CSR_WRITE_FLUSH(sc);
8153 delay(2);
8154
8155 opc = SPI_OPC_READ;
8156 if (sc->sc_nvm_addrbits == 8 && word >= 128)
8157 opc |= SPI_OPC_A8;
8158
8159 wm_eeprom_sendbits(sc, opc, 8);
8160 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
8161
8162 for (i = 0; i < wordcnt; i++) {
8163 wm_eeprom_recvbits(sc, &val, 16);
8164 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
8165 }
8166
8167 /* Raise CS and clear SK. */
8168 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
8169 CSR_WRITE(sc, WMREG_EECD, reg);
8170 CSR_WRITE_FLUSH(sc);
8171 delay(2);
8172
8173 return 0;
8174 }
8175
/* Using the EERD register */
8177
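/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll the EERD or EEWR register until the DONE bit is set or
 *	the retry budget is exhausted.
 */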
8178 static int
8179 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
8180 {
8181 uint32_t attempts = 100000;
8182 uint32_t i, reg = 0;
8183 int32_t done = -1;
8184
8185 for (i = 0; i < attempts; i++) {
8186 reg = CSR_READ(sc, rw);
8187
8188 if (reg & EERD_DONE) {
8189 done = 0;
8190 break;
8191 }
8192 delay(5);
8193 }
8194
8195 return done;
8196 }
8197
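/*
 * wm_nvm_read_eerd:
 *
 *	Read words from the NVM through the EERD register.
 */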
8198 static int
8199 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
8200 uint16_t *data)
8201 {
8202 int i, eerd = 0;
8203 int error = 0;
8204
8205 for (i = 0; i < wordcnt; i++) {
8206 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
8207
8208 CSR_WRITE(sc, WMREG_EERD, eerd);
8209 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
8210 if (error != 0)
8211 break;
8212
8213 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
8214 }
8215
8216 return error;
8217 }
8218
8219 /* Flash */
8220
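/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	Determine which NVM bank holds a valid signature: check the
 *	SEC1VAL bit in EECD first on ICH8/ICH9, then fall back to the
 *	signature words in the flash itself.
 */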
8221 static int
8222 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
8223 {
8224 uint32_t eecd;
8225 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
8226 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
8227 uint8_t sig_byte = 0;
8228
8229 switch (sc->sc_type) {
8230 case WM_T_ICH8:
8231 case WM_T_ICH9:
8232 eecd = CSR_READ(sc, WMREG_EECD);
8233 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
8234 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
8235 return 0;
8236 }
8237 /* FALLTHROUGH */
8238 default:
8239 /* Default to 0 */
8240 *bank = 0;
8241
8242 /* Check bank 0 */
8243 wm_read_ich8_byte(sc, act_offset, &sig_byte);
8244 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8245 *bank = 0;
8246 return 0;
8247 }
8248
8249 /* Check bank 1 */
8250 wm_read_ich8_byte(sc, act_offset + bank1_offset,
8251 &sig_byte);
8252 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8253 *bank = 1;
8254 return 0;
8255 }
8256 }
8257
8258 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
8259 device_xname(sc->sc_dev)));
8260 return -1;
8261 }
8262
8263 /******************************************************************************
8264 * This function does initial flash setup so that a new read/write/erase cycle
8265 * can be started.
8266 *
8267 * sc - The pointer to the hw structure
8268 ****************************************************************************/
8269 static int32_t
8270 wm_ich8_cycle_init(struct wm_softc *sc)
8271 {
8272 uint16_t hsfsts;
8273 int32_t error = 1;
8274 int32_t i = 0;
8275
8276 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8277
	/* Check that the Flash Descriptor Valid bit is set in HW status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
		return error;
	}
8282
	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
8286
8287 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8288
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after a
	 * hardware reset, which can then be used to tell whether a cycle
	 * is in progress or has been completed.  We should also have
	 * some software semaphore mechanism guarding FDONE or the
	 * cycle-in-progress bit so that accesses by two threads are
	 * serialized and two threads don't start a cycle at the same
	 * time.
	 */
8299
8300 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8301 /*
8302 * There is no cycle running at present, so we can start a
8303 * cycle
8304 */
8305
8306 /* Begin by setting Flash Cycle Done. */
8307 hsfsts |= HSFSTS_DONE;
8308 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8309 error = 0;
8310 } else {
		/*
		 * Otherwise poll for some time so the current cycle has a
		 * chance to end before giving up.
		 */
8315 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
8316 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8317 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8318 error = 0;
8319 break;
8320 }
8321 delay(1);
8322 }
8323 if (error == 0) {
			/*
			 * The previous cycle ended within the timeout, so
			 * now set the Flash Cycle Done bit.
			 */
8328 hsfsts |= HSFSTS_DONE;
8329 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8330 }
8331 }
8332 return error;
8333 }
8334
8335 /******************************************************************************
8336 * This function starts a flash cycle and waits for its completion
8337 *
8338 * sc - The pointer to the hw structure
8339 ****************************************************************************/
8340 static int32_t
8341 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
8342 {
8343 uint16_t hsflctl;
8344 uint16_t hsfsts;
8345 int32_t error = 1;
8346 uint32_t i = 0;
8347
8348 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
8349 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8350 hsflctl |= HSFCTL_GO;
8351 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8352
8353 /* Wait till FDONE bit is set to 1 */
8354 do {
8355 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8356 if (hsfsts & HSFSTS_DONE)
8357 break;
8358 delay(1);
8359 i++;
8360 } while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
8362 error = 0;
8363
8364 return error;
8365 }
8366
8367 /******************************************************************************
8368 * Reads a byte or word from the NVM using the ICH8 flash access registers.
8369 *
8370 * sc - The pointer to the hw structure
8371 * index - The index of the byte or word to read.
8372 * size - Size of data to read, 1=byte 2=word
8373 * data - Pointer to the word to store the value read.
8374 *****************************************************************************/
8375 static int32_t
8376 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
8377 uint32_t size, uint16_t *data)
8378 {
8379 uint16_t hsfsts;
8380 uint16_t hsflctl;
8381 uint32_t flash_linear_address;
8382 uint32_t flash_data = 0;
8383 int32_t error = 1;
8384 int32_t count = 0;
8385
	if (size < 1 || size > 2 || data == NULL ||
8387 index > ICH_FLASH_LINEAR_ADDR_MASK)
8388 return error;
8389
8390 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
8391 sc->sc_ich8_flash_base;
8392
8393 do {
8394 delay(1);
8395 /* Steps */
8396 error = wm_ich8_cycle_init(sc);
8397 if (error)
8398 break;
8399
8400 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8401 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
8402 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
8403 & HSFCTL_BCOUNT_MASK;
8404 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
8405 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8406
8407 /*
8408 * Write the last 24 bits of index into Flash Linear address
8409 * field in Flash Address
8410 */
		/* TODO: maybe check the index against the size of the flash */
8412
8413 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
8414
8415 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
8416
		/*
		 * Check if FCERR is set.  If it is, clear it and retry the
		 * whole sequence a few more times; otherwise read in the
		 * Flash Data0 register, least significant byte first.
		 */
8423 if (error == 0) {
8424 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
8425 if (size == 1)
8426 *data = (uint8_t)(flash_data & 0x000000FF);
8427 else if (size == 2)
8428 *data = (uint16_t)(flash_data & 0x0000FFFF);
8429 break;
8430 } else {
8431 /*
8432 * If we've gotten here, then things are probably
8433 * completely hosed, but if the error condition is
8434 * detected, it won't hurt to give it another try...
8435 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
8436 */
8437 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8438 if (hsfsts & HSFSTS_ERR) {
8439 /* Repeat for some time before giving up. */
8440 continue;
8441 } else if ((hsfsts & HSFSTS_DONE) == 0)
8442 break;
8443 }
8444 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8445
8446 return error;
8447 }
8448
8449 /******************************************************************************
8450 * Reads a single byte from the NVM using the ICH8 flash access registers.
8451 *
8452 * sc - pointer to wm_hw structure
8453 * index - The index of the byte to read.
8454 * data - Pointer to a byte to store the value read.
8455 *****************************************************************************/
8456 static int32_t
8457 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
8458 {
8459 int32_t status;
8460 uint16_t word = 0;
8461
8462 status = wm_read_ich8_data(sc, index, 1, &word);
8463 if (status == 0)
8464 *data = (uint8_t)word;
8465 else
8466 *data = 0;
8467
8468 return status;
8469 }
8470
8471 /******************************************************************************
8472 * Reads a word from the NVM using the ICH8 flash access registers.
8473 *
8474 * sc - pointer to wm_hw structure
8475 * index - The starting byte index of the word to read.
8476 * data - Pointer to a word to store the value read.
8477 *****************************************************************************/
8478 static int32_t
8479 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8480 {
8481 int32_t status;
8482
8483 status = wm_read_ich8_data(sc, index, 2, data);
8484 return status;
8485 }
8486
8487 /******************************************************************************
8488 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
8489 * register.
8490 *
8491 * sc - Struct containing variables accessed by shared code
8492 * offset - offset of word in the EEPROM to read
8493 * data - word read from the EEPROM
8494 * words - number of words to read
8495 *****************************************************************************/
8496 static int
8497 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
8498 {
8499 int32_t error = 0;
8500 uint32_t flash_bank = 0;
8501 uint32_t act_offset = 0;
8502 uint32_t bank_offset = 0;
8503 uint16_t word = 0;
8504 uint16_t i = 0;
8505
8506 /*
8507 * We need to know which is the valid flash bank. In the event
8508 * that we didn't allocate eeprom_shadow_ram, we may not be
8509 * managing flash_bank. So it cannot be trusted and needs
8510 * to be updated with each read.
8511 */
8512 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
8513 if (error) {
8514 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
8515 device_xname(sc->sc_dev)));
8516 flash_bank = 0;
8517 }
8518
8519 /*
8520 * Adjust offset appropriately if we're on bank 1 - adjust for word
8521 * size
8522 */
8523 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
8524
8525 error = wm_get_swfwhw_semaphore(sc);
8526 if (error) {
8527 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8528 __func__);
8529 return error;
8530 }
8531
8532 for (i = 0; i < words; i++) {
8533 /* The NVM part needs a byte offset, hence * 2 */
8534 act_offset = bank_offset + ((offset + i) * 2);
8535 error = wm_read_ich8_word(sc, act_offset, &word);
8536 if (error) {
8537 aprint_error_dev(sc->sc_dev,
8538 "%s: failed to read NVM\n", __func__);
8539 break;
8540 }
8541 data[i] = word;
8542 }
8543
8544 wm_put_swfwhw_semaphore(sc);
8545 return error;
8546 }
8547
/* Lock, detect NVM type, validate checksum and read */
8549
8550 /*
8551 * wm_nvm_acquire:
8552 *
8553 * Perform the EEPROM handshake required on some chips.
8554 */
8555 static int
8556 wm_nvm_acquire(struct wm_softc *sc)
8557 {
8558 uint32_t reg;
8559 int x;
8560 int ret = 0;
8561
	/* Always succeeds for flash-type NVM */
8563 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8564 return 0;
8565
8566 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8567 ret = wm_get_swfwhw_semaphore(sc);
8568 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
8569 /* This will also do wm_get_swsm_semaphore() if needed */
8570 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
8571 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
8572 ret = wm_get_swsm_semaphore(sc);
8573 }
8574
8575 if (ret) {
8576 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8577 __func__);
8578 return 1;
8579 }
8580
8581 if (sc->sc_flags & WM_F_LOCK_EECD) {
8582 reg = CSR_READ(sc, WMREG_EECD);
8583
8584 /* Request EEPROM access. */
8585 reg |= EECD_EE_REQ;
8586 CSR_WRITE(sc, WMREG_EECD, reg);
8587
8588 /* ..and wait for it to be granted. */
8589 for (x = 0; x < 1000; x++) {
8590 reg = CSR_READ(sc, WMREG_EECD);
8591 if (reg & EECD_EE_GNT)
8592 break;
8593 delay(5);
8594 }
8595 if ((reg & EECD_EE_GNT) == 0) {
8596 aprint_error_dev(sc->sc_dev,
8597 "could not acquire EEPROM GNT\n");
8598 reg &= ~EECD_EE_REQ;
8599 CSR_WRITE(sc, WMREG_EECD, reg);
8600 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8601 wm_put_swfwhw_semaphore(sc);
8602 if (sc->sc_flags & WM_F_LOCK_SWFW)
8603 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8604 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8605 wm_put_swsm_semaphore(sc);
8606 return 1;
8607 }
8608 }
8609
8610 return 0;
8611 }
8612
8613 /*
8614 * wm_nvm_release:
8615 *
8616 * Release the EEPROM mutex.
8617 */
8618 static void
8619 wm_nvm_release(struct wm_softc *sc)
8620 {
8621 uint32_t reg;
8622
	/* Always succeeds for flash-type NVM */
8624 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8625 return;
8626
8627 if (sc->sc_flags & WM_F_LOCK_EECD) {
8628 reg = CSR_READ(sc, WMREG_EECD);
8629 reg &= ~EECD_EE_REQ;
8630 CSR_WRITE(sc, WMREG_EECD, reg);
8631 }
8632
8633 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8634 wm_put_swfwhw_semaphore(sc);
8635 if (sc->sc_flags & WM_F_LOCK_SWFW)
8636 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8637 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8638 wm_put_swsm_semaphore(sc);
8639 }
8640
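/*
 * wm_nvm_is_onboard_eeprom:
 *
 *	Return 0 if an 82573/82574/82583 reports a Flash-type NVM in
 *	EECD, otherwise 1 (onboard EEPROM).
 */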
8641 static int
8642 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
8643 {
8644 uint32_t eecd = 0;
8645
8646 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
8647 || sc->sc_type == WM_T_82583) {
8648 eecd = CSR_READ(sc, WMREG_EECD);
8649
8650 /* Isolate bits 15 & 16 */
8651 eecd = ((eecd >> 15) & 0x03);
8652
8653 /* If both bits are set, device is Flash type */
8654 if (eecd == 0x03)
8655 return 0;
8656 }
8657 return 1;
8658 }
8659
8660 /*
 * wm_nvm_validate_checksum:
 *
 *	Validate the NVM checksum: the sum of the first 64 16-bit words
 *	must equal NVM_CHECKSUM.
8664 */
8665 static int
8666 wm_nvm_validate_checksum(struct wm_softc *sc)
8667 {
8668 uint16_t checksum;
8669 uint16_t eeprom_data;
8670 #ifdef WM_DEBUG
8671 uint16_t csum_wordaddr, valid_checksum;
8672 #endif
8673 int i;
8674
8675 checksum = 0;
8676
8677 /* Don't check for I211 */
8678 if (sc->sc_type == WM_T_I211)
8679 return 0;
8680
8681 #ifdef WM_DEBUG
8682 if (sc->sc_type == WM_T_PCH_LPT) {
8683 csum_wordaddr = NVM_OFF_COMPAT;
8684 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
8685 } else {
8686 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
8687 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
8688 }
8689
8690 /* Dump EEPROM image for debug */
8691 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8692 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8693 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
8694 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
8695 if ((eeprom_data & valid_checksum) == 0) {
8696 DPRINTF(WM_DEBUG_NVM,
8697 ("%s: NVM need to be updated (%04x != %04x)\n",
8698 device_xname(sc->sc_dev), eeprom_data,
8699 valid_checksum));
8700 }
8701 }
8702
8703 if ((wm_debug & WM_DEBUG_NVM) != 0) {
8704 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
8705 for (i = 0; i < NVM_SIZE; i++) {
8706 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8707 printf("XXXX ");
8708 else
8709 printf("%04hx ", eeprom_data);
8710 if (i % 8 == 7)
8711 printf("\n");
8712 }
8713 }
8714
8715 #endif /* WM_DEBUG */
8716
8717 for (i = 0; i < NVM_SIZE; i++) {
8718 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8719 return 1;
8720 checksum += eeprom_data;
8721 }
8722
8723 if (checksum != (uint16_t) NVM_CHECKSUM) {
8724 #ifdef WM_DEBUG
8725 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
8726 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
8727 #endif
8728 }
8729
8730 return 0;
8731 }
8732
8733 /*
8734 * wm_nvm_read:
8735 *
8736 * Read data from the serial EEPROM.
8737 */
8738 static int
8739 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8740 {
8741 int rv;
8742
8743 if (sc->sc_flags & WM_F_EEPROM_INVALID)
8744 return 1;
8745
8746 if (wm_nvm_acquire(sc))
8747 return 1;
8748
8749 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8750 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8751 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
8752 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
8753 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
8754 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
8755 else if (sc->sc_flags & WM_F_EEPROM_SPI)
8756 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
8757 else
8758 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
8759
8760 wm_nvm_release(sc);
8761 return rv;
8762 }
8763
8764 /*
8765 * Hardware semaphores.
 * Very complex...
8767 */
8768
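/*
 * wm_get_swsm_semaphore:
 *
 *	Acquire the semaphore in the SWSM register: first the SW
 *	(SMBI) bit, then the SW/FW (SWESMBI) bit.
 */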
8769 static int
8770 wm_get_swsm_semaphore(struct wm_softc *sc)
8771 {
8772 int32_t timeout;
8773 uint32_t swsm;
8774
8775 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8776 /* Get the SW semaphore. */
8777 timeout = sc->sc_nvm_wordsize + 1;
8778 while (timeout) {
8779 swsm = CSR_READ(sc, WMREG_SWSM);
8780
8781 if ((swsm & SWSM_SMBI) == 0)
8782 break;
8783
8784 delay(50);
8785 timeout--;
8786 }
8787
8788 if (timeout == 0) {
8789 aprint_error_dev(sc->sc_dev,
8790 "could not acquire SWSM SMBI\n");
8791 return 1;
8792 }
8793 }
8794
8795 /* Get the FW semaphore. */
8796 timeout = sc->sc_nvm_wordsize + 1;
8797 while (timeout) {
8798 swsm = CSR_READ(sc, WMREG_SWSM);
8799 swsm |= SWSM_SWESMBI;
8800 CSR_WRITE(sc, WMREG_SWSM, swsm);
8801 /* If we managed to set the bit we got the semaphore. */
8802 swsm = CSR_READ(sc, WMREG_SWSM);
8803 if (swsm & SWSM_SWESMBI)
8804 break;
8805
8806 delay(50);
8807 timeout--;
8808 }
8809
8810 if (timeout == 0) {
8811 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
8812 /* Release semaphores */
8813 wm_put_swsm_semaphore(sc);
8814 return 1;
8815 }
8816 return 0;
8817 }
8818
8819 static void
8820 wm_put_swsm_semaphore(struct wm_softc *sc)
8821 {
8822 uint32_t swsm;
8823
8824 swsm = CSR_READ(sc, WMREG_SWSM);
8825 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
8826 CSR_WRITE(sc, WMREG_SWSM, swsm);
8827 }
8828
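/*
 * wm_get_swfw_semaphore:
 *
 *	Acquire the software side of a SW/FW semaphore for the
 *	resources in "mask" through the SW_FW_SYNC register.
 */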
8829 static int
8830 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8831 {
8832 uint32_t swfw_sync;
8833 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
8834 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
8836
8837 for (timeout = 0; timeout < 200; timeout++) {
8838 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8839 if (wm_get_swsm_semaphore(sc)) {
8840 aprint_error_dev(sc->sc_dev,
8841 "%s: failed to get semaphore\n",
8842 __func__);
8843 return 1;
8844 }
8845 }
8846 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8847 if ((swfw_sync & (swmask | fwmask)) == 0) {
8848 swfw_sync |= swmask;
8849 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8850 if (sc->sc_flags & WM_F_LOCK_SWSM)
8851 wm_put_swsm_semaphore(sc);
8852 return 0;
8853 }
8854 if (sc->sc_flags & WM_F_LOCK_SWSM)
8855 wm_put_swsm_semaphore(sc);
8856 delay(5000);
8857 }
8858 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
8859 device_xname(sc->sc_dev), mask, swfw_sync);
8860 return 1;
8861 }
8862
8863 static void
8864 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8865 {
8866 uint32_t swfw_sync;
8867
8868 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8869 while (wm_get_swsm_semaphore(sc) != 0)
8870 continue;
8871 }
8872 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8873 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
8874 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8875 if (sc->sc_flags & WM_F_LOCK_SWSM)
8876 wm_put_swsm_semaphore(sc);
8877 }
8878
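/*
 * wm_get_swfwhw_semaphore:
 *
 *	Acquire the software flag (SWFLAG) in the EXTCNFCTR register.
 */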
8879 static int
8880 wm_get_swfwhw_semaphore(struct wm_softc *sc)
8881 {
8882 uint32_t ext_ctrl;
	int timeout;
8884
8885 for (timeout = 0; timeout < 200; timeout++) {
8886 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8887 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
8888 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8889
8890 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8891 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
8892 return 0;
8893 delay(5000);
8894 }
8895 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
8896 device_xname(sc->sc_dev), ext_ctrl);
8897 return 1;
8898 }
8899
8900 static void
8901 wm_put_swfwhw_semaphore(struct wm_softc *sc)
8902 {
8903 uint32_t ext_ctrl;
8904 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8905 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
8906 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8907 }
8908
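/*
 * wm_get_hw_semaphore_82573:
 *
 *	Acquire MDIO ownership through the EXTCNFCTR register.
 */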
8909 static int
8910 wm_get_hw_semaphore_82573(struct wm_softc *sc)
8911 {
8912 int i = 0;
8913 uint32_t reg;
8914
8915 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8916 do {
8917 CSR_WRITE(sc, WMREG_EXTCNFCTR,
8918 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
8919 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8920 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
8921 break;
8922 delay(2*1000);
8923 i++;
8924 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
8925
8926 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
8927 wm_put_hw_semaphore_82573(sc);
8928 log(LOG_ERR, "%s: Driver can't access the PHY\n",
8929 device_xname(sc->sc_dev));
8930 return -1;
8931 }
8932
8933 return 0;
8934 }
8935
8936 static void
8937 wm_put_hw_semaphore_82573(struct wm_softc *sc)
8938 {
8939 uint32_t reg;
8940
8941 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8942 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
8943 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8944 }
8945
8946 /*
8947 * Management mode and power management related subroutines.
8948 * BMC, AMT, suspend/resume and EEE.
8949 */
8950
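/*
 * wm_check_mng_mode:
 *
 *	Return non-zero if the management mode is enabled, using the
 *	check appropriate for the device type.
 */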
8951 static int
8952 wm_check_mng_mode(struct wm_softc *sc)
8953 {
8954 int rv;
8955
8956 switch (sc->sc_type) {
8957 case WM_T_ICH8:
8958 case WM_T_ICH9:
8959 case WM_T_ICH10:
8960 case WM_T_PCH:
8961 case WM_T_PCH2:
8962 case WM_T_PCH_LPT:
8963 rv = wm_check_mng_mode_ich8lan(sc);
8964 break;
8965 case WM_T_82574:
8966 case WM_T_82583:
8967 rv = wm_check_mng_mode_82574(sc);
8968 break;
8969 case WM_T_82571:
8970 case WM_T_82572:
8971 case WM_T_82573:
8972 case WM_T_80003:
8973 rv = wm_check_mng_mode_generic(sc);
8974 break;
8975 default:
		/* nothing to do */
8977 rv = 0;
8978 break;
8979 }
8980
8981 return rv;
8982 }
8983
8984 static int
8985 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
8986 {
8987 uint32_t fwsm;
8988
8989 fwsm = CSR_READ(sc, WMREG_FWSM);
8990
8991 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
8992 return 1;
8993
8994 return 0;
8995 }
8996
8997 static int
8998 wm_check_mng_mode_82574(struct wm_softc *sc)
8999 {
9000 uint16_t data;
9001
9002 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
9003
9004 if ((data & NVM_CFG2_MNGM_MASK) != 0)
9005 return 1;
9006
9007 return 0;
9008 }
9009
9010 static int
9011 wm_check_mng_mode_generic(struct wm_softc *sc)
9012 {
9013 uint32_t fwsm;
9014
9015 fwsm = CSR_READ(sc, WMREG_FWSM);
9016
9017 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
9018 return 1;
9019
9020 return 0;
9021 }
9022
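/*
 * wm_enable_mng_pass_thru:
 *
 *	Return 1 if management packets should be passed to the host,
 *	i.e. firmware is present, reception of TCO packets is enabled
 *	and the chip-specific mode checks pass.
 */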
9023 static int
9024 wm_enable_mng_pass_thru(struct wm_softc *sc)
9025 {
9026 uint32_t manc, fwsm, factps;
9027
9028 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
9029 return 0;
9030
9031 manc = CSR_READ(sc, WMREG_MANC);
9032
9033 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
9034 device_xname(sc->sc_dev), manc));
9035 if ((manc & MANC_RECV_TCO_EN) == 0)
9036 return 0;
9037
9038 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
9039 fwsm = CSR_READ(sc, WMREG_FWSM);
9040 factps = CSR_READ(sc, WMREG_FACTPS);
9041 if (((factps & FACTPS_MNGCG) == 0)
9042 && ((fwsm & FWSM_MODE_MASK)
9043 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
9044 return 1;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
9046 uint16_t data;
9047
9048 factps = CSR_READ(sc, WMREG_FACTPS);
9049 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
9050 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
9051 device_xname(sc->sc_dev), factps, data));
9052 if (((factps & FACTPS_MNGCG) == 0)
9053 && ((data & NVM_CFG2_MNGM_MASK)
9054 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
9055 return 1;
9056 } else if (((manc & MANC_SMBUS_EN) != 0)
9057 && ((manc & MANC_ASF_EN) == 0))
9058 return 1;
9059
9060 return 0;
9061 }
9062
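/*
 * wm_check_reset_block:
 *
 *	Return 0 if a PHY reset is currently allowed, -1 if it is
 *	blocked by the firmware or by the MANC settings.
 */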
9063 static int
9064 wm_check_reset_block(struct wm_softc *sc)
9065 {
9066 uint32_t reg;
9067
9068 switch (sc->sc_type) {
9069 case WM_T_ICH8:
9070 case WM_T_ICH9:
9071 case WM_T_ICH10:
9072 case WM_T_PCH:
9073 case WM_T_PCH2:
9074 case WM_T_PCH_LPT:
9075 reg = CSR_READ(sc, WMREG_FWSM);
9076 if ((reg & FWSM_RSPCIPHY) != 0)
9077 return 0;
9078 else
9079 return -1;
9080 break;
9081 case WM_T_82571:
9082 case WM_T_82572:
9083 case WM_T_82573:
9084 case WM_T_82574:
9085 case WM_T_82583:
9086 case WM_T_80003:
9087 reg = CSR_READ(sc, WMREG_MANC);
9088 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
9089 return -1;
9090 else
9091 return 0;
9092 break;
9093 default:
9094 /* no problem */
9095 break;
9096 }
9097
9098 return 0;
9099 }
9100
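/*
 * wm_get_hw_control:
 *
 *	Tell the firmware that the driver has taken over the device,
 *	using SWSM_DRV_LOAD or CTRL_EXT_DRV_LOAD depending on the
 *	device type.
 */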
9101 static void
9102 wm_get_hw_control(struct wm_softc *sc)
9103 {
9104 uint32_t reg;
9105
9106 switch (sc->sc_type) {
9107 case WM_T_82573:
9108 reg = CSR_READ(sc, WMREG_SWSM);
9109 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
9110 break;
9111 case WM_T_82571:
9112 case WM_T_82572:
9113 case WM_T_82574:
9114 case WM_T_82583:
9115 case WM_T_80003:
9116 case WM_T_ICH8:
9117 case WM_T_ICH9:
9118 case WM_T_ICH10:
9119 case WM_T_PCH:
9120 case WM_T_PCH2:
9121 case WM_T_PCH_LPT:
9122 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9123 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
9124 break;
9125 default:
9126 break;
9127 }
9128 }
9129
9130 static void
9131 wm_release_hw_control(struct wm_softc *sc)
9132 {
9133 uint32_t reg;
9134
9135 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
9136 return;
9137
9138 if (sc->sc_type == WM_T_82573) {
9139 reg = CSR_READ(sc, WMREG_SWSM);
9140 reg &= ~SWSM_DRV_LOAD;
		CSR_WRITE(sc, WMREG_SWSM, reg);
9142 } else {
9143 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9144 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
9145 }
9146 }
9147
9148 static void
9149 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
9150 {
9151 uint32_t reg;
9152
9153 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
9154
9155 if (on != 0)
9156 reg |= EXTCNFCTR_GATE_PHY_CFG;
9157 else
9158 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
9159
9160 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
9161 }
9162
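/*
 * wm_smbustopci:
 *
 *	Toggle the LANPHYPC pin value to switch the PHY from SMBus to
 *	PCIe mode, when no valid firmware is present and a PHY reset
 *	is not blocked.
 */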
9163 static void
9164 wm_smbustopci(struct wm_softc *sc)
9165 {
9166 uint32_t fwsm;
9167
9168 fwsm = CSR_READ(sc, WMREG_FWSM);
9169 if (((fwsm & FWSM_FW_VALID) == 0)
9170 && ((wm_check_reset_block(sc) == 0))) {
9171 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
9172 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
9173 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9174 CSR_WRITE_FLUSH(sc);
9175 delay(10);
9176 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
9177 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9178 CSR_WRITE_FLUSH(sc);
9179 delay(50*1000);
9180
9181 /*
9182 * Gate automatic PHY configuration by hardware on non-managed
9183 * 82579
9184 */
9185 if (sc->sc_type == WM_T_PCH2)
9186 wm_gate_hw_phy_config_ich8lan(sc, 1);
9187 }
9188 }
9189
9190 static void
9191 wm_init_manageability(struct wm_softc *sc)
9192 {
9193
9194 if (sc->sc_flags & WM_F_HAS_MANAGE) {
9195 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
9196 uint32_t manc = CSR_READ(sc, WMREG_MANC);
9197
9198 /* Disable hardware interception of ARP */
9199 manc &= ~MANC_ARP_EN;
9200
9201 /* Enable receiving management packets to the host */
9202 if (sc->sc_type >= WM_T_82571) {
9203 manc |= MANC_EN_MNG2HOST;
9204 manc2h |= MANC2H_PORT_623| MANC2H_PORT_624;
9205 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
9206
9207 }
9208
9209 CSR_WRITE(sc, WMREG_MANC, manc);
9210 }
9211 }
9212
9213 static void
9214 wm_release_manageability(struct wm_softc *sc)
9215 {
9216
9217 if (sc->sc_flags & WM_F_HAS_MANAGE) {
9218 uint32_t manc = CSR_READ(sc, WMREG_MANC);
9219
9220 manc |= MANC_ARP_EN;
9221 if (sc->sc_type >= WM_T_82571)
9222 manc &= ~MANC_EN_MNG2HOST;
9223
9224 CSR_WRITE(sc, WMREG_MANC, manc);
9225 }
9226 }
9227
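/*
 * wm_get_wakeup:
 *
 *	Set the management and wakeup related flags (HAS_AMT,
 *	ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES and HAS_MANAGE) for this
 *	device type.
 */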
9228 static void
9229 wm_get_wakeup(struct wm_softc *sc)
9230 {
9231
9232 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
9233 switch (sc->sc_type) {
9234 case WM_T_82573:
9235 case WM_T_82583:
9236 sc->sc_flags |= WM_F_HAS_AMT;
9237 /* FALLTHROUGH */
9238 case WM_T_80003:
9239 case WM_T_82541:
9240 case WM_T_82547:
9241 case WM_T_82571:
9242 case WM_T_82572:
9243 case WM_T_82574:
9244 case WM_T_82575:
9245 case WM_T_82576:
9246 case WM_T_82580:
9247 case WM_T_I350:
9248 case WM_T_I354:
9249 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
9250 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
9251 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9252 break;
9253 case WM_T_ICH8:
9254 case WM_T_ICH9:
9255 case WM_T_ICH10:
9256 case WM_T_PCH:
9257 case WM_T_PCH2:
9258 case WM_T_PCH_LPT:
9259 sc->sc_flags |= WM_F_HAS_AMT;
9260 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9261 break;
9262 default:
9263 break;
9264 }
9265
9266 /* 1: HAS_MANAGE */
9267 if (wm_enable_mng_pass_thru(sc) != 0)
9268 sc->sc_flags |= WM_F_HAS_MANAGE;
9269
9270 #ifdef WM_DEBUG
9271 printf("\n");
9272 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
9273 printf("HAS_AMT,");
9274 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
9275 printf("ARC_SUBSYS_VALID,");
9276 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
9277 printf("ASF_FIRMWARE_PRES,");
9278 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
9279 printf("HAS_MANAGE,");
9280 printf("\n");
9281 #endif
	/*
	 * Note that the WOL flags are set later, after the EEPROM
	 * related initialization has been done.
	 */
9286 }
9287
9288 #ifdef WM_WOL
9289 /* WOL in the newer chipset interfaces (pchlan) */
9290 static void
9291 wm_enable_phy_wakeup(struct wm_softc *sc)
9292 {
9293 #if 0
9294 uint16_t preg;
9295
9296 /* Copy MAC RARs to PHY RARs */
9297
9298 /* Copy MAC MTA to PHY MTA */
9299
9300 /* Configure PHY Rx Control register */
9301
9302 /* Enable PHY wakeup in MAC register */
9303
9304 /* Configure and enable PHY wakeup in PHY registers */
9305
9306 /* Activate PHY wakeup */
9307
9308 /* XXX */
9309 #endif
9310 }
9311
9312 /* Power down workaround on D3 */
9313 static void
9314 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
9315 {
9316 uint32_t reg;
9317 int i;
9318
9319 for (i = 0; i < 2; i++) {
9320 /* Disable link */
9321 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9322 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9323 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9324
9325 /*
9326 * Call gig speed drop workaround on Gig disable before
9327 * accessing any PHY registers
9328 */
9329 if (sc->sc_type == WM_T_ICH8)
9330 wm_gig_downshift_workaround_ich8lan(sc);
9331
9332 /* Write VR power-down enable */
9333 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9334 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9335 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
9336 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
9337
9338 /* Read it back and test */
9339 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9340 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9341 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
9342 break;
9343
9344 /* Issue PHY reset and repeat at most one more time */
9345 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9346 }
9347 }
9348
9349 static void
9350 wm_enable_wakeup(struct wm_softc *sc)
9351 {
9352 uint32_t reg, pmreg;
9353 pcireg_t pmode;
9354
9355 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
9356 &pmreg, NULL) == 0)
9357 return;
9358
9359 /* Advertise the wakeup capability */
9360 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
9361 | CTRL_SWDPIN(3));
9362 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
9363
9364 /* ICH workaround */
9365 switch (sc->sc_type) {
9366 case WM_T_ICH8:
9367 case WM_T_ICH9:
9368 case WM_T_ICH10:
9369 case WM_T_PCH:
9370 case WM_T_PCH2:
9371 case WM_T_PCH_LPT:
9372 /* Disable gig during WOL */
9373 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9374 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
9375 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9376 if (sc->sc_type == WM_T_PCH)
9377 wm_gmii_reset(sc);
9378
9379 /* Power down workaround */
9380 if (sc->sc_phytype == WMPHY_82577) {
9381 struct mii_softc *child;
9382
9383 /* Assume that the PHY is copper */
9384 child = LIST_FIRST(&sc->sc_mii.mii_phys);
9385 if (child->mii_mpd_rev <= 2)
9386 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
9387 (768 << 5) | 25, 0x0444); /* magic num */
9388 }
9389 break;
9390 default:
9391 break;
9392 }
9393
9394 /* Keep the laser running on fiber adapters */
9395 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
9396 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
9397 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9398 reg |= CTRL_EXT_SWDPIN(3);
9399 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9400 }
9401
9402 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
9403 #if 0 /* for the multicast packet */
9404 reg |= WUFC_MC;
9405 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
9406 #endif
9407
9408 if (sc->sc_type == WM_T_PCH) {
9409 wm_enable_phy_wakeup(sc);
9410 } else {
9411 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
9412 CSR_WRITE(sc, WMREG_WUFC, reg);
9413 }
9414
9415 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9416 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9417 || (sc->sc_type == WM_T_PCH2))
9418 && (sc->sc_phytype == WMPHY_IGP_3))
9419 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
9420
9421 /* Request PME */
9422 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
9423 #if 0
9424 /* Disable WOL */
9425 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
9426 #else
9427 /* For WOL */
9428 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
9429 #endif
9430 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
9431 }
9432 #endif /* WM_WOL */
9433
9434 /* EEE */
9435
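/*
 * wm_set_eee_i350:
 *
 *	Enable or disable Energy Efficient Ethernet on I350 class
 *	devices, depending on the WM_F_EEE flag.
 */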
9436 static void
9437 wm_set_eee_i350(struct wm_softc *sc)
9438 {
9439 uint32_t ipcnfg, eeer;
9440
9441 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
9442 eeer = CSR_READ(sc, WMREG_EEER);
9443
9444 if ((sc->sc_flags & WM_F_EEE) != 0) {
9445 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9446 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
9447 | EEER_LPI_FC);
9448 } else {
9449 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9450 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
9451 | EEER_LPI_FC);
9452 }
9453
9454 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
9455 CSR_WRITE(sc, WMREG_EEER, eeer);
9456 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
9457 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
9458 }
9459
9460 /*
9461 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds belong in the PHY drivers.
9463 */
9464
9465 /* Work-around for 82566 Kumeran PCS lock loss */
9466 static void
9467 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
9468 {
9469 int miistatus, active, i;
9470 int reg;
9471
9472 miistatus = sc->sc_mii.mii_media_status;
9473
	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
		return;
9477
9478 active = sc->sc_mii.mii_media_active;
9479
9480 /* Nothing to do if the link is other than 1Gbps */
9481 if (IFM_SUBTYPE(active) != IFM_1000_T)
9482 return;
9483
9484 for (i = 0; i < 10; i++) {
9485 /* read twice */
9486 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9487 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
			goto out;	/* GOOD! */
9490
9491 /* Reset the PHY */
9492 wm_gmii_reset(sc);
9493 delay(5*1000);
9494 }
9495
9496 /* Disable GigE link negotiation */
9497 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9498 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9499 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9500
9501 /*
9502 * Call gig speed drop workaround on Gig disable before accessing
9503 * any PHY registers.
9504 */
9505 wm_gig_downshift_workaround_ich8lan(sc);
9506
9507 out:
9508 return;
9509 }
9510
9511 /* WOL from S5 stops working */
9512 static void
9513 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
9514 {
9515 uint16_t kmrn_reg;
9516
9517 /* Only for igp3 */
9518 if (sc->sc_phytype == WMPHY_IGP_3) {
9519 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
9520 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
9521 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9522 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
9523 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9524 }
9525 }
9526
9527 /*
 * Workaround for the PCH's PHYs
9529 * XXX should be moved to new PHY driver?
9530 */
9531 static void
9532 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
9533 {
9534 if (sc->sc_phytype == WMPHY_82577)
9535 wm_set_mdio_slow_mode_hv(sc);
9536
9537 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
9538
9539 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
9540
9541 /* 82578 */
9542 if (sc->sc_phytype == WMPHY_82578) {
9543 /* PCH rev. < 3 */
9544 if (sc->sc_rev < 3) {
9545 /* XXX 6 bit shift? Why? Is it page2? */
9546 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
9547 0x66c0);
9548 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
9549 0xffff);
9550 }
9551
9552 /* XXX phy rev. < 2 */
9553 }
9554
9555 /* Select page 0 */
9556
9557 /* XXX acquire semaphore */
9558 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
9559 /* XXX release semaphore */
9560
9561 /*
9562 * Configure the K1 Si workaround during phy reset assuming there is
9563 * link so that it disables K1 if link is in 1Gbps.
9564 */
9565 wm_k1_gig_workaround_hv(sc, 1);
9566 }
9567
9568 static void
9569 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
9570 {
9571
9572 wm_set_mdio_slow_mode_hv(sc);
9573 }
9574
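/*
 * wm_k1_gig_workaround_hv:
 *
 *	K1 Si workaround: disable K1 while the link is up at 1Gbps and
 *	apply the link stall fix.
 */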
9575 static void
9576 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
9577 {
9578 int k1_enable = sc->sc_nvm_k1_enabled;
9579
9580 /* XXX acquire semaphore */
9581
9582 if (link) {
9583 k1_enable = 0;
9584
9585 /* Link stall fix for link up */
9586 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
9587 } else {
9588 /* Link stall fix for link down */
9589 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
9590 }
9591
9592 wm_configure_k1_ich8lan(sc, k1_enable);
9593
9594 /* XXX release semaphore */
9595 }
9596
9597 static void
9598 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
9599 {
9600 uint32_t reg;
9601
9602 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
9603 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
9604 reg | HV_KMRN_MDIO_SLOW);
9605 }
9606
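/*
 * wm_configure_k1_ich8lan:
 *
 *	Enable or disable the K1 power state in the Kumeran interface,
 *	briefly forcing the MAC speed setting so that the change takes
 *	effect.
 */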
9607 static void
9608 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
9609 {
9610 uint32_t ctrl, ctrl_ext, tmp;
9611 uint16_t kmrn_reg;
9612
9613 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
9614
9615 if (k1_enable)
9616 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
9617 else
9618 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
9619
9620 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
9621
9622 delay(20);
9623
9624 ctrl = CSR_READ(sc, WMREG_CTRL);
9625 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9626
9627 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
9628 tmp |= CTRL_FRCSPD;
9629
9630 CSR_WRITE(sc, WMREG_CTRL, tmp);
9631 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
9632 CSR_WRITE_FLUSH(sc);
9633 delay(20);
9634
9635 CSR_WRITE(sc, WMREG_CTRL, ctrl);
9636 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9637 CSR_WRITE_FLUSH(sc);
9638 delay(20);
9639 }
9640
/* Special case - the 82575 needs to do manual init ... */
9642 static void
9643 wm_reset_init_script_82575(struct wm_softc *sc)
9644 {
	/*
	 * Remark: this is untested code - we have no board without an
	 * EEPROM.  It is the same setup as mentioned in the FreeBSD
	 * driver for the i82575.
	 */
9649
9650 /* SerDes configuration via SERDESCTRL */
9651 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
9652 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
9653 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
9654 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
9655
9656 /* CCM configuration via CCMCTL register */
9657 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
9658 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
9659
9660 /* PCIe lanes configuration */
9661 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
9662 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
9663 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
9664 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
9665
9666 /* PCIe PLL Configuration */
9667 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
9668 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
9669 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
9670 }
9671