/* $NetBSD: if_wm.c,v 1.323 2015/06/02 03:49:10 msaitoh Exp $ */

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- EEE (Energy Efficient Ethernet)
 *	- MSI/MSI-X
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.323 2015/06/02 03:49:10 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define	WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */
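
/*
 * Illustrative sketch (comment only, not compiled): because
 * WM_NTXDESC(sc) is a power of two, WM_NEXTTX() wraps the ring index
 * with a mask instead of a modulo.  For example, with sc_ntxdesc == 256:
 *
 *	WM_NEXTTX(sc, 254) == 255
 *	WM_NEXTTX(sc, 255) == (255 + 1) & 0xff == 0
 *
 * WM_NEXTTXS() applies the same trick to the Tx job queue.
 */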

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
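
/*
 * Example of the buffer math above (illustrative only): a 9014-byte
 * jumbo frame split across 2k (MCLBYTES) clusters needs
 * howmany(9014, 2048) == 5 receive buffers, so 256 descriptors leave
 * room for roughly 50 in-flight jumbo packets.
 */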

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
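
/*
 * A minimal sketch of how these offsets work (illustrative, and
 * ignoring any padding): wcd_rxdescs is the first member, so
 * WM_CDRXOFF(0) == 0, and the Tx descriptors start right after the
 * Rx ring, so WM_CDTXOFF(0) == sizeof(wiseman_rxdesc_t) * WM_NRXDESC.
 * Adding sc_cddma to an offset yields the bus address handed to the
 * chip (see WM_CDTXADDR()/WM_CDRXADDR() below).
 */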

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define	WM_TX_LOCK(_sc)		if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define	WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define	WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define	WM_RX_LOCK(_sc)		if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define	WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define	WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define	WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define	WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define	WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
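
/*
 * Usage sketch (illustrative only): paths that touch both rings use
 * WM_BOTH_LOCK()/WM_BOTH_UNLOCK() so the ordering stays consistent
 * (Tx then Rx on acquire, Rx then Tx on release):
 *
 *	WM_BOTH_LOCK(sc);
 *	... reset/init work that touches both Tx and Rx state ...
 *	WM_BOTH_UNLOCK(sc);
 *
 * If sc_tx_lock/sc_rx_lock are NULL, all of these macros collapse
 * to no-ops.
 */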

#ifdef WM_MPSAFE
#define	CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define	CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
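
/*
 * Illustrative walk-through of the two macros above (comment only,
 * not compiled): after WM_RXCHAIN_RESET(sc) the chain is empty and
 * sc_rxtailp points at sc_rxhead.  Linking mbufs m1 and then m2
 * leaves:
 *
 *	sc_rxhead -> m1 -> m2
 *	sc_rxtail == m2, sc_rxtailp == &m2->m_next
 *
 * so each WM_RXCHAIN_LINK() is O(1); no list traversal is needed.
 */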

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
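
/*
 * Example of the 64-bit split above (illustrative address): if
 * WM_CDTXADDR() evaluates to 0x123456000, the chip is programmed
 * with LO = 0x23456000 and HI = 0x1.  On platforms where bus_addr_t
 * is 32 bits, the HI half constant-folds to 0.
 */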

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
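
/*
 * Worked example of the wrap logic above (illustrative values):
 * with WM_NTXDESC(sc) == 4096, WM_CDTXSYNC(sc, 4094, 4, ops) first
 * syncs descriptors 4094-4095 (the tail of the ring) and then
 * descriptors 0-1, so callers never have to split a wrapped range
 * themselves.
 */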

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
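
/*
 * Sketch of the align_tweak effect in WM_INIT_RXDESC() (illustrative
 * only): with sc_align_tweak == 2, a received frame starts 2 bytes
 * into the 2K cluster, so the 14-byte Ethernet header ends at offset
 * 16 and the IP header that follows is 4-byte aligned.  The cost is
 * that only (2048 - 2) bytes of the buffer are usable, which is why
 * the tweak must be 0 for frames that could fill the whole 2K buffer.
 */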

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_mediainit(struct wm_softc *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define	WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
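
/*
 * Usage sketch (illustrative only): the I/O BAR exposes an
 * address/data register pair, so one CSR access through I/O space is
 * two 4-byte bus accesses.  For example,
 *
 *	wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 *
 * writes the register offset to I/O offset 0 and then the value to
 * I/O offset 4.  This path exists only for errata workarounds, never
 * for normal operation.
 */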

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}
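
/*
 * Usage sketch (illustrative only; WMREG_SCTL is assumed here purely
 * for illustration): any register with this address/data/ready layout
 * can be driven through the helper above, e.g.
 *
 *	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, off, data);
 *
 * which busy-waits up to SCTL_CTL_POLL_TIMEOUT * 5us for the
 * SCTL_CTL_READY bit before giving up with a warning.
 */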

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
			 * that's not a problem, because those newer chips
			 * don't have the bugs that require I/O access.
			 *
			 * The i8254x doesn't apparently respond when the
			 * I/O BAR is 0, which looks somewhat like it's not
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
#ifdef WM_MPSAFE
	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
#endif
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
1546 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1547 /*
1548 		 * CSA (Communication Streaming Architecture) is about as
1549 		 * fast as a 32-bit 66MHz PCI bus.
1550 */
1551 sc->sc_flags |= WM_F_CSA;
1552 sc->sc_bus_speed = 66;
1553 aprint_verbose_dev(sc->sc_dev,
1554 "Communication Streaming Architecture\n");
1555 if (sc->sc_type == WM_T_82547) {
1556 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1557 callout_setfunc(&sc->sc_txfifo_ch,
1558 wm_82547_txfifo_stall, sc);
1559 aprint_verbose_dev(sc->sc_dev,
1560 "using 82547 Tx FIFO stall work-around\n");
1561 }
1562 } else if (sc->sc_type >= WM_T_82571) {
1563 sc->sc_flags |= WM_F_PCIE;
1564 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1565 && (sc->sc_type != WM_T_ICH10)
1566 && (sc->sc_type != WM_T_PCH)
1567 && (sc->sc_type != WM_T_PCH2)
1568 && (sc->sc_type != WM_T_PCH_LPT)) {
1569 /* ICH* and PCH* have no PCIe capability registers */
1570 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1571 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1572 NULL) == 0)
1573 aprint_error_dev(sc->sc_dev,
1574 "unable to find PCIe capability\n");
1575 }
1576 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1577 } else {
1578 reg = CSR_READ(sc, WMREG_STATUS);
1579 if (reg & STATUS_BUS64)
1580 sc->sc_flags |= WM_F_BUS64;
1581 if ((reg & STATUS_PCIX_MODE) != 0) {
1582 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1583
1584 sc->sc_flags |= WM_F_PCIX;
1585 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1586 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1587 aprint_error_dev(sc->sc_dev,
1588 "unable to find PCIX capability\n");
1589 else if (sc->sc_type != WM_T_82545_3 &&
1590 sc->sc_type != WM_T_82546_3) {
1591 /*
1592 * Work around a problem caused by the BIOS
1593 * setting the max memory read byte count
1594 * incorrectly.
1595 */
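				/*
				 * Illustrative: MMRBC is encoded as
				 * 512 << n (cf. the aprint below), so a
				 * command value of 3 means 4096 bytes; if
				 * the status register only allows 2 (2048
				 * bytes), the code clamps 4096 down to 2048.
				 */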
1596 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1597 sc->sc_pcixe_capoff + PCIX_CMD);
1598 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1599 sc->sc_pcixe_capoff + PCIX_STATUS);
1600
1601 bytecnt =
1602 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1603 PCIX_CMD_BYTECNT_SHIFT;
1604 maxb =
1605 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1606 PCIX_STATUS_MAXB_SHIFT;
1607 if (bytecnt > maxb) {
1608 aprint_verbose_dev(sc->sc_dev,
1609 "resetting PCI-X MMRBC: %d -> %d\n",
1610 512 << bytecnt, 512 << maxb);
1611 pcix_cmd = (pcix_cmd &
1612 ~PCIX_CMD_BYTECNT_MASK) |
1613 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1614 pci_conf_write(pa->pa_pc, pa->pa_tag,
1615 sc->sc_pcixe_capoff + PCIX_CMD,
1616 pcix_cmd);
1617 }
1618 }
1619 }
1620 /*
1621 * The quad port adapter is special; it has a PCIX-PCIX
1622 * bridge on the board, and can run the secondary bus at
1623 * a higher speed.
1624 */
1625 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1626 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1627 : 66;
1628 } else if (sc->sc_flags & WM_F_PCIX) {
1629 switch (reg & STATUS_PCIXSPD_MASK) {
1630 case STATUS_PCIXSPD_50_66:
1631 sc->sc_bus_speed = 66;
1632 break;
1633 case STATUS_PCIXSPD_66_100:
1634 sc->sc_bus_speed = 100;
1635 break;
1636 case STATUS_PCIXSPD_100_133:
1637 sc->sc_bus_speed = 133;
1638 break;
1639 default:
1640 aprint_error_dev(sc->sc_dev,
1641 "unknown PCIXSPD %d; assuming 66MHz\n",
1642 reg & STATUS_PCIXSPD_MASK);
1643 sc->sc_bus_speed = 66;
1644 break;
1645 }
1646 } else
1647 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1648 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1649 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1650 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1651 }
1652
1653 /*
1654 * Allocate the control data structures, and create and load the
1655 * DMA map for it.
1656 *
1657 * NOTE: All Tx descriptors must be in the same 4G segment of
1658 * memory. So must Rx descriptors. We simplify by allocating
1659 * both sets within the same 4G segment.
1660 */
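	/*
	 * (The 4G constraint is enforced by the boundary argument below:
	 * passing 0x100000000ULL to bus_dmamem_alloc() means the returned
	 * segment may not cross a 4 GB boundary.)
	 */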
1661 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1662 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1663 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1664 sizeof(struct wm_control_data_82542) :
1665 sizeof(struct wm_control_data_82544);
1666 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1667 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1668 &sc->sc_cd_rseg, 0)) != 0) {
1669 aprint_error_dev(sc->sc_dev,
1670 "unable to allocate control data, error = %d\n",
1671 error);
1672 goto fail_0;
1673 }
1674
1675 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1676 sc->sc_cd_rseg, sc->sc_cd_size,
1677 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1678 aprint_error_dev(sc->sc_dev,
1679 "unable to map control data, error = %d\n", error);
1680 goto fail_1;
1681 }
1682
1683 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1684 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1685 aprint_error_dev(sc->sc_dev,
1686 "unable to create control data DMA map, error = %d\n",
1687 error);
1688 goto fail_2;
1689 }
1690
1691 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1692 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1693 aprint_error_dev(sc->sc_dev,
1694 "unable to load control data DMA map, error = %d\n",
1695 error);
1696 goto fail_3;
1697 }
1698
1699 /* Create the transmit buffer DMA maps. */
1700 WM_TXQUEUELEN(sc) =
1701 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1702 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1703 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1704 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1705 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1706 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1707 aprint_error_dev(sc->sc_dev,
1708 "unable to create Tx DMA map %d, error = %d\n",
1709 i, error);
1710 goto fail_4;
1711 }
1712 }
1713
1714 /* Create the receive buffer DMA maps. */
1715 for (i = 0; i < WM_NRXDESC; i++) {
1716 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1717 MCLBYTES, 0, 0,
1718 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1719 aprint_error_dev(sc->sc_dev,
1720 "unable to create Rx DMA map %d error = %d\n",
1721 i, error);
1722 goto fail_5;
1723 }
1724 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1725 }
1726
1727 /* clear interesting stat counters */
1728 CSR_READ(sc, WMREG_COLC);
1729 CSR_READ(sc, WMREG_RXERRC);
1730
1731 /* get PHY control from SMBus to PCIe */
1732 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1733 || (sc->sc_type == WM_T_PCH_LPT))
1734 wm_smbustopci(sc);
1735
1736 /* Reset the chip to a known state. */
1737 wm_reset(sc);
1738
1739 /* Get some information about the EEPROM. */
1740 switch (sc->sc_type) {
1741 case WM_T_82542_2_0:
1742 case WM_T_82542_2_1:
1743 case WM_T_82543:
1744 case WM_T_82544:
1745 /* Microwire */
1746 sc->sc_nvm_wordsize = 64;
1747 sc->sc_nvm_addrbits = 6;
1748 break;
1749 case WM_T_82540:
1750 case WM_T_82545:
1751 case WM_T_82545_3:
1752 case WM_T_82546:
1753 case WM_T_82546_3:
1754 /* Microwire */
1755 reg = CSR_READ(sc, WMREG_EECD);
1756 if (reg & EECD_EE_SIZE) {
1757 sc->sc_nvm_wordsize = 256;
1758 sc->sc_nvm_addrbits = 8;
1759 } else {
1760 sc->sc_nvm_wordsize = 64;
1761 sc->sc_nvm_addrbits = 6;
1762 }
1763 sc->sc_flags |= WM_F_LOCK_EECD;
1764 break;
1765 case WM_T_82541:
1766 case WM_T_82541_2:
1767 case WM_T_82547:
1768 case WM_T_82547_2:
1769 sc->sc_flags |= WM_F_LOCK_EECD;
1770 reg = CSR_READ(sc, WMREG_EECD);
1771 if (reg & EECD_EE_TYPE) {
1772 /* SPI */
1773 sc->sc_flags |= WM_F_EEPROM_SPI;
1774 wm_nvm_set_addrbits_size_eecd(sc);
1775 } else {
1776 /* Microwire */
1777 if ((reg & EECD_EE_ABITS) != 0) {
1778 sc->sc_nvm_wordsize = 256;
1779 sc->sc_nvm_addrbits = 8;
1780 } else {
1781 sc->sc_nvm_wordsize = 64;
1782 sc->sc_nvm_addrbits = 6;
1783 }
1784 }
1785 break;
1786 case WM_T_82571:
1787 case WM_T_82572:
1788 /* SPI */
1789 sc->sc_flags |= WM_F_EEPROM_SPI;
1790 wm_nvm_set_addrbits_size_eecd(sc);
1791 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1792 break;
1793 case WM_T_82573:
1794 sc->sc_flags |= WM_F_LOCK_SWSM;
1795 /* FALLTHROUGH */
1796 case WM_T_82574:
1797 case WM_T_82583:
1798 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1799 sc->sc_flags |= WM_F_EEPROM_FLASH;
1800 sc->sc_nvm_wordsize = 2048;
1801 } else {
1802 /* SPI */
1803 sc->sc_flags |= WM_F_EEPROM_SPI;
1804 wm_nvm_set_addrbits_size_eecd(sc);
1805 }
1806 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1807 break;
1808 case WM_T_82575:
1809 case WM_T_82576:
1810 case WM_T_82580:
1811 case WM_T_I350:
1812 case WM_T_I354:
1813 case WM_T_80003:
1814 /* SPI */
1815 sc->sc_flags |= WM_F_EEPROM_SPI;
1816 wm_nvm_set_addrbits_size_eecd(sc);
1817 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1818 | WM_F_LOCK_SWSM;
1819 break;
1820 case WM_T_ICH8:
1821 case WM_T_ICH9:
1822 case WM_T_ICH10:
1823 case WM_T_PCH:
1824 case WM_T_PCH2:
1825 case WM_T_PCH_LPT:
1826 /* FLASH */
1827 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1828 sc->sc_nvm_wordsize = 2048;
1829 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1830 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1831 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1832 aprint_error_dev(sc->sc_dev,
1833 "can't map FLASH registers\n");
1834 goto fail_5;
1835 }
1836 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1837 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1838 ICH_FLASH_SECTOR_SIZE;
1839 sc->sc_ich8_flash_bank_size =
1840 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1841 sc->sc_ich8_flash_bank_size -=
1842 (reg & ICH_GFPREG_BASE_MASK);
1843 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1844 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
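		/*
		 * Worked example (illustrative, assuming 4 KB flash
		 * sectors): a GFPREG base field of 0 and a limit field of
		 * 7 give 8 sectors = 32768 bytes of NVM; halving for the
		 * two flash banks and dividing by sizeof(uint16_t) leaves
		 * a bank size of 8192 16-bit words.
		 */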
1845 break;
1846 case WM_T_I210:
1847 case WM_T_I211:
1848 if (wm_nvm_get_flash_presence_i210(sc)) {
1849 wm_nvm_set_addrbits_size_eecd(sc);
1850 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1851 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1852 } else {
1853 sc->sc_nvm_wordsize = INVM_SIZE;
1854 sc->sc_flags |= WM_F_EEPROM_INVM;
1855 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1856 }
1857 break;
1858 default:
1859 break;
1860 }
1861
1862 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1863 switch (sc->sc_type) {
1864 case WM_T_82571:
1865 case WM_T_82572:
1866 reg = CSR_READ(sc, WMREG_SWSM2);
1867 if ((reg & SWSM2_LOCK) == 0) {
1868 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1869 force_clear_smbi = true;
1870 } else
1871 force_clear_smbi = false;
1872 break;
1873 case WM_T_82573:
1874 case WM_T_82574:
1875 case WM_T_82583:
1876 force_clear_smbi = true;
1877 break;
1878 default:
1879 force_clear_smbi = false;
1880 break;
1881 }
1882 if (force_clear_smbi) {
1883 reg = CSR_READ(sc, WMREG_SWSM);
1884 if ((reg & SWSM_SMBI) != 0)
1885 aprint_error_dev(sc->sc_dev,
1886 "Please update the Bootagent\n");
1887 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1888 }
1889
1890 /*
1891 	 * Defer printing the EEPROM type until after verifying the checksum.
1892 * This allows the EEPROM type to be printed correctly in the case
1893 * that no EEPROM is attached.
1894 */
1895 /*
1896 * Validate the EEPROM checksum. If the checksum fails, flag
1897 * this for later, so we can fail future reads from the EEPROM.
1898 */
1899 if (wm_nvm_validate_checksum(sc)) {
1900 /*
1901 		 * Validate a second time, because some PCI-e parts fail
1902 		 * the first check due to the link being in a sleep state.
1903 */
1904 if (wm_nvm_validate_checksum(sc))
1905 sc->sc_flags |= WM_F_EEPROM_INVALID;
1906 }
1907
1908 /* Set device properties (macflags) */
1909 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1910
1911 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1912 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1913 else {
1914 aprint_verbose_dev(sc->sc_dev, "%u words ",
1915 sc->sc_nvm_wordsize);
1916 if (sc->sc_flags & WM_F_EEPROM_INVM)
1917 aprint_verbose("iNVM\n");
1918 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
1919 aprint_verbose("FLASH(HW)\n");
1920 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
1921 aprint_verbose("FLASH\n");
1922 else {
1923 if (sc->sc_flags & WM_F_EEPROM_SPI)
1924 eetype = "SPI";
1925 else
1926 eetype = "MicroWire";
1927 aprint_verbose("(%d address bits) %s EEPROM\n",
1928 sc->sc_nvm_addrbits, eetype);
1929 }
1930 }
1931
1932 switch (sc->sc_type) {
1933 case WM_T_82571:
1934 case WM_T_82572:
1935 case WM_T_82573:
1936 case WM_T_82574:
1937 case WM_T_82583:
1938 case WM_T_80003:
1939 case WM_T_ICH8:
1940 case WM_T_ICH9:
1941 case WM_T_ICH10:
1942 case WM_T_PCH:
1943 case WM_T_PCH2:
1944 case WM_T_PCH_LPT:
1945 if (wm_check_mng_mode(sc) != 0)
1946 wm_get_hw_control(sc);
1947 break;
1948 default:
1949 break;
1950 }
1951 wm_get_wakeup(sc);
1952 /*
1953 * Read the Ethernet address from the EEPROM, if not first found
1954 * in device properties.
1955 */
1956 ea = prop_dictionary_get(dict, "mac-address");
1957 if (ea != NULL) {
1958 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1959 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1960 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1961 } else {
1962 if (wm_read_mac_addr(sc, enaddr) != 0) {
1963 aprint_error_dev(sc->sc_dev,
1964 "unable to read Ethernet address\n");
1965 goto fail_5;
1966 }
1967 }
1968
1969 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1970 ether_sprintf(enaddr));
1971
1972 /*
1973 * Read the config info from the EEPROM, and set up various
1974 * bits in the control registers based on their contents.
1975 */
1976 pn = prop_dictionary_get(dict, "i82543-cfg1");
1977 if (pn != NULL) {
1978 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1979 cfg1 = (uint16_t) prop_number_integer_value(pn);
1980 } else {
1981 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
1982 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1983 goto fail_5;
1984 }
1985 }
1986
1987 pn = prop_dictionary_get(dict, "i82543-cfg2");
1988 if (pn != NULL) {
1989 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1990 cfg2 = (uint16_t) prop_number_integer_value(pn);
1991 } else {
1992 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
1993 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1994 goto fail_5;
1995 }
1996 }
1997
1998 /* check for WM_F_WOL */
1999 switch (sc->sc_type) {
2000 case WM_T_82542_2_0:
2001 case WM_T_82542_2_1:
2002 case WM_T_82543:
2003 /* dummy? */
2004 eeprom_data = 0;
2005 apme_mask = NVM_CFG3_APME;
2006 break;
2007 case WM_T_82544:
2008 apme_mask = NVM_CFG2_82544_APM_EN;
2009 eeprom_data = cfg2;
2010 break;
2011 case WM_T_82546:
2012 case WM_T_82546_3:
2013 case WM_T_82571:
2014 case WM_T_82572:
2015 case WM_T_82573:
2016 case WM_T_82574:
2017 case WM_T_82583:
2018 case WM_T_80003:
2019 default:
2020 apme_mask = NVM_CFG3_APME;
2021 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2022 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2023 break;
2024 case WM_T_82575:
2025 case WM_T_82576:
2026 case WM_T_82580:
2027 case WM_T_I350:
2028 case WM_T_I354: /* XXX ok? */
2029 case WM_T_ICH8:
2030 case WM_T_ICH9:
2031 case WM_T_ICH10:
2032 case WM_T_PCH:
2033 case WM_T_PCH2:
2034 case WM_T_PCH_LPT:
2035 /* XXX The funcid should be checked on some devices */
2036 apme_mask = WUC_APME;
2037 eeprom_data = CSR_READ(sc, WMREG_WUC);
2038 break;
2039 }
2040
2041 	/* Check for the WM_F_WOL flag now that the EEPROM settings are read */
2042 if ((eeprom_data & apme_mask) != 0)
2043 sc->sc_flags |= WM_F_WOL;
2044 #ifdef WM_DEBUG
2045 if ((sc->sc_flags & WM_F_WOL) != 0)
2046 printf("WOL\n");
2047 #endif
2048
2049 /*
2050 	 * XXX need special handling for some multiple-port cards
2051 	 * to disable a particular port.
2052 */
2053
2054 if (sc->sc_type >= WM_T_82544) {
2055 pn = prop_dictionary_get(dict, "i82543-swdpin");
2056 if (pn != NULL) {
2057 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2058 swdpin = (uint16_t) prop_number_integer_value(pn);
2059 } else {
2060 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2061 aprint_error_dev(sc->sc_dev,
2062 "unable to read SWDPIN\n");
2063 goto fail_5;
2064 }
2065 }
2066 }
2067
2068 if (cfg1 & NVM_CFG1_ILOS)
2069 sc->sc_ctrl |= CTRL_ILOS;
2070 if (sc->sc_type >= WM_T_82544) {
2071 sc->sc_ctrl |=
2072 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2073 CTRL_SWDPIO_SHIFT;
2074 sc->sc_ctrl |=
2075 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2076 CTRL_SWDPINS_SHIFT;
2077 } else {
2078 sc->sc_ctrl |=
2079 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2080 CTRL_SWDPIO_SHIFT;
2081 }
2082
2083 #if 0
2084 if (sc->sc_type >= WM_T_82544) {
2085 if (cfg1 & NVM_CFG1_IPS0)
2086 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2087 if (cfg1 & NVM_CFG1_IPS1)
2088 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2089 sc->sc_ctrl_ext |=
2090 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2091 CTRL_EXT_SWDPIO_SHIFT;
2092 sc->sc_ctrl_ext |=
2093 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2094 CTRL_EXT_SWDPINS_SHIFT;
2095 } else {
2096 sc->sc_ctrl_ext |=
2097 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2098 CTRL_EXT_SWDPIO_SHIFT;
2099 }
2100 #endif
2101
2102 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2103 #if 0
2104 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2105 #endif
2106
2107 /*
2108 * Set up some register offsets that are different between
2109 * the i82542 and the i82543 and later chips.
2110 */
2111 if (sc->sc_type < WM_T_82543) {
2112 sc->sc_rdt_reg = WMREG_OLD_RDT0;
2113 sc->sc_tdt_reg = WMREG_OLD_TDT;
2114 } else {
2115 sc->sc_rdt_reg = WMREG_RDT;
2116 sc->sc_tdt_reg = WMREG_TDT;
2117 }
2118
2119 if (sc->sc_type == WM_T_PCH) {
2120 uint16_t val;
2121
2122 /* Save the NVM K1 bit setting */
2123 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2124
2125 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2126 sc->sc_nvm_k1_enabled = 1;
2127 else
2128 sc->sc_nvm_k1_enabled = 0;
2129 }
2130
2131 /*
2132 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
2133 * media structures accordingly.
2134 */
2135 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2136 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2137 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2138 || sc->sc_type == WM_T_82573
2139 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2140 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2141 wm_gmii_mediainit(sc, wmp->wmp_product);
2142 } else if (sc->sc_type < WM_T_82543 ||
2143 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2144 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2145 aprint_error_dev(sc->sc_dev,
2146 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2147 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2148 }
2149 wm_tbi_mediainit(sc);
2150 } else {
2151 switch (sc->sc_type) {
2152 case WM_T_82575:
2153 case WM_T_82576:
2154 case WM_T_82580:
2155 case WM_T_I350:
2156 case WM_T_I354:
2157 case WM_T_I210:
2158 case WM_T_I211:
2159 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2160 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2161 switch (link_mode) {
2162 case CTRL_EXT_LINK_MODE_1000KX:
2163 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2164 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2165 break;
2166 case CTRL_EXT_LINK_MODE_SGMII:
2167 if (wm_sgmii_uses_mdio(sc)) {
2168 aprint_verbose_dev(sc->sc_dev,
2169 "SGMII(MDIO)\n");
2170 sc->sc_flags |= WM_F_SGMII;
2171 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2172 break;
2173 }
2174 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2175 /*FALLTHROUGH*/
2176 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2177 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2178 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2179 if (link_mode
2180 == CTRL_EXT_LINK_MODE_SGMII) {
2181 sc->sc_mediatype
2182 = WM_MEDIATYPE_COPPER;
2183 sc->sc_flags |= WM_F_SGMII;
2184 } else {
2185 sc->sc_mediatype
2186 = WM_MEDIATYPE_SERDES;
2187 aprint_verbose_dev(sc->sc_dev,
2188 "SERDES\n");
2189 }
2190 break;
2191 }
2192 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2193 aprint_verbose_dev(sc->sc_dev,
2194 "SERDES\n");
2195
2196 /* Change current link mode setting */
2197 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2198 switch (sc->sc_mediatype) {
2199 case WM_MEDIATYPE_COPPER:
2200 reg |= CTRL_EXT_LINK_MODE_SGMII;
2201 break;
2202 case WM_MEDIATYPE_SERDES:
2203 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2204 break;
2205 default:
2206 break;
2207 }
2208 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2209 break;
2210 case CTRL_EXT_LINK_MODE_GMII:
2211 default:
2212 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2213 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2214 break;
2215 }
2216
2218 			if ((sc->sc_flags & WM_F_SGMII) != 0)
2219 				reg |= CTRL_EXT_I2C_ENA;
2220 			else
2221 				reg &= ~CTRL_EXT_I2C_ENA;
2222 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2223
2224 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2225 wm_gmii_mediainit(sc, wmp->wmp_product);
2226 else
2227 wm_tbi_mediainit(sc);
2228 break;
2229 default:
2230 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2231 aprint_error_dev(sc->sc_dev,
2232 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2233 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2234 wm_gmii_mediainit(sc, wmp->wmp_product);
2235 }
2236 }
2237
2238 ifp = &sc->sc_ethercom.ec_if;
2239 xname = device_xname(sc->sc_dev);
2240 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2241 ifp->if_softc = sc;
2242 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2243 ifp->if_ioctl = wm_ioctl;
2244 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2245 ifp->if_start = wm_nq_start;
2246 else
2247 ifp->if_start = wm_start;
2248 ifp->if_watchdog = wm_watchdog;
2249 ifp->if_init = wm_init;
2250 ifp->if_stop = wm_stop;
2251 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2252 IFQ_SET_READY(&ifp->if_snd);
2253
2254 /* Check for jumbo frame */
2255 switch (sc->sc_type) {
2256 case WM_T_82573:
2257 /* XXX limited to 9234 if ASPM is disabled */
2258 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &io3);
2259 if ((io3 & NVM_3GIO_3_ASPM_MASK) != 0)
2260 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2261 break;
2262 case WM_T_82571:
2263 case WM_T_82572:
2264 case WM_T_82574:
2265 case WM_T_82575:
2266 case WM_T_82576:
2267 case WM_T_82580:
2268 case WM_T_I350:
2269 case WM_T_I354: /* XXXX ok? */
2270 case WM_T_I210:
2271 case WM_T_I211:
2272 case WM_T_80003:
2273 case WM_T_ICH9:
2274 case WM_T_ICH10:
2275 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2276 case WM_T_PCH_LPT:
2277 /* XXX limited to 9234 */
2278 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2279 break;
2280 case WM_T_PCH:
2281 /* XXX limited to 4096 */
2282 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2283 break;
2284 case WM_T_82542_2_0:
2285 case WM_T_82542_2_1:
2286 case WM_T_82583:
2287 case WM_T_ICH8:
2288 /* No support for jumbo frame */
2289 break;
2290 default:
2291 /* ETHER_MAX_LEN_JUMBO */
2292 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2293 break;
2294 }
2295
2296 	/* If we're an i82543 or greater, we can support VLANs. */
2297 if (sc->sc_type >= WM_T_82543)
2298 sc->sc_ethercom.ec_capabilities |=
2299 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2300
2301 /*
2302 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
2303 	 * on the i82543 and later.
2304 */
2305 if (sc->sc_type >= WM_T_82543) {
2306 ifp->if_capabilities |=
2307 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2308 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2309 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2310 IFCAP_CSUM_TCPv6_Tx |
2311 IFCAP_CSUM_UDPv6_Tx;
2312 }
2313
2314 /*
2315 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
2316 *
2317 * 82541GI (8086:1076) ... no
2318 * 82572EI (8086:10b9) ... yes
2319 */
2320 if (sc->sc_type >= WM_T_82571) {
2321 ifp->if_capabilities |=
2322 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2323 }
2324
2325 /*
2326 	 * If we're an i82544 or greater (except i82547), we can do
2327 * TCP segmentation offload.
2328 */
2329 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2330 ifp->if_capabilities |= IFCAP_TSOv4;
2331 }
2332
2333 if (sc->sc_type >= WM_T_82571) {
2334 ifp->if_capabilities |= IFCAP_TSOv6;
2335 }
2336
2337 #ifdef WM_MPSAFE
2338 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2339 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2340 #else
2341 sc->sc_tx_lock = NULL;
2342 sc->sc_rx_lock = NULL;
2343 #endif
2344
2345 /* Attach the interface. */
2346 if_attach(ifp);
2347 ether_ifattach(ifp, enaddr);
2348 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2349 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2350 RND_FLAG_DEFAULT);
2351
2352 #ifdef WM_EVENT_COUNTERS
2353 /* Attach event counters. */
2354 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2355 NULL, xname, "txsstall");
2356 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2357 NULL, xname, "txdstall");
2358 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2359 NULL, xname, "txfifo_stall");
2360 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2361 NULL, xname, "txdw");
2362 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2363 NULL, xname, "txqe");
2364 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2365 NULL, xname, "rxintr");
2366 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2367 NULL, xname, "linkintr");
2368
2369 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2370 NULL, xname, "rxipsum");
2371 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2372 NULL, xname, "rxtusum");
2373 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2374 NULL, xname, "txipsum");
2375 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2376 NULL, xname, "txtusum");
2377 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2378 NULL, xname, "txtusum6");
2379
2380 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2381 NULL, xname, "txtso");
2382 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2383 NULL, xname, "txtso6");
2384 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2385 NULL, xname, "txtsopain");
2386
2387 for (i = 0; i < WM_NTXSEGS; i++) {
2388 snprintf(wm_txseg_evcnt_names[i],
2389 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2390 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2391 NULL, xname, wm_txseg_evcnt_names[i]);
2392 }
2393
2394 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2395 NULL, xname, "txdrop");
2396
2397 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2398 NULL, xname, "tu");
2399
2400 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2401 NULL, xname, "tx_xoff");
2402 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2403 NULL, xname, "tx_xon");
2404 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2405 NULL, xname, "rx_xoff");
2406 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2407 NULL, xname, "rx_xon");
2408 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2409 NULL, xname, "rx_macctl");
2410 #endif /* WM_EVENT_COUNTERS */
2411
2412 if (pmf_device_register(self, wm_suspend, wm_resume))
2413 pmf_class_network_register(self, ifp);
2414 else
2415 aprint_error_dev(self, "couldn't establish power handler\n");
2416
2417 sc->sc_flags |= WM_F_ATTACHED;
2418 return;
2419
2420 /*
2421 * Free any resources we've allocated during the failed attach
2422 * attempt. Do this in reverse order and fall through.
2423 */
2424 fail_5:
2425 for (i = 0; i < WM_NRXDESC; i++) {
2426 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2427 bus_dmamap_destroy(sc->sc_dmat,
2428 sc->sc_rxsoft[i].rxs_dmamap);
2429 }
2430 fail_4:
2431 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2432 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2433 bus_dmamap_destroy(sc->sc_dmat,
2434 sc->sc_txsoft[i].txs_dmamap);
2435 }
2436 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2437 fail_3:
2438 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2439 fail_2:
2440 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2441 sc->sc_cd_size);
2442 fail_1:
2443 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2444 fail_0:
2445 return;
2446 }
2447
2448 /* The detach function (ca_detach) */
2449 static int
2450 wm_detach(device_t self, int flags __unused)
2451 {
2452 struct wm_softc *sc = device_private(self);
2453 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2454 int i;
2455 #ifndef WM_MPSAFE
2456 int s;
2457 #endif
2458
2459 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2460 return 0;
2461
2462 #ifndef WM_MPSAFE
2463 s = splnet();
2464 #endif
2465 	/* Stop the interface; callouts are stopped inside wm_stop(). */
2466 wm_stop(ifp, 1);
2467
2468 #ifndef WM_MPSAFE
2469 splx(s);
2470 #endif
2471
2472 pmf_device_deregister(self);
2473
2474 /* Tell the firmware about the release */
2475 WM_BOTH_LOCK(sc);
2476 wm_release_manageability(sc);
2477 wm_release_hw_control(sc);
2478 WM_BOTH_UNLOCK(sc);
2479
2480 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2481
2482 /* Delete all remaining media. */
2483 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2484
2485 ether_ifdetach(ifp);
2486 if_detach(ifp);
2487
2489 /* Unload RX dmamaps and free mbufs */
2490 WM_RX_LOCK(sc);
2491 wm_rxdrain(sc);
2492 WM_RX_UNLOCK(sc);
2493 /* Must unlock here */
2494
2495 	/* Free DMA maps; same cleanup as at the end of wm_attach() */
2496 for (i = 0; i < WM_NRXDESC; i++) {
2497 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2498 bus_dmamap_destroy(sc->sc_dmat,
2499 sc->sc_rxsoft[i].rxs_dmamap);
2500 }
2501 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2502 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2503 bus_dmamap_destroy(sc->sc_dmat,
2504 sc->sc_txsoft[i].txs_dmamap);
2505 }
2506 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2507 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2508 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2509 sc->sc_cd_size);
2510 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2511
2512 /* Disestablish the interrupt handler */
2513 if (sc->sc_ih != NULL) {
2514 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2515 sc->sc_ih = NULL;
2516 }
2517
2518 /* Unmap the registers */
2519 if (sc->sc_ss) {
2520 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2521 sc->sc_ss = 0;
2522 }
2523
2524 if (sc->sc_ios) {
2525 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2526 sc->sc_ios = 0;
2527 }
2528
2529 if (sc->sc_tx_lock)
2530 mutex_obj_free(sc->sc_tx_lock);
2531 if (sc->sc_rx_lock)
2532 mutex_obj_free(sc->sc_rx_lock);
2533
2534 return 0;
2535 }
2536
2537 static bool
2538 wm_suspend(device_t self, const pmf_qual_t *qual)
2539 {
2540 struct wm_softc *sc = device_private(self);
2541
2542 wm_release_manageability(sc);
2543 wm_release_hw_control(sc);
2544 #ifdef WM_WOL
2545 wm_enable_wakeup(sc);
2546 #endif
2547
2548 return true;
2549 }
2550
2551 static bool
2552 wm_resume(device_t self, const pmf_qual_t *qual)
2553 {
2554 struct wm_softc *sc = device_private(self);
2555
2556 wm_init_manageability(sc);
2557
2558 return true;
2559 }
2560
2561 /*
2562 * wm_watchdog: [ifnet interface function]
2563 *
2564 * Watchdog timer handler.
2565 */
2566 static void
2567 wm_watchdog(struct ifnet *ifp)
2568 {
2569 struct wm_softc *sc = ifp->if_softc;
2570
2571 /*
2572 * Since we're using delayed interrupts, sweep up
2573 * before we report an error.
2574 */
2575 WM_TX_LOCK(sc);
2576 wm_txintr(sc);
2577 WM_TX_UNLOCK(sc);
2578
2579 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2580 #ifdef WM_DEBUG
2581 int i, j;
2582 struct wm_txsoft *txs;
2583 #endif
2584 log(LOG_ERR,
2585 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2586 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2587 sc->sc_txnext);
2588 ifp->if_oerrors++;
2589 #ifdef WM_DEBUG
2590 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2591 i = WM_NEXTTXS(sc, i)) {
2592 txs = &sc->sc_txsoft[i];
2593 printf("txs %d tx %d -> %d\n",
2594 i, txs->txs_firstdesc, txs->txs_lastdesc);
2595 for (j = txs->txs_firstdesc; ;
2596 j = WM_NEXTTX(sc, j)) {
2597 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2598 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2599 printf("\t %#08x%08x\n",
2600 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2601 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2602 if (j == txs->txs_lastdesc)
2603 break;
2604 }
2605 }
2606 #endif
2607 /* Reset the interface. */
2608 (void) wm_init(ifp);
2609 }
2610
2611 /* Try to get more packets going. */
2612 ifp->if_start(ifp);
2613 }
2614
2615 /*
2616 * wm_tick:
2617 *
2618 * One second timer, used to check link status, sweep up
2619 * completed transmit jobs, etc.
2620 */
2621 static void
2622 wm_tick(void *arg)
2623 {
2624 struct wm_softc *sc = arg;
2625 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2626 #ifndef WM_MPSAFE
2627 int s;
2628
2629 s = splnet();
2630 #endif
2631
2632 WM_TX_LOCK(sc);
2633
2634 if (sc->sc_stopping)
2635 goto out;
2636
2637 if (sc->sc_type >= WM_T_82542_2_1) {
2638 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2639 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2640 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2641 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2642 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2643 }
2644
2645 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2646 	ifp->if_ierrors += 0ULL /* ensure quad_t */
2647 	    + CSR_READ(sc, WMREG_CRCERRS)
2648 + CSR_READ(sc, WMREG_ALGNERRC)
2649 + CSR_READ(sc, WMREG_SYMERRC)
2650 + CSR_READ(sc, WMREG_RXERRC)
2651 + CSR_READ(sc, WMREG_SEC)
2652 + CSR_READ(sc, WMREG_CEXTERR)
2653 + CSR_READ(sc, WMREG_RLEC);
2654 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2655
2656 if (sc->sc_flags & WM_F_HAS_MII)
2657 mii_tick(&sc->sc_mii);
2658 else
2659 wm_tbi_check_link(sc);
2660
2661 out:
2662 WM_TX_UNLOCK(sc);
2663 #ifndef WM_MPSAFE
2664 splx(s);
2665 #endif
2666
2667 if (!sc->sc_stopping)
2668 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2669 }
2670
2671 static int
2672 wm_ifflags_cb(struct ethercom *ec)
2673 {
2674 struct ifnet *ifp = &ec->ec_if;
2675 struct wm_softc *sc = ifp->if_softc;
2676 int change = ifp->if_flags ^ sc->sc_if_flags;
2677 int rc = 0;
2678
2679 WM_BOTH_LOCK(sc);
2680
2681 if (change != 0)
2682 sc->sc_if_flags = ifp->if_flags;
2683
2684 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2685 rc = ENETRESET;
2686 goto out;
2687 }
2688
2689 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2690 wm_set_filter(sc);
2691
2692 wm_set_vlan(sc);
2693
2694 out:
2695 WM_BOTH_UNLOCK(sc);
2696
2697 return rc;
2698 }
2699
2700 /*
2701 * wm_ioctl: [ifnet interface function]
2702 *
2703 * Handle control requests from the operator.
2704 */
2705 static int
2706 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2707 {
2708 struct wm_softc *sc = ifp->if_softc;
2709 struct ifreq *ifr = (struct ifreq *) data;
2710 struct ifaddr *ifa = (struct ifaddr *)data;
2711 struct sockaddr_dl *sdl;
2712 int s, error;
2713
2714 #ifndef WM_MPSAFE
2715 s = splnet();
2716 #endif
2717 switch (cmd) {
2718 case SIOCSIFMEDIA:
2719 case SIOCGIFMEDIA:
2720 WM_BOTH_LOCK(sc);
2721 /* Flow control requires full-duplex mode. */
2722 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2723 (ifr->ifr_media & IFM_FDX) == 0)
2724 ifr->ifr_media &= ~IFM_ETH_FMASK;
2725 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2726 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2727 /* We can do both TXPAUSE and RXPAUSE. */
2728 ifr->ifr_media |=
2729 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2730 }
2731 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2732 }
2733 WM_BOTH_UNLOCK(sc);
2734 #ifdef WM_MPSAFE
2735 s = splnet();
2736 #endif
2737 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2738 #ifdef WM_MPSAFE
2739 splx(s);
2740 #endif
2741 break;
2742 case SIOCINITIFADDR:
2743 WM_BOTH_LOCK(sc);
2744 if (ifa->ifa_addr->sa_family == AF_LINK) {
2745 sdl = satosdl(ifp->if_dl->ifa_addr);
2746 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2747 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2748 /* unicast address is first multicast entry */
2749 wm_set_filter(sc);
2750 error = 0;
2751 WM_BOTH_UNLOCK(sc);
2752 break;
2753 }
2754 WM_BOTH_UNLOCK(sc);
2755 /*FALLTHROUGH*/
2756 default:
2757 #ifdef WM_MPSAFE
2758 s = splnet();
2759 #endif
2760 /* It may call wm_start, so unlock here */
2761 error = ether_ioctl(ifp, cmd, data);
2762 #ifdef WM_MPSAFE
2763 splx(s);
2764 #endif
2765 if (error != ENETRESET)
2766 break;
2767
2768 error = 0;
2769
2770 if (cmd == SIOCSIFCAP) {
2771 error = (*ifp->if_init)(ifp);
2772 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2773 ;
2774 else if (ifp->if_flags & IFF_RUNNING) {
2775 /*
2776 * Multicast list has changed; set the hardware filter
2777 * accordingly.
2778 */
2779 WM_BOTH_LOCK(sc);
2780 wm_set_filter(sc);
2781 WM_BOTH_UNLOCK(sc);
2782 }
2783 break;
2784 }
2785
2786 /* Try to get more packets going. */
2787 ifp->if_start(ifp);
2788
2789 #ifndef WM_MPSAFE
2790 splx(s);
2791 #endif
2792 return error;
2793 }
2794
2795 /* MAC address related */
2796
2797 /*
2798  * Get the offset of the MAC address and return it.
2799  * If an error occurs, use offset 0.
2800 */
2801 static uint16_t
2802 wm_check_alt_mac_addr(struct wm_softc *sc)
2803 {
2804 uint16_t myea[ETHER_ADDR_LEN / 2];
2805 uint16_t offset = NVM_OFF_MACADDR;
2806
2807 /* Try to read alternative MAC address pointer */
2808 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2809 return 0;
2810
2811 	/* Check whether the pointer is valid. */
2812 if ((offset == 0x0000) || (offset == 0xffff))
2813 return 0;
2814
2815 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
2816 /*
2817 	 * Check whether the alternative MAC address is valid.
2818 	 * Some cards have a non-0xffff pointer but don't actually
2819 	 * use an alternative MAC address.
2820 	 *
2821 	 * Reject it if the broadcast bit (the low bit of the first octet) is set.
2822 */
2823 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2824 if (((myea[0] & 0xff) & 0x01) == 0)
2825 return offset; /* Found */
2826
2827 /* Not found */
2828 return 0;
2829 }
2830
2831 static int
2832 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2833 {
2834 uint16_t myea[ETHER_ADDR_LEN / 2];
2835 uint16_t offset = NVM_OFF_MACADDR;
2836 int do_invert = 0;
2837
2838 switch (sc->sc_type) {
2839 case WM_T_82580:
2840 case WM_T_I350:
2841 case WM_T_I354:
2842 /* EEPROM Top Level Partitioning */
2843 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
2844 break;
2845 case WM_T_82571:
2846 case WM_T_82575:
2847 case WM_T_82576:
2848 case WM_T_80003:
2849 case WM_T_I210:
2850 case WM_T_I211:
2851 offset = wm_check_alt_mac_addr(sc);
2852 if (offset == 0)
2853 if ((sc->sc_funcid & 0x01) == 1)
2854 do_invert = 1;
2855 break;
2856 default:
2857 if ((sc->sc_funcid & 0x01) == 1)
2858 do_invert = 1;
2859 break;
2860 }
2861
2862 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2863 myea) != 0)
2864 goto bad;
2865
2866 enaddr[0] = myea[0] & 0xff;
2867 enaddr[1] = myea[0] >> 8;
2868 enaddr[2] = myea[1] & 0xff;
2869 enaddr[3] = myea[1] >> 8;
2870 enaddr[4] = myea[2] & 0xff;
2871 enaddr[5] = myea[2] >> 8;
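	/*
	 * Each 16-bit NVM word holds two octets, low byte first, so e.g.
	 * words { 0x1100, 0x3322, 0x5544 } unpack to 00:11:22:33:44:55.
	 */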
2872
2873 /*
2874 * Toggle the LSB of the MAC address on the second port
2875 * of some dual port cards.
2876 */
2877 if (do_invert != 0)
2878 enaddr[5] ^= 1;
2879
2880 return 0;
2881
2882 bad:
2883 return -1;
2884 }
2885
2886 /*
2887 * wm_set_ral:
2888 *
2889  * Set an entry in the receive address list.
2890 */
2891 static void
2892 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2893 {
2894 uint32_t ral_lo, ral_hi;
2895
2896 if (enaddr != NULL) {
2897 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2898 (enaddr[3] << 24);
2899 ral_hi = enaddr[4] | (enaddr[5] << 8);
2900 ral_hi |= RAL_AV;
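		/*
		 * E.g. 00:11:22:33:44:55 packs to ral_lo 0x33221100 and
		 * ral_hi 0x00005544, with RAL_AV marking the entry valid.
		 */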
2901 } else {
2902 ral_lo = 0;
2903 ral_hi = 0;
2904 }
2905
2906 if (sc->sc_type >= WM_T_82544) {
2907 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2908 ral_lo);
2909 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2910 ral_hi);
2911 } else {
2912 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2913 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2914 }
2915 }
2916
2917 /*
2918 * wm_mchash:
2919 *
2920 * Compute the hash of the multicast address for the 4096-bit
2921 * multicast filter.
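 *
 * For example (illustrative), with sc_mchash_type 0 on a non-ICH
 * part, the address 01:00:5e:00:00:01 hashes to
 * (0x00 >> 4) | (0x01 << 4) = 0x010.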
2922 */
2923 static uint32_t
2924 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2925 {
2926 static const int lo_shift[4] = { 4, 3, 2, 0 };
2927 static const int hi_shift[4] = { 4, 5, 6, 8 };
2928 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2929 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
2930 uint32_t hash;
2931
2932 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2933 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2934 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
2935 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
2936 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
2937 return (hash & 0x3ff);
2938 }
2939 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2940 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2941
2942 return (hash & 0xfff);
2943 }
2944
2945 /*
2946 * wm_set_filter:
2947 *
2948 * Set up the receive filter.
2949 */
2950 static void
2951 wm_set_filter(struct wm_softc *sc)
2952 {
2953 struct ethercom *ec = &sc->sc_ethercom;
2954 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2955 struct ether_multi *enm;
2956 struct ether_multistep step;
2957 bus_addr_t mta_reg;
2958 uint32_t hash, reg, bit;
2959 int i, size;
2960
2961 if (sc->sc_type >= WM_T_82544)
2962 mta_reg = WMREG_CORDOVA_MTA;
2963 else
2964 mta_reg = WMREG_MTA;
2965
2966 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2967
2968 if (ifp->if_flags & IFF_BROADCAST)
2969 sc->sc_rctl |= RCTL_BAM;
2970 if (ifp->if_flags & IFF_PROMISC) {
2971 sc->sc_rctl |= RCTL_UPE;
2972 goto allmulti;
2973 }
2974
2975 /*
2976 * Set the station address in the first RAL slot, and
2977 * clear the remaining slots.
2978 */
2979 if (sc->sc_type == WM_T_ICH8)
2980 		size = WM_RAL_TABSIZE_ICH8 - 1;
2981 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
2982 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
2983 || (sc->sc_type == WM_T_PCH_LPT))
2984 size = WM_RAL_TABSIZE_ICH8;
2985 else if (sc->sc_type == WM_T_82575)
2986 size = WM_RAL_TABSIZE_82575;
2987 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
2988 size = WM_RAL_TABSIZE_82576;
2989 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2990 size = WM_RAL_TABSIZE_I350;
2991 else
2992 size = WM_RAL_TABSIZE;
2993 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
2994 for (i = 1; i < size; i++)
2995 wm_set_ral(sc, NULL, i);
2996
2997 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2998 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2999 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
3000 size = WM_ICH8_MC_TABSIZE;
3001 else
3002 size = WM_MC_TABSIZE;
3003 /* Clear out the multicast table. */
3004 for (i = 0; i < size; i++)
3005 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3006
3007 ETHER_FIRST_MULTI(step, ec, enm);
3008 while (enm != NULL) {
3009 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3010 /*
3011 * We must listen to a range of multicast addresses.
3012 * For now, just accept all multicasts, rather than
3013 * trying to set only those filter bits needed to match
3014 * the range. (At this time, the only use of address
3015 * ranges is for IP multicast routing, for which the
3016 * range is big enough to require all bits set.)
3017 */
3018 goto allmulti;
3019 }
3020
3021 hash = wm_mchash(sc, enm->enm_addrlo);
3022
3023 reg = (hash >> 5);
3024 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3025 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3026 || (sc->sc_type == WM_T_PCH2)
3027 || (sc->sc_type == WM_T_PCH_LPT))
3028 reg &= 0x1f;
3029 else
3030 reg &= 0x7f;
3031 bit = hash & 0x1f;
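		/*
		 * hash >> 5 selects one of the 32-bit MTA registers and
		 * hash & 0x1f the bit within it; e.g. hash 0x010 lands in
		 * MTA register 0, bit 16.
		 */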
3032
3033 hash = CSR_READ(sc, mta_reg + (reg << 2));
3034 hash |= 1U << bit;
3035
3036 /* XXX Hardware bug?? */
3037 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3038 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3039 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3040 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3041 } else
3042 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3043
3044 ETHER_NEXT_MULTI(step, enm);
3045 }
3046
3047 ifp->if_flags &= ~IFF_ALLMULTI;
3048 goto setit;
3049
3050 allmulti:
3051 ifp->if_flags |= IFF_ALLMULTI;
3052 sc->sc_rctl |= RCTL_MPE;
3053
3054 setit:
3055 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3056 }
3057
3058 /* Reset and init related */
3059
3060 static void
3061 wm_set_vlan(struct wm_softc *sc)
3062 {
3063 /* Deal with VLAN enables. */
3064 if (VLAN_ATTACHED(&sc->sc_ethercom))
3065 sc->sc_ctrl |= CTRL_VME;
3066 else
3067 sc->sc_ctrl &= ~CTRL_VME;
3068
3069 /* Write the control registers. */
3070 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3071 }
3072
3073 static void
3074 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3075 {
3076 uint32_t gcr;
3077 pcireg_t ctrl2;
3078
3079 gcr = CSR_READ(sc, WMREG_GCR);
3080
3081 /* Only take action if timeout value is defaulted to 0 */
3082 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3083 goto out;
3084
3085 if ((gcr & GCR_CAP_VER2) == 0) {
3086 gcr |= GCR_CMPL_TMOUT_10MS;
3087 goto out;
3088 }
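	/*
	 * Capability version 2 parts take the completion timeout from the
	 * standard PCIe Device Control 2 register instead, so program a
	 * 16ms timeout there.
	 */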
3089
3090 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3091 sc->sc_pcixe_capoff + PCIE_DCSR2);
3092 ctrl2 |= WM_PCIE_DCSR2_16MS;
3093 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3094 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3095
3096 out:
3097 /* Disable completion timeout resend */
3098 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3099
3100 CSR_WRITE(sc, WMREG_GCR, gcr);
3101 }
3102
3103 void
3104 wm_get_auto_rd_done(struct wm_softc *sc)
3105 {
3106 int i;
3107
3108 /* wait for eeprom to reload */
3109 switch (sc->sc_type) {
3110 case WM_T_82571:
3111 case WM_T_82572:
3112 case WM_T_82573:
3113 case WM_T_82574:
3114 case WM_T_82583:
3115 case WM_T_82575:
3116 case WM_T_82576:
3117 case WM_T_82580:
3118 case WM_T_I350:
3119 case WM_T_I354:
3120 case WM_T_I210:
3121 case WM_T_I211:
3122 case WM_T_80003:
3123 case WM_T_ICH8:
3124 case WM_T_ICH9:
3125 for (i = 0; i < 10; i++) {
3126 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3127 break;
3128 delay(1000);
3129 }
3130 if (i == 10) {
3131 log(LOG_ERR, "%s: auto read from eeprom failed to "
3132 "complete\n", device_xname(sc->sc_dev));
3133 }
3134 break;
3135 default:
3136 break;
3137 }
3138 }
3139
3140 void
3141 wm_lan_init_done(struct wm_softc *sc)
3142 {
3143 uint32_t reg = 0;
3144 int i;
3145
3146 /* wait for eeprom to reload */
3147 switch (sc->sc_type) {
3148 case WM_T_ICH10:
3149 case WM_T_PCH:
3150 case WM_T_PCH2:
3151 case WM_T_PCH_LPT:
3152 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3153 reg = CSR_READ(sc, WMREG_STATUS);
3154 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3155 break;
3156 delay(100);
3157 }
3158 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3159 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3160 "complete\n", device_xname(sc->sc_dev), __func__);
3161 }
3162 break;
3163 default:
3164 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3165 __func__);
3166 break;
3167 }
3168
3169 reg &= ~STATUS_LAN_INIT_DONE;
3170 CSR_WRITE(sc, WMREG_STATUS, reg);
3171 }
3172
3173 void
3174 wm_get_cfg_done(struct wm_softc *sc)
3175 {
3176 int mask;
3177 uint32_t reg;
3178 int i;
3179
3180 /* wait for eeprom to reload */
3181 switch (sc->sc_type) {
3182 case WM_T_82542_2_0:
3183 case WM_T_82542_2_1:
3184 /* null */
3185 break;
3186 case WM_T_82543:
3187 case WM_T_82544:
3188 case WM_T_82540:
3189 case WM_T_82545:
3190 case WM_T_82545_3:
3191 case WM_T_82546:
3192 case WM_T_82546_3:
3193 case WM_T_82541:
3194 case WM_T_82541_2:
3195 case WM_T_82547:
3196 case WM_T_82547_2:
3197 case WM_T_82573:
3198 case WM_T_82574:
3199 case WM_T_82583:
3200 /* generic */
3201 delay(10*1000);
3202 break;
3203 case WM_T_80003:
3204 case WM_T_82571:
3205 case WM_T_82572:
3206 case WM_T_82575:
3207 case WM_T_82576:
3208 case WM_T_82580:
3209 case WM_T_I350:
3210 case WM_T_I354:
3211 case WM_T_I210:
3212 case WM_T_I211:
3213 if (sc->sc_type == WM_T_82571) {
3214 /* Only 82571 shares port 0 */
3215 mask = EEMNGCTL_CFGDONE_0;
3216 } else
3217 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3218 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3219 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3220 break;
3221 delay(1000);
3222 }
3223 if (i >= WM_PHY_CFG_TIMEOUT) {
3224 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3225 device_xname(sc->sc_dev), __func__));
3226 }
3227 break;
3228 case WM_T_ICH8:
3229 case WM_T_ICH9:
3230 case WM_T_ICH10:
3231 case WM_T_PCH:
3232 case WM_T_PCH2:
3233 case WM_T_PCH_LPT:
3234 delay(10*1000);
3235 if (sc->sc_type >= WM_T_ICH10)
3236 wm_lan_init_done(sc);
3237 else
3238 wm_get_auto_rd_done(sc);
3239
3240 reg = CSR_READ(sc, WMREG_STATUS);
3241 if ((reg & STATUS_PHYRA) != 0)
3242 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3243 break;
3244 default:
3245 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3246 __func__);
3247 break;
3248 }
3249 }
3250
3251 /* Init hardware bits */
3252 void
3253 wm_initialize_hardware_bits(struct wm_softc *sc)
3254 {
3255 uint32_t tarc0, tarc1, reg;
3256
3257 /* For 82571 variant, 80003 and ICHs */
3258 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3259 || (sc->sc_type >= WM_T_80003)) {
3260
3261 /* Transmit Descriptor Control 0 */
3262 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3263 reg |= TXDCTL_COUNT_DESC;
3264 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3265
3266 /* Transmit Descriptor Control 1 */
3267 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3268 reg |= TXDCTL_COUNT_DESC;
3269 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3270
3271 /* TARC0 */
3272 tarc0 = CSR_READ(sc, WMREG_TARC0);
3273 switch (sc->sc_type) {
3274 case WM_T_82571:
3275 case WM_T_82572:
3276 case WM_T_82573:
3277 case WM_T_82574:
3278 case WM_T_82583:
3279 case WM_T_80003:
3280 /* Clear bits 30..27 */
3281 tarc0 &= ~__BITS(30, 27);
3282 break;
3283 default:
3284 break;
3285 }
3286
3287 switch (sc->sc_type) {
3288 case WM_T_82571:
3289 case WM_T_82572:
3290 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3291
3292 tarc1 = CSR_READ(sc, WMREG_TARC1);
3293 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3294 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3295 /* 8257[12] Errata No.7 */
3296 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
3297
3298 /* TARC1 bit 28 */
3299 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3300 tarc1 &= ~__BIT(28);
3301 else
3302 tarc1 |= __BIT(28);
3303 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3304
3305 /*
3306 * 8257[12] Errata No.13
3307 			 * Disable Dynamic Clock Gating.
3308 */
3309 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3310 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3311 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3312 break;
3313 case WM_T_82573:
3314 case WM_T_82574:
3315 case WM_T_82583:
3316 if ((sc->sc_type == WM_T_82574)
3317 || (sc->sc_type == WM_T_82583))
3318 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3319
3320 /* Extended Device Control */
3321 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3322 reg &= ~__BIT(23); /* Clear bit 23 */
3323 reg |= __BIT(22); /* Set bit 22 */
3324 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3325
3326 /* Device Control */
3327 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3328 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3329
3330 /* PCIe Control Register */
3331 if ((sc->sc_type == WM_T_82574)
3332 || (sc->sc_type == WM_T_82583)) {
3333 /*
3334 * Document says this bit must be set for
3335 * proper operation.
3336 */
3337 reg = CSR_READ(sc, WMREG_GCR);
3338 reg |= __BIT(22);
3339 CSR_WRITE(sc, WMREG_GCR, reg);
3340
3341 				/*
3342 				 * Apply a workaround for the hardware errata
3343 				 * documented in the errata docs. It fixes an
3344 				 * issue where some error-prone or unreliable
3345 				 * PCIe completions occur, particularly with
3346 				 * ASPM enabled. Without the fix, the issue
3347 				 * can cause Tx timeouts.
3348 				 */
3349 reg = CSR_READ(sc, WMREG_GCR2);
3350 reg |= __BIT(0);
3351 CSR_WRITE(sc, WMREG_GCR2, reg);
3352 }
3353 break;
3354 case WM_T_80003:
3355 /* TARC0 */
3356 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3357 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3358 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3359
3360 /* TARC1 bit 28 */
3361 tarc1 = CSR_READ(sc, WMREG_TARC1);
3362 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3363 tarc1 &= ~__BIT(28);
3364 else
3365 tarc1 |= __BIT(28);
3366 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3367 break;
3368 case WM_T_ICH8:
3369 case WM_T_ICH9:
3370 case WM_T_ICH10:
3371 case WM_T_PCH:
3372 case WM_T_PCH2:
3373 case WM_T_PCH_LPT:
3374 /* TARC 0 */
3375 if (sc->sc_type == WM_T_ICH8) {
3376 /* Set TARC0 bits 29 and 28 */
3377 tarc0 |= __BITS(29, 28);
3378 }
3379 /* Set TARC0 bits 23,24,26,27 */
3380 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3381
3382 /* CTRL_EXT */
3383 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3384 reg |= __BIT(22); /* Set bit 22 */
3385 /*
3386 * Enable PHY low-power state when MAC is at D3
3387 * w/o WoL
3388 */
3389 if (sc->sc_type >= WM_T_PCH)
3390 reg |= CTRL_EXT_PHYPDEN;
3391 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3392
3393 /* TARC1 */
3394 tarc1 = CSR_READ(sc, WMREG_TARC1);
3395 /* bit 28 */
3396 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3397 tarc1 &= ~__BIT(28);
3398 else
3399 tarc1 |= __BIT(28);
3400 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3401 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3402
3403 /* Device Status */
3404 if (sc->sc_type == WM_T_ICH8) {
3405 reg = CSR_READ(sc, WMREG_STATUS);
3406 reg &= ~__BIT(31);
3407 CSR_WRITE(sc, WMREG_STATUS, reg);
3408
3409 }
3410
3411 /*
3412 			 * To work around a descriptor data corruption issue
3413 			 * seen during NFS v2 UDP traffic, just disable the
3414 			 * NFS filtering capability.
3415 */
3416 reg = CSR_READ(sc, WMREG_RFCTL);
3417 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3418 CSR_WRITE(sc, WMREG_RFCTL, reg);
3419 break;
3420 default:
3421 break;
3422 }
3423 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3424
3425 /*
3426 * 8257[12] Errata No.52 and some others.
3427 * Avoid RSS Hash Value bug.
3428 */
3429 switch (sc->sc_type) {
3430 case WM_T_82571:
3431 case WM_T_82572:
3432 case WM_T_82573:
3433 case WM_T_80003:
3434 case WM_T_ICH8:
3435 reg = CSR_READ(sc, WMREG_RFCTL);
3436 reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
3437 CSR_WRITE(sc, WMREG_RFCTL, reg);
3438 break;
3439 default:
3440 break;
3441 }
3442 }
3443 }
3444
3445 static uint32_t
3446 wm_rxpbs_adjust_82580(uint32_t val)
3447 {
3448 uint32_t rv = 0;
3449
3450 if (val < __arraycount(wm_82580_rxpbs_table))
3451 rv = wm_82580_rxpbs_table[val];
3452
3453 return rv;
3454 }
3455
3456 /*
3457 * wm_reset:
3458 *
3459 * Reset the i82542 chip.
3460 */
3461 static void
3462 wm_reset(struct wm_softc *sc)
3463 {
3464 int phy_reset = 0;
3465 int error = 0;
3466 uint32_t reg, mask;
3467
3468 /*
3469 * Allocate on-chip memory according to the MTU size.
3470 * The Packet Buffer Allocation register must be written
3471 * before the chip is reset.
3472 */
3473 switch (sc->sc_type) {
3474 case WM_T_82547:
3475 case WM_T_82547_2:
3476 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3477 PBA_22K : PBA_30K;
3478 sc->sc_txfifo_head = 0;
3479 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3480 sc->sc_txfifo_size =
3481 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3482 sc->sc_txfifo_stall = 0;
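		/*
		 * Illustrative: with the 30K allocation the Tx FIFO gets
		 * the remaining (40K - 30K) = 10 KB of the 40 KB on-chip
		 * packet memory, starting at the 30 KB mark.
		 */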
3483 break;
3484 case WM_T_82571:
3485 case WM_T_82572:
3486 	case WM_T_82575: /* XXX need special handling for jumbo frames */
3487 case WM_T_80003:
3488 sc->sc_pba = PBA_32K;
3489 break;
3490 case WM_T_82573:
3491 sc->sc_pba = PBA_12K;
3492 break;
3493 case WM_T_82574:
3494 case WM_T_82583:
3495 sc->sc_pba = PBA_20K;
3496 break;
3497 case WM_T_82576:
3498 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3499 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3500 break;
3501 case WM_T_82580:
3502 case WM_T_I350:
3503 case WM_T_I354:
3504 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3505 break;
3506 case WM_T_I210:
3507 case WM_T_I211:
3508 sc->sc_pba = PBA_34K;
3509 break;
3510 case WM_T_ICH8:
3511 /* Workaround for a bit corruption issue in FIFO memory */
3512 sc->sc_pba = PBA_8K;
3513 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3514 break;
3515 case WM_T_ICH9:
3516 case WM_T_ICH10:
3517 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3518 PBA_14K : PBA_10K;
3519 break;
3520 case WM_T_PCH:
3521 case WM_T_PCH2:
3522 case WM_T_PCH_LPT:
3523 sc->sc_pba = PBA_26K;
3524 break;
3525 default:
3526 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3527 PBA_40K : PBA_48K;
3528 break;
3529 }
3530 /*
3531 * Only old or non-multiqueue devices have the PBA register
3532 * XXX Need special handling for 82575.
3533 */
3534 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3535 || (sc->sc_type == WM_T_82575))
3536 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3537
3538 /* Prevent the PCI-E bus from sticking */
3539 if (sc->sc_flags & WM_F_PCIE) {
3540 int timeout = 800;
3541
3542 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3543 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3544
3545 while (timeout--) {
3546 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3547 == 0)
3548 break;
3549 delay(100);
3550 }
3551 }
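/*
 * Illustrative note (editor's addition): the loop above polls at
 * 100us intervals for up to 800 iterations, i.e. roughly an 80ms
 * upper bound before the reset proceeds regardless.  The general
 * poll-until-clear shape, as a hypothetical helper:
 */
#if 0
static bool
wm_poll_bit_clear(struct wm_softc *sc, int reg, uint32_t bit,
    int tries, int usec)
{
	while (tries-- > 0) {
		if ((CSR_READ(sc, reg) & bit) == 0)
			return true;	/* cleared in time */
		delay(usec);
	}
	return false;	/* timed out; caller may proceed anyway */
}
#endif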
3552
3553 /* Set the completion timeout for interface */
3554 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3555 || (sc->sc_type == WM_T_82580)
3556 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3557 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3558 wm_set_pcie_completion_timeout(sc);
3559
3560 /* Clear interrupt */
3561 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3562
3563 /* Stop the transmit and receive processes. */
3564 CSR_WRITE(sc, WMREG_RCTL, 0);
3565 sc->sc_rctl &= ~RCTL_EN;
3566 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3567 CSR_WRITE_FLUSH(sc);
3568
3569 /* XXX set_tbi_sbp_82543() */
3570
3571 delay(10*1000);
3572
3573 /* Must acquire the MDIO ownership before MAC reset */
3574 switch (sc->sc_type) {
3575 case WM_T_82573:
3576 case WM_T_82574:
3577 case WM_T_82583:
3578 error = wm_get_hw_semaphore_82573(sc);
3579 break;
3580 default:
3581 break;
3582 }
3583
3584 /*
3585 * 82541 Errata 29? & 82547 Errata 28?
3586 * See also the description about PHY_RST bit in CTRL register
3587 * in 8254x_GBe_SDM.pdf.
3588 */
3589 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3590 CSR_WRITE(sc, WMREG_CTRL,
3591 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3592 CSR_WRITE_FLUSH(sc);
3593 delay(5000);
3594 }
3595
3596 switch (sc->sc_type) {
3597 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3598 case WM_T_82541:
3599 case WM_T_82541_2:
3600 case WM_T_82547:
3601 case WM_T_82547_2:
3602 /*
3603 * On some chipsets, a reset through a memory-mapped write
3604 * cycle can cause the chip to reset before completing the
3605 * write cycle. This causes major headaches that can be
3606 * avoided by issuing the reset via indirect register writes
3607 * through I/O space.
3608 *
3609 * So, if we successfully mapped the I/O BAR at attach time,
3610 * use that. Otherwise, try our luck with a memory-mapped
3611 * reset.
3612 */
3613 if (sc->sc_flags & WM_F_IOH_VALID)
3614 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3615 else
3616 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3617 break;
3618 case WM_T_82545_3:
3619 case WM_T_82546_3:
3620 /* Use the shadow control register on these chips. */
3621 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3622 break;
3623 case WM_T_80003:
3624 mask = swfwphysem[sc->sc_funcid];
3625 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3626 wm_get_swfw_semaphore(sc, mask);
3627 CSR_WRITE(sc, WMREG_CTRL, reg);
3628 wm_put_swfw_semaphore(sc, mask);
3629 break;
3630 case WM_T_ICH8:
3631 case WM_T_ICH9:
3632 case WM_T_ICH10:
3633 case WM_T_PCH:
3634 case WM_T_PCH2:
3635 case WM_T_PCH_LPT:
3636 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3637 if (wm_check_reset_block(sc) == 0) {
3638 /*
3639 * Gate automatic PHY configuration by hardware on
3640 * non-managed 82579
3641 */
3642 if ((sc->sc_type == WM_T_PCH2)
3643 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3644 != 0))
3645 wm_gate_hw_phy_config_ich8lan(sc, 1);
3646
3648 reg |= CTRL_PHY_RESET;
3649 phy_reset = 1;
3650 }
3651 wm_get_swfwhw_semaphore(sc);
3652 CSR_WRITE(sc, WMREG_CTRL, reg);
3653 /* Don't insert a completion barrier during reset */
3654 delay(20*1000);
3655 wm_put_swfwhw_semaphore(sc);
3656 break;
3657 case WM_T_82580:
3658 case WM_T_I350:
3659 case WM_T_I354:
3660 case WM_T_I210:
3661 case WM_T_I211:
3662 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3663 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3664 CSR_WRITE_FLUSH(sc);
3665 delay(5000);
3666 break;
3667 case WM_T_82542_2_0:
3668 case WM_T_82542_2_1:
3669 case WM_T_82543:
3670 case WM_T_82540:
3671 case WM_T_82545:
3672 case WM_T_82546:
3673 case WM_T_82571:
3674 case WM_T_82572:
3675 case WM_T_82573:
3676 case WM_T_82574:
3677 case WM_T_82575:
3678 case WM_T_82576:
3679 case WM_T_82583:
3680 default:
3681 /* Everything else can safely use the documented method. */
3682 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3683 break;
3684 }
3685
3686 /* Must release the MDIO ownership after MAC reset */
3687 switch (sc->sc_type) {
3688 case WM_T_82573:
3689 case WM_T_82574:
3690 case WM_T_82583:
3691 if (error == 0)
3692 wm_put_hw_semaphore_82573(sc);
3693 break;
3694 default:
3695 break;
3696 }
3697
3698 if (phy_reset != 0)
3699 wm_get_cfg_done(sc);
3700
3701 /* reload EEPROM */
3702 switch (sc->sc_type) {
3703 case WM_T_82542_2_0:
3704 case WM_T_82542_2_1:
3705 case WM_T_82543:
3706 case WM_T_82544:
3707 delay(10);
3708 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3709 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3710 CSR_WRITE_FLUSH(sc);
3711 delay(2000);
3712 break;
3713 case WM_T_82540:
3714 case WM_T_82545:
3715 case WM_T_82545_3:
3716 case WM_T_82546:
3717 case WM_T_82546_3:
3718 delay(5*1000);
3719 /* XXX Disable HW ARPs on ASF enabled adapters */
3720 break;
3721 case WM_T_82541:
3722 case WM_T_82541_2:
3723 case WM_T_82547:
3724 case WM_T_82547_2:
3725 delay(20000);
3726 /* XXX Disable HW ARPs on ASF enabled adapters */
3727 break;
3728 case WM_T_82571:
3729 case WM_T_82572:
3730 case WM_T_82573:
3731 case WM_T_82574:
3732 case WM_T_82583:
3733 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3734 delay(10);
3735 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3736 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3737 CSR_WRITE_FLUSH(sc);
3738 }
3739 /* check EECD_EE_AUTORD */
3740 wm_get_auto_rd_done(sc);
3741 /*
3742 * PHY configuration from the NVM starts only after EECD_AUTO_RD
3743 * is set.
3744 */
3745 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3746 || (sc->sc_type == WM_T_82583))
3747 delay(25*1000);
3748 break;
3749 case WM_T_82575:
3750 case WM_T_82576:
3751 case WM_T_82580:
3752 case WM_T_I350:
3753 case WM_T_I354:
3754 case WM_T_I210:
3755 case WM_T_I211:
3756 case WM_T_80003:
3757 /* check EECD_EE_AUTORD */
3758 wm_get_auto_rd_done(sc);
3759 break;
3760 case WM_T_ICH8:
3761 case WM_T_ICH9:
3762 case WM_T_ICH10:
3763 case WM_T_PCH:
3764 case WM_T_PCH2:
3765 case WM_T_PCH_LPT:
3766 break;
3767 default:
3768 panic("%s: unknown type\n", __func__);
3769 }
3770
3771 /* Check whether EEPROM is present or not */
3772 switch (sc->sc_type) {
3773 case WM_T_82575:
3774 case WM_T_82576:
3775 #if 0 /* XXX */
3776 case WM_T_82580:
3777 #endif
3778 case WM_T_I350:
3779 case WM_T_I354:
3780 case WM_T_ICH8:
3781 case WM_T_ICH9:
3782 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3783 /* Not found */
3784 sc->sc_flags |= WM_F_EEPROM_INVALID;
3785 if ((sc->sc_type == WM_T_82575)
3786 || (sc->sc_type == WM_T_82576)
3787 || (sc->sc_type == WM_T_82580)
3788 || (sc->sc_type == WM_T_I350)
3789 || (sc->sc_type == WM_T_I354))
3790 wm_reset_init_script_82575(sc);
3791 }
3792 break;
3793 default:
3794 break;
3795 }
3796
3797 if ((sc->sc_type == WM_T_82580)
3798 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3799 /* clear global device reset status bit */
3800 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3801 }
3802
3803 /* Clear any pending interrupt events. */
3804 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3805 reg = CSR_READ(sc, WMREG_ICR);
3806
3807 /* reload sc_ctrl */
3808 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3809
3810 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
3811 wm_set_eee_i350(sc);
3812
3813 /* dummy read from WUC */
3814 if (sc->sc_type == WM_T_PCH)
3815 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3816 /*
3817 * For PCH, this write will make sure that any noise will be detected
3818 * as a CRC error and be dropped rather than show up as a bad packet
3819 * to the DMA engine.
3820 */
3821 if (sc->sc_type == WM_T_PCH)
3822 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3823
3824 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3825 CSR_WRITE(sc, WMREG_WUC, 0);
3826
3827 /* XXX need special handling for 82580 */
3828 }
3829
3830 /*
3831 * wm_add_rxbuf:
3832 *
3833 * Add a receive buffer to the indicated descriptor.
3834 */
3835 static int
3836 wm_add_rxbuf(struct wm_softc *sc, int idx)
3837 {
3838 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3839 struct mbuf *m;
3840 int error;
3841
3842 KASSERT(WM_RX_LOCKED(sc));
3843
3844 MGETHDR(m, M_DONTWAIT, MT_DATA);
3845 if (m == NULL)
3846 return ENOBUFS;
3847
3848 MCLGET(m, M_DONTWAIT);
3849 if ((m->m_flags & M_EXT) == 0) {
3850 m_freem(m);
3851 return ENOBUFS;
3852 }
3853
3854 if (rxs->rxs_mbuf != NULL)
3855 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3856
3857 rxs->rxs_mbuf = m;
3858
3859 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3860 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3861 BUS_DMA_READ|BUS_DMA_NOWAIT);
3862 if (error) {
3863 /* XXX XXX XXX */
3864 aprint_error_dev(sc->sc_dev,
3865 "unable to load rx DMA map %d, error = %d\n",
3866 idx, error);
3867 panic("wm_add_rxbuf");
3868 }
3869
3870 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3871 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3872
3873 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3874 if ((sc->sc_rctl & RCTL_EN) != 0)
3875 WM_INIT_RXDESC(sc, idx);
3876 } else
3877 WM_INIT_RXDESC(sc, idx);
3878
3879 return 0;
3880 }
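/*
 * Illustrative sketch (editor's addition, not the driver's actual
 * behavior): the XXX'd panic on a DMA map load failure above could
 * instead propagate the error, letting the caller run with fewer
 * receive buffers as the XXX comment in wm_init_locked() suggests:
 */
#if 0
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load rx DMA map %d, error = %d\n",
		    idx, error);
		m_freem(rxs->rxs_mbuf);
		rxs->rxs_mbuf = NULL;
		return error;	/* instead of panic("wm_add_rxbuf") */
	}
#endif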
3881
3882 /*
3883 * wm_rxdrain:
3884 *
3885 * Drain the receive queue.
3886 */
3887 static void
3888 wm_rxdrain(struct wm_softc *sc)
3889 {
3890 struct wm_rxsoft *rxs;
3891 int i;
3892
3893 KASSERT(WM_RX_LOCKED(sc));
3894
3895 for (i = 0; i < WM_NRXDESC; i++) {
3896 rxs = &sc->sc_rxsoft[i];
3897 if (rxs->rxs_mbuf != NULL) {
3898 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3899 m_freem(rxs->rxs_mbuf);
3900 rxs->rxs_mbuf = NULL;
3901 }
3902 }
3903 }
3904
3905 /*
3906 * wm_init: [ifnet interface function]
3907 *
3908 * Initialize the interface.
3909 */
3910 static int
3911 wm_init(struct ifnet *ifp)
3912 {
3913 struct wm_softc *sc = ifp->if_softc;
3914 int ret;
3915
3916 WM_BOTH_LOCK(sc);
3917 ret = wm_init_locked(ifp);
3918 WM_BOTH_UNLOCK(sc);
3919
3920 return ret;
3921 }
3922
3923 static int
3924 wm_init_locked(struct ifnet *ifp)
3925 {
3926 struct wm_softc *sc = ifp->if_softc;
3927 struct wm_rxsoft *rxs;
3928 int i, j, trynum, error = 0;
3929 uint32_t reg;
3930
3931 KASSERT(WM_BOTH_LOCKED(sc));
3932 /*
3933 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3934 * There is a small but measurable benefit to avoiding the adjustment
3935 * of the descriptor so that the headers are aligned, for normal mtu,
3936 * on such platforms. One possibility is that the DMA itself is
3937 * slightly more efficient if the front of the entire packet (instead
3938 * of the front of the headers) is aligned.
3939 *
3940 * Note we must always set align_tweak to 0 if we are using
3941 * jumbo frames.
3942 */
3943 #ifdef __NO_STRICT_ALIGNMENT
3944 sc->sc_align_tweak = 0;
3945 #else
3946 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3947 sc->sc_align_tweak = 0;
3948 else
3949 sc->sc_align_tweak = 2;
3950 #endif /* __NO_STRICT_ALIGNMENT */
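/*
 * Worked example (editor's addition): with align_tweak = 2, received
 * data is laid out as
 *
 *	buf + 0:  2 bytes of padding (the tweak)
 *	buf + 2:  14-byte Ethernet header
 *	buf + 16: IP header, now 4-byte aligned
 *
 * The tweak must be 0 when the frame plus the 2 pad bytes would no
 * longer fit in an mbuf cluster, which is what the MCLBYTES - 2 test
 * above checks; jumbo frames therefore never get the tweak.
 */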
3951
3952 /* Cancel any pending I/O. */
3953 wm_stop_locked(ifp, 0);
3954
3955 /* update statistics before reset */
3956 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3957 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3958
3959 /* Reset the chip to a known state. */
3960 wm_reset(sc);
3961
3962 switch (sc->sc_type) {
3963 case WM_T_82571:
3964 case WM_T_82572:
3965 case WM_T_82573:
3966 case WM_T_82574:
3967 case WM_T_82583:
3968 case WM_T_80003:
3969 case WM_T_ICH8:
3970 case WM_T_ICH9:
3971 case WM_T_ICH10:
3972 case WM_T_PCH:
3973 case WM_T_PCH2:
3974 case WM_T_PCH_LPT:
3975 if (wm_check_mng_mode(sc) != 0)
3976 wm_get_hw_control(sc);
3977 break;
3978 default:
3979 break;
3980 }
3981
3982 /* Init hardware bits */
3983 wm_initialize_hardware_bits(sc);
3984
3985 /* Reset the PHY. */
3986 if (sc->sc_flags & WM_F_HAS_MII)
3987 wm_gmii_reset(sc);
3988
3989 /* Calculate (E)ITR value */
3990 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3991 sc->sc_itr = 450; /* For EITR */
3992 } else if (sc->sc_type >= WM_T_82543) {
3993 /*
3994 * Set up the interrupt throttling register (units of 256ns)
3995 * Note that a footnote in Intel's documentation says this
3996 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3997 * or 10Mbit mode. Empirically, this appears to be true
3998 * also for the 1024ns units of the other
3999 * interrupt-related timer registers -- so, really, we ought
4000 * to divide this value by 4 when the link speed is low.
4001 *
4002 * XXX implement this division at link speed change!
4003 */
4004
4005 /*
4006 * For N interrupts/sec, set this value to:
4007 * 1000000000 / (N * 256). Note that we set the
4008 * absolute and packet timer values to this value
4009 * divided by 4 to get "simple timer" behavior.
4010 */
4011
4012 sc->sc_itr = 1500; /* 2604 ints/sec */
4013 }
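/*
 * Worked example (editor's addition): with sc_itr = 1500 and 256ns
 * units, the resulting rate is 1000000000 / (1500 * 256) ~= 2604
 * interrupts/sec, matching the "2604 ints/sec" note above.  A
 * hypothetical helper for the conversion:
 */
#if 0
static uint32_t
wm_itr_from_rate(uint32_t ints_per_sec)
{
	/* ITR counts in 256ns units: 10^9 / (N * 256). */
	return 1000000000U / (ints_per_sec * 256U);
}
#endif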
4014
4015 /* Initialize the transmit descriptor ring. */
4016 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4017 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4018 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4019 sc->sc_txfree = WM_NTXDESC(sc);
4020 sc->sc_txnext = 0;
4021
4022 if (sc->sc_type < WM_T_82543) {
4023 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4024 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4025 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4026 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4027 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4028 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4029 } else {
4030 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4031 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4032 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4033 CSR_WRITE(sc, WMREG_TDH, 0);
4034
4035 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4036 /*
4037 * Don't write TDT before TCTL.EN is set.
4038 * See the documentation.
4039 */
4040 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_QUEUE_ENABLE
4041 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4042 | TXDCTL_WTHRESH(0));
4043 else {
4044 /* ITR / 4 */
4045 CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
4046 if (sc->sc_type >= WM_T_82540) {
4047 /* should be same */
4048 CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
4049 }
4050
4051 CSR_WRITE(sc, WMREG_TDT, 0);
4052 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) |
4053 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4054 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4055 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4056 }
4057 }
4058
4059 /* Initialize the transmit job descriptors. */
4060 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4061 sc->sc_txsoft[i].txs_mbuf = NULL;
4062 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4063 sc->sc_txsnext = 0;
4064 sc->sc_txsdirty = 0;
4065
4066 /*
4067 * Initialize the receive descriptor and receive job
4068 * descriptor rings.
4069 */
4070 if (sc->sc_type < WM_T_82543) {
4071 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4072 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4073 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4074 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4075 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4076 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4077
4078 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4079 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4080 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4081 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4082 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4083 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4084 } else {
4085 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4086 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4087 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4088
4089 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4090 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4091 panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
4092 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4093 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4094 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4095 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4096 | RXDCTL_WTHRESH(1));
4097 } else {
4098 CSR_WRITE(sc, WMREG_RDH, 0);
4099 CSR_WRITE(sc, WMREG_RDT, 0);
4100 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4101 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
4102 }
4103 }
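/*
 * Worked example (editor's addition, assuming SRRCTL_BSIZEPKT_SHIFT
 * is 10, i.e. 1KB units): with MCLBYTES = 2048, the panic check above
 * passes (2048 & 1023 == 0) and SRRCTL's buffer size field is written
 * as 2048 >> 10 = 2, telling the chip to use 2KB receive buffers.
 */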
4104 for (i = 0; i < WM_NRXDESC; i++) {
4105 rxs = &sc->sc_rxsoft[i];
4106 if (rxs->rxs_mbuf == NULL) {
4107 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4108 log(LOG_ERR, "%s: unable to allocate or map "
4109 "rx buffer %d, error = %d\n",
4110 device_xname(sc->sc_dev), i, error);
4111 /*
4112 * XXX Should attempt to run with fewer receive
4113 * XXX buffers instead of just failing.
4114 */
4115 wm_rxdrain(sc);
4116 goto out;
4117 }
4118 } else {
4119 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4120 WM_INIT_RXDESC(sc, i);
4121 /*
4122 * For 82575 and newer devices, the RX descriptors
4123 * must be initialized after RCTL.EN is set in
4124 * wm_set_filter().
4125 */
4126 }
4127 }
4128 sc->sc_rxptr = 0;
4129 sc->sc_rxdiscard = 0;
4130 WM_RXCHAIN_RESET(sc);
4131
4132 /*
4133 * Clear out the VLAN table -- we don't use it (yet).
4134 */
4135 CSR_WRITE(sc, WMREG_VET, 0);
4136 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4137 trynum = 10; /* Due to hw errata */
4138 else
4139 trynum = 1;
4140 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4141 for (j = 0; j < trynum; j++)
4142 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4143
4144 /*
4145 * Set up flow-control parameters.
4146 *
4147 * XXX Values could probably stand some tuning.
4148 */
4149 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4150 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4151 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4152 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4153 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4154 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4155 }
4156
4157 sc->sc_fcrtl = FCRTL_DFLT;
4158 if (sc->sc_type < WM_T_82543) {
4159 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4160 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4161 } else {
4162 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4163 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4164 }
4165
4166 if (sc->sc_type == WM_T_80003)
4167 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4168 else
4169 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4170
4171 /* Writes the control register. */
4172 wm_set_vlan(sc);
4173
4174 if (sc->sc_flags & WM_F_HAS_MII) {
4175 int val;
4176
4177 switch (sc->sc_type) {
4178 case WM_T_80003:
4179 case WM_T_ICH8:
4180 case WM_T_ICH9:
4181 case WM_T_ICH10:
4182 case WM_T_PCH:
4183 case WM_T_PCH2:
4184 case WM_T_PCH_LPT:
4185 /*
4186 * Set the MAC to wait the maximum time between each
4187 * iteration and increase the max iterations when
4188 * polling the PHY; this fixes erroneous timeouts at
4189 * 10Mbps.
4190 */
4191 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4192 0xFFFF);
4193 val = wm_kmrn_readreg(sc,
4194 KUMCTRLSTA_OFFSET_INB_PARAM);
4195 val |= 0x3F;
4196 wm_kmrn_writereg(sc,
4197 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4198 break;
4199 default:
4200 break;
4201 }
4202
4203 if (sc->sc_type == WM_T_80003) {
4204 val = CSR_READ(sc, WMREG_CTRL_EXT);
4205 val &= ~CTRL_EXT_LINK_MODE_MASK;
4206 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4207
4208 /* Bypass RX and TX FIFOs */
4209 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4210 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4211 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4212 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4213 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4214 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4215 }
4216 }
4217 #if 0
4218 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4219 #endif
4220
4221 /* Set up checksum offload parameters. */
4222 reg = CSR_READ(sc, WMREG_RXCSUM);
4223 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4224 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4225 reg |= RXCSUM_IPOFL;
4226 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4227 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4228 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4229 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4230 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4231
4232 /* Set up the interrupt registers. */
4233 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4234 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4235 ICR_RXO | ICR_RXT0;
4236 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4237
4238 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4239 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4240 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4241 reg = CSR_READ(sc, WMREG_KABGTXD);
4242 reg |= KABGTXD_BGSQLBIAS;
4243 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4244 }
4245
4246 /* Set up the inter-packet gap. */
4247 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4248
4249 if (sc->sc_type >= WM_T_82543) {
4250 /*
4251 * XXX 82574 has both ITR and EITR. SET EITR when we use
4252 * the multi queue function with MSI-X.
4253 */
4254 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4255 CSR_WRITE(sc, WMREG_EITR(0), sc->sc_itr);
4256 else
4257 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4258 }
4259
4260 /* Set the VLAN ethernetype. */
4261 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4262
4263 /*
4264 * Set up the transmit control register; we start out with
4265 * a collision distance suitable for FDX, but update it when
4266 * we resolve the media type.
4267 */
4268 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4269 | TCTL_CT(TX_COLLISION_THRESHOLD)
4270 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4271 if (sc->sc_type >= WM_T_82571)
4272 sc->sc_tctl |= TCTL_MULR;
4273 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4274
4275 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4276 /* Write TDT after TCTL.EN is set. See the documentation. */
4277 CSR_WRITE(sc, WMREG_TDT, 0);
4278 }
4279
4280 if (sc->sc_type == WM_T_80003) {
4281 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4282 reg &= ~TCTL_EXT_GCEX_MASK;
4283 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4284 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4285 }
4286
4287 /* Set the media. */
4288 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4289 goto out;
4290
4291 /* Configure for OS presence */
4292 wm_init_manageability(sc);
4293
4294 /*
4295 * Set up the receive control register; we actually program
4296 * the register when we set the receive filter. Use multicast
4297 * address offset type 0.
4298 *
4299 * Only the i82544 has the ability to strip the incoming
4300 * CRC, so we don't enable that feature.
4301 */
4302 sc->sc_mchash_type = 0;
4303 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4304 | RCTL_MO(sc->sc_mchash_type);
4305
4306 /*
4307 * The I350 has a bug where it always strips the CRC whether
4308 * asked to or not; ask for the stripped CRC here and cope in rxeof.
4309 */
4310 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4311 || (sc->sc_type == WM_T_I210))
4312 sc->sc_rctl |= RCTL_SECRC;
4313
4314 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4315 && (ifp->if_mtu > ETHERMTU)) {
4316 sc->sc_rctl |= RCTL_LPE;
4317 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4318 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4319 }
4320
4321 if (MCLBYTES == 2048) {
4322 sc->sc_rctl |= RCTL_2k;
4323 } else {
4324 if (sc->sc_type >= WM_T_82543) {
4325 switch (MCLBYTES) {
4326 case 4096:
4327 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4328 break;
4329 case 8192:
4330 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4331 break;
4332 case 16384:
4333 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4334 break;
4335 default:
4336 panic("wm_init: MCLBYTES %d unsupported",
4337 MCLBYTES);
4338 break;
4339 }
4340 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4341 }
4342
4343 /* Set the receive filter. */
4344 wm_set_filter(sc);
4345
4346 /* Enable ECC */
4347 switch (sc->sc_type) {
4348 case WM_T_82571:
4349 reg = CSR_READ(sc, WMREG_PBA_ECC);
4350 reg |= PBA_ECC_CORR_EN;
4351 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4352 break;
4353 case WM_T_PCH_LPT:
4354 reg = CSR_READ(sc, WMREG_PBECCSTS);
4355 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4356 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4357
4358 reg = CSR_READ(sc, WMREG_CTRL);
4359 reg |= CTRL_MEHE;
4360 CSR_WRITE(sc, WMREG_CTRL, reg);
4361 break;
4362 default:
4363 break;
4364 }
4365
4366 /* On 575 and later set RDT only if RX enabled */
4367 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4368 for (i = 0; i < WM_NRXDESC; i++)
4369 WM_INIT_RXDESC(sc, i);
4370
4371 sc->sc_stopping = false;
4372
4373 /* Start the one second link check clock. */
4374 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4375
4376 /* ...all done! */
4377 ifp->if_flags |= IFF_RUNNING;
4378 ifp->if_flags &= ~IFF_OACTIVE;
4379
4380 out:
4381 sc->sc_if_flags = ifp->if_flags;
4382 if (error)
4383 log(LOG_ERR, "%s: interface not running\n",
4384 device_xname(sc->sc_dev));
4385 return error;
4386 }
4387
4388 /*
4389 * wm_stop: [ifnet interface function]
4390 *
4391 * Stop transmission on the interface.
4392 */
4393 static void
4394 wm_stop(struct ifnet *ifp, int disable)
4395 {
4396 struct wm_softc *sc = ifp->if_softc;
4397
4398 WM_BOTH_LOCK(sc);
4399 wm_stop_locked(ifp, disable);
4400 WM_BOTH_UNLOCK(sc);
4401 }
4402
4403 static void
4404 wm_stop_locked(struct ifnet *ifp, int disable)
4405 {
4406 struct wm_softc *sc = ifp->if_softc;
4407 struct wm_txsoft *txs;
4408 int i;
4409
4410 KASSERT(WM_BOTH_LOCKED(sc));
4411
4412 sc->sc_stopping = true;
4413
4414 /* Stop the one second clock. */
4415 callout_stop(&sc->sc_tick_ch);
4416
4417 /* Stop the 82547 Tx FIFO stall check timer. */
4418 if (sc->sc_type == WM_T_82547)
4419 callout_stop(&sc->sc_txfifo_ch);
4420
4421 if (sc->sc_flags & WM_F_HAS_MII) {
4422 /* Down the MII. */
4423 mii_down(&sc->sc_mii);
4424 } else {
4425 #if 0
4426 /* Should we clear PHY's status properly? */
4427 wm_reset(sc);
4428 #endif
4429 }
4430
4431 /* Stop the transmit and receive processes. */
4432 CSR_WRITE(sc, WMREG_TCTL, 0);
4433 CSR_WRITE(sc, WMREG_RCTL, 0);
4434 sc->sc_rctl &= ~RCTL_EN;
4435
4436 /*
4437 * Clear the interrupt mask to ensure the device cannot assert its
4438 * interrupt line.
4439 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4440 * any currently pending or shared interrupt.
4441 */
4442 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4443 sc->sc_icr = 0;
4444
4445 /* Release any queued transmit buffers. */
4446 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4447 txs = &sc->sc_txsoft[i];
4448 if (txs->txs_mbuf != NULL) {
4449 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4450 m_freem(txs->txs_mbuf);
4451 txs->txs_mbuf = NULL;
4452 }
4453 }
4454
4455 /* Mark the interface as down and cancel the watchdog timer. */
4456 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4457 ifp->if_timer = 0;
4458
4459 if (disable)
4460 wm_rxdrain(sc);
4461
4462 #if 0 /* notyet */
4463 if (sc->sc_type >= WM_T_82544)
4464 CSR_WRITE(sc, WMREG_WUC, 0);
4465 #endif
4466 }
4467
4468 /*
4469 * wm_tx_offload:
4470 *
4471 * Set up TCP/IP checksumming parameters for the
4472 * specified packet.
4473 */
4474 static int
4475 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4476 uint8_t *fieldsp)
4477 {
4478 struct mbuf *m0 = txs->txs_mbuf;
4479 struct livengood_tcpip_ctxdesc *t;
4480 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4481 uint32_t ipcse;
4482 struct ether_header *eh;
4483 int offset, iphl;
4484 uint8_t fields;
4485
4486 /*
4487 * XXX It would be nice if the mbuf pkthdr had offset
4488 * fields for the protocol headers.
4489 */
4490
4491 eh = mtod(m0, struct ether_header *);
4492 switch (htons(eh->ether_type)) {
4493 case ETHERTYPE_IP:
4494 case ETHERTYPE_IPV6:
4495 offset = ETHER_HDR_LEN;
4496 break;
4497
4498 case ETHERTYPE_VLAN:
4499 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4500 break;
4501
4502 default:
4503 /*
4504 * Don't support this protocol or encapsulation.
4505 */
4506 *fieldsp = 0;
4507 *cmdp = 0;
4508 return 0;
4509 }
4510
4511 if ((m0->m_pkthdr.csum_flags &
4512 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4513 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4514 } else {
4515 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4516 }
4517 ipcse = offset + iphl - 1;
4518
4519 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4520 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4521 seg = 0;
4522 fields = 0;
4523
4524 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4525 int hlen = offset + iphl;
4526 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4527
4528 if (__predict_false(m0->m_len <
4529 (hlen + sizeof(struct tcphdr)))) {
4530 /*
4531 * TCP/IP headers are not in the first mbuf; we need
4532 * to do this the slow and painful way. Let's just
4533 * hope this doesn't happen very often.
4534 */
4535 struct tcphdr th;
4536
4537 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4538
4539 m_copydata(m0, hlen, sizeof(th), &th);
4540 if (v4) {
4541 struct ip ip;
4542
4543 m_copydata(m0, offset, sizeof(ip), &ip);
4544 ip.ip_len = 0;
4545 m_copyback(m0,
4546 offset + offsetof(struct ip, ip_len),
4547 sizeof(ip.ip_len), &ip.ip_len);
4548 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4549 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4550 } else {
4551 struct ip6_hdr ip6;
4552
4553 m_copydata(m0, offset, sizeof(ip6), &ip6);
4554 ip6.ip6_plen = 0;
4555 m_copyback(m0,
4556 offset + offsetof(struct ip6_hdr, ip6_plen),
4557 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4558 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4559 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4560 }
4561 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4562 sizeof(th.th_sum), &th.th_sum);
4563
4564 hlen += th.th_off << 2;
4565 } else {
4566 /*
4567 * TCP/IP headers are in the first mbuf; we can do
4568 * this the easy way.
4569 */
4570 struct tcphdr *th;
4571
4572 if (v4) {
4573 struct ip *ip =
4574 (void *)(mtod(m0, char *) + offset);
4575 th = (void *)(mtod(m0, char *) + hlen);
4576
4577 ip->ip_len = 0;
4578 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4579 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4580 } else {
4581 struct ip6_hdr *ip6 =
4582 (void *)(mtod(m0, char *) + offset);
4583 th = (void *)(mtod(m0, char *) + hlen);
4584
4585 ip6->ip6_plen = 0;
4586 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4587 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4588 }
4589 hlen += th->th_off << 2;
4590 }
4591
4592 if (v4) {
4593 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4594 cmdlen |= WTX_TCPIP_CMD_IP;
4595 } else {
4596 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4597 ipcse = 0;
4598 }
4599 cmd |= WTX_TCPIP_CMD_TSE;
4600 cmdlen |= WTX_TCPIP_CMD_TSE |
4601 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4602 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4603 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4604 }
4605
4606 /*
4607 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4608 * offload feature, if we load the context descriptor, we
4609 * MUST provide valid values for IPCSS and TUCSS fields.
4610 */
4611
4612 ipcs = WTX_TCPIP_IPCSS(offset) |
4613 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4614 WTX_TCPIP_IPCSE(ipcse);
4615 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
4616 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
4617 fields |= WTX_IXSM;
4618 }
4619
4620 offset += iphl;
4621
4622 if (m0->m_pkthdr.csum_flags &
4623 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
4624 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4625 fields |= WTX_TXSM;
4626 tucs = WTX_TCPIP_TUCSS(offset) |
4627 WTX_TCPIP_TUCSO(offset +
4628 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
4629 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4630 } else if ((m0->m_pkthdr.csum_flags &
4631 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
4632 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4633 fields |= WTX_TXSM;
4634 tucs = WTX_TCPIP_TUCSS(offset) |
4635 WTX_TCPIP_TUCSO(offset +
4636 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
4637 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4638 } else {
4639 /* Just initialize it to a valid TCP context. */
4640 tucs = WTX_TCPIP_TUCSS(offset) |
4641 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
4642 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4643 }
4644
4645 /* Fill in the context descriptor. */
4646 t = (struct livengood_tcpip_ctxdesc *)
4647 &sc->sc_txdescs[sc->sc_txnext];
4648 t->tcpip_ipcs = htole32(ipcs);
4649 t->tcpip_tucs = htole32(tucs);
4650 t->tcpip_cmdlen = htole32(cmdlen);
4651 t->tcpip_seg = htole32(seg);
4652 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4653
4654 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4655 txs->txs_ndesc++;
4656
4657 *cmdp = cmd;
4658 *fieldsp = fields;
4659
4660 return 0;
4661 }
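/*
 * Illustrative note (editor's addition): wm_tx_offload() consumes one
 * ring slot for the context descriptor (txs_ndesc++ above) and
 * returns two out-values that wm_start_locked() then applies to every
 * data descriptor of the packet, roughly:
 */
#if 0
	uint32_t cksumcmd;
	uint8_t cksumfields;

	wm_tx_offload(sc, txs, &cksumcmd, &cksumfields);
	cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
	/*
	 * cksumcmd is OR'd into each descriptor's wtx_cmdlen, and
	 * cksumfields lands in wtxu_options, telling the chip which
	 * checksums (WTX_IXSM/WTX_TXSM) to insert.
	 */
#endif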
4662
4663 static void
4664 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
4665 {
4666 struct mbuf *m;
4667 int i;
4668
4669 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
4670 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
4671 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
4672 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
4673 m->m_data, m->m_len, m->m_flags);
4674 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
4675 i, i == 1 ? "" : "s");
4676 }
4677
4678 /*
4679 * wm_82547_txfifo_stall:
4680 *
4681 * Callout used to wait for the 82547 Tx FIFO to drain,
4682 * reset the FIFO pointers, and restart packet transmission.
4683 */
4684 static void
4685 wm_82547_txfifo_stall(void *arg)
4686 {
4687 struct wm_softc *sc = arg;
4688 #ifndef WM_MPSAFE
4689 int s;
4690
4691 s = splnet();
4692 #endif
4693 WM_TX_LOCK(sc);
4694
4695 if (sc->sc_stopping)
4696 goto out;
4697
4698 if (sc->sc_txfifo_stall) {
4699 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
4700 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
4701 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
4702 /*
4703 * Packets have drained. Stop transmitter, reset
4704 * FIFO pointers, restart transmitter, and kick
4705 * the packet queue.
4706 */
4707 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
4708 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
4709 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
4710 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
4711 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
4712 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
4713 CSR_WRITE(sc, WMREG_TCTL, tctl);
4714 CSR_WRITE_FLUSH(sc);
4715
4716 sc->sc_txfifo_head = 0;
4717 sc->sc_txfifo_stall = 0;
4718 wm_start_locked(&sc->sc_ethercom.ec_if);
4719 } else {
4720 /*
4721 * Still waiting for packets to drain; try again in
4722 * another tick.
4723 */
4724 callout_schedule(&sc->sc_txfifo_ch, 1);
4725 }
4726 }
4727
4728 out:
4729 WM_TX_UNLOCK(sc);
4730 #ifndef WM_MPSAFE
4731 splx(s);
4732 #endif
4733 }
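/*
 * Illustrative note (editor's addition): the three comparisons above
 * check, in order, that the descriptor ring is empty (TDT == TDH) and
 * that the internal FIFO head/tail pointer pairs match (TDFT == TDFH,
 * TDFTS == TDFHS), i.e. the FIFO has drained; only then is it safe to
 * rewind all four FIFO pointers to sc_txfifo_addr and re-enable the
 * transmitter.
 */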
4734
4735 /*
4736 * wm_82547_txfifo_bugchk:
4737 *
4738 * Check for bug condition in the 82547 Tx FIFO. We need to
4739 * prevent enqueueing a packet that would wrap around the end
4740 * of the Tx FIFO ring buffer, otherwise the chip will croak.
4741 *
4742 * We do this by checking the amount of space before the end
4743 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
4744 * the Tx FIFO, wait for all remaining packets to drain, reset
4745 * the internal FIFO pointers to the beginning, and restart
4746 * transmission on the interface.
4747 */
4748 #define WM_FIFO_HDR 0x10
4749 #define WM_82547_PAD_LEN 0x3e0
4750 static int
4751 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
4752 {
4753 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
4754 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
4755
4756 /* Just return if already stalled. */
4757 if (sc->sc_txfifo_stall)
4758 return 1;
4759
4760 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4761 /* Stall only occurs in half-duplex mode. */
4762 goto send_packet;
4763 }
4764
4765 if (len >= WM_82547_PAD_LEN + space) {
4766 sc->sc_txfifo_stall = 1;
4767 callout_schedule(&sc->sc_txfifo_ch, 1);
4768 return 1;
4769 }
4770
4771 send_packet:
4772 sc->sc_txfifo_head += len;
4773 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
4774 sc->sc_txfifo_head -= sc->sc_txfifo_size;
4775
4776 return 0;
4777 }
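/*
 * Worked example (editor's addition): a 1514-byte frame consumes
 * roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of FIFO
 * space.  In half-duplex mode the stall is triggered as soon as
 * len >= WM_82547_PAD_LEN (0x3e0 = 992) plus the space left before
 * the FIFO wraps, i.e. well before the packet could actually wrap
 * and hang the chip.
 */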
4778
4779 /*
4780 * wm_start: [ifnet interface function]
4781 *
4782 * Start packet transmission on the interface.
4783 */
4784 static void
4785 wm_start(struct ifnet *ifp)
4786 {
4787 struct wm_softc *sc = ifp->if_softc;
4788
4789 WM_TX_LOCK(sc);
4790 if (!sc->sc_stopping)
4791 wm_start_locked(ifp);
4792 WM_TX_UNLOCK(sc);
4793 }
4794
4795 static void
4796 wm_start_locked(struct ifnet *ifp)
4797 {
4798 struct wm_softc *sc = ifp->if_softc;
4799 struct mbuf *m0;
4800 struct m_tag *mtag;
4801 struct wm_txsoft *txs;
4802 bus_dmamap_t dmamap;
4803 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
4804 bus_addr_t curaddr;
4805 bus_size_t seglen, curlen;
4806 uint32_t cksumcmd;
4807 uint8_t cksumfields;
4808
4809 KASSERT(WM_TX_LOCKED(sc));
4810
4811 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4812 return;
4813
4814 /* Remember the previous number of free descriptors. */
4815 ofree = sc->sc_txfree;
4816
4817 /*
4818 * Loop through the send queue, setting up transmit descriptors
4819 * until we drain the queue, or use up all available transmit
4820 * descriptors.
4821 */
4822 for (;;) {
4823 m0 = NULL;
4824
4825 /* Get a work queue entry. */
4826 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
4827 wm_txintr(sc);
4828 if (sc->sc_txsfree == 0) {
4829 DPRINTF(WM_DEBUG_TX,
4830 ("%s: TX: no free job descriptors\n",
4831 device_xname(sc->sc_dev)));
4832 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
4833 break;
4834 }
4835 }
4836
4837 /* Grab a packet off the queue. */
4838 IFQ_DEQUEUE(&ifp->if_snd, m0);
4839 if (m0 == NULL)
4840 break;
4841
4842 DPRINTF(WM_DEBUG_TX,
4843 ("%s: TX: have packet to transmit: %p\n",
4844 device_xname(sc->sc_dev), m0));
4845
4846 txs = &sc->sc_txsoft[sc->sc_txsnext];
4847 dmamap = txs->txs_dmamap;
4848
4849 use_tso = (m0->m_pkthdr.csum_flags &
4850 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
4851
4852 /*
4853 * So says the Linux driver:
4854 * The controller does a simple calculation to make sure
4855 * there is enough room in the FIFO before initiating the
4856 * DMA for each buffer. The calc is:
4857 * 4 = ceil(buffer len / MSS)
4858 * To make sure we don't overrun the FIFO, adjust the max
4859 * buffer len if the MSS drops.
4860 */
4861 dmamap->dm_maxsegsz =
4862 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
4863 ? m0->m_pkthdr.segsz << 2
4864 : WTX_MAX_LEN;
4865
4866 /*
4867 * Load the DMA map. If this fails, the packet either
4868 * didn't fit in the allotted number of segments, or we
4869 * were short on resources. For the too-many-segments
4870 * case, we simply report an error and drop the packet,
4871 * since we can't sanely copy a jumbo packet to a single
4872 * buffer.
4873 */
4874 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
4875 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
4876 if (error) {
4877 if (error == EFBIG) {
4878 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4879 log(LOG_ERR, "%s: Tx packet consumes too many "
4880 "DMA segments, dropping...\n",
4881 device_xname(sc->sc_dev));
4882 wm_dump_mbuf_chain(sc, m0);
4883 m_freem(m0);
4884 continue;
4885 }
4886 /* Short on resources, just stop for now. */
4887 DPRINTF(WM_DEBUG_TX,
4888 ("%s: TX: dmamap load failed: %d\n",
4889 device_xname(sc->sc_dev), error));
4890 break;
4891 }
4892
4893 segs_needed = dmamap->dm_nsegs;
4894 if (use_tso) {
4895 /* For sentinel descriptor; see below. */
4896 segs_needed++;
4897 }
4898
4899 /*
4900 * Ensure we have enough descriptors free to describe
4901 * the packet. Note, we always reserve one descriptor
4902 * at the end of the ring due to the semantics of the
4903 * TDT register, plus one more in the event we need
4904 * to load offload context.
4905 */
4906 if (segs_needed > sc->sc_txfree - 2) {
4907 /*
4908 * Not enough free descriptors to transmit this
4909 * packet. We haven't committed anything yet,
4910 * so just unload the DMA map and punt; the packet is
4911 * freed after the loop. Notify the upper
4912 * layer that there are no more slots left.
4913 */
4914 DPRINTF(WM_DEBUG_TX,
4915 ("%s: TX: need %d (%d) descriptors, have %d\n",
4916 device_xname(sc->sc_dev), dmamap->dm_nsegs,
4917 segs_needed, sc->sc_txfree - 1));
4918 ifp->if_flags |= IFF_OACTIVE;
4919 bus_dmamap_unload(sc->sc_dmat, dmamap);
4920 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
4921 break;
4922 }
4923
4924 /*
4925 * Check for 82547 Tx FIFO bug. We need to do this
4926 * once we know we can transmit the packet, since we
4927 * do some internal FIFO space accounting here.
4928 */
4929 if (sc->sc_type == WM_T_82547 &&
4930 wm_82547_txfifo_bugchk(sc, m0)) {
4931 DPRINTF(WM_DEBUG_TX,
4932 ("%s: TX: 82547 Tx FIFO bug detected\n",
4933 device_xname(sc->sc_dev)));
4934 ifp->if_flags |= IFF_OACTIVE;
4935 bus_dmamap_unload(sc->sc_dmat, dmamap);
4936 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
4937 break;
4938 }
4939
4940 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
4941
4942 DPRINTF(WM_DEBUG_TX,
4943 ("%s: TX: packet has %d (%d) DMA segments\n",
4944 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
4945
4946 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
4947
4948 /*
4949 * Store a pointer to the packet so that we can free it
4950 * later.
4951 *
4952 * Initially, we consider the number of descriptors the
4953 * packet uses to be the number of DMA segments. This may be
4954 * incremented by 1 if we do checksum offload (a descriptor
4955 * is used to set the checksum context).
4956 */
4957 txs->txs_mbuf = m0;
4958 txs->txs_firstdesc = sc->sc_txnext;
4959 txs->txs_ndesc = segs_needed;
4960
4961 /* Set up offload parameters for this packet. */
4962 if (m0->m_pkthdr.csum_flags &
4963 (M_CSUM_TSOv4|M_CSUM_TSOv6|
4964 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
4965 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
4966 if (wm_tx_offload(sc, txs, &cksumcmd,
4967 &cksumfields) != 0) {
4968 /* Error message already displayed. */
4969 bus_dmamap_unload(sc->sc_dmat, dmamap);
4970 continue;
4971 }
4972 } else {
4973 cksumcmd = 0;
4974 cksumfields = 0;
4975 }
4976
4977 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
4978
4979 /* Sync the DMA map. */
4980 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
4981 BUS_DMASYNC_PREWRITE);
4982
4983 /* Initialize the transmit descriptor. */
4984 for (nexttx = sc->sc_txnext, seg = 0;
4985 seg < dmamap->dm_nsegs; seg++) {
4986 for (seglen = dmamap->dm_segs[seg].ds_len,
4987 curaddr = dmamap->dm_segs[seg].ds_addr;
4988 seglen != 0;
4989 curaddr += curlen, seglen -= curlen,
4990 nexttx = WM_NEXTTX(sc, nexttx)) {
4991 curlen = seglen;
4992
4993 /*
4994 * So says the Linux driver:
4995 * Work around for premature descriptor
4996 * write-backs in TSO mode. Append a
4997 * 4-byte sentinel descriptor.
4998 */
4999 if (use_tso &&
5000 seg == dmamap->dm_nsegs - 1 &&
5001 curlen > 8)
5002 curlen -= 4;
5003
5004 wm_set_dma_addr(
5005 &sc->sc_txdescs[nexttx].wtx_addr,
5006 curaddr);
5007 sc->sc_txdescs[nexttx].wtx_cmdlen =
5008 htole32(cksumcmd | curlen);
5009 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
5010 0;
5011 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
5012 cksumfields;
5013 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5014 lasttx = nexttx;
5015
5016 DPRINTF(WM_DEBUG_TX,
5017 ("%s: TX: desc %d: low %#" PRIx64 ", "
5018 "len %#04zx\n",
5019 device_xname(sc->sc_dev), nexttx,
5020 (uint64_t)curaddr, curlen));
5021 }
5022 }
5023
5024 KASSERT(lasttx != -1);
5025
5026 /*
5027 * Set up the command byte on the last descriptor of
5028 * the packet. If we're in the interrupt delay window,
5029 * delay the interrupt.
5030 */
5031 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5032 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5033
5034 /*
5035 * If VLANs are enabled and the packet has a VLAN tag, set
5036 * up the descriptor to encapsulate the packet for us.
5037 *
5038 * This is only valid on the last descriptor of the packet.
5039 */
5040 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5041 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5042 htole32(WTX_CMD_VLE);
5043 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
5044 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5045 }
5046
5047 txs->txs_lastdesc = lasttx;
5048
5049 DPRINTF(WM_DEBUG_TX,
5050 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5051 device_xname(sc->sc_dev),
5052 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5053
5054 /* Sync the descriptors we're using. */
5055 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5056 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5057
5058 /* Give the packet to the chip. */
5059 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5060
5061 DPRINTF(WM_DEBUG_TX,
5062 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5063
5064 DPRINTF(WM_DEBUG_TX,
5065 ("%s: TX: finished transmitting packet, job %d\n",
5066 device_xname(sc->sc_dev), sc->sc_txsnext));
5067
5068 /* Advance the tx pointer. */
5069 sc->sc_txfree -= txs->txs_ndesc;
5070 sc->sc_txnext = nexttx;
5071
5072 sc->sc_txsfree--;
5073 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5074
5075 /* Pass the packet to any BPF listeners. */
5076 bpf_mtap(ifp, m0);
5077 }
5078
5079 if (m0 != NULL) {
5080 ifp->if_flags |= IFF_OACTIVE;
5081 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5082 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5083 m_freem(m0);
5084 }
5085
5086 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5087 /* No more slots; notify upper layer. */
5088 ifp->if_flags |= IFF_OACTIVE;
5089 }
5090
5091 if (sc->sc_txfree != ofree) {
5092 /* Set a watchdog timer in case the chip flakes out. */
5093 ifp->if_timer = 5;
5094 }
5095 }
5096
5097 /*
5098 * wm_nq_tx_offload:
5099 *
5100 * Set up TCP/IP checksumming parameters for the
5101 * specified packet, for NEWQUEUE devices
5102 */
5103 static int
5104 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
5105 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
5106 {
5107 struct mbuf *m0 = txs->txs_mbuf;
5108 struct m_tag *mtag;
5109 uint32_t vl_len, mssidx, cmdc;
5110 struct ether_header *eh;
5111 int offset, iphl;
5112
5113 /*
5114 * XXX It would be nice if the mbuf pkthdr had offset
5115 * fields for the protocol headers.
5116 */
5117 *cmdlenp = 0;
5118 *fieldsp = 0;
5119
5120 eh = mtod(m0, struct ether_header *);
5121 switch (htons(eh->ether_type)) {
5122 case ETHERTYPE_IP:
5123 case ETHERTYPE_IPV6:
5124 offset = ETHER_HDR_LEN;
5125 break;
5126
5127 case ETHERTYPE_VLAN:
5128 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5129 break;
5130
5131 default:
5132 /* Don't support this protocol or encapsulation. */
5133 *do_csum = false;
5134 return 0;
5135 }
5136 *do_csum = true;
5137 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
5138 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
5139
5140 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
5141 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
5142
5143 if ((m0->m_pkthdr.csum_flags &
5144 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
5145 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5146 } else {
5147 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5148 }
5149 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
5150 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
5151
5152 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5153 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
5154 << NQTXC_VLLEN_VLAN_SHIFT);
5155 *cmdlenp |= NQTX_CMD_VLE;
5156 }
5157
5158 mssidx = 0;
5159
5160 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5161 int hlen = offset + iphl;
5162 int tcp_hlen;
5163 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5164
5165 if (__predict_false(m0->m_len <
5166 (hlen + sizeof(struct tcphdr)))) {
5167 /*
5168 * TCP/IP headers are not in the first mbuf; we need
5169 * to do this the slow and painful way. Let's just
5170 * hope this doesn't happen very often.
5171 */
5172 struct tcphdr th;
5173
5174 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5175
5176 m_copydata(m0, hlen, sizeof(th), &th);
5177 if (v4) {
5178 struct ip ip;
5179
5180 m_copydata(m0, offset, sizeof(ip), &ip);
5181 ip.ip_len = 0;
5182 m_copyback(m0,
5183 offset + offsetof(struct ip, ip_len),
5184 sizeof(ip.ip_len), &ip.ip_len);
5185 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5186 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5187 } else {
5188 struct ip6_hdr ip6;
5189
5190 m_copydata(m0, offset, sizeof(ip6), &ip6);
5191 ip6.ip6_plen = 0;
5192 m_copyback(m0,
5193 offset + offsetof(struct ip6_hdr, ip6_plen),
5194 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5195 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5196 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5197 }
5198 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5199 sizeof(th.th_sum), &th.th_sum);
5200
5201 tcp_hlen = th.th_off << 2;
5202 } else {
5203 /*
5204 * TCP/IP headers are in the first mbuf; we can do
5205 * this the easy way.
5206 */
5207 struct tcphdr *th;
5208
5209 if (v4) {
5210 struct ip *ip =
5211 (void *)(mtod(m0, char *) + offset);
5212 th = (void *)(mtod(m0, char *) + hlen);
5213
5214 ip->ip_len = 0;
5215 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5216 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5217 } else {
5218 struct ip6_hdr *ip6 =
5219 (void *)(mtod(m0, char *) + offset);
5220 th = (void *)(mtod(m0, char *) + hlen);
5221
5222 ip6->ip6_plen = 0;
5223 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5224 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5225 }
5226 tcp_hlen = th->th_off << 2;
5227 }
5228 hlen += tcp_hlen;
5229 *cmdlenp |= NQTX_CMD_TSE;
5230
5231 if (v4) {
5232 WM_EVCNT_INCR(&sc->sc_ev_txtso);
5233 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
5234 } else {
5235 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5236 *fieldsp |= NQTXD_FIELDS_TUXSM;
5237 }
5238 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
5239 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5240 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
5241 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
5242 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
5243 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
5244 } else {
5245 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
5246 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5247 }
5248
5249 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
5250 *fieldsp |= NQTXD_FIELDS_IXSM;
5251 cmdc |= NQTXC_CMD_IP4;
5252 }
5253
5254 if (m0->m_pkthdr.csum_flags &
5255 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5256 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5257 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5258 cmdc |= NQTXC_CMD_TCP;
5259 } else {
5260 cmdc |= NQTXC_CMD_UDP;
5261 }
5262 cmdc |= NQTXC_CMD_IP4;
5263 *fieldsp |= NQTXD_FIELDS_TUXSM;
5264 }
5265 if (m0->m_pkthdr.csum_flags &
5266 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5267 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5268 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5269 cmdc |= NQTXC_CMD_TCP;
5270 } else {
5271 cmdc |= NQTXC_CMD_UDP;
5272 }
5273 cmdc |= NQTXC_CMD_IP6;
5274 *fieldsp |= NQTXD_FIELDS_TUXSM;
5275 }
5276
5277 /* Fill in the context descriptor. */
5278 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5279 htole32(vl_len);
5280 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5281 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5282 htole32(cmdc);
5283 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5284 htole32(mssidx);
5285 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5286 DPRINTF(WM_DEBUG_TX,
5287 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5288 sc->sc_txnext, 0, vl_len));
5289 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5290 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5291 txs->txs_ndesc++;
5292 return 0;
5293 }
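/*
 * Illustrative note (editor's addition): the advanced context
 * descriptor packs three header lengths plus the VLAN tag into the
 * single 32-bit vl_len word built above:
 *
 *	vl_len = (MAC header len (offset) << NQTXC_VLLEN_MACLEN_SHIFT)
 *	       | (IP header len (iphl)    << NQTXC_VLLEN_IPLEN_SHIFT)
 *	       | (VLAN tag                << NQTXC_VLLEN_VLAN_SHIFT)
 *
 * The KASSERTs verify that each value fits its field's mask before it
 * is shifted into place.
 */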
5294
5295 /*
5296 * wm_nq_start: [ifnet interface function]
5297 *
5298 * Start packet transmission on the interface for NEWQUEUE devices
5299 */
5300 static void
5301 wm_nq_start(struct ifnet *ifp)
5302 {
5303 struct wm_softc *sc = ifp->if_softc;
5304
5305 WM_TX_LOCK(sc);
5306 if (!sc->sc_stopping)
5307 wm_nq_start_locked(ifp);
5308 WM_TX_UNLOCK(sc);
5309 }
5310
5311 static void
5312 wm_nq_start_locked(struct ifnet *ifp)
5313 {
5314 struct wm_softc *sc = ifp->if_softc;
5315 struct mbuf *m0;
5316 struct m_tag *mtag;
5317 struct wm_txsoft *txs;
5318 bus_dmamap_t dmamap;
5319 int error, nexttx, lasttx = -1, seg, segs_needed;
5320 bool do_csum, sent;
5321
5322 KASSERT(WM_TX_LOCKED(sc));
5323
5324 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5325 return;
5326
5327 sent = false;
5328
5329 /*
5330 * Loop through the send queue, setting up transmit descriptors
5331 * until we drain the queue, or use up all available transmit
5332 * descriptors.
5333 */
5334 for (;;) {
5335 m0 = NULL;
5336
5337 /* Get a work queue entry. */
5338 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5339 wm_txintr(sc);
5340 if (sc->sc_txsfree == 0) {
5341 DPRINTF(WM_DEBUG_TX,
5342 ("%s: TX: no free job descriptors\n",
5343 device_xname(sc->sc_dev)));
5344 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5345 break;
5346 }
5347 }
5348
5349 /* Grab a packet off the queue. */
5350 IFQ_DEQUEUE(&ifp->if_snd, m0);
5351 if (m0 == NULL)
5352 break;
5353
5354 DPRINTF(WM_DEBUG_TX,
5355 ("%s: TX: have packet to transmit: %p\n",
5356 device_xname(sc->sc_dev), m0));
5357
5358 txs = &sc->sc_txsoft[sc->sc_txsnext];
5359 dmamap = txs->txs_dmamap;
5360
5361 /*
5362 * Load the DMA map. If this fails, the packet either
5363 * didn't fit in the allotted number of segments, or we
5364 * were short on resources. For the too-many-segments
5365 * case, we simply report an error and drop the packet,
5366 * since we can't sanely copy a jumbo packet to a single
5367 * buffer.
5368 */
5369 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5370 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5371 if (error) {
5372 if (error == EFBIG) {
5373 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5374 log(LOG_ERR, "%s: Tx packet consumes too many "
5375 "DMA segments, dropping...\n",
5376 device_xname(sc->sc_dev));
5377 wm_dump_mbuf_chain(sc, m0);
5378 m_freem(m0);
5379 continue;
5380 }
5381 /* Short on resources, just stop for now. */
5382 DPRINTF(WM_DEBUG_TX,
5383 ("%s: TX: dmamap load failed: %d\n",
5384 device_xname(sc->sc_dev), error));
5385 break;
5386 }
5387
5388 segs_needed = dmamap->dm_nsegs;
5389
5390 /*
5391 * Ensure we have enough descriptors free to describe
5392 * the packet. Note, we always reserve one descriptor
5393 * at the end of the ring due to the semantics of the
5394 * TDT register, plus one more in the event we need
5395 * to load offload context.
5396 */
5397 if (segs_needed > sc->sc_txfree - 2) {
5398 /*
5399 * Not enough free descriptors to transmit this
5400 * packet. We haven't committed anything yet,
5401 * so just unload the DMA map and punt; the packet is
5402 * freed after the loop. Notify the upper
5403 * layer that there are no more slots left.
5404 */
5405 DPRINTF(WM_DEBUG_TX,
5406 ("%s: TX: need %d (%d) descriptors, have %d\n",
5407 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5408 segs_needed, sc->sc_txfree - 1));
5409 ifp->if_flags |= IFF_OACTIVE;
5410 bus_dmamap_unload(sc->sc_dmat, dmamap);
5411 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5412 break;
5413 }
5414
5415 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5416
5417 DPRINTF(WM_DEBUG_TX,
5418 ("%s: TX: packet has %d (%d) DMA segments\n",
5419 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5420
5421 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5422
5423 /*
5424 * Store a pointer to the packet so that we can free it
5425 * later.
5426 *
5427 * Initially, we consider the number of descriptors the
5428 * packet uses to be the number of DMA segments. This may be
5429 * incremented by 1 if we do checksum offload (a descriptor
5430 * is used to set the checksum context).
5431 */
5432 txs->txs_mbuf = m0;
5433 txs->txs_firstdesc = sc->sc_txnext;
5434 txs->txs_ndesc = segs_needed;
5435
5436 /* Set up offload parameters for this packet. */
5437 uint32_t cmdlen, fields, dcmdlen;
5438 if (m0->m_pkthdr.csum_flags &
5439 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5440 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5441 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5442 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5443 &do_csum) != 0) {
5444 /* Error message already displayed. */
5445 bus_dmamap_unload(sc->sc_dmat, dmamap);
5446 continue;
5447 }
5448 } else {
5449 do_csum = false;
5450 cmdlen = 0;
5451 fields = 0;
5452 }
5453
5454 /* Sync the DMA map. */
5455 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5456 BUS_DMASYNC_PREWRITE);
5457
5458 /* Initialize the first transmit descriptor. */
5459 nexttx = sc->sc_txnext;
5460 if (!do_csum) {
5461 /* setup a legacy descriptor */
5462 wm_set_dma_addr(
5463 &sc->sc_txdescs[nexttx].wtx_addr,
5464 dmamap->dm_segs[0].ds_addr);
5465 sc->sc_txdescs[nexttx].wtx_cmdlen =
5466 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5467 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5468 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5469 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5470 NULL) {
5471 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5472 htole32(WTX_CMD_VLE);
5473 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5474 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5475 } else {
5476 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5477 }
5478 dcmdlen = 0;
5479 } else {
5480 /* setup an advanced data descriptor */
5481 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5482 htole64(dmamap->dm_segs[0].ds_addr);
5483 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5484 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5485 htole32(dmamap->dm_segs[0].ds_len | cmdlen );
5486 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5487 htole32(fields);
5488 DPRINTF(WM_DEBUG_TX,
5489 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5490 device_xname(sc->sc_dev), nexttx,
5491 (uint64_t)dmamap->dm_segs[0].ds_addr));
5492 DPRINTF(WM_DEBUG_TX,
5493 ("\t 0x%08x%08x\n", fields,
5494 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5495 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5496 }
5497
5498 lasttx = nexttx;
5499 nexttx = WM_NEXTTX(sc, nexttx);
		/*
		 * Fill in the next descriptors.  The legacy and advanced
		 * formats are handled the same way here.
		 */
5504 for (seg = 1; seg < dmamap->dm_nsegs;
5505 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5506 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5507 htole64(dmamap->dm_segs[seg].ds_addr);
5508 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5509 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5510 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5511 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5512 lasttx = nexttx;
5513
5514 DPRINTF(WM_DEBUG_TX,
5515 ("%s: TX: desc %d: %#" PRIx64 ", "
5516 "len %#04zx\n",
5517 device_xname(sc->sc_dev), nexttx,
5518 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5519 dmamap->dm_segs[seg].ds_len));
5520 }
5521
5522 KASSERT(lasttx != -1);
5523
5524 /*
5525 * Set up the command byte on the last descriptor of
5526 * the packet. If we're in the interrupt delay window,
5527 * delay the interrupt.
5528 */
5529 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5530 (NQTX_CMD_EOP | NQTX_CMD_RS));
5531 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5532 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5533
5534 txs->txs_lastdesc = lasttx;
5535
5536 DPRINTF(WM_DEBUG_TX,
5537 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5538 device_xname(sc->sc_dev),
5539 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5540
5541 /* Sync the descriptors we're using. */
5542 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5543 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5544
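		/*
		 * Note the ordering: the descriptor sync above must
		 * complete before the TDT write below, since advancing
		 * the tail pointer is what tells the device that the
		 * new descriptors are valid to fetch.
		 */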
5545 /* Give the packet to the chip. */
5546 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5547 sent = true;
5548
5549 DPRINTF(WM_DEBUG_TX,
5550 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5551
5552 DPRINTF(WM_DEBUG_TX,
5553 ("%s: TX: finished transmitting packet, job %d\n",
5554 device_xname(sc->sc_dev), sc->sc_txsnext));
5555
5556 /* Advance the tx pointer. */
5557 sc->sc_txfree -= txs->txs_ndesc;
5558 sc->sc_txnext = nexttx;
5559
5560 sc->sc_txsfree--;
5561 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5562
5563 /* Pass the packet to any BPF listeners. */
5564 bpf_mtap(ifp, m0);
5565 }
5566
5567 if (m0 != NULL) {
5568 ifp->if_flags |= IFF_OACTIVE;
5569 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5570 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5571 m_freem(m0);
5572 }
5573
5574 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5575 /* No more slots; notify upper layer. */
5576 ifp->if_flags |= IFF_OACTIVE;
5577 }
5578
5579 if (sent) {
5580 /* Set a watchdog timer in case the chip flakes out. */
5581 ifp->if_timer = 5;
5582 }
5583 }
5584
5585 /* Interrupt */
5586
5587 /*
5588 * wm_txintr:
5589 *
5590 * Helper; handle transmit interrupts.
5591 */
5592 static void
5593 wm_txintr(struct wm_softc *sc)
5594 {
5595 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5596 struct wm_txsoft *txs;
5597 uint8_t status;
5598 int i;
5599
5600 if (sc->sc_stopping)
5601 return;
5602
5603 ifp->if_flags &= ~IFF_OACTIVE;
5604
5605 /*
5606 * Go through the Tx list and free mbufs for those
5607 * frames which have been transmitted.
5608 */
5609 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5610 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
5611 txs = &sc->sc_txsoft[i];
5612
5613 DPRINTF(WM_DEBUG_TX,
5614 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
5615
5616 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
5617 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5618
5619 status =
5620 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
5621 if ((status & WTX_ST_DD) == 0) {
5622 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
5623 BUS_DMASYNC_PREREAD);
5624 break;
5625 }
5626
5627 DPRINTF(WM_DEBUG_TX,
5628 ("%s: TX: job %d done: descs %d..%d\n",
5629 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
5630 txs->txs_lastdesc));
5631
5632 /*
5633 * XXX We should probably be using the statistics
5634 * XXX registers, but I don't know if they exist
5635 * XXX on chips before the i82544.
5636 */
5637
5638 #ifdef WM_EVENT_COUNTERS
5639 if (status & WTX_ST_TU)
5640 WM_EVCNT_INCR(&sc->sc_ev_tu);
5641 #endif /* WM_EVENT_COUNTERS */
5642
5643 if (status & (WTX_ST_EC|WTX_ST_LC)) {
5644 ifp->if_oerrors++;
5645 if (status & WTX_ST_LC)
5646 log(LOG_WARNING, "%s: late collision\n",
5647 device_xname(sc->sc_dev));
5648 else if (status & WTX_ST_EC) {
5649 ifp->if_collisions += 16;
5650 log(LOG_WARNING, "%s: excessive collisions\n",
5651 device_xname(sc->sc_dev));
5652 }
5653 } else
5654 ifp->if_opackets++;
5655
5656 sc->sc_txfree += txs->txs_ndesc;
5657 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
5658 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5659 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5660 m_freem(txs->txs_mbuf);
5661 txs->txs_mbuf = NULL;
5662 }
5663
5664 /* Update the dirty transmit buffer pointer. */
5665 sc->sc_txsdirty = i;
5666 DPRINTF(WM_DEBUG_TX,
5667 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
5668
5669 /*
5670 * If there are no more pending transmissions, cancel the watchdog
5671 * timer.
5672 */
5673 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
5674 ifp->if_timer = 0;
5675 }
5676
5677 /*
5678 * wm_rxintr:
5679 *
5680 * Helper; handle receive interrupts.
5681 */
5682 static void
5683 wm_rxintr(struct wm_softc *sc)
5684 {
5685 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5686 struct wm_rxsoft *rxs;
5687 struct mbuf *m;
5688 int i, len;
5689 uint8_t status, errors;
5690 uint16_t vlantag;
5691
5692 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
5693 rxs = &sc->sc_rxsoft[i];
5694
5695 DPRINTF(WM_DEBUG_RX,
5696 ("%s: RX: checking descriptor %d\n",
5697 device_xname(sc->sc_dev), i));
5698
5699 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5700
5701 status = sc->sc_rxdescs[i].wrx_status;
5702 errors = sc->sc_rxdescs[i].wrx_errors;
5703 len = le16toh(sc->sc_rxdescs[i].wrx_len);
5704 vlantag = sc->sc_rxdescs[i].wrx_special;
5705
5706 if ((status & WRX_ST_DD) == 0) {
5707 /* We have processed all of the receive descriptors. */
5708 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
5709 break;
5710 }
5711
5712 if (__predict_false(sc->sc_rxdiscard)) {
5713 DPRINTF(WM_DEBUG_RX,
5714 ("%s: RX: discarding contents of descriptor %d\n",
5715 device_xname(sc->sc_dev), i));
5716 WM_INIT_RXDESC(sc, i);
5717 if (status & WRX_ST_EOP) {
5718 /* Reset our state. */
5719 DPRINTF(WM_DEBUG_RX,
5720 ("%s: RX: resetting rxdiscard -> 0\n",
5721 device_xname(sc->sc_dev)));
5722 sc->sc_rxdiscard = 0;
5723 }
5724 continue;
5725 }
5726
5727 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5728 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
5729
5730 m = rxs->rxs_mbuf;
5731
5732 /*
5733 * Add a new receive buffer to the ring, unless of
5734 * course the length is zero. Treat the latter as a
5735 * failed mapping.
5736 */
5737 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
5738 /*
5739 * Failed, throw away what we've done so
5740 * far, and discard the rest of the packet.
5741 */
5742 ifp->if_ierrors++;
5743 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5744 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5745 WM_INIT_RXDESC(sc, i);
5746 if ((status & WRX_ST_EOP) == 0)
5747 sc->sc_rxdiscard = 1;
5748 if (sc->sc_rxhead != NULL)
5749 m_freem(sc->sc_rxhead);
5750 WM_RXCHAIN_RESET(sc);
5751 DPRINTF(WM_DEBUG_RX,
5752 ("%s: RX: Rx buffer allocation failed, "
5753 "dropping packet%s\n", device_xname(sc->sc_dev),
5754 sc->sc_rxdiscard ? " (discard)" : ""));
5755 continue;
5756 }
5757
5758 m->m_len = len;
5759 sc->sc_rxlen += len;
5760 DPRINTF(WM_DEBUG_RX,
5761 ("%s: RX: buffer at %p len %d\n",
5762 device_xname(sc->sc_dev), m->m_data, len));
5763
5764 /* If this is not the end of the packet, keep looking. */
5765 if ((status & WRX_ST_EOP) == 0) {
5766 WM_RXCHAIN_LINK(sc, m);
5767 DPRINTF(WM_DEBUG_RX,
5768 ("%s: RX: not yet EOP, rxlen -> %d\n",
5769 device_xname(sc->sc_dev), sc->sc_rxlen));
5770 continue;
5771 }
5772
		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS except on the I350/I354
		 * and I21[01] (not all chips can be configured to strip
		 * it), so we need to trim it.  We may also need to
		 * adjust the length of the previous mbuf in the chain
		 * if the current mbuf is too short.
		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
		 * register is always set on the I350, so the hardware
		 * has already stripped the FCS and we must not trim it.
		 */
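		/*
		 * For example, if ETHER_CRC_LEN is 4 and the last mbuf
		 * holds only 1 byte, that byte is dropped (m_len = 0)
		 * and the previous mbuf gives up the remaining 3 bytes.
		 */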
5783 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
5784 && (sc->sc_type != WM_T_I210)
5785 && (sc->sc_type != WM_T_I211)) {
5786 if (m->m_len < ETHER_CRC_LEN) {
5787 sc->sc_rxtail->m_len
5788 -= (ETHER_CRC_LEN - m->m_len);
5789 m->m_len = 0;
5790 } else
5791 m->m_len -= ETHER_CRC_LEN;
5792 len = sc->sc_rxlen - ETHER_CRC_LEN;
5793 } else
5794 len = sc->sc_rxlen;
5795
5796 WM_RXCHAIN_LINK(sc, m);
5797
5798 *sc->sc_rxtailp = NULL;
5799 m = sc->sc_rxhead;
5800
5801 WM_RXCHAIN_RESET(sc);
5802
5803 DPRINTF(WM_DEBUG_RX,
5804 ("%s: RX: have entire packet, len -> %d\n",
5805 device_xname(sc->sc_dev), len));
5806
5807 /* If an error occurred, update stats and drop the packet. */
5808 if (errors &
5809 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
5810 if (errors & WRX_ER_SE)
5811 log(LOG_WARNING, "%s: symbol error\n",
5812 device_xname(sc->sc_dev));
5813 else if (errors & WRX_ER_SEQ)
5814 log(LOG_WARNING, "%s: receive sequence error\n",
5815 device_xname(sc->sc_dev));
5816 else if (errors & WRX_ER_CE)
5817 log(LOG_WARNING, "%s: CRC error\n",
5818 device_xname(sc->sc_dev));
5819 m_freem(m);
5820 continue;
5821 }
5822
5823 /* No errors. Receive the packet. */
5824 m->m_pkthdr.rcvif = ifp;
5825 m->m_pkthdr.len = len;
5826
5827 /*
5828 * If VLANs are enabled, VLAN packets have been unwrapped
5829 * for us. Associate the tag with the packet.
5830 */
5831 /* XXXX should check for i350 and i354 */
5832 if ((status & WRX_ST_VP) != 0) {
5833 VLAN_INPUT_TAG(ifp, m,
5834 le16toh(vlantag),
5835 continue);
5836 }
5837
5838 /* Set up checksum info for this packet. */
5839 if ((status & WRX_ST_IXSM) == 0) {
5840 if (status & WRX_ST_IPCS) {
5841 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
5842 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
5843 if (errors & WRX_ER_IPE)
5844 m->m_pkthdr.csum_flags |=
5845 M_CSUM_IPv4_BAD;
5846 }
5847 if (status & WRX_ST_TCPCS) {
5848 /*
5849 * Note: we don't know if this was TCP or UDP,
5850 * so we just set both bits, and expect the
5851 * upper layers to deal.
5852 */
5853 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
5854 m->m_pkthdr.csum_flags |=
5855 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
5856 M_CSUM_TCPv6 | M_CSUM_UDPv6;
5857 if (errors & WRX_ER_TCPE)
5858 m->m_pkthdr.csum_flags |=
5859 M_CSUM_TCP_UDP_BAD;
5860 }
5861 }
5862
5863 ifp->if_ipackets++;
5864
5865 WM_RX_UNLOCK(sc);
5866
5867 /* Pass this up to any BPF listeners. */
5868 bpf_mtap(ifp, m);
5869
5870 /* Pass it on. */
5871 (*ifp->if_input)(ifp, m);
5872
5873 WM_RX_LOCK(sc);
5874
5875 if (sc->sc_stopping)
5876 break;
5877 }
5878
5879 /* Update the receive pointer. */
5880 sc->sc_rxptr = i;
5881
5882 DPRINTF(WM_DEBUG_RX,
5883 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
5884 }
5885
5886 /*
5887 * wm_linkintr_gmii:
5888 *
5889 * Helper; handle link interrupts for GMII.
5890 */
5891 static void
5892 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
5893 {
5894
5895 KASSERT(WM_TX_LOCKED(sc));
5896
5897 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5898 __func__));
5899
5900 if (icr & ICR_LSC) {
5901 DPRINTF(WM_DEBUG_LINK,
5902 ("%s: LINK: LSC -> mii_pollstat\n",
5903 device_xname(sc->sc_dev)));
5904 mii_pollstat(&sc->sc_mii);
5905 if (sc->sc_type == WM_T_82543) {
5906 int miistatus, active;
5907
5908 /*
5909 * With 82543, we need to force speed and
5910 * duplex on the MAC equal to what the PHY
5911 * speed and duplex configuration is.
5912 */
5913 miistatus = sc->sc_mii.mii_media_status;
5914
5915 if (miistatus & IFM_ACTIVE) {
5916 active = sc->sc_mii.mii_media_active;
5917 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5918 switch (IFM_SUBTYPE(active)) {
5919 case IFM_10_T:
5920 sc->sc_ctrl |= CTRL_SPEED_10;
5921 break;
5922 case IFM_100_TX:
5923 sc->sc_ctrl |= CTRL_SPEED_100;
5924 break;
5925 case IFM_1000_T:
5926 sc->sc_ctrl |= CTRL_SPEED_1000;
5927 break;
5928 default:
					/*
					 * Fiber?
					 * Should not enter here.
					 */
5933 printf("unknown media (%x)\n",
5934 active);
5935 break;
5936 }
5937 if (active & IFM_FDX)
5938 sc->sc_ctrl |= CTRL_FD;
5939 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5940 }
5941 } else if ((sc->sc_type == WM_T_ICH8)
5942 && (sc->sc_phytype == WMPHY_IGP_3)) {
5943 wm_kmrn_lock_loss_workaround_ich8lan(sc);
5944 } else if (sc->sc_type == WM_T_PCH) {
5945 wm_k1_gig_workaround_hv(sc,
5946 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
5947 }
5948
5949 if ((sc->sc_phytype == WMPHY_82578)
5950 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
5951 == IFM_1000_T)) {
5952
5953 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
5954 delay(200*1000); /* XXX too big */
5955
5956 /* Link stall fix for link up */
5957 wm_gmii_hv_writereg(sc->sc_dev, 1,
5958 HV_MUX_DATA_CTRL,
5959 HV_MUX_DATA_CTRL_GEN_TO_MAC
5960 | HV_MUX_DATA_CTRL_FORCE_SPEED);
5961 wm_gmii_hv_writereg(sc->sc_dev, 1,
5962 HV_MUX_DATA_CTRL,
5963 HV_MUX_DATA_CTRL_GEN_TO_MAC);
5964 }
5965 }
5966 } else if (icr & ICR_RXSEQ) {
5967 DPRINTF(WM_DEBUG_LINK,
5968 ("%s: LINK Receive sequence error\n",
5969 device_xname(sc->sc_dev)));
5970 }
5971 }
5972
5973 /*
5974 * wm_linkintr_tbi:
5975 *
5976 * Helper; handle link interrupts for TBI mode.
5977 */
5978 static void
5979 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
5980 {
5981 uint32_t status;
5982
5983 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5984 __func__));
5985
5986 status = CSR_READ(sc, WMREG_STATUS);
5987 if (icr & ICR_LSC) {
5988 if (status & STATUS_LU) {
5989 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
5990 device_xname(sc->sc_dev),
5991 (status & STATUS_FD) ? "FDX" : "HDX"));
			/*
			 * NOTE: the hardware updates TFCE and RFCE in
			 * CTRL automatically, so we should keep
			 * sc->sc_ctrl in sync with it.
			 */
5996
5997 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5998 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5999 sc->sc_fcrtl &= ~FCRTL_XONE;
6000 if (status & STATUS_FD)
6001 sc->sc_tctl |=
6002 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6003 else
6004 sc->sc_tctl |=
6005 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6006 if (sc->sc_ctrl & CTRL_TFCE)
6007 sc->sc_fcrtl |= FCRTL_XONE;
6008 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6009 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
6010 WMREG_OLD_FCRTL : WMREG_FCRTL,
6011 sc->sc_fcrtl);
6012 sc->sc_tbi_linkup = 1;
6013 } else {
6014 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
6015 device_xname(sc->sc_dev)));
6016 sc->sc_tbi_linkup = 0;
6017 }
6018 wm_tbi_set_linkled(sc);
6019 } else if (icr & ICR_RXSEQ) {
6020 DPRINTF(WM_DEBUG_LINK,
6021 ("%s: LINK: Receive sequence error\n",
6022 device_xname(sc->sc_dev)));
6023 }
6024 }
6025
6026 /*
6027 * wm_linkintr:
6028 *
6029 * Helper; handle link interrupts.
6030 */
6031 static void
6032 wm_linkintr(struct wm_softc *sc, uint32_t icr)
6033 {
6034
6035 if (sc->sc_flags & WM_F_HAS_MII)
6036 wm_linkintr_gmii(sc, icr);
6037 else
6038 wm_linkintr_tbi(sc, icr);
6039 }
6040
6041 /*
6042 * wm_intr:
6043 *
6044 * Interrupt service routine.
6045 */
6046 static int
6047 wm_intr(void *arg)
6048 {
6049 struct wm_softc *sc = arg;
6050 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6051 uint32_t icr;
6052 int handled = 0;
6053
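	/*
	 * ICR is read-to-clear on this hardware: the read below both
	 * fetches and acknowledges the pending interrupt causes, so
	 * the loop runs until none of the causes we care about remain.
	 */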
6054 while (1 /* CONSTCOND */) {
6055 icr = CSR_READ(sc, WMREG_ICR);
6056 if ((icr & sc->sc_icr) == 0)
6057 break;
6058 rnd_add_uint32(&sc->rnd_source, icr);
6059
6060 WM_RX_LOCK(sc);
6061
6062 if (sc->sc_stopping) {
6063 WM_RX_UNLOCK(sc);
6064 break;
6065 }
6066
6067 handled = 1;
6068
6069 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6070 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
6071 DPRINTF(WM_DEBUG_RX,
6072 ("%s: RX: got Rx intr 0x%08x\n",
6073 device_xname(sc->sc_dev),
6074 icr & (ICR_RXDMT0|ICR_RXT0)));
6075 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
6076 }
6077 #endif
6078 wm_rxintr(sc);
6079
6080 WM_RX_UNLOCK(sc);
6081 WM_TX_LOCK(sc);
6082
6083 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6084 if (icr & ICR_TXDW) {
6085 DPRINTF(WM_DEBUG_TX,
6086 ("%s: TX: got TXDW interrupt\n",
6087 device_xname(sc->sc_dev)));
6088 WM_EVCNT_INCR(&sc->sc_ev_txdw);
6089 }
6090 #endif
6091 wm_txintr(sc);
6092
6093 if (icr & (ICR_LSC|ICR_RXSEQ)) {
6094 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
6095 wm_linkintr(sc, icr);
6096 }
6097
6098 WM_TX_UNLOCK(sc);
6099
6100 if (icr & ICR_RXO) {
6101 #if defined(WM_DEBUG)
6102 log(LOG_WARNING, "%s: Receive overrun\n",
6103 device_xname(sc->sc_dev));
6104 #endif /* defined(WM_DEBUG) */
6105 }
6106 }
6107
6108 if (handled) {
6109 /* Try to get more packets going. */
6110 ifp->if_start(ifp);
6111 }
6112
6113 return handled;
6114 }
6115
6116 /*
6117 * Media related.
6118 * GMII, SGMII, TBI (and SERDES)
6119 */
6120
6121 /* GMII related */
6122
6123 /*
6124 * wm_gmii_reset:
6125 *
6126 * Reset the PHY.
6127 */
6128 static void
6129 wm_gmii_reset(struct wm_softc *sc)
6130 {
6131 uint32_t reg;
6132 int rv;
6133
6134 /* get phy semaphore */
6135 switch (sc->sc_type) {
6136 case WM_T_82571:
6137 case WM_T_82572:
6138 case WM_T_82573:
6139 case WM_T_82574:
6140 case WM_T_82583:
6141 /* XXX should get sw semaphore, too */
6142 rv = wm_get_swsm_semaphore(sc);
6143 break;
6144 case WM_T_82575:
6145 case WM_T_82576:
6146 case WM_T_82580:
6147 case WM_T_I350:
6148 case WM_T_I354:
6149 case WM_T_I210:
6150 case WM_T_I211:
6151 case WM_T_80003:
6152 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6153 break;
6154 case WM_T_ICH8:
6155 case WM_T_ICH9:
6156 case WM_T_ICH10:
6157 case WM_T_PCH:
6158 case WM_T_PCH2:
6159 case WM_T_PCH_LPT:
6160 rv = wm_get_swfwhw_semaphore(sc);
6161 break;
6162 default:
		/* nothing to do */
6164 rv = 0;
6165 break;
6166 }
6167 if (rv != 0) {
6168 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6169 __func__);
6170 return;
6171 }
6172
6173 switch (sc->sc_type) {
6174 case WM_T_82542_2_0:
6175 case WM_T_82542_2_1:
6176 /* null */
6177 break;
6178 case WM_T_82543:
6179 /*
6180 * With 82543, we need to force speed and duplex on the MAC
6181 * equal to what the PHY speed and duplex configuration is.
6182 * In addition, we need to perform a hardware reset on the PHY
6183 * to take it out of reset.
6184 */
6185 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6186 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6187
6188 /* The PHY reset pin is active-low. */
6189 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6190 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6191 CTRL_EXT_SWDPIN(4));
6192 reg |= CTRL_EXT_SWDPIO(4);
6193
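		/*
		 * At this point SWDPIN 4 is configured as an output
		 * driven low, which (the pin being active-low) holds
		 * the PHY in reset; the second write below raises the
		 * pin again to release it.
		 */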
6194 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6195 CSR_WRITE_FLUSH(sc);
6196 delay(10*1000);
6197
6198 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6199 CSR_WRITE_FLUSH(sc);
6200 delay(150);
6201 #if 0
6202 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6203 #endif
6204 delay(20*1000); /* XXX extra delay to get PHY ID? */
6205 break;
6206 case WM_T_82544: /* reset 10000us */
6207 case WM_T_82540:
6208 case WM_T_82545:
6209 case WM_T_82545_3:
6210 case WM_T_82546:
6211 case WM_T_82546_3:
6212 case WM_T_82541:
6213 case WM_T_82541_2:
6214 case WM_T_82547:
6215 case WM_T_82547_2:
6216 case WM_T_82571: /* reset 100us */
6217 case WM_T_82572:
6218 case WM_T_82573:
6219 case WM_T_82574:
6220 case WM_T_82575:
6221 case WM_T_82576:
6222 case WM_T_82580:
6223 case WM_T_I350:
6224 case WM_T_I354:
6225 case WM_T_I210:
6226 case WM_T_I211:
6227 case WM_T_82583:
6228 case WM_T_80003:
6229 /* generic reset */
6230 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6231 CSR_WRITE_FLUSH(sc);
6232 delay(20000);
6233 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6234 CSR_WRITE_FLUSH(sc);
6235 delay(20000);
6236
6237 if ((sc->sc_type == WM_T_82541)
6238 || (sc->sc_type == WM_T_82541_2)
6239 || (sc->sc_type == WM_T_82547)
6240 || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for the igp are done in igp_reset() */
6242 /* XXX add code to set LED after phy reset */
6243 }
6244 break;
6245 case WM_T_ICH8:
6246 case WM_T_ICH9:
6247 case WM_T_ICH10:
6248 case WM_T_PCH:
6249 case WM_T_PCH2:
6250 case WM_T_PCH_LPT:
6251 /* generic reset */
6252 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6253 CSR_WRITE_FLUSH(sc);
6254 delay(100);
6255 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6256 CSR_WRITE_FLUSH(sc);
6257 delay(150);
6258 break;
6259 default:
6260 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6261 __func__);
6262 break;
6263 }
6264
6265 /* release PHY semaphore */
6266 switch (sc->sc_type) {
6267 case WM_T_82571:
6268 case WM_T_82572:
6269 case WM_T_82573:
6270 case WM_T_82574:
6271 case WM_T_82583:
6272 /* XXX should put sw semaphore, too */
6273 wm_put_swsm_semaphore(sc);
6274 break;
6275 case WM_T_82575:
6276 case WM_T_82576:
6277 case WM_T_82580:
6278 case WM_T_I350:
6279 case WM_T_I354:
6280 case WM_T_I210:
6281 case WM_T_I211:
6282 case WM_T_80003:
6283 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6284 break;
6285 case WM_T_ICH8:
6286 case WM_T_ICH9:
6287 case WM_T_ICH10:
6288 case WM_T_PCH:
6289 case WM_T_PCH2:
6290 case WM_T_PCH_LPT:
6291 wm_put_swfwhw_semaphore(sc);
6292 break;
6293 default:
		/* nothing to do */
6295 rv = 0;
6296 break;
6297 }
6298
6299 /* get_cfg_done */
6300 wm_get_cfg_done(sc);
6301
6302 /* extra setup */
6303 switch (sc->sc_type) {
6304 case WM_T_82542_2_0:
6305 case WM_T_82542_2_1:
6306 case WM_T_82543:
6307 case WM_T_82544:
6308 case WM_T_82540:
6309 case WM_T_82545:
6310 case WM_T_82545_3:
6311 case WM_T_82546:
6312 case WM_T_82546_3:
6313 case WM_T_82541_2:
6314 case WM_T_82547_2:
6315 case WM_T_82571:
6316 case WM_T_82572:
6317 case WM_T_82573:
6318 case WM_T_82574:
6319 case WM_T_82575:
6320 case WM_T_82576:
6321 case WM_T_82580:
6322 case WM_T_I350:
6323 case WM_T_I354:
6324 case WM_T_I210:
6325 case WM_T_I211:
6326 case WM_T_82583:
6327 case WM_T_80003:
6328 /* null */
6329 break;
6330 case WM_T_82541:
6331 case WM_T_82547:
		/* XXX Configure the LED after the PHY reset */
6333 break;
6334 case WM_T_ICH8:
6335 case WM_T_ICH9:
6336 case WM_T_ICH10:
6337 case WM_T_PCH:
6338 case WM_T_PCH2:
6339 case WM_T_PCH_LPT:
		/* Allow time for h/w to get to a quiescent state after reset */
6341 delay(10*1000);
6342
6343 if (sc->sc_type == WM_T_PCH)
6344 wm_hv_phy_workaround_ich8lan(sc);
6345
6346 if (sc->sc_type == WM_T_PCH2)
6347 wm_lv_phy_workaround_ich8lan(sc);
6348
6349 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6350 /*
6351 * dummy read to clear the phy wakeup bit after lcd
6352 * reset
6353 */
6354 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6355 }
6356
		/*
		 * XXX Configure the LCD with the extended configuration
		 * region in NVM
		 */
6361
6362 /* Configure the LCD with the OEM bits in NVM */
6363 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6364 || (sc->sc_type == WM_T_PCH_LPT)) {
6365 /*
6366 * Disable LPLU.
6367 * XXX It seems that 82567 has LPLU, too.
6368 */
6369 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6370 reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
6371 reg |= HV_OEM_BITS_ANEGNOW;
6372 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6373 }
6374 break;
6375 default:
6376 panic("%s: unknown type\n", __func__);
6377 break;
6378 }
6379 }
6380
6381 /*
6382 * wm_get_phy_id_82575:
6383 *
6384 * Return PHY ID. Return -1 if it failed.
6385 */
6386 static int
6387 wm_get_phy_id_82575(struct wm_softc *sc)
6388 {
6389 uint32_t reg;
6390 int phyid = -1;
6391
6392 /* XXX */
6393 if ((sc->sc_flags & WM_F_SGMII) == 0)
6394 return -1;
6395
6396 if (wm_sgmii_uses_mdio(sc)) {
6397 switch (sc->sc_type) {
6398 case WM_T_82575:
6399 case WM_T_82576:
6400 reg = CSR_READ(sc, WMREG_MDIC);
6401 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6402 break;
6403 case WM_T_82580:
6404 case WM_T_I350:
6405 case WM_T_I354:
6406 case WM_T_I210:
6407 case WM_T_I211:
6408 reg = CSR_READ(sc, WMREG_MDICNFG);
6409 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6410 break;
6411 default:
6412 return -1;
6413 }
6414 }
6415
6416 return phyid;
6417 }
6418
6419
6420 /*
6421 * wm_gmii_mediainit:
6422 *
6423 * Initialize media for use on 1000BASE-T devices.
6424 */
6425 static void
6426 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6427 {
6428 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6429 struct mii_data *mii = &sc->sc_mii;
6430 uint32_t reg;
6431
6432 /* We have GMII. */
6433 sc->sc_flags |= WM_F_HAS_MII;
6434
6435 if (sc->sc_type == WM_T_80003)
6436 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6437 else
6438 sc->sc_tipg = TIPG_1000T_DFLT;
6439
6440 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
6441 if ((sc->sc_type == WM_T_82580)
6442 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6443 || (sc->sc_type == WM_T_I211)) {
6444 reg = CSR_READ(sc, WMREG_PHPM);
6445 reg &= ~PHPM_GO_LINK_D;
6446 CSR_WRITE(sc, WMREG_PHPM, reg);
6447 }
6448
6449 /*
6450 * Let the chip set speed/duplex on its own based on
6451 * signals from the PHY.
6452 * XXXbouyer - I'm not sure this is right for the 80003,
6453 * the em driver only sets CTRL_SLU here - but it seems to work.
6454 */
6455 sc->sc_ctrl |= CTRL_SLU;
6456 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6457
6458 /* Initialize our media structures and probe the GMII. */
6459 mii->mii_ifp = ifp;
6460
6461 /*
6462 * Determine the PHY access method.
6463 *
6464 * For SGMII, use SGMII specific method.
6465 *
6466 * For some devices, we can determine the PHY access method
6467 * from sc_type.
6468 *
6469 * For ICH and PCH variants, it's difficult to determine the PHY
6470 * access method by sc_type, so use the PCI product ID for some
6471 * devices.
	 * For other ICH8 variants, try igp's method first; if the PHY
	 * cannot be detected, fall back to bm's method.
6474 */
6475 switch (prodid) {
6476 case PCI_PRODUCT_INTEL_PCH_M_LM:
6477 case PCI_PRODUCT_INTEL_PCH_M_LC:
6478 /* 82577 */
6479 sc->sc_phytype = WMPHY_82577;
6480 break;
6481 case PCI_PRODUCT_INTEL_PCH_D_DM:
6482 case PCI_PRODUCT_INTEL_PCH_D_DC:
6483 /* 82578 */
6484 sc->sc_phytype = WMPHY_82578;
6485 break;
6486 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6487 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6488 /* 82579 */
6489 sc->sc_phytype = WMPHY_82579;
6490 break;
6491 case PCI_PRODUCT_INTEL_82801I_BM:
6492 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6493 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6494 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6495 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6496 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6497 /* 82567 */
6498 sc->sc_phytype = WMPHY_BM;
6499 mii->mii_readreg = wm_gmii_bm_readreg;
6500 mii->mii_writereg = wm_gmii_bm_writereg;
6501 break;
6502 default:
6503 if (((sc->sc_flags & WM_F_SGMII) != 0)
6504 && !wm_sgmii_uses_mdio(sc)){
6505 mii->mii_readreg = wm_sgmii_readreg;
6506 mii->mii_writereg = wm_sgmii_writereg;
6507 } else if (sc->sc_type >= WM_T_80003) {
6508 mii->mii_readreg = wm_gmii_i80003_readreg;
6509 mii->mii_writereg = wm_gmii_i80003_writereg;
6510 } else if (sc->sc_type >= WM_T_I210) {
6511 mii->mii_readreg = wm_gmii_i82544_readreg;
6512 mii->mii_writereg = wm_gmii_i82544_writereg;
6513 } else if (sc->sc_type >= WM_T_82580) {
6514 sc->sc_phytype = WMPHY_82580;
6515 mii->mii_readreg = wm_gmii_82580_readreg;
6516 mii->mii_writereg = wm_gmii_82580_writereg;
6517 } else if (sc->sc_type >= WM_T_82544) {
6518 mii->mii_readreg = wm_gmii_i82544_readreg;
6519 mii->mii_writereg = wm_gmii_i82544_writereg;
6520 } else {
6521 mii->mii_readreg = wm_gmii_i82543_readreg;
6522 mii->mii_writereg = wm_gmii_i82543_writereg;
6523 }
6524 break;
6525 }
6526 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
6527 /* All PCH* use _hv_ */
6528 mii->mii_readreg = wm_gmii_hv_readreg;
6529 mii->mii_writereg = wm_gmii_hv_writereg;
6530 }
6531 mii->mii_statchg = wm_gmii_statchg;
6532
6533 wm_gmii_reset(sc);
6534
6535 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6536 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6537 wm_gmii_mediastatus);
6538
6539 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6540 || (sc->sc_type == WM_T_82580)
6541 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6542 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6543 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6544 /* Attach only one port */
6545 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6546 MII_OFFSET_ANY, MIIF_DOPAUSE);
6547 } else {
6548 int i, id;
6549 uint32_t ctrl_ext;
6550
6551 id = wm_get_phy_id_82575(sc);
6552 if (id != -1) {
6553 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6554 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6555 }
6556 if ((id == -1)
6557 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6558 /* Power on sgmii phy if it is disabled */
6559 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6560 CSR_WRITE(sc, WMREG_CTRL_EXT,
6561 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6562 CSR_WRITE_FLUSH(sc);
6563 delay(300*1000); /* XXX too long */
6564
				/* Try PHY addresses 1 through 7 */
6566 for (i = 1; i < 8; i++)
6567 mii_attach(sc->sc_dev, &sc->sc_mii,
6568 0xffffffff, i, MII_OFFSET_ANY,
6569 MIIF_DOPAUSE);
6570
6571 /* restore previous sfp cage power state */
6572 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6573 }
6574 }
6575 } else {
6576 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6577 MII_OFFSET_ANY, MIIF_DOPAUSE);
6578 }
6579
6580 /*
6581 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6582 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6583 */
6584 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6585 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6586 wm_set_mdio_slow_mode_hv(sc);
6587 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6588 MII_OFFSET_ANY, MIIF_DOPAUSE);
6589 }
6590
6591 /*
6592 * (For ICH8 variants)
6593 * If PHY detection failed, use BM's r/w function and retry.
6594 */
6595 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6596 /* if failed, retry with *_bm_* */
6597 mii->mii_readreg = wm_gmii_bm_readreg;
6598 mii->mii_writereg = wm_gmii_bm_writereg;
6599
6600 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6601 MII_OFFSET_ANY, MIIF_DOPAUSE);
6602 }
6603
6604 if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
6606 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6607 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6608 sc->sc_phytype = WMPHY_NONE;
6609 } else {
6610 /*
6611 * PHY Found!
6612 * Check PHY type.
6613 */
6614 uint32_t model;
6615 struct mii_softc *child;
6616
6617 child = LIST_FIRST(&mii->mii_phys);
6618 if (device_is_a(child->mii_dev, "igphy")) {
6619 struct igphy_softc *isc = (struct igphy_softc *)child;
6620
6621 model = isc->sc_mii.mii_mpd_model;
6622 if (model == MII_MODEL_yyINTEL_I82566)
6623 sc->sc_phytype = WMPHY_IGP_3;
6624 }
6625
6626 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6627 }
6628 }
6629
6630 /*
6631 * wm_gmii_mediastatus: [ifmedia interface function]
6632 *
6633 * Get the current interface media status on a 1000BASE-T device.
6634 */
6635 static void
6636 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6637 {
6638 struct wm_softc *sc = ifp->if_softc;
6639
6640 ether_mediastatus(ifp, ifmr);
6641 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6642 | sc->sc_flowflags;
6643 }
6644
6645 /*
6646 * wm_gmii_mediachange: [ifmedia interface function]
6647 *
6648 * Set hardware to newly-selected media on a 1000BASE-T device.
6649 */
6650 static int
6651 wm_gmii_mediachange(struct ifnet *ifp)
6652 {
6653 struct wm_softc *sc = ifp->if_softc;
6654 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6655 int rc;
6656
6657 if ((ifp->if_flags & IFF_UP) == 0)
6658 return 0;
6659
6660 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6661 sc->sc_ctrl |= CTRL_SLU;
6662 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6663 || (sc->sc_type > WM_T_82543)) {
6664 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6665 } else {
6666 sc->sc_ctrl &= ~CTRL_ASDE;
6667 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6668 if (ife->ifm_media & IFM_FDX)
6669 sc->sc_ctrl |= CTRL_FD;
6670 switch (IFM_SUBTYPE(ife->ifm_media)) {
6671 case IFM_10_T:
6672 sc->sc_ctrl |= CTRL_SPEED_10;
6673 break;
6674 case IFM_100_TX:
6675 sc->sc_ctrl |= CTRL_SPEED_100;
6676 break;
6677 case IFM_1000_T:
6678 sc->sc_ctrl |= CTRL_SPEED_1000;
6679 break;
6680 default:
6681 panic("wm_gmii_mediachange: bad media 0x%x",
6682 ife->ifm_media);
6683 }
6684 }
6685 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6686 if (sc->sc_type <= WM_T_82543)
6687 wm_gmii_reset(sc);
6688
6689 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6690 return 0;
6691 return rc;
6692 }
6693
6694 #define MDI_IO CTRL_SWDPIN(2)
6695 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6696 #define MDI_CLK CTRL_SWDPIN(3)
6697
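/*
 * wm_i82543_mii_sendbits:
 *
 *	Bit-bang bits out on the MDIO pin, MSB first, toggling MDC once
 *	per bit.  The callers use this to build standard IEEE 802.3
 *	clause 22 management frames: a 32-bit preamble of ones, a start
 *	pattern, an opcode, and the 5-bit PHY and register addresses,
 *	followed (for writes) by a turnaround pattern and the 16 data
 *	bits.  wm_i82543_mii_recvbits() clocks the reply bits back in
 *	the same way.
 */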
6698 static void
6699 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6700 {
6701 uint32_t i, v;
6702
6703 v = CSR_READ(sc, WMREG_CTRL);
6704 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6705 v |= MDI_DIR | CTRL_SWDPIO(3);
6706
6707 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6708 if (data & i)
6709 v |= MDI_IO;
6710 else
6711 v &= ~MDI_IO;
6712 CSR_WRITE(sc, WMREG_CTRL, v);
6713 CSR_WRITE_FLUSH(sc);
6714 delay(10);
6715 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6716 CSR_WRITE_FLUSH(sc);
6717 delay(10);
6718 CSR_WRITE(sc, WMREG_CTRL, v);
6719 CSR_WRITE_FLUSH(sc);
6720 delay(10);
6721 }
6722 }
6723
6724 static uint32_t
6725 wm_i82543_mii_recvbits(struct wm_softc *sc)
6726 {
6727 uint32_t v, i, data = 0;
6728
6729 v = CSR_READ(sc, WMREG_CTRL);
6730 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6731 v |= CTRL_SWDPIO(3);
6732
6733 CSR_WRITE(sc, WMREG_CTRL, v);
6734 CSR_WRITE_FLUSH(sc);
6735 delay(10);
6736 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6737 CSR_WRITE_FLUSH(sc);
6738 delay(10);
6739 CSR_WRITE(sc, WMREG_CTRL, v);
6740 CSR_WRITE_FLUSH(sc);
6741 delay(10);
6742
6743 for (i = 0; i < 16; i++) {
6744 data <<= 1;
6745 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6746 CSR_WRITE_FLUSH(sc);
6747 delay(10);
6748 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6749 data |= 1;
6750 CSR_WRITE(sc, WMREG_CTRL, v);
6751 CSR_WRITE_FLUSH(sc);
6752 delay(10);
6753 }
6754
6755 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6756 CSR_WRITE_FLUSH(sc);
6757 delay(10);
6758 CSR_WRITE(sc, WMREG_CTRL, v);
6759 CSR_WRITE_FLUSH(sc);
6760 delay(10);
6761
6762 return data;
6763 }
6764
6765 #undef MDI_IO
6766 #undef MDI_DIR
6767 #undef MDI_CLK
6768
6769 /*
6770 * wm_gmii_i82543_readreg: [mii interface function]
6771 *
6772 * Read a PHY register on the GMII (i82543 version).
6773 */
6774 static int
6775 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6776 {
6777 struct wm_softc *sc = device_private(self);
6778 int rv;
6779
6780 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6781 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
6782 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6783 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
6784
6785 DPRINTF(WM_DEBUG_GMII,
6786 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6787 device_xname(sc->sc_dev), phy, reg, rv));
6788
6789 return rv;
6790 }
6791
6792 /*
6793 * wm_gmii_i82543_writereg: [mii interface function]
6794 *
6795 * Write a PHY register on the GMII (i82543 version).
6796 */
6797 static void
6798 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6799 {
6800 struct wm_softc *sc = device_private(self);
6801
6802 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6803 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6804 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6805 (MII_COMMAND_START << 30), 32);
6806 }
6807
6808 /*
6809 * wm_gmii_i82544_readreg: [mii interface function]
6810 *
6811 * Read a PHY register on the GMII.
6812 */
6813 static int
6814 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6815 {
6816 struct wm_softc *sc = device_private(self);
6817 uint32_t mdic = 0;
6818 int i, rv;
6819
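	/*
	 * The MDI control register drives the MDIO transaction in
	 * hardware: program the opcode, PHY address and register
	 * address, then poll until the device sets MDIC_READY (or
	 * flags an error) and read the data back from the same
	 * register.
	 */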
6820 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6821 MDIC_REGADD(reg));
6822
6823 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6824 mdic = CSR_READ(sc, WMREG_MDIC);
6825 if (mdic & MDIC_READY)
6826 break;
6827 delay(50);
6828 }
6829
6830 if ((mdic & MDIC_READY) == 0) {
6831 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6832 device_xname(sc->sc_dev), phy, reg);
6833 rv = 0;
6834 } else if (mdic & MDIC_E) {
6835 #if 0 /* This is normal if no PHY is present. */
6836 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6837 device_xname(sc->sc_dev), phy, reg);
6838 #endif
6839 rv = 0;
6840 } else {
6841 rv = MDIC_DATA(mdic);
6842 if (rv == 0xffff)
6843 rv = 0;
6844 }
6845
6846 return rv;
6847 }
6848
6849 /*
6850 * wm_gmii_i82544_writereg: [mii interface function]
6851 *
6852 * Write a PHY register on the GMII.
6853 */
6854 static void
6855 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6856 {
6857 struct wm_softc *sc = device_private(self);
6858 uint32_t mdic = 0;
6859 int i;
6860
6861 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6862 MDIC_REGADD(reg) | MDIC_DATA(val));
6863
6864 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6865 mdic = CSR_READ(sc, WMREG_MDIC);
6866 if (mdic & MDIC_READY)
6867 break;
6868 delay(50);
6869 }
6870
6871 if ((mdic & MDIC_READY) == 0)
6872 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6873 device_xname(sc->sc_dev), phy, reg);
6874 else if (mdic & MDIC_E)
6875 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6876 device_xname(sc->sc_dev), phy, reg);
6877 }
6878
6879 /*
6880 * wm_gmii_i80003_readreg: [mii interface function]
6881 *
6882 * Read a PHY register on the kumeran
6883 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6885 */
6886 static int
6887 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6888 {
6889 struct wm_softc *sc = device_private(self);
6890 int sem;
6891 int rv;
6892
6893 if (phy != 1) /* only one PHY on kumeran bus */
6894 return 0;
6895
6896 sem = swfwphysem[sc->sc_funcid];
6897 if (wm_get_swfw_semaphore(sc, sem)) {
6898 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6899 __func__);
6900 return 0;
6901 }
6902
6903 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6904 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6905 reg >> GG82563_PAGE_SHIFT);
6906 } else {
6907 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6908 reg >> GG82563_PAGE_SHIFT);
6909 }
	/* Wait an extra 200us to work around an MDIC ready-bit bug */
6911 delay(200);
6912 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6913 delay(200);
6914
6915 wm_put_swfw_semaphore(sc, sem);
6916 return rv;
6917 }
6918
6919 /*
6920 * wm_gmii_i80003_writereg: [mii interface function]
6921 *
6922 * Write a PHY register on the kumeran.
6923 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6925 */
6926 static void
6927 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6928 {
6929 struct wm_softc *sc = device_private(self);
6930 int sem;
6931
6932 if (phy != 1) /* only one PHY on kumeran bus */
6933 return;
6934
6935 sem = swfwphysem[sc->sc_funcid];
6936 if (wm_get_swfw_semaphore(sc, sem)) {
6937 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6938 __func__);
6939 return;
6940 }
6941
6942 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6943 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6944 reg >> GG82563_PAGE_SHIFT);
6945 } else {
6946 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6947 reg >> GG82563_PAGE_SHIFT);
6948 }
	/* Wait an extra 200us to work around an MDIC ready-bit bug */
6950 delay(200);
6951 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6952 delay(200);
6953
6954 wm_put_swfw_semaphore(sc, sem);
6955 }
6956
6957 /*
6958 * wm_gmii_bm_readreg: [mii interface function]
6959 *
 *	Read a PHY register on the BM PHYs (ICH8 family 82567 variants).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6963 */
6964 static int
6965 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6966 {
6967 struct wm_softc *sc = device_private(self);
6968 int sem;
6969 int rv;
6970
6971 sem = swfwphysem[sc->sc_funcid];
6972 if (wm_get_swfw_semaphore(sc, sem)) {
6973 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6974 __func__);
6975 return 0;
6976 }
6977
6978 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6979 if (phy == 1)
6980 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6981 reg);
6982 else
6983 wm_gmii_i82544_writereg(self, phy,
6984 GG82563_PHY_PAGE_SELECT,
6985 reg >> GG82563_PAGE_SHIFT);
6986 }
6987
6988 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6989 wm_put_swfw_semaphore(sc, sem);
6990 return rv;
6991 }
6992
6993 /*
6994 * wm_gmii_bm_writereg: [mii interface function]
6995 *
 *	Write a PHY register on the BM PHYs (ICH8 family 82567 variants).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6999 */
7000 static void
7001 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
7002 {
7003 struct wm_softc *sc = device_private(self);
7004 int sem;
7005
7006 sem = swfwphysem[sc->sc_funcid];
7007 if (wm_get_swfw_semaphore(sc, sem)) {
7008 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7009 __func__);
7010 return;
7011 }
7012
7013 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7014 if (phy == 1)
7015 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7016 reg);
7017 else
7018 wm_gmii_i82544_writereg(self, phy,
7019 GG82563_PHY_PAGE_SELECT,
7020 reg >> GG82563_PAGE_SHIFT);
7021 }
7022
7023 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7024 wm_put_swfw_semaphore(sc, sem);
7025 }
7026
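/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register.  The wakeup registers
 *	live on page 800, which can only be reached indirectly: enable
 *	host wakeup access via the enable register on page 769, write
 *	the target register number to the address opcode register,
 *	move the data through the data opcode register, and finally
 *	restore the page 769 enable bits.
 */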
7027 static void
7028 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
7029 {
7030 struct wm_softc *sc = device_private(self);
7031 uint16_t regnum = BM_PHY_REG_NUM(offset);
7032 uint16_t wuce;
7033
7034 /* XXX Gig must be disabled for MDIO accesses to page 800 */
7035 if (sc->sc_type == WM_T_PCH) {
		/* XXX the e1000 driver does nothing here... why? */
7037 }
7038
7039 /* Set page 769 */
7040 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7041 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7042
7043 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
7044
7045 wuce &= ~BM_WUC_HOST_WU_BIT;
7046 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
7047 wuce | BM_WUC_ENABLE_BIT);
7048
7049 /* Select page 800 */
7050 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7051 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
7052
7053 /* Write page 800 */
7054 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
7055
7056 if (rd)
7057 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7058 else
7059 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7060
7061 /* Set page 769 */
7062 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7063 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7064
7065 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7066 }
7067
7068 /*
7069 * wm_gmii_hv_readreg: [mii interface function]
7070 *
 *	Read a PHY register on the HV family PHYs (all PCH variants).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7074 */
7075 static int
7076 wm_gmii_hv_readreg(device_t self, int phy, int reg)
7077 {
7078 struct wm_softc *sc = device_private(self);
7079 uint16_t page = BM_PHY_REG_PAGE(reg);
7080 uint16_t regnum = BM_PHY_REG_NUM(reg);
7081 uint16_t val;
7082 int rv;
7083
7084 if (wm_get_swfwhw_semaphore(sc)) {
7085 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7086 __func__);
7087 return 0;
7088 }
7089
7090 /* XXX Workaround failure in MDIO access while cable is disconnected */
7091 if (sc->sc_phytype == WMPHY_82577) {
7092 /* XXX must write */
7093 }
7094
7095 /* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		/* Release the semaphore acquired above before returning. */
		wm_put_swfwhw_semaphore(sc);
		return val;
	}
7100
	/*
	 * Pages below 768 work differently from the rest and would
	 * need their own function (not implemented here).
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);
		return 0;
	}
7109
7110 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7111 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7112 page << BME1000_PAGE_SHIFT);
7113 }
7114
7115 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7116 wm_put_swfwhw_semaphore(sc);
7117 return rv;
7118 }
7119
7120 /*
7121 * wm_gmii_hv_writereg: [mii interface function]
7122 *
 *	Write a PHY register on the HV family PHYs (all PCH variants).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7126 */
7127 static void
7128 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7129 {
7130 struct wm_softc *sc = device_private(self);
7131 uint16_t page = BM_PHY_REG_PAGE(reg);
7132 uint16_t regnum = BM_PHY_REG_NUM(reg);
7133
7134 if (wm_get_swfwhw_semaphore(sc)) {
7135 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7136 __func__);
7137 return;
7138 }
7139
7140 /* XXX Workaround failure in MDIO access while cable is disconnected */
7141
7142 /* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		uint16_t tmp;

		tmp = val;
		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		/* Release the semaphore acquired above before returning. */
		wm_put_swfwhw_semaphore(sc);
		return;
	}
7150
	/*
	 * Pages below 768 work differently from the rest and would
	 * need their own function (not implemented here).
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);
		return;
	}
7159
7160 /*
7161 * XXX Workaround MDIO accesses being disabled after entering IEEE
7162 * Power Down (whenever bit 11 of the PHY control register is set)
7163 */
7164
7165 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7166 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7167 page << BME1000_PAGE_SHIFT);
7168 }
7169
7170 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7171 wm_put_swfwhw_semaphore(sc);
7172 }
7173
7174 /*
7175 * wm_gmii_82580_readreg: [mii interface function]
7176 *
7177 * Read a PHY register on the 82580 and I350.
7178 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7180 */
7181 static int
7182 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7183 {
7184 struct wm_softc *sc = device_private(self);
7185 int sem;
7186 int rv;
7187
7188 sem = swfwphysem[sc->sc_funcid];
7189 if (wm_get_swfw_semaphore(sc, sem)) {
7190 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7191 __func__);
7192 return 0;
7193 }
7194
7195 rv = wm_gmii_i82544_readreg(self, phy, reg);
7196
7197 wm_put_swfw_semaphore(sc, sem);
7198 return rv;
7199 }
7200
7201 /*
7202 * wm_gmii_82580_writereg: [mii interface function]
7203 *
7204 * Write a PHY register on the 82580 and I350.
7205 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7207 */
7208 static void
7209 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7210 {
7211 struct wm_softc *sc = device_private(self);
7212 int sem;
7213
7214 sem = swfwphysem[sc->sc_funcid];
7215 if (wm_get_swfw_semaphore(sc, sem)) {
7216 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7217 __func__);
7218 return;
7219 }
7220
7221 wm_gmii_i82544_writereg(self, phy, reg, val);
7222
7223 wm_put_swfw_semaphore(sc, sem);
7224 }
7225
7226 /*
7227 * wm_gmii_statchg: [mii interface function]
7228 *
7229 * Callback from MII layer when media changes.
7230 */
7231 static void
7232 wm_gmii_statchg(struct ifnet *ifp)
7233 {
7234 struct wm_softc *sc = ifp->if_softc;
7235 struct mii_data *mii = &sc->sc_mii;
7236
7237 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7238 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7239 sc->sc_fcrtl &= ~FCRTL_XONE;
7240
7241 /*
7242 * Get flow control negotiation result.
7243 */
7244 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7245 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7246 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7247 mii->mii_media_active &= ~IFM_ETH_FMASK;
7248 }
7249
7250 if (sc->sc_flowflags & IFM_FLOW) {
7251 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7252 sc->sc_ctrl |= CTRL_TFCE;
7253 sc->sc_fcrtl |= FCRTL_XONE;
7254 }
7255 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7256 sc->sc_ctrl |= CTRL_RFCE;
7257 }
7258
7259 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7260 DPRINTF(WM_DEBUG_LINK,
7261 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7262 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7263 } else {
7264 DPRINTF(WM_DEBUG_LINK,
7265 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7266 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7267 }
7268
7269 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7270 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7271 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7272 : WMREG_FCRTL, sc->sc_fcrtl);
7273 if (sc->sc_type == WM_T_80003) {
7274 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7275 case IFM_1000_T:
7276 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7277 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7278 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7279 break;
7280 default:
7281 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7282 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7283 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7284 break;
7285 }
7286 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7287 }
7288 }
7289
7290 /*
7291 * wm_kmrn_readreg:
7292 *
7293 * Read a kumeran register
7294 */
7295 static int
7296 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7297 {
7298 int rv;
7299
7300 if (sc->sc_flags & WM_F_LOCK_SWFW) {
7301 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7302 aprint_error_dev(sc->sc_dev,
7303 "%s: failed to get semaphore\n", __func__);
7304 return 0;
7305 }
7306 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
7307 if (wm_get_swfwhw_semaphore(sc)) {
7308 aprint_error_dev(sc->sc_dev,
7309 "%s: failed to get semaphore\n", __func__);
7310 return 0;
7311 }
7312 }
7313
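	/*
	 * Start the read by writing the register offset together with
	 * the read-enable bit, give the Kumeran interface a moment to
	 * complete the transfer, then fetch the data from the same
	 * register.
	 */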
7314 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7315 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7316 KUMCTRLSTA_REN);
7317 CSR_WRITE_FLUSH(sc);
7318 delay(2);
7319
7320 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7321
7322 if (sc->sc_flags & WM_F_LOCK_SWFW)
7323 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7324 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
7325 wm_put_swfwhw_semaphore(sc);
7326
7327 return rv;
7328 }
7329
7330 /*
7331 * wm_kmrn_writereg:
7332 *
7333 * Write a kumeran register
7334 */
7335 static void
7336 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7337 {
7338
7339 if (sc->sc_flags & WM_F_LOCK_SWFW) {
7340 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7341 aprint_error_dev(sc->sc_dev,
7342 "%s: failed to get semaphore\n", __func__);
7343 return;
7344 }
7345 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
7346 if (wm_get_swfwhw_semaphore(sc)) {
7347 aprint_error_dev(sc->sc_dev,
7348 "%s: failed to get semaphore\n", __func__);
7349 return;
7350 }
7351 }
7352
7353 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7354 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7355 (val & KUMCTRLSTA_MASK));
7356
7357 if (sc->sc_flags & WM_F_LOCK_SWFW)
7358 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7359 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
7360 wm_put_swfwhw_semaphore(sc);
7361 }
7362
7363 /* SGMII related */
7364
7365 /*
7366 * wm_sgmii_uses_mdio
7367 *
7368 * Check whether the transaction is to the internal PHY or the external
7369 * MDIO interface. Return true if it's MDIO.
7370 */
7371 static bool
7372 wm_sgmii_uses_mdio(struct wm_softc *sc)
7373 {
7374 uint32_t reg;
7375 bool ismdio = false;
7376
7377 switch (sc->sc_type) {
7378 case WM_T_82575:
7379 case WM_T_82576:
7380 reg = CSR_READ(sc, WMREG_MDIC);
7381 ismdio = ((reg & MDIC_DEST) != 0);
7382 break;
7383 case WM_T_82580:
7384 case WM_T_I350:
7385 case WM_T_I354:
7386 case WM_T_I210:
7387 case WM_T_I211:
7388 reg = CSR_READ(sc, WMREG_MDICNFG);
7389 ismdio = ((reg & MDICNFG_DEST) != 0);
7390 break;
7391 default:
7392 break;
7393 }
7394
7395 return ismdio;
7396 }
7397
7398 /*
7399 * wm_sgmii_readreg: [mii interface function]
7400 *
7401 * Read a PHY register on the SGMII
7402 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7404 */
7405 static int
7406 wm_sgmii_readreg(device_t self, int phy, int reg)
7407 {
7408 struct wm_softc *sc = device_private(self);
7409 uint32_t i2ccmd;
7410 int i, rv;
7411
7412 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7413 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7414 __func__);
7415 return 0;
7416 }
7417
7418 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7419 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7420 | I2CCMD_OPCODE_READ;
7421 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7422
7423 /* Poll the ready bit */
7424 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7425 delay(50);
7426 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7427 if (i2ccmd & I2CCMD_READY)
7428 break;
7429 }
7430 if ((i2ccmd & I2CCMD_READY) == 0)
7431 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7432 if ((i2ccmd & I2CCMD_ERROR) != 0)
7433 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7434
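	/*
	 * The I2C interface returns the two data bytes swapped with
	 * respect to host order, so swap them back before returning
	 * (the write path performs the mirror-image swap).
	 */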
7435 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7436
7437 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7438 return rv;
7439 }
7440
7441 /*
7442 * wm_sgmii_writereg: [mii interface function]
7443 *
7444 * Write a PHY register on the SGMII.
7445 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7447 */
7448 static void
7449 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7450 {
7451 struct wm_softc *sc = device_private(self);
7452 uint32_t i2ccmd;
7453 int i;
7454 int val_swapped;
7455
7456 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7457 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7458 __func__);
7459 return;
7460 }
7461 /* Swap the data bytes for the I2C interface */
7462 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
7463 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7464 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7465 | I2CCMD_OPCODE_WRITE | val_swapped;
7466 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7467
7468 /* Poll the ready bit */
7469 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7470 delay(50);
7471 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7472 if (i2ccmd & I2CCMD_READY)
7473 break;
7474 }
7475 if ((i2ccmd & I2CCMD_READY) == 0)
7476 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7477 if ((i2ccmd & I2CCMD_ERROR) != 0)
7478 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7479
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7481 }
7482
7483 /* TBI related */
7484
7485 /* XXX Currently TBI only */
7486 static int
7487 wm_check_for_link(struct wm_softc *sc)
7488 {
7489 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7490 uint32_t rxcw;
7491 uint32_t ctrl;
7492 uint32_t status;
7493 uint32_t sig;
7494
7495 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
7496 sc->sc_tbi_linkup = 1;
7497 return 0;
7498 }
7499
7500 rxcw = CSR_READ(sc, WMREG_RXCW);
7501 ctrl = CSR_READ(sc, WMREG_CTRL);
7502 status = CSR_READ(sc, WMREG_STATUS);
7503
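	/*
	 * 'sig' is presumably the value the signal-detect input
	 * (SWDPIN 1, loss-of-signal) takes when a signal is present;
	 * the sense of the pin differs on chips up to the 82544.
	 */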
7504 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7505
7506 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7507 device_xname(sc->sc_dev), __func__,
7508 ((ctrl & CTRL_SWDPIN(1)) == sig),
7509 ((status & STATUS_LU) != 0),
7510 ((rxcw & RXCW_C) != 0)
7511 ));
7512
7513 /*
7514 * SWDPIN LU RXCW
7515 * 0 0 0
7516 * 0 0 1 (should not happen)
7517 * 0 1 0 (should not happen)
7518 * 0 1 1 (should not happen)
7519 * 1 0 0 Disable autonego and force linkup
7520 * 1 0 1 got /C/ but not linkup yet
7521 * 1 1 0 (linkup)
7522 * 1 1 1 If IFM_AUTO, back to autonego
7523 *
7524 */
7525 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7526 && ((status & STATUS_LU) == 0)
7527 && ((rxcw & RXCW_C) == 0)) {
7528 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7529 __func__));
7530 sc->sc_tbi_linkup = 0;
7531 /* Disable auto-negotiation in the TXCW register */
7532 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7533
7534 /*
7535 * Force link-up and also force full-duplex.
7536 *
7537 	 * NOTE: the hardware updates TFCE and RFCE in CTRL automatically,
7538 * so we should update sc->sc_ctrl
7539 */
7540 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7541 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7542 } else if (((status & STATUS_LU) != 0)
7543 && ((rxcw & RXCW_C) != 0)
7544 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7545 sc->sc_tbi_linkup = 1;
7546 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7547 __func__));
7548 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7549 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7550 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7551 && ((rxcw & RXCW_C) != 0)) {
7552 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7553 } else {
7554 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7555 status));
7556 }
7557
7558 return 0;
7559 }
7560
7561 /*
7562 * wm_tbi_mediainit:
7563 *
7564 * Initialize media for use on 1000BASE-X devices.
7565 */
7566 static void
7567 wm_tbi_mediainit(struct wm_softc *sc)
7568 {
7569 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7570 const char *sep = "";
7571
7572 if (sc->sc_type < WM_T_82543)
7573 sc->sc_tipg = TIPG_WM_DFLT;
7574 else
7575 sc->sc_tipg = TIPG_LG_DFLT;
7576
7577 sc->sc_tbi_anegticks = 5;
7578
7579 /* Initialize our media structures */
7580 sc->sc_mii.mii_ifp = ifp;
7581
7582 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7583 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
7584 wm_tbi_mediastatus);
7585
7586 /*
7587 * SWD Pins:
7588 *
7589 * 0 = Link LED (output)
7590 * 1 = Loss Of Signal (input)
7591 */
7592 sc->sc_ctrl |= CTRL_SWDPIO(0);
7593 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
7594 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7595 sc->sc_ctrl &= ~CTRL_LRST;
7596
7597 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7598
7599 #define ADD(ss, mm, dd) \
7600 do { \
7601 aprint_normal("%s%s", sep, ss); \
7602 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
7603 sep = ", "; \
7604 } while (/*CONSTCOND*/0)
7605
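	/*
	 * For example, ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD) below
	 * both prints ", auto" into the attach banner and registers the
	 * matching IFM_ETHER|IFM_AUTO ifmedia entry, so the printed list
	 * and the ifmedia list cannot drift apart.
	 */
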
7606 aprint_normal_dev(sc->sc_dev, "");
7607
7608 /* Only 82545 is LX */
7609 if (sc->sc_type == WM_T_82545) {
7610 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
7611 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
7612 } else {
7613 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
7614 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
7615 }
7616 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
7617 aprint_normal("\n");
7618
7619 #undef ADD
7620
7621 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
7622 }
7623
7624 /*
7625 * wm_tbi_mediastatus: [ifmedia interface function]
7626 *
7627 * Get the current interface media status on a 1000BASE-X device.
7628 */
7629 static void
7630 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7631 {
7632 struct wm_softc *sc = ifp->if_softc;
7633 uint32_t ctrl, status;
7634
7635 ifmr->ifm_status = IFM_AVALID;
7636 ifmr->ifm_active = IFM_ETHER;
7637
7638 status = CSR_READ(sc, WMREG_STATUS);
7639 if ((status & STATUS_LU) == 0) {
7640 ifmr->ifm_active |= IFM_NONE;
7641 return;
7642 }
7643
7644 ifmr->ifm_status |= IFM_ACTIVE;
7645 /* Only 82545 is LX */
7646 if (sc->sc_type == WM_T_82545)
7647 ifmr->ifm_active |= IFM_1000_LX;
7648 else
7649 ifmr->ifm_active |= IFM_1000_SX;
7650 	if (status & STATUS_FD)
7651 ifmr->ifm_active |= IFM_FDX;
7652 else
7653 ifmr->ifm_active |= IFM_HDX;
7654 ctrl = CSR_READ(sc, WMREG_CTRL);
7655 if (ctrl & CTRL_RFCE)
7656 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
7657 if (ctrl & CTRL_TFCE)
7658 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
7659 }
7660
7661 /*
7662 * wm_tbi_mediachange: [ifmedia interface function]
7663 *
7664 * Set hardware to newly-selected media on a 1000BASE-X device.
7665 */
7666 static int
7667 wm_tbi_mediachange(struct ifnet *ifp)
7668 {
7669 struct wm_softc *sc = ifp->if_softc;
7670 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7671 uint32_t status;
7672 int i;
7673
7674 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7675 return 0;
7676
7677 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
7678 || (sc->sc_type >= WM_T_82575))
7679 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
7680
7681 /* XXX power_up_serdes_link_82575() */
7682
7683 sc->sc_ctrl &= ~CTRL_LRST;
7684 sc->sc_txcw = TXCW_ANE;
7685 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7686 sc->sc_txcw |= TXCW_FD | TXCW_HD;
7687 else if (ife->ifm_media & IFM_FDX)
7688 sc->sc_txcw |= TXCW_FD;
7689 else
7690 sc->sc_txcw |= TXCW_HD;
7691
7692 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
7693 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
7694
7695 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
7696 device_xname(sc->sc_dev), sc->sc_txcw));
7697 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7698 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7699 CSR_WRITE_FLUSH(sc);
7700 delay(1000);
7701
7702 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
7703 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
7704
7705 /*
7706 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
7707 * optics detect a signal, 0 if they don't.
7708 */
7709 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
7710 /* Have signal; wait for the link to come up. */
7711 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
7712 delay(10000);
7713 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
7714 break;
7715 }
7716
7717 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
7718 device_xname(sc->sc_dev),i));
7719
7720 status = CSR_READ(sc, WMREG_STATUS);
7721 DPRINTF(WM_DEBUG_LINK,
7722 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
7723 device_xname(sc->sc_dev),status, STATUS_LU));
7724 if (status & STATUS_LU) {
7725 /* Link is up. */
7726 DPRINTF(WM_DEBUG_LINK,
7727 ("%s: LINK: set media -> link up %s\n",
7728 device_xname(sc->sc_dev),
7729 (status & STATUS_FD) ? "FDX" : "HDX"));
7730
7731 /*
7732 * NOTE: CTRL will update TFCE and RFCE automatically,
7733 * so we should update sc->sc_ctrl
7734 */
7735 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7736 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7737 sc->sc_fcrtl &= ~FCRTL_XONE;
7738 if (status & STATUS_FD)
7739 sc->sc_tctl |=
7740 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7741 else
7742 sc->sc_tctl |=
7743 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7744 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
7745 sc->sc_fcrtl |= FCRTL_XONE;
7746 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7747 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7748 WMREG_OLD_FCRTL : WMREG_FCRTL,
7749 sc->sc_fcrtl);
7750 sc->sc_tbi_linkup = 1;
7751 } else {
7752 if (i == WM_LINKUP_TIMEOUT)
7753 wm_check_for_link(sc);
7754 /* Link is down. */
7755 DPRINTF(WM_DEBUG_LINK,
7756 ("%s: LINK: set media -> link down\n",
7757 device_xname(sc->sc_dev)));
7758 sc->sc_tbi_linkup = 0;
7759 }
7760 } else {
7761 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
7762 device_xname(sc->sc_dev)));
7763 sc->sc_tbi_linkup = 0;
7764 }
7765
7766 wm_tbi_set_linkled(sc);
7767
7768 return 0;
7769 }
7770
7771 /*
7772 * wm_tbi_set_linkled:
7773 *
7774 * Update the link LED on 1000BASE-X devices.
7775 */
7776 static void
7777 wm_tbi_set_linkled(struct wm_softc *sc)
7778 {
7779
7780 if (sc->sc_tbi_linkup)
7781 sc->sc_ctrl |= CTRL_SWDPIN(0);
7782 else
7783 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7784
7785 /* 82540 or newer devices are active low */
7786 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7787
7788 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7789 }
7790
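/*
 * A worked example of the XOR above, assuming the LED really is
 * active low on 82540 and newer parts: with the link up, CTRL_SWDPIN(0)
 * is first set and then toggled back to 0, so the pin drives low and
 * the LED lights; with the link down, the same XOR drives the pin high.
 */
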
7791 /*
7792 * wm_tbi_check_link:
7793 *
7794 * Check the link on 1000BASE-X devices.
7795 */
7796 static void
7797 wm_tbi_check_link(struct wm_softc *sc)
7798 {
7799 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7800 uint32_t status;
7801
7802 KASSERT(WM_TX_LOCKED(sc));
7803
7804 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
7805 sc->sc_tbi_linkup = 1;
7806 return;
7807 }
7808
7809 status = CSR_READ(sc, WMREG_STATUS);
7810
7811 /* XXX is this needed? */
7812 (void)CSR_READ(sc, WMREG_RXCW);
7813 (void)CSR_READ(sc, WMREG_CTRL);
7814
7815 /* set link status */
7816 if ((status & STATUS_LU) == 0) {
7817 DPRINTF(WM_DEBUG_LINK,
7818 ("%s: LINK: checklink -> down\n",
7819 device_xname(sc->sc_dev)));
7820 sc->sc_tbi_linkup = 0;
7821 } else if (sc->sc_tbi_linkup == 0) {
7822 DPRINTF(WM_DEBUG_LINK,
7823 ("%s: LINK: checklink -> up %s\n",
7824 device_xname(sc->sc_dev),
7825 (status & STATUS_FD) ? "FDX" : "HDX"));
7826 sc->sc_tbi_linkup = 1;
7827 }
7828
7829 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
7830 && ((status & STATUS_LU) == 0)) {
7831 sc->sc_tbi_linkup = 0;
7832 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7833 /* If the timer expired, retry autonegotiation */
7834 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
7835 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
7836 sc->sc_tbi_ticks = 0;
7837 /*
7838 * Reset the link, and let autonegotiation do
7839 * its thing
7840 */
7841 sc->sc_ctrl |= CTRL_LRST;
7842 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7843 CSR_WRITE_FLUSH(sc);
7844 delay(1000);
7845 sc->sc_ctrl &= ~CTRL_LRST;
7846 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7847 CSR_WRITE_FLUSH(sc);
7848 delay(1000);
7849 CSR_WRITE(sc, WMREG_TXCW,
7850 sc->sc_txcw & ~TXCW_ANE);
7851 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7852 }
7853 }
7854 }
7855
7856 wm_tbi_set_linkled(sc);
7857 }
7858
7859 /* SFP related */
7860
7861 static int
7862 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
7863 {
7864 uint32_t i2ccmd;
7865 int i;
7866
7867 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
7868 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7869
7870 /* Poll the ready bit */
7871 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7872 delay(50);
7873 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7874 if (i2ccmd & I2CCMD_READY)
7875 break;
7876 }
7877 if ((i2ccmd & I2CCMD_READY) == 0)
7878 return -1;
7879 if ((i2ccmd & I2CCMD_ERROR) != 0)
7880 return -1;
7881
7882 *data = i2ccmd & 0x00ff;
7883
7884 return 0;
7885 }
7886
7887 static uint32_t
7888 wm_sfp_get_media_type(struct wm_softc *sc)
7889 {
7890 uint32_t ctrl_ext;
7891 uint8_t val = 0;
7892 int timeout = 3;
7893 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
7894 int rv = -1;
7895
7896 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7897 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
7898 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
7899 CSR_WRITE_FLUSH(sc);
7900
7901 /* Read SFP module data */
7902 while (timeout) {
7903 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
7904 if (rv == 0)
7905 break;
7906 delay(100*1000); /* XXX too big */
7907 timeout--;
7908 }
7909 if (rv != 0)
7910 goto out;
7911 switch (val) {
7912 case SFF_SFP_ID_SFF:
7913 aprint_normal_dev(sc->sc_dev,
7914 "Module/Connector soldered to board\n");
7915 break;
7916 case SFF_SFP_ID_SFP:
7917 aprint_normal_dev(sc->sc_dev, "SFP\n");
7918 break;
7919 case SFF_SFP_ID_UNKNOWN:
7920 goto out;
7921 default:
7922 break;
7923 }
7924
7925 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
7926 if (rv != 0) {
7927 goto out;
7928 }
7929
7930 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
7931 mediatype = WM_MEDIATYPE_SERDES;
7932 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
7933 		sc->sc_flags |= WM_F_SGMII;
7934 		mediatype = WM_MEDIATYPE_COPPER;
7935 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
7936 sc->sc_flags |= WM_F_SGMII;
7937 mediatype = WM_MEDIATYPE_SERDES;
7938 }
7939
7940 out:
7941 /* Restore I2C interface setting */
7942 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7943
7944 return mediatype;
7945 }
7946 /*
7947 * NVM related.
7948 * Microwire, SPI (w/wo EERD) and Flash.
7949 */
7950
7951 /* Both spi and uwire */
7952
7953 /*
7954 * wm_eeprom_sendbits:
7955 *
7956 * Send a series of bits to the EEPROM.
7957 */
7958 static void
7959 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
7960 {
7961 uint32_t reg;
7962 int x;
7963
7964 reg = CSR_READ(sc, WMREG_EECD);
7965
7966 for (x = nbits; x > 0; x--) {
7967 if (bits & (1U << (x - 1)))
7968 reg |= EECD_DI;
7969 else
7970 reg &= ~EECD_DI;
7971 CSR_WRITE(sc, WMREG_EECD, reg);
7972 CSR_WRITE_FLUSH(sc);
7973 delay(2);
7974 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7975 CSR_WRITE_FLUSH(sc);
7976 delay(2);
7977 CSR_WRITE(sc, WMREG_EECD, reg);
7978 CSR_WRITE_FLUSH(sc);
7979 delay(2);
7980 }
7981 }
7982
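/*
 * A small worked example of the MSB-first framing above, assuming
 * UWIRE_OPC_READ is the usual 3-bit Microwire READ opcode (0b110):
 * wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3) presents DI = 1, 1, 0,
 * pulsing SK once per bit with about 2us of setup and hold time.
 */
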
7983 /*
7984 * wm_eeprom_recvbits:
7985 *
7986 * Receive a series of bits from the EEPROM.
7987 */
7988 static void
7989 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
7990 {
7991 uint32_t reg, val;
7992 int x;
7993
7994 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
7995
7996 val = 0;
7997 for (x = nbits; x > 0; x--) {
7998 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7999 CSR_WRITE_FLUSH(sc);
8000 delay(2);
8001 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
8002 val |= (1U << (x - 1));
8003 CSR_WRITE(sc, WMREG_EECD, reg);
8004 CSR_WRITE_FLUSH(sc);
8005 delay(2);
8006 }
8007 *valp = val;
8008 }
8009
8010 /* Microwire */
8011
8012 /*
8013 * wm_nvm_read_uwire:
8014 *
8015 * Read a word from the EEPROM using the MicroWire protocol.
8016 */
8017 static int
8018 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8019 {
8020 uint32_t reg, val;
8021 int i;
8022
8023 for (i = 0; i < wordcnt; i++) {
8024 /* Clear SK and DI. */
8025 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
8026 CSR_WRITE(sc, WMREG_EECD, reg);
8027
8028 /*
8029 * XXX: workaround for a bug in qemu-0.12.x and prior
8030 * and Xen.
8031 *
8032 * We use this workaround only for 82540 because qemu's
8033 * e1000 act as 82540.
8034 */
8035 if (sc->sc_type == WM_T_82540) {
8036 reg |= EECD_SK;
8037 CSR_WRITE(sc, WMREG_EECD, reg);
8038 reg &= ~EECD_SK;
8039 CSR_WRITE(sc, WMREG_EECD, reg);
8040 CSR_WRITE_FLUSH(sc);
8041 delay(2);
8042 }
8043 /* XXX: end of workaround */
8044
8045 /* Set CHIP SELECT. */
8046 reg |= EECD_CS;
8047 CSR_WRITE(sc, WMREG_EECD, reg);
8048 CSR_WRITE_FLUSH(sc);
8049 delay(2);
8050
8051 /* Shift in the READ command. */
8052 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
8053
8054 /* Shift in address. */
8055 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
8056
8057 /* Shift out the data. */
8058 wm_eeprom_recvbits(sc, &val, 16);
8059 data[i] = val & 0xffff;
8060
8061 /* Clear CHIP SELECT. */
8062 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
8063 CSR_WRITE(sc, WMREG_EECD, reg);
8064 CSR_WRITE_FLUSH(sc);
8065 delay(2);
8066 }
8067
8068 return 0;
8069 }
8070
8071 /* SPI */
8072
8073 /*
8074 * Set SPI and FLASH related information from the EECD register.
8075 * For 82541 and 82547, the word size is taken from EEPROM.
8076 */
8077 static int
8078 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
8079 {
8080 int size;
8081 uint32_t reg;
8082 uint16_t data;
8083
8084 reg = CSR_READ(sc, WMREG_EECD);
8085 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
8086
8087 /* Read the size of NVM from EECD by default */
8088 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8089 switch (sc->sc_type) {
8090 case WM_T_82541:
8091 case WM_T_82541_2:
8092 case WM_T_82547:
8093 case WM_T_82547_2:
8094 /* Set dummy value to access EEPROM */
8095 sc->sc_nvm_wordsize = 64;
8096 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
8097 reg = data;
8098 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8099 if (size == 0)
8100 size = 6; /* 64 word size */
8101 else
8102 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
8103 break;
8104 case WM_T_80003:
8105 case WM_T_82571:
8106 case WM_T_82572:
8107 case WM_T_82573: /* SPI case */
8108 case WM_T_82574: /* SPI case */
8109 case WM_T_82583: /* SPI case */
8110 size += NVM_WORD_SIZE_BASE_SHIFT;
8111 if (size > 14)
8112 size = 14;
8113 break;
8114 case WM_T_82575:
8115 case WM_T_82576:
8116 case WM_T_82580:
8117 case WM_T_I350:
8118 case WM_T_I354:
8119 case WM_T_I210:
8120 case WM_T_I211:
8121 size += NVM_WORD_SIZE_BASE_SHIFT;
8122 if (size > 15)
8123 size = 15;
8124 break;
8125 default:
8126 aprint_error_dev(sc->sc_dev,
8127 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
8128 return -1;
8129 break;
8130 }
8131
8132 sc->sc_nvm_wordsize = 1 << size;
8133
8134 return 0;
8135 }
8136
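/*
 * A worked example of the size arithmetic above, assuming
 * NVM_WORD_SIZE_BASE_SHIFT is 6 (consistent with the 64-word
 * fallback, since 1 << 6 == 64): an 82571 whose EECD size field
 * reads 2 gets size = 2 + 6 = 8, i.e. sc_nvm_wordsize = 256 words.
 */
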
8137 /*
8138 * wm_nvm_ready_spi:
8139 *
8140 * Wait for a SPI EEPROM to be ready for commands.
8141 */
8142 static int
8143 wm_nvm_ready_spi(struct wm_softc *sc)
8144 {
8145 uint32_t val;
8146 int usec;
8147
8148 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
8149 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
8150 wm_eeprom_recvbits(sc, &val, 8);
8151 if ((val & SPI_SR_RDY) == 0)
8152 break;
8153 }
8154 if (usec >= SPI_MAX_RETRIES) {
8155 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
8156 return 1;
8157 }
8158 return 0;
8159 }
8160
8161 /*
8162 * wm_nvm_read_spi:
8163 *
8164  * Read a word from the EEPROM using the SPI protocol.
8165 */
8166 static int
8167 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8168 {
8169 uint32_t reg, val;
8170 int i;
8171 uint8_t opc;
8172
8173 /* Clear SK and CS. */
8174 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
8175 CSR_WRITE(sc, WMREG_EECD, reg);
8176 CSR_WRITE_FLUSH(sc);
8177 delay(2);
8178
8179 if (wm_nvm_ready_spi(sc))
8180 return 1;
8181
8182 /* Toggle CS to flush commands. */
8183 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
8184 CSR_WRITE_FLUSH(sc);
8185 delay(2);
8186 CSR_WRITE(sc, WMREG_EECD, reg);
8187 CSR_WRITE_FLUSH(sc);
8188 delay(2);
8189
8190 opc = SPI_OPC_READ;
8191 if (sc->sc_nvm_addrbits == 8 && word >= 128)
8192 opc |= SPI_OPC_A8;
8193
8194 wm_eeprom_sendbits(sc, opc, 8);
8195 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
8196
8197 for (i = 0; i < wordcnt; i++) {
8198 wm_eeprom_recvbits(sc, &val, 16);
8199 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
8200 }
8201
8202 /* Raise CS and clear SK. */
8203 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
8204 CSR_WRITE(sc, WMREG_EECD, reg);
8205 CSR_WRITE_FLUSH(sc);
8206 delay(2);
8207
8208 return 0;
8209 }
8210
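/*
 * A worked example of the SPI_OPC_A8 handling above for parts with
 * 8 address bits: reading word 0x90 means byte address 0x120, whose
 * low eight bits (0x20) go out through wm_eeprom_sendbits() while
 * the ninth address bit travels in the A8 opcode bit; that is
 * exactly the "word >= 128" case.
 */
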
8211 /* Reading via the EERD register */
8212
8213 static int
8214 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
8215 {
8216 uint32_t attempts = 100000;
8217 uint32_t i, reg = 0;
8218 int32_t done = -1;
8219
8220 for (i = 0; i < attempts; i++) {
8221 reg = CSR_READ(sc, rw);
8222
8223 if (reg & EERD_DONE) {
8224 done = 0;
8225 break;
8226 }
8227 delay(5);
8228 }
8229
8230 return done;
8231 }
8232
8233 static int
8234 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
8235 uint16_t *data)
8236 {
8237 int i, eerd = 0;
8238 int error = 0;
8239
8240 for (i = 0; i < wordcnt; i++) {
8241 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
8242
8243 CSR_WRITE(sc, WMREG_EERD, eerd);
8244 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
8245 if (error != 0)
8246 break;
8247
8248 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
8249 }
8250
8251 return error;
8252 }
8253
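/*
 * A minimal sketch of a single EERD transaction, using the same
 * register layout the loop above relies on: start the read, poll
 * for completion, then pull the word out of the data field.
 */
#if 0
	/* Hypothetical one-word read of offset 0. */
	uint16_t word;

	CSR_WRITE(sc, WMREG_EERD, (0 << EERD_ADDR_SHIFT) | EERD_START);
	if (wm_poll_eerd_eewr_done(sc, WMREG_EERD) == 0)
		word = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
#endif
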
8254 /* Flash */
8255
8256 static int
8257 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
8258 {
8259 uint32_t eecd;
8260 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
8261 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
8262 uint8_t sig_byte = 0;
8263
8264 switch (sc->sc_type) {
8265 case WM_T_ICH8:
8266 case WM_T_ICH9:
8267 eecd = CSR_READ(sc, WMREG_EECD);
8268 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
8269 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
8270 return 0;
8271 }
8272 /* FALLTHROUGH */
8273 default:
8274 /* Default to 0 */
8275 *bank = 0;
8276
8277 /* Check bank 0 */
8278 wm_read_ich8_byte(sc, act_offset, &sig_byte);
8279 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8280 *bank = 0;
8281 return 0;
8282 }
8283
8284 /* Check bank 1 */
8285 wm_read_ich8_byte(sc, act_offset + bank1_offset,
8286 &sig_byte);
8287 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8288 *bank = 1;
8289 return 0;
8290 }
8291 }
8292
8293 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
8294 device_xname(sc->sc_dev)));
8295 return -1;
8296 }
8297
8298 /******************************************************************************
8299 * This function does initial flash setup so that a new read/write/erase cycle
8300 * can be started.
8301 *
8302 * sc - The pointer to the hw structure
8303 ****************************************************************************/
8304 static int32_t
8305 wm_ich8_cycle_init(struct wm_softc *sc)
8306 {
8307 uint16_t hsfsts;
8308 int32_t error = 1;
8309 int32_t i = 0;
8310
8311 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8312
8313 	/* Maybe check the Flash Descriptor Valid bit in Hw status */
8314 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
8315 return error;
8316 }
8317
8318 /* Clear FCERR in Hw status by writing 1 */
8319 /* Clear DAEL in Hw status by writing a 1 */
8320 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
8321
8322 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8323
8324 /*
8325 	 * Either we should have a hardware SPI cycle-in-progress bit to
8326 	 * check against before starting a new cycle, or the FDONE bit
8327 	 * should be changed in the hardware so that it is 1 after a
8328 	 * hardware reset, which could then indicate whether a cycle is
8329 	 * in progress or has been completed. We should also have some
8330 	 * software semaphore mechanism guarding FDONE or the
8331 	 * cycle-in-progress bit, so that two threads' accesses to those
8332 	 * bits are serialized and they cannot start a cycle at the same time.
8333 */
8334
8335 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8336 /*
8337 * There is no cycle running at present, so we can start a
8338 * cycle
8339 */
8340
8341 /* Begin by setting Flash Cycle Done. */
8342 hsfsts |= HSFSTS_DONE;
8343 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8344 error = 0;
8345 } else {
8346 /*
8347 		 * otherwise poll for some time so the current cycle has a
8348 * chance to end before giving up.
8349 */
8350 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
8351 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8352 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8353 error = 0;
8354 break;
8355 }
8356 delay(1);
8357 }
8358 if (error == 0) {
8359 /*
8360 			 * The previous cycle completed within the timeout;
8361 			 * now set the Flash Cycle Done bit.
8362 */
8363 hsfsts |= HSFSTS_DONE;
8364 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8365 }
8366 }
8367 return error;
8368 }
8369
8370 /******************************************************************************
8371 * This function starts a flash cycle and waits for its completion
8372 *
8373 * sc - The pointer to the hw structure
8374 ****************************************************************************/
8375 static int32_t
8376 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
8377 {
8378 uint16_t hsflctl;
8379 uint16_t hsfsts;
8380 int32_t error = 1;
8381 uint32_t i = 0;
8382
8383 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
8384 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8385 hsflctl |= HSFCTL_GO;
8386 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8387
8388 /* Wait till FDONE bit is set to 1 */
8389 do {
8390 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8391 if (hsfsts & HSFSTS_DONE)
8392 break;
8393 delay(1);
8394 i++;
8395 } while (i < timeout);
8396 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
8397 error = 0;
8398
8399 return error;
8400 }
8401
8402 /******************************************************************************
8403 * Reads a byte or word from the NVM using the ICH8 flash access registers.
8404 *
8405 * sc - The pointer to the hw structure
8406 * index - The index of the byte or word to read.
8407 * size - Size of data to read, 1=byte 2=word
8408 * data - Pointer to the word to store the value read.
8409 *****************************************************************************/
8410 static int32_t
8411 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
8412 uint32_t size, uint16_t *data)
8413 {
8414 uint16_t hsfsts;
8415 uint16_t hsflctl;
8416 uint32_t flash_linear_address;
8417 uint32_t flash_data = 0;
8418 int32_t error = 1;
8419 int32_t count = 0;
8420
8421 	if (size < 1 || size > 2 || data == NULL ||
8422 index > ICH_FLASH_LINEAR_ADDR_MASK)
8423 return error;
8424
8425 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
8426 sc->sc_ich8_flash_base;
8427
8428 do {
8429 delay(1);
8430 /* Steps */
8431 error = wm_ich8_cycle_init(sc);
8432 if (error)
8433 break;
8434
8435 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8436 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
8437 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
8438 & HSFCTL_BCOUNT_MASK;
8439 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
8440 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8441
8442 /*
8443 * Write the last 24 bits of index into Flash Linear address
8444 * field in Flash Address
8445 */
8446 /* TODO: TBD maybe check the index against the size of flash */
8447
8448 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
8449
8450 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
8451
8452 /*
8453 		 * If FCERR is set to 1, clear it and retry the whole
8454 		 * sequence a few more times; otherwise read the result
8455 		 * (shift it in) from the Flash Data0 register, least
8456 		 * significant byte first.
8457 		 */
8458 if (error == 0) {
8459 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
8460 if (size == 1)
8461 *data = (uint8_t)(flash_data & 0x000000FF);
8462 else if (size == 2)
8463 *data = (uint16_t)(flash_data & 0x0000FFFF);
8464 break;
8465 } else {
8466 /*
8467 * If we've gotten here, then things are probably
8468 * completely hosed, but if the error condition is
8469 * detected, it won't hurt to give it another try...
8470 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
8471 */
8472 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8473 if (hsfsts & HSFSTS_ERR) {
8474 /* Repeat for some time before giving up. */
8475 continue;
8476 } else if ((hsfsts & HSFSTS_DONE) == 0)
8477 break;
8478 }
8479 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8480
8481 return error;
8482 }
8483
8484 /******************************************************************************
8485 * Reads a single byte from the NVM using the ICH8 flash access registers.
8486 *
8487 * sc - pointer to wm_hw structure
8488 * index - The index of the byte to read.
8489 * data - Pointer to a byte to store the value read.
8490 *****************************************************************************/
8491 static int32_t
8492 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
8493 {
8494 int32_t status;
8495 uint16_t word = 0;
8496
8497 status = wm_read_ich8_data(sc, index, 1, &word);
8498 if (status == 0)
8499 *data = (uint8_t)word;
8500 else
8501 *data = 0;
8502
8503 return status;
8504 }
8505
8506 /******************************************************************************
8507 * Reads a word from the NVM using the ICH8 flash access registers.
8508 *
8509 * sc - pointer to wm_hw structure
8510 * index - The starting byte index of the word to read.
8511 * data - Pointer to a word to store the value read.
8512 *****************************************************************************/
8513 static int32_t
8514 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8515 {
8516 int32_t status;
8517
8518 status = wm_read_ich8_data(sc, index, 2, data);
8519 return status;
8520 }
8521
8522 /******************************************************************************
8523 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
8524 * register.
8525 *
8526 * sc - Struct containing variables accessed by shared code
8527 * offset - offset of word in the EEPROM to read
8528 * data - word read from the EEPROM
8529 * words - number of words to read
8530 *****************************************************************************/
8531 static int
8532 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
8533 {
8534 int32_t error = 0;
8535 uint32_t flash_bank = 0;
8536 uint32_t act_offset = 0;
8537 uint32_t bank_offset = 0;
8538 uint16_t word = 0;
8539 uint16_t i = 0;
8540
8541 /*
8542 * We need to know which is the valid flash bank. In the event
8543 * that we didn't allocate eeprom_shadow_ram, we may not be
8544 * managing flash_bank. So it cannot be trusted and needs
8545 * to be updated with each read.
8546 */
8547 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
8548 if (error) {
8549 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
8550 device_xname(sc->sc_dev)));
8551 flash_bank = 0;
8552 }
8553
8554 /*
8555 * Adjust offset appropriately if we're on bank 1 - adjust for word
8556 * size
8557 */
8558 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
8559
8560 error = wm_get_swfwhw_semaphore(sc);
8561 if (error) {
8562 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8563 __func__);
8564 return error;
8565 }
8566
8567 for (i = 0; i < words; i++) {
8568 /* The NVM part needs a byte offset, hence * 2 */
8569 act_offset = bank_offset + ((offset + i) * 2);
8570 error = wm_read_ich8_word(sc, act_offset, &word);
8571 if (error) {
8572 aprint_error_dev(sc->sc_dev,
8573 "%s: failed to read NVM\n", __func__);
8574 break;
8575 }
8576 data[i] = word;
8577 }
8578
8579 wm_put_swfwhw_semaphore(sc);
8580 return error;
8581 }
8582
8583 /* iNVM */
8584
8585 static int
8586 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
8587 {
8588 	int32_t rv = -1;
8589 uint32_t invm_dword;
8590 uint16_t i;
8591 uint8_t record_type, word_address;
8592
8593 for (i = 0; i < INVM_SIZE; i++) {
8594 invm_dword = CSR_READ(sc, E1000_INVM_DATA_REG(i));
8595 /* Get record type */
8596 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
8597 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
8598 break;
8599 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
8600 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
8601 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
8602 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
8603 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
8604 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
8605 if (word_address == address) {
8606 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
8607 rv = 0;
8608 break;
8609 }
8610 }
8611 }
8612
8613 return rv;
8614 }
8615
8616 static int
8617 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
8618 {
8619 int rv = 0;
8620 int i;
8621
8622 for (i = 0; i < words; i++) {
8623 switch (offset + i) {
8624 case NVM_OFF_MACADDR:
8625 case NVM_OFF_MACADDR1:
8626 case NVM_OFF_MACADDR2:
8627 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
8628 if (rv != 0) {
8629 data[i] = 0xffff;
8630 rv = -1;
8631 }
8632 break;
8633 case NVM_OFF_CFG2:
8634 rv = wm_nvm_read_word_invm(sc, offset, data);
8635 if (rv != 0) {
8636 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
8637 rv = 0;
8638 }
8639 break;
8640 case NVM_OFF_CFG4:
8641 rv = wm_nvm_read_word_invm(sc, offset, data);
8642 if (rv != 0) {
8643 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
8644 rv = 0;
8645 }
8646 break;
8647 case NVM_OFF_LED_1_CFG:
8648 rv = wm_nvm_read_word_invm(sc, offset, data);
8649 if (rv != 0) {
8650 *data = NVM_LED_1_CFG_DEFAULT_I211;
8651 rv = 0;
8652 }
8653 break;
8654 case NVM_OFF_LED_0_2_CFG:
8655 rv = wm_nvm_read_word_invm(sc, offset, data);
8656 if (rv != 0) {
8657 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
8658 rv = 0;
8659 }
8660 break;
8661 case NVM_OFF_ID_LED_SETTINGS:
8662 rv = wm_nvm_read_word_invm(sc, offset, data);
8663 if (rv != 0) {
8664 *data = ID_LED_RESERVED_FFFF;
8665 rv = 0;
8666 }
8667 break;
8668 default:
8669 DPRINTF(WM_DEBUG_NVM,
8670 ("NVM word 0x%02x is not mapped.\n", offset));
8671 *data = NVM_RESERVED_WORD;
8672 break;
8673 }
8674 }
8675
8676 return rv;
8677 }
8678
8679 /* Lock, detecting NVM type, validate checksum and read */
8680
8681 /*
8682 * wm_nvm_acquire:
8683 *
8684 * Perform the EEPROM handshake required on some chips.
8685 */
8686 static int
8687 wm_nvm_acquire(struct wm_softc *sc)
8688 {
8689 uint32_t reg;
8690 int x;
8691 int ret = 0;
8692
8693 	/* Flash-type NVM needs no handshake; always succeeds */
8694 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8695 return 0;
8696
8697 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8698 ret = wm_get_swfwhw_semaphore(sc);
8699 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
8700 /* This will also do wm_get_swsm_semaphore() if needed */
8701 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
8702 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
8703 ret = wm_get_swsm_semaphore(sc);
8704 }
8705
8706 if (ret) {
8707 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8708 __func__);
8709 return 1;
8710 }
8711
8712 if (sc->sc_flags & WM_F_LOCK_EECD) {
8713 reg = CSR_READ(sc, WMREG_EECD);
8714
8715 /* Request EEPROM access. */
8716 reg |= EECD_EE_REQ;
8717 CSR_WRITE(sc, WMREG_EECD, reg);
8718
8719 /* ..and wait for it to be granted. */
8720 for (x = 0; x < 1000; x++) {
8721 reg = CSR_READ(sc, WMREG_EECD);
8722 if (reg & EECD_EE_GNT)
8723 break;
8724 delay(5);
8725 }
8726 if ((reg & EECD_EE_GNT) == 0) {
8727 aprint_error_dev(sc->sc_dev,
8728 "could not acquire EEPROM GNT\n");
8729 reg &= ~EECD_EE_REQ;
8730 CSR_WRITE(sc, WMREG_EECD, reg);
8731 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8732 wm_put_swfwhw_semaphore(sc);
8733 if (sc->sc_flags & WM_F_LOCK_SWFW)
8734 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8735 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8736 wm_put_swsm_semaphore(sc);
8737 return 1;
8738 }
8739 }
8740
8741 return 0;
8742 }
8743
8744 /*
8745 * wm_nvm_release:
8746 *
8747 * Release the EEPROM mutex.
8748 */
8749 static void
8750 wm_nvm_release(struct wm_softc *sc)
8751 {
8752 uint32_t reg;
8753
8754 	/* Nothing was acquired for flash-type NVM */
8755 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8756 return;
8757
8758 if (sc->sc_flags & WM_F_LOCK_EECD) {
8759 reg = CSR_READ(sc, WMREG_EECD);
8760 reg &= ~EECD_EE_REQ;
8761 CSR_WRITE(sc, WMREG_EECD, reg);
8762 }
8763
8764 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8765 wm_put_swfwhw_semaphore(sc);
8766 if (sc->sc_flags & WM_F_LOCK_SWFW)
8767 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8768 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8769 wm_put_swsm_semaphore(sc);
8770 }
8771
8772 static int
8773 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
8774 {
8775 uint32_t eecd = 0;
8776
8777 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
8778 || sc->sc_type == WM_T_82583) {
8779 eecd = CSR_READ(sc, WMREG_EECD);
8780
8781 /* Isolate bits 15 & 16 */
8782 eecd = ((eecd >> 15) & 0x03);
8783
8784 /* If both bits are set, device is Flash type */
8785 if (eecd == 0x03)
8786 return 0;
8787 }
8788 return 1;
8789 }
8790
8791 static int
8792 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
8793 {
8794 uint32_t eec;
8795
8796 eec = CSR_READ(sc, WMREG_EEC);
8797 if ((eec & EEC_FLASH_DETECTED) != 0)
8798 return 1;
8799
8800 return 0;
8801 }
8802
8803 /*
8804 * wm_nvm_validate_checksum
8805 *
8806 * The checksum is defined as the sum of the first 64 (16 bit) words.
8807 */
8808 static int
8809 wm_nvm_validate_checksum(struct wm_softc *sc)
8810 {
8811 uint16_t checksum;
8812 uint16_t eeprom_data;
8813 #ifdef WM_DEBUG
8814 uint16_t csum_wordaddr, valid_checksum;
8815 #endif
8816 int i;
8817
8818 checksum = 0;
8819
8820 /* Don't check for I211 */
8821 if (sc->sc_type == WM_T_I211)
8822 return 0;
8823
8824 #ifdef WM_DEBUG
8825 if (sc->sc_type == WM_T_PCH_LPT) {
8826 csum_wordaddr = NVM_OFF_COMPAT;
8827 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
8828 } else {
8829 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
8830 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
8831 }
8832
8833 /* Dump EEPROM image for debug */
8834 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8835 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8836 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
8837 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
8838 if ((eeprom_data & valid_checksum) == 0) {
8839 DPRINTF(WM_DEBUG_NVM,
8840 ("%s: NVM need to be updated (%04x != %04x)\n",
8841 device_xname(sc->sc_dev), eeprom_data,
8842 valid_checksum));
8843 }
8844 }
8845
8846 if ((wm_debug & WM_DEBUG_NVM) != 0) {
8847 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
8848 for (i = 0; i < NVM_SIZE; i++) {
8849 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8850 printf("XXXX ");
8851 else
8852 printf("%04hx ", eeprom_data);
8853 if (i % 8 == 7)
8854 printf("\n");
8855 }
8856 }
8857
8858 #endif /* WM_DEBUG */
8859
8860 for (i = 0; i < NVM_SIZE; i++) {
8861 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8862 return 1;
8863 checksum += eeprom_data;
8864 }
8865
8866 if (checksum != (uint16_t) NVM_CHECKSUM) {
8867 #ifdef WM_DEBUG
8868 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
8869 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
8870 #endif
8871 }
8872
8873 return 0;
8874 }
8875
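/*
 * A worked example of the check above, assuming NVM_CHECKSUM is the
 * usual 0xBABA magic: the sum of all NVM_SIZE (64) words modulo 2^16
 * must equal 0xBABA, so vendors store whatever value makes the image
 * balance in the final word.
 */
#if 0
/* Hypothetical host-side sketch over a raw 64-word image. */
static int
nvm_image_checksum_ok(const uint16_t *img)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < 64; i++)
		sum += img[i];
	return sum == 0xBABA;
}
#endif
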
8876 /*
8877 * wm_nvm_read:
8878 *
8879 * Read data from the serial EEPROM.
8880 */
8881 static int
8882 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8883 {
8884 int rv;
8885
8886 if (sc->sc_flags & WM_F_EEPROM_INVALID)
8887 return 1;
8888
8889 if (wm_nvm_acquire(sc))
8890 return 1;
8891
8892 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8893 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8894 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
8895 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
8896 else if (sc->sc_flags & WM_F_EEPROM_INVM)
8897 rv = wm_nvm_read_invm(sc, word, wordcnt, data);
8898 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
8899 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
8900 else if (sc->sc_flags & WM_F_EEPROM_SPI)
8901 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
8902 else
8903 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
8904
8905 wm_nvm_release(sc);
8906 return rv;
8907 }
8908
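/*
 * A minimal usage sketch of the dispatcher above: callers never pick
 * a backend themselves, they just ask for words by offset.
 */
#if 0
	/*
	 * Hypothetical read of the station address words, assuming
	 * they are consecutive starting at NVM_OFF_MACADDR.
	 */
	uint16_t myea[3];

	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, myea) != 0)
		aprint_error_dev(sc->sc_dev, "failed to read MAC address\n");
#endif
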
8909 /*
8910 * Hardware semaphores.
8911  * Very complex...
8912 */
8913
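/*
 * A sketch of the locking pattern the routines below implement:
 * wm_get_swsm_semaphore() takes SWSM.SMBI and then SWSM.SWESMBI,
 * wm_get_swfw_semaphore() additionally claims a per-resource bit in
 * SW_FW_SYNC, and ICH/PCH parts use EXTCNFCTR.SWFLAG instead.
 */
#if 0
	/* Typical acquire/access/release pairing (hypothetical caller). */
	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM) == 0) {
		/* ... touch the shared EEPROM resource ... */
		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
	}
#endif
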
8914 static int
8915 wm_get_swsm_semaphore(struct wm_softc *sc)
8916 {
8917 int32_t timeout;
8918 uint32_t swsm;
8919
8920 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8921 /* Get the SW semaphore. */
8922 timeout = sc->sc_nvm_wordsize + 1;
8923 while (timeout) {
8924 swsm = CSR_READ(sc, WMREG_SWSM);
8925
8926 if ((swsm & SWSM_SMBI) == 0)
8927 break;
8928
8929 delay(50);
8930 timeout--;
8931 }
8932
8933 if (timeout == 0) {
8934 aprint_error_dev(sc->sc_dev,
8935 "could not acquire SWSM SMBI\n");
8936 return 1;
8937 }
8938 }
8939
8940 /* Get the FW semaphore. */
8941 timeout = sc->sc_nvm_wordsize + 1;
8942 while (timeout) {
8943 swsm = CSR_READ(sc, WMREG_SWSM);
8944 swsm |= SWSM_SWESMBI;
8945 CSR_WRITE(sc, WMREG_SWSM, swsm);
8946 /* If we managed to set the bit we got the semaphore. */
8947 swsm = CSR_READ(sc, WMREG_SWSM);
8948 if (swsm & SWSM_SWESMBI)
8949 break;
8950
8951 delay(50);
8952 timeout--;
8953 }
8954
8955 if (timeout == 0) {
8956 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
8957 /* Release semaphores */
8958 wm_put_swsm_semaphore(sc);
8959 return 1;
8960 }
8961 return 0;
8962 }
8963
8964 static void
8965 wm_put_swsm_semaphore(struct wm_softc *sc)
8966 {
8967 uint32_t swsm;
8968
8969 swsm = CSR_READ(sc, WMREG_SWSM);
8970 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
8971 CSR_WRITE(sc, WMREG_SWSM, swsm);
8972 }
8973
8974 static int
8975 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8976 {
8977 uint32_t swfw_sync;
8978 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
8979 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
8980 	int timeout;
8981
8982 for (timeout = 0; timeout < 200; timeout++) {
8983 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8984 if (wm_get_swsm_semaphore(sc)) {
8985 aprint_error_dev(sc->sc_dev,
8986 "%s: failed to get semaphore\n",
8987 __func__);
8988 return 1;
8989 }
8990 }
8991 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8992 if ((swfw_sync & (swmask | fwmask)) == 0) {
8993 swfw_sync |= swmask;
8994 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8995 if (sc->sc_flags & WM_F_LOCK_SWSM)
8996 wm_put_swsm_semaphore(sc);
8997 return 0;
8998 }
8999 if (sc->sc_flags & WM_F_LOCK_SWSM)
9000 wm_put_swsm_semaphore(sc);
9001 delay(5000);
9002 }
9003 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
9004 device_xname(sc->sc_dev), mask, swfw_sync);
9005 return 1;
9006 }
9007
9008 static void
9009 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
9010 {
9011 uint32_t swfw_sync;
9012
9013 if (sc->sc_flags & WM_F_LOCK_SWSM) {
9014 while (wm_get_swsm_semaphore(sc) != 0)
9015 continue;
9016 }
9017 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
9018 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
9019 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
9020 if (sc->sc_flags & WM_F_LOCK_SWSM)
9021 wm_put_swsm_semaphore(sc);
9022 }
9023
9024 static int
9025 wm_get_swfwhw_semaphore(struct wm_softc *sc)
9026 {
9027 uint32_t ext_ctrl;
9028 	int timeout;
9029
9030 for (timeout = 0; timeout < 200; timeout++) {
9031 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
9032 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
9033 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
9034
9035 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
9036 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
9037 return 0;
9038 delay(5000);
9039 }
9040 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
9041 device_xname(sc->sc_dev), ext_ctrl);
9042 return 1;
9043 }
9044
9045 static void
9046 wm_put_swfwhw_semaphore(struct wm_softc *sc)
9047 {
9048 uint32_t ext_ctrl;
9049 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
9050 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
9051 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
9052 }
9053
9054 static int
9055 wm_get_hw_semaphore_82573(struct wm_softc *sc)
9056 {
9057 int i = 0;
9058 uint32_t reg;
9059
9060 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
9061 do {
9062 CSR_WRITE(sc, WMREG_EXTCNFCTR,
9063 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
9064 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
9065 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
9066 break;
9067 delay(2*1000);
9068 i++;
9069 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
9070
9071 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
9072 wm_put_hw_semaphore_82573(sc);
9073 log(LOG_ERR, "%s: Driver can't access the PHY\n",
9074 device_xname(sc->sc_dev));
9075 return -1;
9076 }
9077
9078 return 0;
9079 }
9080
9081 static void
9082 wm_put_hw_semaphore_82573(struct wm_softc *sc)
9083 {
9084 uint32_t reg;
9085
9086 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
9087 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
9088 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
9089 }
9090
9091 /*
9092 * Management mode and power management related subroutines.
9093 * BMC, AMT, suspend/resume and EEE.
9094 */
9095
9096 static int
9097 wm_check_mng_mode(struct wm_softc *sc)
9098 {
9099 int rv;
9100
9101 switch (sc->sc_type) {
9102 case WM_T_ICH8:
9103 case WM_T_ICH9:
9104 case WM_T_ICH10:
9105 case WM_T_PCH:
9106 case WM_T_PCH2:
9107 case WM_T_PCH_LPT:
9108 rv = wm_check_mng_mode_ich8lan(sc);
9109 break;
9110 case WM_T_82574:
9111 case WM_T_82583:
9112 rv = wm_check_mng_mode_82574(sc);
9113 break;
9114 case WM_T_82571:
9115 case WM_T_82572:
9116 case WM_T_82573:
9117 case WM_T_80003:
9118 rv = wm_check_mng_mode_generic(sc);
9119 break;
9120 default:
9121 		/* nothing to do */
9122 rv = 0;
9123 break;
9124 }
9125
9126 return rv;
9127 }
9128
9129 static int
9130 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
9131 {
9132 uint32_t fwsm;
9133
9134 fwsm = CSR_READ(sc, WMREG_FWSM);
9135
9136 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
9137 return 1;
9138
9139 return 0;
9140 }
9141
9142 static int
9143 wm_check_mng_mode_82574(struct wm_softc *sc)
9144 {
9145 uint16_t data;
9146
9147 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
9148
9149 if ((data & NVM_CFG2_MNGM_MASK) != 0)
9150 return 1;
9151
9152 return 0;
9153 }
9154
9155 static int
9156 wm_check_mng_mode_generic(struct wm_softc *sc)
9157 {
9158 uint32_t fwsm;
9159
9160 fwsm = CSR_READ(sc, WMREG_FWSM);
9161
9162 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
9163 return 1;
9164
9165 return 0;
9166 }
9167
9168 static int
9169 wm_enable_mng_pass_thru(struct wm_softc *sc)
9170 {
9171 uint32_t manc, fwsm, factps;
9172
9173 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
9174 return 0;
9175
9176 manc = CSR_READ(sc, WMREG_MANC);
9177
9178 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
9179 device_xname(sc->sc_dev), manc));
9180 if ((manc & MANC_RECV_TCO_EN) == 0)
9181 return 0;
9182
9183 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
9184 fwsm = CSR_READ(sc, WMREG_FWSM);
9185 factps = CSR_READ(sc, WMREG_FACTPS);
9186 if (((factps & FACTPS_MNGCG) == 0)
9187 && ((fwsm & FWSM_MODE_MASK)
9188 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
9189 return 1;
9190 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
9191 uint16_t data;
9192
9193 factps = CSR_READ(sc, WMREG_FACTPS);
9194 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
9195 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
9196 device_xname(sc->sc_dev), factps, data));
9197 if (((factps & FACTPS_MNGCG) == 0)
9198 && ((data & NVM_CFG2_MNGM_MASK)
9199 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
9200 return 1;
9201 } else if (((manc & MANC_SMBUS_EN) != 0)
9202 && ((manc & MANC_ASF_EN) == 0))
9203 return 1;
9204
9205 return 0;
9206 }
9207
9208 static int
9209 wm_check_reset_block(struct wm_softc *sc)
9210 {
9211 uint32_t reg;
9212
9213 switch (sc->sc_type) {
9214 case WM_T_ICH8:
9215 case WM_T_ICH9:
9216 case WM_T_ICH10:
9217 case WM_T_PCH:
9218 case WM_T_PCH2:
9219 case WM_T_PCH_LPT:
9220 reg = CSR_READ(sc, WMREG_FWSM);
9221 if ((reg & FWSM_RSPCIPHY) != 0)
9222 return 0;
9223 else
9224 return -1;
9225 break;
9226 case WM_T_82571:
9227 case WM_T_82572:
9228 case WM_T_82573:
9229 case WM_T_82574:
9230 case WM_T_82583:
9231 case WM_T_80003:
9232 reg = CSR_READ(sc, WMREG_MANC);
9233 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
9234 return -1;
9235 else
9236 return 0;
9237 break;
9238 default:
9239 /* no problem */
9240 break;
9241 }
9242
9243 return 0;
9244 }
9245
9246 static void
9247 wm_get_hw_control(struct wm_softc *sc)
9248 {
9249 uint32_t reg;
9250
9251 switch (sc->sc_type) {
9252 case WM_T_82573:
9253 reg = CSR_READ(sc, WMREG_SWSM);
9254 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
9255 break;
9256 case WM_T_82571:
9257 case WM_T_82572:
9258 case WM_T_82574:
9259 case WM_T_82583:
9260 case WM_T_80003:
9261 case WM_T_ICH8:
9262 case WM_T_ICH9:
9263 case WM_T_ICH10:
9264 case WM_T_PCH:
9265 case WM_T_PCH2:
9266 case WM_T_PCH_LPT:
9267 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9268 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
9269 break;
9270 default:
9271 break;
9272 }
9273 }
9274
9275 static void
9276 wm_release_hw_control(struct wm_softc *sc)
9277 {
9278 uint32_t reg;
9279
9280 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
9281 return;
9282
9283 if (sc->sc_type == WM_T_82573) {
9284 reg = CSR_READ(sc, WMREG_SWSM);
9285 reg &= ~SWSM_DRV_LOAD;
9286 		CSR_WRITE(sc, WMREG_SWSM, reg);
9287 } else {
9288 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9289 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
9290 }
9291 }
9292
9293 static void
9294 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
9295 {
9296 uint32_t reg;
9297
9298 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
9299
9300 if (on != 0)
9301 reg |= EXTCNFCTR_GATE_PHY_CFG;
9302 else
9303 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
9304
9305 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
9306 }
9307
9308 static void
9309 wm_smbustopci(struct wm_softc *sc)
9310 {
9311 uint32_t fwsm;
9312
9313 fwsm = CSR_READ(sc, WMREG_FWSM);
9314 if (((fwsm & FWSM_FW_VALID) == 0)
9315 && ((wm_check_reset_block(sc) == 0))) {
9316 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
9317 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
9318 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9319 CSR_WRITE_FLUSH(sc);
9320 delay(10);
9321 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
9322 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9323 CSR_WRITE_FLUSH(sc);
9324 delay(50*1000);
9325
9326 /*
9327 * Gate automatic PHY configuration by hardware on non-managed
9328 * 82579
9329 */
9330 if (sc->sc_type == WM_T_PCH2)
9331 wm_gate_hw_phy_config_ich8lan(sc, 1);
9332 }
9333 }
9334
9335 static void
9336 wm_init_manageability(struct wm_softc *sc)
9337 {
9338
9339 if (sc->sc_flags & WM_F_HAS_MANAGE) {
9340 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
9341 uint32_t manc = CSR_READ(sc, WMREG_MANC);
9342
9343 /* Disable hardware interception of ARP */
9344 manc &= ~MANC_ARP_EN;
9345
9346 /* Enable receiving management packets to the host */
9347 if (sc->sc_type >= WM_T_82571) {
9348 manc |= MANC_EN_MNG2HOST;
9349 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
9350 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
9351
9352 }
9353
9354 CSR_WRITE(sc, WMREG_MANC, manc);
9355 }
9356 }
9357
9358 static void
9359 wm_release_manageability(struct wm_softc *sc)
9360 {
9361
9362 if (sc->sc_flags & WM_F_HAS_MANAGE) {
9363 uint32_t manc = CSR_READ(sc, WMREG_MANC);
9364
9365 manc |= MANC_ARP_EN;
9366 if (sc->sc_type >= WM_T_82571)
9367 manc &= ~MANC_EN_MNG2HOST;
9368
9369 CSR_WRITE(sc, WMREG_MANC, manc);
9370 }
9371 }
9372
9373 static void
9374 wm_get_wakeup(struct wm_softc *sc)
9375 {
9376
9377 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
9378 switch (sc->sc_type) {
9379 case WM_T_82573:
9380 case WM_T_82583:
9381 sc->sc_flags |= WM_F_HAS_AMT;
9382 /* FALLTHROUGH */
9383 case WM_T_80003:
9384 case WM_T_82541:
9385 case WM_T_82547:
9386 case WM_T_82571:
9387 case WM_T_82572:
9388 case WM_T_82574:
9389 case WM_T_82575:
9390 case WM_T_82576:
9391 case WM_T_82580:
9392 case WM_T_I350:
9393 case WM_T_I354:
9394 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
9395 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
9396 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9397 break;
9398 case WM_T_ICH8:
9399 case WM_T_ICH9:
9400 case WM_T_ICH10:
9401 case WM_T_PCH:
9402 case WM_T_PCH2:
9403 case WM_T_PCH_LPT:
9404 sc->sc_flags |= WM_F_HAS_AMT;
9405 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9406 break;
9407 default:
9408 break;
9409 }
9410
9411 /* 1: HAS_MANAGE */
9412 if (wm_enable_mng_pass_thru(sc) != 0)
9413 sc->sc_flags |= WM_F_HAS_MANAGE;
9414
9415 #ifdef WM_DEBUG
9416 printf("\n");
9417 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
9418 printf("HAS_AMT,");
9419 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
9420 printf("ARC_SUBSYS_VALID,");
9421 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
9422 printf("ASF_FIRMWARE_PRES,");
9423 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
9424 printf("HAS_MANAGE,");
9425 printf("\n");
9426 #endif
9427 /*
9428 	 * Note that the WOL flags are set after the resetting of the eeprom
9429 * stuff
9430 */
9431 }
9432
9433 #ifdef WM_WOL
9434 /* WOL in the newer chipset interfaces (pchlan) */
9435 static void
9436 wm_enable_phy_wakeup(struct wm_softc *sc)
9437 {
9438 #if 0
9439 uint16_t preg;
9440
9441 /* Copy MAC RARs to PHY RARs */
9442
9443 /* Copy MAC MTA to PHY MTA */
9444
9445 /* Configure PHY Rx Control register */
9446
9447 /* Enable PHY wakeup in MAC register */
9448
9449 /* Configure and enable PHY wakeup in PHY registers */
9450
9451 /* Activate PHY wakeup */
9452
9453 /* XXX */
9454 #endif
9455 }
9456
9457 /* Power down workaround on D3 */
9458 static void
9459 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
9460 {
9461 uint32_t reg;
9462 int i;
9463
9464 for (i = 0; i < 2; i++) {
9465 /* Disable link */
9466 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9467 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9468 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9469
9470 /*
9471 * Call gig speed drop workaround on Gig disable before
9472 * accessing any PHY registers
9473 */
9474 if (sc->sc_type == WM_T_ICH8)
9475 wm_gig_downshift_workaround_ich8lan(sc);
9476
9477 /* Write VR power-down enable */
9478 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9479 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9480 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
9481 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
9482
9483 /* Read it back and test */
9484 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9485 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9486 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
9487 break;
9488
9489 /* Issue PHY reset and repeat at most one more time */
9490 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9491 }
9492 }
9493
9494 static void
9495 wm_enable_wakeup(struct wm_softc *sc)
9496 {
9497 uint32_t reg, pmreg;
9498 pcireg_t pmode;
9499
9500 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
9501 &pmreg, NULL) == 0)
9502 return;
9503
9504 /* Advertise the wakeup capability */
9505 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
9506 | CTRL_SWDPIN(3));
9507 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
9508
9509 /* ICH workaround */
9510 switch (sc->sc_type) {
9511 case WM_T_ICH8:
9512 case WM_T_ICH9:
9513 case WM_T_ICH10:
9514 case WM_T_PCH:
9515 case WM_T_PCH2:
9516 case WM_T_PCH_LPT:
9517 /* Disable gig during WOL */
9518 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9519 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
9520 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9521 if (sc->sc_type == WM_T_PCH)
9522 wm_gmii_reset(sc);
9523
9524 /* Power down workaround */
9525 if (sc->sc_phytype == WMPHY_82577) {
9526 struct mii_softc *child;
9527
9528 /* Assume that the PHY is copper */
9529 child = LIST_FIRST(&sc->sc_mii.mii_phys);
9530 if (child->mii_mpd_rev <= 2)
9531 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
9532 (768 << 5) | 25, 0x0444); /* magic num */
9533 }
9534 break;
9535 default:
9536 break;
9537 }
9538
9539 /* Keep the laser running on fiber adapters */
9540 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
9541 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
9542 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9543 reg |= CTRL_EXT_SWDPIN(3);
9544 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9545 }
9546
9547 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
9548 #if 0 /* for the multicast packet */
9549 reg |= WUFC_MC;
9550 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
9551 #endif
9552
9553 if (sc->sc_type == WM_T_PCH) {
9554 wm_enable_phy_wakeup(sc);
9555 } else {
9556 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
9557 CSR_WRITE(sc, WMREG_WUFC, reg);
9558 }
9559
9560 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9561 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9562 || (sc->sc_type == WM_T_PCH2))
9563 && (sc->sc_phytype == WMPHY_IGP_3))
9564 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
9565
9566 /* Request PME */
9567 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
9568 #if 0
9569 /* Disable WOL */
9570 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
9571 #else
9572 /* For WOL */
9573 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
9574 #endif
9575 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
9576 }
9577 #endif /* WM_WOL */
9578
9579 /* EEE */
9580
9581 static void
9582 wm_set_eee_i350(struct wm_softc *sc)
9583 {
9584 uint32_t ipcnfg, eeer;
9585
9586 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
9587 eeer = CSR_READ(sc, WMREG_EEER);
9588
9589 if ((sc->sc_flags & WM_F_EEE) != 0) {
9590 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9591 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
9592 | EEER_LPI_FC);
9593 } else {
9594 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9595 ipcnfg &= ~IPCNFG_10BASE_TE;
9596 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
9597 | EEER_LPI_FC);
9598 }
9599
9600 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
9601 CSR_WRITE(sc, WMREG_EEER, eeer);
9602 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
9603 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
9604 }
9605
9606 /*
9607 * Workarounds (mainly PHY related).
9608  * Basically, PHY workarounds live in the PHY drivers.
9609 */
9610
9611 /* Work-around for 82566 Kumeran PCS lock loss */
9612 static void
9613 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
9614 {
9615 int miistatus, active, i;
9616 int reg;
9617
9618 miistatus = sc->sc_mii.mii_media_status;
9619
9620 /* If the link is not up, do nothing */
9621 	if ((miistatus & IFM_ACTIVE) == 0)
9622 return;
9623
9624 active = sc->sc_mii.mii_media_active;
9625
9626 /* Nothing to do if the link is other than 1Gbps */
9627 if (IFM_SUBTYPE(active) != IFM_1000_T)
9628 return;
9629
9630 for (i = 0; i < 10; i++) {
9631 /* read twice */
9632 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9633 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9634 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
9635 goto out; /* GOOD! */
9636
9637 /* Reset the PHY */
9638 wm_gmii_reset(sc);
9639 delay(5*1000);
9640 }
9641
9642 /* Disable GigE link negotiation */
9643 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9644 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9645 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9646
9647 /*
9648 * Call gig speed drop workaround on Gig disable before accessing
9649 * any PHY registers.
9650 */
9651 wm_gig_downshift_workaround_ich8lan(sc);
9652
9653 out:
9654 return;
9655 }
9656
9657 /* WOL from S5 stops working */
9658 static void
9659 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
9660 {
9661 uint16_t kmrn_reg;
9662
9663 /* Only for the IGP3 PHY: toggle Kumeran near-end loopback on and off */
9664 if (sc->sc_phytype == WMPHY_IGP_3) {
9665 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
9666 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
9667 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9668 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
9669 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9670 }
9671 }
9672
9673 /*
9674  * Workarounds for the PCH PHYs (82577/82578).
9675  * XXX Should these be moved to a new PHY driver?
9676  */
9677 static void
9678 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
9679 {
9680 if (sc->sc_phytype == WMPHY_82577)
9681 wm_set_mdio_slow_mode_hv(sc);
9682
9683 /* XXX not yet handled: (PCH rev. 2) && (82577 && (phy rev 2 or 3)) */
9684
9685 /* XXX not yet handled: (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
9686
9687 /* 82578 */
9688 if (sc->sc_phytype == WMPHY_82578) {
9689 /* PCH rev. < 3 */
9690 if (sc->sc_rev < 3) {
9691 /* XXX 6 bit shift? Why? Is it page2? */
9692 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
9693 0x66c0);
9694 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
9695 0xffff);
9696 }
9697
9698 /* XXX phy rev. < 2 */
9699 }
9700
9701 /* Select page 0 */
9702
9703 /* XXX acquire semaphore */
9704 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
9705 /* XXX release semaphore */
9706
9707 /*
9708  * Configure the K1 Si workaround during PHY reset assuming the link
9709  * is up, so that K1 gets disabled while the link runs at 1Gbps.
9710  */
9711 wm_k1_gig_workaround_hv(sc, 1);
9712 }
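
#if 0
/*
 * Illustrative sketch for the "XXX 6 bit shift?" question above: if the
 * upper bits of the register argument really encode a PHY page (an
 * unverified assumption), a hypothetical macro such as this would make
 * ((1 << 6) | 0x29) self-describing:
 */
#define	HV_PHY_REGNUM(page, reg)	(((page) << 6) | (reg))
/* e.g. wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PHY_REGNUM(1, 0x29), 0x66c0); */
#endif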
9713
9714 static void
9715 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
9716 {
9717
9718 wm_set_mdio_slow_mode_hv(sc);
9719 }
9720
9721 static void
9722 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
9723 {
9724 int k1_enable = sc->sc_nvm_k1_enabled;
9725
9726 /* XXX acquire semaphore */
9727
9728 if (link) {
9729 k1_enable = 0;
9730
9731 /* Link stall fix for link up */
9732 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
9733 } else {
9734 /* Link stall fix for link down */
9735 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
9736 }
9737
9738 wm_configure_k1_ich8lan(sc, k1_enable);
9739
9740 /* XXX release semaphore */
9741 }
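
/*
 * Usage note (illustrative): wm_hv_phy_workaround_ich8lan() calls the
 * function above with link forced to 1 at PHY reset time; a link state
 * change handler would presumably pass the observed state instead, e.g.
 * wm_k1_gig_workaround_hv(sc,
 *     (sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0);
 */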
9742
9743 static void
9744 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
9745 {
9746 uint32_t reg;
9747
9748 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
9749 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
9750 reg | HV_KMRN_MDIO_SLOW);
9751 }
9752
9753 static void
9754 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
9755 {
9756 uint32_t ctrl, ctrl_ext, tmp;
9757 uint16_t kmrn_reg;
9758
9759 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
9760
9761 if (k1_enable)
9762 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
9763 else
9764 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
9765
9766 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
9767
9768 delay(20);
9769
9770 ctrl = CSR_READ(sc, WMREG_CTRL);
9771 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9772
9773 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
9774 tmp |= CTRL_FRCSPD;
9775
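	/*
	 * Briefly force the MAC speed with the speed-select bypass set;
	 * the original CTRL/CTRL_EXT values are restored just below.
	 */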
9776 CSR_WRITE(sc, WMREG_CTRL, tmp);
9777 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
9778 CSR_WRITE_FLUSH(sc);
9779 delay(20);
9780
9781 CSR_WRITE(sc, WMREG_CTRL, ctrl);
9782 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9783 CSR_WRITE_FLUSH(sc);
9784 delay(20);
9785 }
9786
9787 /* Special case: the 82575 needs to be initialized manually */
9788 static void
9789 wm_reset_init_script_82575(struct wm_softc *sc)
9790 {
9791 /*
9792  * Remark: this is untested code - we have no board without an EEPROM.
9793  * It uses the same setup as mentioned in the FreeBSD driver for the i82575.
9794  */
9795
9796 /* SerDes configuration via SERDESCTRL */
9797 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
9798 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
9799 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
9800 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
9801
9802 /* CCM configuration via CCMCTL register */
9803 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
9804 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
9805
9806 /* PCIe lanes configuration */
9807 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
9808 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
9809 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
9810 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
9811
9812 /* PCIe PLL Configuration */
9813 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
9814 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
9815 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
9816 }
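
#if 0
/*
 * Illustrative alternative (hypothetical, equally untested): the init
 * script above can be expressed as a table of (register, offset, value)
 * triples, which keeps all the magic numbers in one place.
 */
static const struct wm_init_script_ent {
	uint32_t reg;
	uint8_t off, val;
} wm_82575_init_script[] = {
	{ WMREG_SCTL,   0x00, 0x0c }, { WMREG_SCTL,   0x01, 0x78 },
	{ WMREG_SCTL,   0x1b, 0x23 }, { WMREG_SCTL,   0x23, 0x15 },
	{ WMREG_CCMCTL, 0x14, 0x00 }, { WMREG_CCMCTL, 0x10, 0x00 },
	{ WMREG_GIOCTL, 0x00, 0xec }, { WMREG_GIOCTL, 0x61, 0xdf },
	{ WMREG_GIOCTL, 0x34, 0x05 }, { WMREG_GIOCTL, 0x2f, 0x81 },
	{ WMREG_SCCTL,  0x02, 0x47 }, { WMREG_SCCTL,  0x14, 0x00 },
	{ WMREG_SCCTL,  0x10, 0x00 },
};

static void
wm_reset_init_script_82575_table(struct wm_softc *sc)
{
	size_t i;

	for (i = 0; i < __arraycount(wm_82575_init_script); i++)
		wm_82575_write_8bit_ctlr_reg(sc,
		    wm_82575_init_script[i].reg,
		    wm_82575_init_script[i].off,
		    wm_82575_init_script[i].val);
}
#endif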
9817