/*	$NetBSD: if_wm.c,v 1.320 2015/05/04 10:10:42 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- EEE (Energy Efficient Ethernet)
 *	- MSI/MSI-X
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.320 2015/05/04 10:10:42 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define	WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */
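
/*
 * Illustrative note (not from the original source): the ring index
 * macros above rely on the ring sizes being powers of two, so a
 * bitwise AND with (size - 1) implements the wrap-around.  For
 * example, with WM_NTXDESC(sc) == 256:
 *
 *	WM_NEXTTX(sc, 254) == 255
 *	WM_NEXTTX(sc, 255) == ((255 + 1) & 255) == 0
 */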

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
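
/*
 * Illustrative note (not from the original source): WM_CDTXOFF() and
 * WM_CDRXOFF() compute byte offsets into the control-data clump with
 * offsetof(), e.g.:
 *
 *	WM_CDRXOFF(1) == offsetof(struct wm_control_data_82544,
 *	    wcd_rxdescs[1])
 *
 * which is sizeof(wiseman_rxdesc_t) bytes past the start of the clump.
 */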

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t	swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define	WM_TX_LOCK(_sc)		if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define	WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define	WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define	WM_RX_LOCK(_sc)		if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define	WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define	WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define	WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define	WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define	WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
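
/*
 * Illustrative note (not from the original source): the lock macros
 * degrade to no-ops when the mutex pointers were never allocated
 * (presumably non-MPSAFE configurations), so callers can use them
 * unconditionally:
 *
 *	WM_TX_LOCK(sc);
 *	... modify Tx ring state ...
 *	WM_TX_UNLOCK(sc);
 */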

#ifdef WM_MPSAFE
#define	CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define	CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
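
/*
 * Illustrative note (not from the original source): sc_rxtailp always
 * points at the pointer to update next (initially &sc_rxhead), so
 * WM_RXCHAIN_LINK() appends an mbuf in O(1) without walking the chain.
 * Linking m1 then m2 yields sc_rxhead -> m1 -> m2, with sc_rxtailp ==
 * &m2->m_next.
 */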

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
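
/*
 * Illustrative note (not from the original source): CSR_WRITE_FLUSH()
 * forces posted writes out to the chip by reading a harmless register
 * (STATUS); the read cannot complete until earlier writes on the bus
 * have been delivered.
 */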

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
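
/*
 * Illustrative note (not from the original source): the _LO/_HI pairs
 * split a descriptor ring address for the chip's 64-bit address
 * registers.  On platforms with a 32-bit bus_addr_t the high word is
 * simply 0; the sizeof() test is a compile-time constant, so the
 * unused arm is optimized away.
 */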

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
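
/*
 * Illustrative note (not from the original source): WM_CDTXSYNC()
 * handles descriptor ranges that wrap past the end of the ring by
 * issuing two bus_dmamap_sync() calls.  E.g. with a 256-entry ring,
 * syncing 4 descriptors starting at index 254 syncs [254, 256) and
 * then [0, 2).
 */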

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
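
/*
 * Illustrative note (not from the original source): with the usual
 * sc_align_tweak of 2, the 14-byte Ethernet header ends at offset
 * 2 + 14 == 16, so the IP header that follows it is 4-byte aligned.
 */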

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_mediainit(struct wm_softc *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* Lock, detect NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571, WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571, WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571, WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571, WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571, WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ 0, 0,
	  NULL,
	  0, 0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
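
/*
 * Illustrative note (not from the original source): the I/O-mapped
 * access above is indirect; the register offset is written to the
 * first dword of the I/O window and the data is then read or written
 * through the second dword.
 */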

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
			 * That's not a problem, because those newer chips
			 * don't have this bug in the first place.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
1472 preg = pci_conf_read(pc, pa->pa_tag, i);
1473 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1474 aprint_error_dev(sc->sc_dev,
1475 "WARNING: I/O BAR at zero.\n");
1476 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1477 0, &sc->sc_iot, &sc->sc_ioh,
1478 NULL, &sc->sc_ios) == 0) {
1479 sc->sc_flags |= WM_F_IOH_VALID;
1480 } else {
1481 aprint_error_dev(sc->sc_dev,
1482 "WARNING: unable to map I/O space\n");
1483 }
1484 }
1485
1486 }
1487
1488 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1489 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1490 preg |= PCI_COMMAND_MASTER_ENABLE;
1491 if (sc->sc_type < WM_T_82542_2_1)
1492 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1493 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1494
1495 /* power up chip */
1496 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1497 NULL)) && error != EOPNOTSUPP) {
1498 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1499 return;
1500 }
1501
1502 /*
1503 * Map and establish our interrupt.
1504 */
1505 if (pci_intr_map(pa, &ih)) {
1506 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1507 return;
1508 }
1509 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
1510 #ifdef WM_MPSAFE
1511 pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
1512 #endif
1513 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
1514 if (sc->sc_ih == NULL) {
1515 aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1516 if (intrstr != NULL)
1517 aprint_error(" at %s", intrstr);
1518 aprint_error("\n");
1519 return;
1520 }
1521 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1522
1523 /*
1524 * Check the function ID (unit number of the chip).
1525 */
1526 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1527 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1528 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1529 || (sc->sc_type == WM_T_82580)
1530 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1531 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1532 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1533 else
1534 sc->sc_funcid = 0;
1535
1536 /*
1537 * Determine a few things about the bus we're connected to.
1538 */
1539 if (sc->sc_type < WM_T_82543) {
1540 /* We don't really know the bus characteristics here. */
1541 sc->sc_bus_speed = 33;
1542 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
1547 sc->sc_flags |= WM_F_CSA;
1548 sc->sc_bus_speed = 66;
1549 aprint_verbose_dev(sc->sc_dev,
1550 "Communication Streaming Architecture\n");
1551 if (sc->sc_type == WM_T_82547) {
1552 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1553 callout_setfunc(&sc->sc_txfifo_ch,
1554 wm_82547_txfifo_stall, sc);
1555 aprint_verbose_dev(sc->sc_dev,
1556 "using 82547 Tx FIFO stall work-around\n");
1557 }
1558 } else if (sc->sc_type >= WM_T_82571) {
1559 sc->sc_flags |= WM_F_PCIE;
1560 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1561 && (sc->sc_type != WM_T_ICH10)
1562 && (sc->sc_type != WM_T_PCH)
1563 && (sc->sc_type != WM_T_PCH2)
1564 && (sc->sc_type != WM_T_PCH_LPT)) {
1565 /* ICH* and PCH* have no PCIe capability registers */
1566 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1567 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1568 NULL) == 0)
1569 aprint_error_dev(sc->sc_dev,
1570 "unable to find PCIe capability\n");
1571 }
1572 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1573 } else {
1574 reg = CSR_READ(sc, WMREG_STATUS);
1575 if (reg & STATUS_BUS64)
1576 sc->sc_flags |= WM_F_BUS64;
1577 if ((reg & STATUS_PCIX_MODE) != 0) {
1578 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1579
1580 sc->sc_flags |= WM_F_PCIX;
1581 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1582 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1583 aprint_error_dev(sc->sc_dev,
1584 "unable to find PCIX capability\n");
1585 else if (sc->sc_type != WM_T_82545_3 &&
1586 sc->sc_type != WM_T_82546_3) {
1587 /*
1588 * Work around a problem caused by the BIOS
1589 * setting the max memory read byte count
1590 * incorrectly.
1591 */
1592 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1593 sc->sc_pcixe_capoff + PCIX_CMD);
1594 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1595 sc->sc_pcixe_capoff + PCIX_STATUS);
1596
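				/*
				 * MMRBC is encoded as a power-of-two multiple
				 * of 512 bytes; clamp the commanded value to
				 * the advertised maximum below.
				 */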
1597 bytecnt =
1598 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1599 PCIX_CMD_BYTECNT_SHIFT;
1600 maxb =
1601 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1602 PCIX_STATUS_MAXB_SHIFT;
1603 if (bytecnt > maxb) {
1604 aprint_verbose_dev(sc->sc_dev,
1605 "resetting PCI-X MMRBC: %d -> %d\n",
1606 512 << bytecnt, 512 << maxb);
1607 pcix_cmd = (pcix_cmd &
1608 ~PCIX_CMD_BYTECNT_MASK) |
1609 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1610 pci_conf_write(pa->pa_pc, pa->pa_tag,
1611 sc->sc_pcixe_capoff + PCIX_CMD,
1612 pcix_cmd);
1613 }
1614 }
1615 }
1616 /*
1617 * The quad port adapter is special; it has a PCIX-PCIX
1618 * bridge on the board, and can run the secondary bus at
1619 * a higher speed.
1620 */
1621 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1622 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1623 : 66;
1624 } else if (sc->sc_flags & WM_F_PCIX) {
1625 switch (reg & STATUS_PCIXSPD_MASK) {
1626 case STATUS_PCIXSPD_50_66:
1627 sc->sc_bus_speed = 66;
1628 break;
1629 case STATUS_PCIXSPD_66_100:
1630 sc->sc_bus_speed = 100;
1631 break;
1632 case STATUS_PCIXSPD_100_133:
1633 sc->sc_bus_speed = 133;
1634 break;
1635 default:
1636 aprint_error_dev(sc->sc_dev,
1637 "unknown PCIXSPD %d; assuming 66MHz\n",
1638 reg & STATUS_PCIXSPD_MASK);
1639 sc->sc_bus_speed = 66;
1640 break;
1641 }
1642 } else
1643 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1644 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1645 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1646 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1647 }
1648
1649 /*
1650 * Allocate the control data structures, and create and load the
1651 * DMA map for it.
1652 *
1653 * NOTE: All Tx descriptors must be in the same 4G segment of
1654 * memory. So must Rx descriptors. We simplify by allocating
1655 * both sets within the same 4G segment.
1656 */
1657 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1658 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1659 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1660 sizeof(struct wm_control_data_82542) :
1661 sizeof(struct wm_control_data_82544);
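	/* The 4GB boundary argument below enforces that constraint. */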
1662 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1663 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1664 &sc->sc_cd_rseg, 0)) != 0) {
1665 aprint_error_dev(sc->sc_dev,
1666 "unable to allocate control data, error = %d\n",
1667 error);
1668 goto fail_0;
1669 }
1670
1671 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1672 sc->sc_cd_rseg, sc->sc_cd_size,
1673 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1674 aprint_error_dev(sc->sc_dev,
1675 "unable to map control data, error = %d\n", error);
1676 goto fail_1;
1677 }
1678
1679 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1680 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1681 aprint_error_dev(sc->sc_dev,
1682 "unable to create control data DMA map, error = %d\n",
1683 error);
1684 goto fail_2;
1685 }
1686
1687 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1688 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1689 aprint_error_dev(sc->sc_dev,
1690 "unable to load control data DMA map, error = %d\n",
1691 error);
1692 goto fail_3;
1693 }
1694
1695 /* Create the transmit buffer DMA maps. */
1696 WM_TXQUEUELEN(sc) =
1697 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1698 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1699 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1700 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1701 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1702 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1703 aprint_error_dev(sc->sc_dev,
1704 "unable to create Tx DMA map %d, error = %d\n",
1705 i, error);
1706 goto fail_4;
1707 }
1708 }
1709
1710 /* Create the receive buffer DMA maps. */
1711 for (i = 0; i < WM_NRXDESC; i++) {
1712 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1713 MCLBYTES, 0, 0,
1714 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1715 aprint_error_dev(sc->sc_dev,
1716 "unable to create Rx DMA map %d error = %d\n",
1717 i, error);
1718 goto fail_5;
1719 }
1720 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1721 }
1722
1723 /* clear interesting stat counters */
1724 CSR_READ(sc, WMREG_COLC);
1725 CSR_READ(sc, WMREG_RXERRC);
1726
	/* Switch PHY control from SMBus to PCIe */
1728 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1729 || (sc->sc_type == WM_T_PCH_LPT))
1730 wm_smbustopci(sc);
1731
1732 /* Reset the chip to a known state. */
1733 wm_reset(sc);
1734
1735 /* Get some information about the EEPROM. */
1736 switch (sc->sc_type) {
1737 case WM_T_82542_2_0:
1738 case WM_T_82542_2_1:
1739 case WM_T_82543:
1740 case WM_T_82544:
1741 /* Microwire */
1742 sc->sc_nvm_wordsize = 64;
1743 sc->sc_nvm_addrbits = 6;
1744 break;
1745 case WM_T_82540:
1746 case WM_T_82545:
1747 case WM_T_82545_3:
1748 case WM_T_82546:
1749 case WM_T_82546_3:
1750 /* Microwire */
1751 reg = CSR_READ(sc, WMREG_EECD);
1752 if (reg & EECD_EE_SIZE) {
1753 sc->sc_nvm_wordsize = 256;
1754 sc->sc_nvm_addrbits = 8;
1755 } else {
1756 sc->sc_nvm_wordsize = 64;
1757 sc->sc_nvm_addrbits = 6;
1758 }
1759 sc->sc_flags |= WM_F_LOCK_EECD;
1760 break;
1761 case WM_T_82541:
1762 case WM_T_82541_2:
1763 case WM_T_82547:
1764 case WM_T_82547_2:
1765 sc->sc_flags |= WM_F_LOCK_EECD;
1766 reg = CSR_READ(sc, WMREG_EECD);
1767 if (reg & EECD_EE_TYPE) {
1768 /* SPI */
1769 sc->sc_flags |= WM_F_EEPROM_SPI;
1770 wm_nvm_set_addrbits_size_eecd(sc);
1771 } else {
1772 /* Microwire */
1773 if ((reg & EECD_EE_ABITS) != 0) {
1774 sc->sc_nvm_wordsize = 256;
1775 sc->sc_nvm_addrbits = 8;
1776 } else {
1777 sc->sc_nvm_wordsize = 64;
1778 sc->sc_nvm_addrbits = 6;
1779 }
1780 }
1781 break;
1782 case WM_T_82571:
1783 case WM_T_82572:
1784 /* SPI */
1785 sc->sc_flags |= WM_F_EEPROM_SPI;
1786 wm_nvm_set_addrbits_size_eecd(sc);
1787 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1788 break;
1789 case WM_T_82573:
1790 sc->sc_flags |= WM_F_LOCK_SWSM;
1791 /* FALLTHROUGH */
1792 case WM_T_82574:
1793 case WM_T_82583:
1794 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1795 sc->sc_flags |= WM_F_EEPROM_FLASH;
1796 sc->sc_nvm_wordsize = 2048;
1797 } else {
1798 /* SPI */
1799 sc->sc_flags |= WM_F_EEPROM_SPI;
1800 wm_nvm_set_addrbits_size_eecd(sc);
1801 }
1802 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1803 break;
1804 case WM_T_82575:
1805 case WM_T_82576:
1806 case WM_T_82580:
1807 case WM_T_I350:
1808 case WM_T_I354:
1809 case WM_T_80003:
1810 /* SPI */
1811 sc->sc_flags |= WM_F_EEPROM_SPI;
1812 wm_nvm_set_addrbits_size_eecd(sc);
1813 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1814 | WM_F_LOCK_SWSM;
1815 break;
1816 case WM_T_ICH8:
1817 case WM_T_ICH9:
1818 case WM_T_ICH10:
1819 case WM_T_PCH:
1820 case WM_T_PCH2:
1821 case WM_T_PCH_LPT:
1822 /* FLASH */
1823 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1824 sc->sc_nvm_wordsize = 2048;
1825 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1826 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1827 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1828 aprint_error_dev(sc->sc_dev,
1829 "can't map FLASH registers\n");
1830 goto fail_5;
1831 }
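		/*
		 * GFPREG gives the flash region's base and limit in units
		 * of flash sectors.  The region holds two NVM banks, so
		 * halve it and convert from bytes to 16-bit words.
		 */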
1832 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1833 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1834 ICH_FLASH_SECTOR_SIZE;
1835 sc->sc_ich8_flash_bank_size =
1836 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1837 sc->sc_ich8_flash_bank_size -=
1838 (reg & ICH_GFPREG_BASE_MASK);
1839 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1840 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1841 break;
1842 case WM_T_I210:
1843 case WM_T_I211:
1844 wm_nvm_set_addrbits_size_eecd(sc);
1845 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1846 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1847 break;
1848 default:
1849 break;
1850 }
1851
1852 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1853 switch (sc->sc_type) {
1854 case WM_T_82571:
1855 case WM_T_82572:
1856 reg = CSR_READ(sc, WMREG_SWSM2);
1857 if ((reg & SWSM2_LOCK) == 0) {
1858 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1859 force_clear_smbi = true;
1860 } else
1861 force_clear_smbi = false;
1862 break;
1863 case WM_T_82573:
1864 case WM_T_82574:
1865 case WM_T_82583:
1866 force_clear_smbi = true;
1867 break;
1868 default:
1869 force_clear_smbi = false;
1870 break;
1871 }
1872 if (force_clear_smbi) {
1873 reg = CSR_READ(sc, WMREG_SWSM);
1874 if ((reg & SWSM_SMBI) != 0)
1875 aprint_error_dev(sc->sc_dev,
1876 "Please update the Bootagent\n");
1877 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1878 }
1879
	/*
	 * Defer printing the EEPROM type until after verifying the
	 * checksum.  This allows the EEPROM type to be printed correctly
	 * in the case that no EEPROM is attached.
	 */
1885 /*
1886 * Validate the EEPROM checksum. If the checksum fails, flag
1887 * this for later, so we can fail future reads from the EEPROM.
1888 */
1889 if (wm_nvm_validate_checksum(sc)) {
		/*
		 * Check again, because some PCIe parts fail the first
		 * check due to the link being in a sleep state.
		 */
1894 if (wm_nvm_validate_checksum(sc))
1895 sc->sc_flags |= WM_F_EEPROM_INVALID;
1896 }
1897
1898 /* Set device properties (macflags) */
1899 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1900
1901 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1902 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1903 else {
1904 aprint_verbose_dev(sc->sc_dev, "%u words ",
1905 sc->sc_nvm_wordsize);
1906 if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1907 aprint_verbose("FLASH(HW)\n");
1908 } else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1909 aprint_verbose("FLASH\n");
1910 } else {
1911 if (sc->sc_flags & WM_F_EEPROM_SPI)
1912 eetype = "SPI";
1913 else
1914 eetype = "MicroWire";
1915 aprint_verbose("(%d address bits) %s EEPROM\n",
1916 sc->sc_nvm_addrbits, eetype);
1917 }
1918 }
1919
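	/*
	 * If management mode is active, take control of the hardware
	 * from the firmware.
	 */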
1920 switch (sc->sc_type) {
1921 case WM_T_82571:
1922 case WM_T_82572:
1923 case WM_T_82573:
1924 case WM_T_82574:
1925 case WM_T_82583:
1926 case WM_T_80003:
1927 case WM_T_ICH8:
1928 case WM_T_ICH9:
1929 case WM_T_ICH10:
1930 case WM_T_PCH:
1931 case WM_T_PCH2:
1932 case WM_T_PCH_LPT:
1933 if (wm_check_mng_mode(sc) != 0)
1934 wm_get_hw_control(sc);
1935 break;
1936 default:
1937 break;
1938 }
1939 wm_get_wakeup(sc);
	/*
	 * Read the Ethernet address from the EEPROM if it wasn't
	 * found in the device properties first.
	 */
1944 ea = prop_dictionary_get(dict, "mac-address");
1945 if (ea != NULL) {
1946 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1947 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1948 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1949 } else {
1950 if (wm_read_mac_addr(sc, enaddr) != 0) {
1951 aprint_error_dev(sc->sc_dev,
1952 "unable to read Ethernet address\n");
1953 goto fail_5;
1954 }
1955 }
1956
1957 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1958 ether_sprintf(enaddr));
1959
1960 /*
1961 * Read the config info from the EEPROM, and set up various
1962 * bits in the control registers based on their contents.
1963 */
1964 pn = prop_dictionary_get(dict, "i82543-cfg1");
1965 if (pn != NULL) {
1966 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1967 cfg1 = (uint16_t) prop_number_integer_value(pn);
1968 } else {
1969 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
1970 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1971 goto fail_5;
1972 }
1973 }
1974
1975 pn = prop_dictionary_get(dict, "i82543-cfg2");
1976 if (pn != NULL) {
1977 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1978 cfg2 = (uint16_t) prop_number_integer_value(pn);
1979 } else {
1980 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
1981 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1982 goto fail_5;
1983 }
1984 }
1985
1986 /* check for WM_F_WOL */
1987 switch (sc->sc_type) {
1988 case WM_T_82542_2_0:
1989 case WM_T_82542_2_1:
1990 case WM_T_82543:
1991 /* dummy? */
1992 eeprom_data = 0;
1993 apme_mask = NVM_CFG3_APME;
1994 break;
1995 case WM_T_82544:
1996 apme_mask = NVM_CFG2_82544_APM_EN;
1997 eeprom_data = cfg2;
1998 break;
1999 case WM_T_82546:
2000 case WM_T_82546_3:
2001 case WM_T_82571:
2002 case WM_T_82572:
2003 case WM_T_82573:
2004 case WM_T_82574:
2005 case WM_T_82583:
2006 case WM_T_80003:
2007 default:
2008 apme_mask = NVM_CFG3_APME;
2009 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2010 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2011 break;
2012 case WM_T_82575:
2013 case WM_T_82576:
2014 case WM_T_82580:
2015 case WM_T_I350:
2016 case WM_T_I354: /* XXX ok? */
2017 case WM_T_ICH8:
2018 case WM_T_ICH9:
2019 case WM_T_ICH10:
2020 case WM_T_PCH:
2021 case WM_T_PCH2:
2022 case WM_T_PCH_LPT:
2023 /* XXX The funcid should be checked on some devices */
2024 apme_mask = WUC_APME;
2025 eeprom_data = CSR_READ(sc, WMREG_WUC);
2026 break;
2027 }
2028
	/* Set the WM_F_WOL flag based on the APM enable bit read above */
2030 if ((eeprom_data & apme_mask) != 0)
2031 sc->sc_flags |= WM_F_WOL;
2032 #ifdef WM_DEBUG
2033 if ((sc->sc_flags & WM_F_WOL) != 0)
2034 printf("WOL\n");
2035 #endif
2036
	/*
	 * XXX need special handling for some multiple port cards
	 * to disable a particular port.
	 */
2041
2042 if (sc->sc_type >= WM_T_82544) {
2043 pn = prop_dictionary_get(dict, "i82543-swdpin");
2044 if (pn != NULL) {
2045 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2046 swdpin = (uint16_t) prop_number_integer_value(pn);
2047 } else {
2048 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2049 aprint_error_dev(sc->sc_dev,
2050 "unable to read SWDPIN\n");
2051 goto fail_5;
2052 }
2053 }
2054 }
2055
2056 if (cfg1 & NVM_CFG1_ILOS)
2057 sc->sc_ctrl |= CTRL_ILOS;
2058 if (sc->sc_type >= WM_T_82544) {
2059 sc->sc_ctrl |=
2060 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2061 CTRL_SWDPIO_SHIFT;
2062 sc->sc_ctrl |=
2063 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2064 CTRL_SWDPINS_SHIFT;
2065 } else {
2066 sc->sc_ctrl |=
2067 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2068 CTRL_SWDPIO_SHIFT;
2069 }
2070
2071 #if 0
2072 if (sc->sc_type >= WM_T_82544) {
2073 if (cfg1 & NVM_CFG1_IPS0)
2074 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2075 if (cfg1 & NVM_CFG1_IPS1)
2076 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2077 sc->sc_ctrl_ext |=
2078 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2079 CTRL_EXT_SWDPIO_SHIFT;
2080 sc->sc_ctrl_ext |=
2081 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2082 CTRL_EXT_SWDPINS_SHIFT;
2083 } else {
2084 sc->sc_ctrl_ext |=
2085 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2086 CTRL_EXT_SWDPIO_SHIFT;
2087 }
2088 #endif
2089
2090 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2091 #if 0
2092 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2093 #endif
2094
2095 /*
2096 * Set up some register offsets that are different between
2097 * the i82542 and the i82543 and later chips.
2098 */
2099 if (sc->sc_type < WM_T_82543) {
2100 sc->sc_rdt_reg = WMREG_OLD_RDT0;
2101 sc->sc_tdt_reg = WMREG_OLD_TDT;
2102 } else {
2103 sc->sc_rdt_reg = WMREG_RDT;
2104 sc->sc_tdt_reg = WMREG_TDT;
2105 }
2106
2107 if (sc->sc_type == WM_T_PCH) {
2108 uint16_t val;
2109
2110 /* Save the NVM K1 bit setting */
2111 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2112
2113 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2114 sc->sc_nvm_k1_enabled = 1;
2115 else
2116 sc->sc_nvm_k1_enabled = 0;
2117 }
2118
	/*
	 * Determine if we're in TBI, GMII or SGMII mode, and initialize
	 * the media structures accordingly.
	 */
2123 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2124 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2125 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2126 || sc->sc_type == WM_T_82573
2127 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2128 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2129 wm_gmii_mediainit(sc, wmp->wmp_product);
2130 } else if (sc->sc_type < WM_T_82543 ||
2131 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2132 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2133 aprint_error_dev(sc->sc_dev,
2134 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2135 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2136 }
2137 wm_tbi_mediainit(sc);
2138 } else {
2139 switch (sc->sc_type) {
2140 case WM_T_82575:
2141 case WM_T_82576:
2142 case WM_T_82580:
2143 case WM_T_I350:
2144 case WM_T_I354:
2145 case WM_T_I210:
2146 case WM_T_I211:
2147 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2148 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2149 switch (link_mode) {
2150 case CTRL_EXT_LINK_MODE_1000KX:
2151 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2152 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2153 break;
2154 case CTRL_EXT_LINK_MODE_SGMII:
2155 if (wm_sgmii_uses_mdio(sc)) {
2156 aprint_verbose_dev(sc->sc_dev,
2157 "SGMII(MDIO)\n");
2158 sc->sc_flags |= WM_F_SGMII;
2159 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2160 break;
2161 }
2162 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2163 /*FALLTHROUGH*/
2164 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2165 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2166 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2167 if (link_mode
2168 == CTRL_EXT_LINK_MODE_SGMII) {
2169 sc->sc_mediatype
2170 = WM_MEDIATYPE_COPPER;
2171 sc->sc_flags |= WM_F_SGMII;
2172 } else {
2173 sc->sc_mediatype
2174 = WM_MEDIATYPE_SERDES;
2175 aprint_verbose_dev(sc->sc_dev,
2176 "SERDES\n");
2177 }
2178 break;
2179 }
2180 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2181 aprint_verbose_dev(sc->sc_dev,
2182 "SERDES\n");
2183
2184 /* Change current link mode setting */
2185 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2186 switch (sc->sc_mediatype) {
2187 case WM_MEDIATYPE_COPPER:
2188 reg |= CTRL_EXT_LINK_MODE_SGMII;
2189 break;
2190 case WM_MEDIATYPE_SERDES:
2191 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2192 break;
2193 default:
2194 break;
2195 }
2196 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2197 break;
2198 case CTRL_EXT_LINK_MODE_GMII:
2199 default:
2200 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2201 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2202 break;
2203 }
2204
			if ((sc->sc_flags & WM_F_SGMII) != 0)
				reg |= CTRL_EXT_I2C_ENA;
			else
				reg &= ~CTRL_EXT_I2C_ENA;
2210 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2211
2212 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2213 wm_gmii_mediainit(sc, wmp->wmp_product);
2214 else
2215 wm_tbi_mediainit(sc);
2216 break;
2217 default:
2218 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2219 aprint_error_dev(sc->sc_dev,
2220 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2221 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2222 wm_gmii_mediainit(sc, wmp->wmp_product);
2223 }
2224 }
2225
2226 ifp = &sc->sc_ethercom.ec_if;
2227 xname = device_xname(sc->sc_dev);
2228 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2229 ifp->if_softc = sc;
2230 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2231 ifp->if_ioctl = wm_ioctl;
2232 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2233 ifp->if_start = wm_nq_start;
2234 else
2235 ifp->if_start = wm_start;
2236 ifp->if_watchdog = wm_watchdog;
2237 ifp->if_init = wm_init;
2238 ifp->if_stop = wm_stop;
2239 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2240 IFQ_SET_READY(&ifp->if_snd);
2241
2242 /* Check for jumbo frame */
2243 switch (sc->sc_type) {
2244 case WM_T_82573:
2245 /* XXX limited to 9234 if ASPM is disabled */
2246 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &io3);
2247 if ((io3 & NVM_3GIO_3_ASPM_MASK) != 0)
2248 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2249 break;
2250 case WM_T_82571:
2251 case WM_T_82572:
2252 case WM_T_82574:
2253 case WM_T_82575:
2254 case WM_T_82576:
2255 case WM_T_82580:
2256 case WM_T_I350:
2257 case WM_T_I354: /* XXXX ok? */
2258 case WM_T_I210:
2259 case WM_T_I211:
2260 case WM_T_80003:
2261 case WM_T_ICH9:
2262 case WM_T_ICH10:
2263 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2264 case WM_T_PCH_LPT:
2265 /* XXX limited to 9234 */
2266 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2267 break;
2268 case WM_T_PCH:
2269 /* XXX limited to 4096 */
2270 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2271 break;
2272 case WM_T_82542_2_0:
2273 case WM_T_82542_2_1:
2274 case WM_T_82583:
2275 case WM_T_ICH8:
2276 /* No support for jumbo frame */
2277 break;
2278 default:
2279 /* ETHER_MAX_LEN_JUMBO */
2280 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2281 break;
2282 }
2283
	/* If we're an i82543 or greater, we can support VLANs. */
2285 if (sc->sc_type >= WM_T_82543)
2286 sc->sc_ethercom.ec_capabilities |=
2287 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2288
	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
2293 if (sc->sc_type >= WM_T_82543) {
2294 ifp->if_capabilities |=
2295 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2296 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2297 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2298 IFCAP_CSUM_TCPv6_Tx |
2299 IFCAP_CSUM_UDPv6_Tx;
2300 }
2301
2302 /*
2303 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2304 *
2305 * 82541GI (8086:1076) ... no
2306 * 82572EI (8086:10b9) ... yes
2307 */
2308 if (sc->sc_type >= WM_T_82571) {
2309 ifp->if_capabilities |=
2310 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2311 }
2312
2313 /*
	 * If we're an i82544 or greater (except the i82547), we can do
2315 * TCP segmentation offload.
2316 */
2317 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2318 ifp->if_capabilities |= IFCAP_TSOv4;
2319 }
2320
2321 if (sc->sc_type >= WM_T_82571) {
2322 ifp->if_capabilities |= IFCAP_TSOv6;
2323 }
2324
2325 #ifdef WM_MPSAFE
2326 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2327 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2328 #else
2329 sc->sc_tx_lock = NULL;
2330 sc->sc_rx_lock = NULL;
2331 #endif
2332
2333 /* Attach the interface. */
2334 if_attach(ifp);
2335 ether_ifattach(ifp, enaddr);
2336 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2337 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2338 RND_FLAG_DEFAULT);
2339
2340 #ifdef WM_EVENT_COUNTERS
2341 /* Attach event counters. */
2342 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2343 NULL, xname, "txsstall");
2344 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2345 NULL, xname, "txdstall");
2346 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2347 NULL, xname, "txfifo_stall");
2348 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2349 NULL, xname, "txdw");
2350 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2351 NULL, xname, "txqe");
2352 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2353 NULL, xname, "rxintr");
2354 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2355 NULL, xname, "linkintr");
2356
2357 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2358 NULL, xname, "rxipsum");
2359 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2360 NULL, xname, "rxtusum");
2361 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2362 NULL, xname, "txipsum");
2363 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2364 NULL, xname, "txtusum");
2365 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2366 NULL, xname, "txtusum6");
2367
2368 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2369 NULL, xname, "txtso");
2370 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2371 NULL, xname, "txtso6");
2372 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2373 NULL, xname, "txtsopain");
2374
2375 for (i = 0; i < WM_NTXSEGS; i++) {
2376 snprintf(wm_txseg_evcnt_names[i],
2377 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2378 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2379 NULL, xname, wm_txseg_evcnt_names[i]);
2380 }
2381
2382 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2383 NULL, xname, "txdrop");
2384
2385 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2386 NULL, xname, "tu");
2387
2388 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2389 NULL, xname, "tx_xoff");
2390 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2391 NULL, xname, "tx_xon");
2392 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2393 NULL, xname, "rx_xoff");
2394 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2395 NULL, xname, "rx_xon");
2396 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2397 NULL, xname, "rx_macctl");
2398 #endif /* WM_EVENT_COUNTERS */
2399
2400 if (pmf_device_register(self, wm_suspend, wm_resume))
2401 pmf_class_network_register(self, ifp);
2402 else
2403 aprint_error_dev(self, "couldn't establish power handler\n");
2404
2405 sc->sc_flags |= WM_F_ATTACHED;
2406 return;
2407
2408 /*
2409 * Free any resources we've allocated during the failed attach
2410 * attempt. Do this in reverse order and fall through.
2411 */
2412 fail_5:
2413 for (i = 0; i < WM_NRXDESC; i++) {
2414 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2415 bus_dmamap_destroy(sc->sc_dmat,
2416 sc->sc_rxsoft[i].rxs_dmamap);
2417 }
2418 fail_4:
2419 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2420 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2421 bus_dmamap_destroy(sc->sc_dmat,
2422 sc->sc_txsoft[i].txs_dmamap);
2423 }
2424 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2425 fail_3:
2426 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2427 fail_2:
2428 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2429 sc->sc_cd_size);
2430 fail_1:
2431 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2432 fail_0:
2433 return;
2434 }
2435
2436 /* The detach function (ca_detach) */
2437 static int
2438 wm_detach(device_t self, int flags __unused)
2439 {
2440 struct wm_softc *sc = device_private(self);
2441 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2442 int i;
2443 #ifndef WM_MPSAFE
2444 int s;
2445 #endif
2446
2447 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2448 return 0;
2449
2450 #ifndef WM_MPSAFE
2451 s = splnet();
2452 #endif
	/* Stop the interface.  Callouts are stopped inside wm_stop(). */
2454 wm_stop(ifp, 1);
2455
2456 #ifndef WM_MPSAFE
2457 splx(s);
2458 #endif
2459
2460 pmf_device_deregister(self);
2461
2462 /* Tell the firmware about the release */
2463 WM_BOTH_LOCK(sc);
2464 wm_release_manageability(sc);
2465 wm_release_hw_control(sc);
2466 WM_BOTH_UNLOCK(sc);
2467
2468 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2469
2470 /* Delete all remaining media. */
2471 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2472
2473 ether_ifdetach(ifp);
2474 if_detach(ifp);
2475
2476
2477 /* Unload RX dmamaps and free mbufs */
2478 WM_RX_LOCK(sc);
2479 wm_rxdrain(sc);
2480 WM_RX_UNLOCK(sc);
2481 /* Must unlock here */
2482
	/*
	 * Free the DMA maps; this mirrors the failure path at the
	 * end of wm_attach().
	 */
2484 for (i = 0; i < WM_NRXDESC; i++) {
2485 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2486 bus_dmamap_destroy(sc->sc_dmat,
2487 sc->sc_rxsoft[i].rxs_dmamap);
2488 }
2489 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2490 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2491 bus_dmamap_destroy(sc->sc_dmat,
2492 sc->sc_txsoft[i].txs_dmamap);
2493 }
2494 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2495 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2496 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2497 sc->sc_cd_size);
2498 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2499
2500 /* Disestablish the interrupt handler */
2501 if (sc->sc_ih != NULL) {
2502 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2503 sc->sc_ih = NULL;
2504 }
2505
2506 /* Unmap the registers */
2507 if (sc->sc_ss) {
2508 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2509 sc->sc_ss = 0;
2510 }
2511
2512 if (sc->sc_ios) {
2513 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2514 sc->sc_ios = 0;
2515 }
2516
2517 if (sc->sc_tx_lock)
2518 mutex_obj_free(sc->sc_tx_lock);
2519 if (sc->sc_rx_lock)
2520 mutex_obj_free(sc->sc_rx_lock);
2521
2522 return 0;
2523 }
2524
2525 static bool
2526 wm_suspend(device_t self, const pmf_qual_t *qual)
2527 {
2528 struct wm_softc *sc = device_private(self);
2529
2530 wm_release_manageability(sc);
2531 wm_release_hw_control(sc);
2532 #ifdef WM_WOL
2533 wm_enable_wakeup(sc);
2534 #endif
2535
2536 return true;
2537 }
2538
2539 static bool
2540 wm_resume(device_t self, const pmf_qual_t *qual)
2541 {
2542 struct wm_softc *sc = device_private(self);
2543
2544 wm_init_manageability(sc);
2545
2546 return true;
2547 }
2548
2549 /*
2550 * wm_watchdog: [ifnet interface function]
2551 *
2552 * Watchdog timer handler.
2553 */
2554 static void
2555 wm_watchdog(struct ifnet *ifp)
2556 {
2557 struct wm_softc *sc = ifp->if_softc;
2558
2559 /*
2560 * Since we're using delayed interrupts, sweep up
2561 * before we report an error.
2562 */
2563 WM_TX_LOCK(sc);
2564 wm_txintr(sc);
2565 WM_TX_UNLOCK(sc);
2566
2567 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2568 #ifdef WM_DEBUG
2569 int i, j;
2570 struct wm_txsoft *txs;
2571 #endif
2572 log(LOG_ERR,
2573 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2574 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2575 sc->sc_txnext);
2576 ifp->if_oerrors++;
2577 #ifdef WM_DEBUG
2578 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2579 i = WM_NEXTTXS(sc, i)) {
2580 txs = &sc->sc_txsoft[i];
2581 printf("txs %d tx %d -> %d\n",
2582 i, txs->txs_firstdesc, txs->txs_lastdesc);
2583 for (j = txs->txs_firstdesc; ;
2584 j = WM_NEXTTX(sc, j)) {
2585 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2586 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2587 printf("\t %#08x%08x\n",
2588 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2589 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2590 if (j == txs->txs_lastdesc)
2591 break;
2592 }
2593 }
2594 #endif
2595 /* Reset the interface. */
2596 (void) wm_init(ifp);
2597 }
2598
2599 /* Try to get more packets going. */
2600 ifp->if_start(ifp);
2601 }
2602
2603 /*
2604 * wm_tick:
2605 *
2606 * One second timer, used to check link status, sweep up
2607 * completed transmit jobs, etc.
2608 */
2609 static void
2610 wm_tick(void *arg)
2611 {
2612 struct wm_softc *sc = arg;
2613 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2614 #ifndef WM_MPSAFE
2615 int s;
2616
2617 s = splnet();
2618 #endif
2619
2620 WM_TX_LOCK(sc);
2621
2622 if (sc->sc_stopping)
2623 goto out;
2624
2625 if (sc->sc_type >= WM_T_82542_2_1) {
2626 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2627 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2628 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2629 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2630 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2631 }
2632
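	/* The statistics registers are clear-on-read; accumulate them. */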
2633 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2634 ifp->if_ierrors += 0ULL + /* ensure quad_t */
2635 + CSR_READ(sc, WMREG_CRCERRS)
2636 + CSR_READ(sc, WMREG_ALGNERRC)
2637 + CSR_READ(sc, WMREG_SYMERRC)
2638 + CSR_READ(sc, WMREG_RXERRC)
2639 + CSR_READ(sc, WMREG_SEC)
2640 + CSR_READ(sc, WMREG_CEXTERR)
2641 + CSR_READ(sc, WMREG_RLEC);
2642 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2643
2644 if (sc->sc_flags & WM_F_HAS_MII)
2645 mii_tick(&sc->sc_mii);
2646 else
2647 wm_tbi_check_link(sc);
2648
2649 out:
2650 WM_TX_UNLOCK(sc);
2651 #ifndef WM_MPSAFE
2652 splx(s);
2653 #endif
2654
2655 if (!sc->sc_stopping)
2656 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2657 }
2658
2659 static int
2660 wm_ifflags_cb(struct ethercom *ec)
2661 {
2662 struct ifnet *ifp = &ec->ec_if;
2663 struct wm_softc *sc = ifp->if_softc;
2664 int change = ifp->if_flags ^ sc->sc_if_flags;
2665 int rc = 0;
2666
2667 WM_BOTH_LOCK(sc);
2668
2669 if (change != 0)
2670 sc->sc_if_flags = ifp->if_flags;
2671
2672 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2673 rc = ENETRESET;
2674 goto out;
2675 }
2676
2677 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2678 wm_set_filter(sc);
2679
2680 wm_set_vlan(sc);
2681
2682 out:
2683 WM_BOTH_UNLOCK(sc);
2684
2685 return rc;
2686 }
2687
2688 /*
2689 * wm_ioctl: [ifnet interface function]
2690 *
2691 * Handle control requests from the operator.
2692 */
2693 static int
2694 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2695 {
2696 struct wm_softc *sc = ifp->if_softc;
2697 struct ifreq *ifr = (struct ifreq *) data;
2698 struct ifaddr *ifa = (struct ifaddr *)data;
2699 struct sockaddr_dl *sdl;
2700 int s, error;
2701
2702 #ifndef WM_MPSAFE
2703 s = splnet();
2704 #endif
2705 switch (cmd) {
2706 case SIOCSIFMEDIA:
2707 case SIOCGIFMEDIA:
2708 WM_BOTH_LOCK(sc);
2709 /* Flow control requires full-duplex mode. */
2710 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2711 (ifr->ifr_media & IFM_FDX) == 0)
2712 ifr->ifr_media &= ~IFM_ETH_FMASK;
2713 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2714 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2715 /* We can do both TXPAUSE and RXPAUSE. */
2716 ifr->ifr_media |=
2717 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2718 }
2719 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2720 }
2721 WM_BOTH_UNLOCK(sc);
2722 #ifdef WM_MPSAFE
2723 s = splnet();
2724 #endif
2725 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2726 #ifdef WM_MPSAFE
2727 splx(s);
2728 #endif
2729 break;
2730 case SIOCINITIFADDR:
2731 WM_BOTH_LOCK(sc);
2732 if (ifa->ifa_addr->sa_family == AF_LINK) {
2733 sdl = satosdl(ifp->if_dl->ifa_addr);
2734 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2735 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2736 /* unicast address is first multicast entry */
2737 wm_set_filter(sc);
2738 error = 0;
2739 WM_BOTH_UNLOCK(sc);
2740 break;
2741 }
2742 WM_BOTH_UNLOCK(sc);
2743 /*FALLTHROUGH*/
2744 default:
2745 #ifdef WM_MPSAFE
2746 s = splnet();
2747 #endif
		/* ether_ioctl() may call wm_start(), so it must run unlocked */
2749 error = ether_ioctl(ifp, cmd, data);
2750 #ifdef WM_MPSAFE
2751 splx(s);
2752 #endif
2753 if (error != ENETRESET)
2754 break;
2755
2756 error = 0;
2757
2758 if (cmd == SIOCSIFCAP) {
2759 error = (*ifp->if_init)(ifp);
2760 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2761 ;
2762 else if (ifp->if_flags & IFF_RUNNING) {
2763 /*
2764 * Multicast list has changed; set the hardware filter
2765 * accordingly.
2766 */
2767 WM_BOTH_LOCK(sc);
2768 wm_set_filter(sc);
2769 WM_BOTH_UNLOCK(sc);
2770 }
2771 break;
2772 }
2773
2774 /* Try to get more packets going. */
2775 ifp->if_start(ifp);
2776
2777 #ifndef WM_MPSAFE
2778 splx(s);
2779 #endif
2780 return error;
2781 }
2782
2783 /* MAC address related */
2784
/*
 * Get the offset of the MAC address and return it.
 * If an error occurs, use offset 0.
 */
2789 static uint16_t
2790 wm_check_alt_mac_addr(struct wm_softc *sc)
2791 {
2792 uint16_t myea[ETHER_ADDR_LEN / 2];
2793 uint16_t offset = NVM_OFF_MACADDR;
2794
2795 /* Try to read alternative MAC address pointer */
2796 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2797 return 0;
2798
	/* Check whether the pointer is valid. */
2800 if ((offset == 0x0000) || (offset == 0xffff))
2801 return 0;
2802
2803 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
	/*
	 * Check whether the alternative MAC address is valid or not.
	 * Some cards have a non-0xffff pointer but don't actually use
	 * an alternative MAC address.
	 *
	 * A valid unicast address must have the multicast/broadcast
	 * bit clear.
	 */
2811 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2812 if (((myea[0] & 0xff) & 0x01) == 0)
2813 return offset; /* Found */
2814
2815 /* Not found */
2816 return 0;
2817 }
2818
2819 static int
2820 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2821 {
2822 uint16_t myea[ETHER_ADDR_LEN / 2];
2823 uint16_t offset = NVM_OFF_MACADDR;
2824 int do_invert = 0;
2825
2826 switch (sc->sc_type) {
2827 case WM_T_82580:
2828 case WM_T_I350:
2829 case WM_T_I354:
2830 /* EEPROM Top Level Partitioning */
2831 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
2832 break;
2833 case WM_T_82571:
2834 case WM_T_82575:
2835 case WM_T_82576:
2836 case WM_T_80003:
2837 case WM_T_I210:
2838 case WM_T_I211:
2839 offset = wm_check_alt_mac_addr(sc);
2840 if (offset == 0)
2841 if ((sc->sc_funcid & 0x01) == 1)
2842 do_invert = 1;
2843 break;
2844 default:
2845 if ((sc->sc_funcid & 0x01) == 1)
2846 do_invert = 1;
2847 break;
2848 }
2849
2850 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2851 myea) != 0)
2852 goto bad;
2853
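	/* The MAC address is stored low byte first in each NVM word. */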
2854 enaddr[0] = myea[0] & 0xff;
2855 enaddr[1] = myea[0] >> 8;
2856 enaddr[2] = myea[1] & 0xff;
2857 enaddr[3] = myea[1] >> 8;
2858 enaddr[4] = myea[2] & 0xff;
2859 enaddr[5] = myea[2] >> 8;
2860
2861 /*
2862 * Toggle the LSB of the MAC address on the second port
2863 * of some dual port cards.
2864 */
2865 if (do_invert != 0)
2866 enaddr[5] ^= 1;
2867
2868 return 0;
2869
2870 bad:
2871 return -1;
2872 }
2873
2874 /*
2875 * wm_set_ral:
2876 *
 *	Set an entry in the receive address list.
2878 */
2879 static void
2880 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2881 {
2882 uint32_t ral_lo, ral_hi;
2883
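	/*
	 * Each receive address entry is split across two registers: the
	 * low register holds bytes 0-3 and the high register holds bytes
	 * 4-5 plus the Address Valid (RAL_AV) bit.
	 */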
2884 if (enaddr != NULL) {
2885 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2886 (enaddr[3] << 24);
2887 ral_hi = enaddr[4] | (enaddr[5] << 8);
2888 ral_hi |= RAL_AV;
2889 } else {
2890 ral_lo = 0;
2891 ral_hi = 0;
2892 }
2893
2894 if (sc->sc_type >= WM_T_82544) {
2895 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2896 ral_lo);
2897 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2898 ral_hi);
2899 } else {
2900 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2901 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2902 }
2903 }
2904
2905 /*
2906 * wm_mchash:
2907 *
 *	Compute the hash of the multicast address for the 4096-bit
 *	multicast filter (1024-bit on ICH/PCH variants).
2910 */
2911 static uint32_t
2912 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2913 {
2914 static const int lo_shift[4] = { 4, 3, 2, 0 };
2915 static const int hi_shift[4] = { 4, 5, 6, 8 };
2916 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2917 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
2918 uint32_t hash;
2919
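	/*
	 * The hash is built from address bytes 4 and 5; sc_mchash_type
	 * selects the bit window.  ICH/PCH parts use a 10-bit hash
	 * (1024-bit table), everything else a 12-bit hash (4096-bit
	 * table).
	 */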
2920 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2921 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2922 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
2923 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
2924 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
2925 return (hash & 0x3ff);
2926 }
2927 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2928 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2929
2930 return (hash & 0xfff);
2931 }
2932
2933 /*
2934 * wm_set_filter:
2935 *
2936 * Set up the receive filter.
2937 */
2938 static void
2939 wm_set_filter(struct wm_softc *sc)
2940 {
2941 struct ethercom *ec = &sc->sc_ethercom;
2942 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2943 struct ether_multi *enm;
2944 struct ether_multistep step;
2945 bus_addr_t mta_reg;
2946 uint32_t hash, reg, bit;
2947 int i, size;
2948
2949 if (sc->sc_type >= WM_T_82544)
2950 mta_reg = WMREG_CORDOVA_MTA;
2951 else
2952 mta_reg = WMREG_MTA;
2953
2954 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2955
2956 if (ifp->if_flags & IFF_BROADCAST)
2957 sc->sc_rctl |= RCTL_BAM;
2958 if (ifp->if_flags & IFF_PROMISC) {
2959 sc->sc_rctl |= RCTL_UPE;
2960 goto allmulti;
2961 }
2962
2963 /*
2964 * Set the station address in the first RAL slot, and
2965 * clear the remaining slots.
2966 */
2967 if (sc->sc_type == WM_T_ICH8)
		size = WM_RAL_TABSIZE_ICH8 - 1;
2969 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
2970 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
2971 || (sc->sc_type == WM_T_PCH_LPT))
2972 size = WM_RAL_TABSIZE_ICH8;
2973 else if (sc->sc_type == WM_T_82575)
2974 size = WM_RAL_TABSIZE_82575;
2975 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
2976 size = WM_RAL_TABSIZE_82576;
2977 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2978 size = WM_RAL_TABSIZE_I350;
2979 else
2980 size = WM_RAL_TABSIZE;
2981 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
2982 for (i = 1; i < size; i++)
2983 wm_set_ral(sc, NULL, i);
2984
2985 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2986 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2987 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
2988 size = WM_ICH8_MC_TABSIZE;
2989 else
2990 size = WM_MC_TABSIZE;
2991 /* Clear out the multicast table. */
2992 for (i = 0; i < size; i++)
2993 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2994
2995 ETHER_FIRST_MULTI(step, ec, enm);
2996 while (enm != NULL) {
2997 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2998 /*
2999 * We must listen to a range of multicast addresses.
3000 * For now, just accept all multicasts, rather than
3001 * trying to set only those filter bits needed to match
3002 * the range. (At this time, the only use of address
3003 * ranges is for IP multicast routing, for which the
3004 * range is big enough to require all bits set.)
3005 */
3006 goto allmulti;
3007 }
3008
3009 hash = wm_mchash(sc, enm->enm_addrlo);
3010
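		/*
		 * Each 32-bit MTA register covers 32 hash buckets: the
		 * upper hash bits select the register and the low 5 bits
		 * select the bit within it.
		 */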
3011 reg = (hash >> 5);
3012 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3013 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3014 || (sc->sc_type == WM_T_PCH2)
3015 || (sc->sc_type == WM_T_PCH_LPT))
3016 reg &= 0x1f;
3017 else
3018 reg &= 0x7f;
3019 bit = hash & 0x1f;
3020
3021 hash = CSR_READ(sc, mta_reg + (reg << 2));
3022 hash |= 1U << bit;
3023
3024 /* XXX Hardware bug?? */
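		/*
		 * Writes to some MTA registers apparently don't latch on
		 * the 82544 unless the preceding register is rewritten
		 * (unchanged) afterwards.
		 */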
3025 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3026 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3027 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3028 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3029 } else
3030 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3031
3032 ETHER_NEXT_MULTI(step, enm);
3033 }
3034
3035 ifp->if_flags &= ~IFF_ALLMULTI;
3036 goto setit;
3037
3038 allmulti:
3039 ifp->if_flags |= IFF_ALLMULTI;
3040 sc->sc_rctl |= RCTL_MPE;
3041
3042 setit:
3043 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3044 }
3045
3046 /* Reset and init related */
3047
3048 static void
3049 wm_set_vlan(struct wm_softc *sc)
3050 {
3051 /* Deal with VLAN enables. */
3052 if (VLAN_ATTACHED(&sc->sc_ethercom))
3053 sc->sc_ctrl |= CTRL_VME;
3054 else
3055 sc->sc_ctrl &= ~CTRL_VME;
3056
3057 /* Write the control registers. */
3058 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3059 }
3060
3061 static void
3062 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3063 {
3064 uint32_t gcr;
3065 pcireg_t ctrl2;
3066
3067 gcr = CSR_READ(sc, WMREG_GCR);
3068
3069 /* Only take action if timeout value is defaulted to 0 */
3070 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3071 goto out;
3072
3073 if ((gcr & GCR_CAP_VER2) == 0) {
3074 gcr |= GCR_CMPL_TMOUT_10MS;
3075 goto out;
3076 }
3077
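	/*
	 * For capability version 2 devices, set a 16ms completion timeout
	 * through the PCIe Device Control 2 register.
	 */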
3078 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3079 sc->sc_pcixe_capoff + PCIE_DCSR2);
3080 ctrl2 |= WM_PCIE_DCSR2_16MS;
3081 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3082 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3083
3084 out:
3085 /* Disable completion timeout resend */
3086 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3087
3088 CSR_WRITE(sc, WMREG_GCR, gcr);
3089 }
3090
3091 void
3092 wm_get_auto_rd_done(struct wm_softc *sc)
3093 {
3094 int i;
3095
3096 /* wait for eeprom to reload */
3097 switch (sc->sc_type) {
3098 case WM_T_82571:
3099 case WM_T_82572:
3100 case WM_T_82573:
3101 case WM_T_82574:
3102 case WM_T_82583:
3103 case WM_T_82575:
3104 case WM_T_82576:
3105 case WM_T_82580:
3106 case WM_T_I350:
3107 case WM_T_I354:
3108 case WM_T_I210:
3109 case WM_T_I211:
3110 case WM_T_80003:
3111 case WM_T_ICH8:
3112 case WM_T_ICH9:
3113 for (i = 0; i < 10; i++) {
3114 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3115 break;
3116 delay(1000);
3117 }
3118 if (i == 10) {
3119 log(LOG_ERR, "%s: auto read from eeprom failed to "
3120 "complete\n", device_xname(sc->sc_dev));
3121 }
3122 break;
3123 default:
3124 break;
3125 }
3126 }
3127
3128 void
3129 wm_lan_init_done(struct wm_softc *sc)
3130 {
3131 uint32_t reg = 0;
3132 int i;
3133
	/* Wait for the hardware LAN init to complete */
3135 switch (sc->sc_type) {
3136 case WM_T_ICH10:
3137 case WM_T_PCH:
3138 case WM_T_PCH2:
3139 case WM_T_PCH_LPT:
3140 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3141 reg = CSR_READ(sc, WMREG_STATUS);
3142 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3143 break;
3144 delay(100);
3145 }
3146 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3147 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3148 "complete\n", device_xname(sc->sc_dev), __func__);
3149 }
3150 break;
3151 default:
3152 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3153 __func__);
3154 break;
3155 }
3156
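	/* Acknowledge completion by clearing the latched status bit. */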
3157 reg &= ~STATUS_LAN_INIT_DONE;
3158 CSR_WRITE(sc, WMREG_STATUS, reg);
3159 }
3160
3161 void
3162 wm_get_cfg_done(struct wm_softc *sc)
3163 {
3164 int mask;
3165 uint32_t reg;
3166 int i;
3167
	/* Wait for the chip configuration to complete */
3169 switch (sc->sc_type) {
3170 case WM_T_82542_2_0:
3171 case WM_T_82542_2_1:
3172 /* null */
3173 break;
3174 case WM_T_82543:
3175 case WM_T_82544:
3176 case WM_T_82540:
3177 case WM_T_82545:
3178 case WM_T_82545_3:
3179 case WM_T_82546:
3180 case WM_T_82546_3:
3181 case WM_T_82541:
3182 case WM_T_82541_2:
3183 case WM_T_82547:
3184 case WM_T_82547_2:
3185 case WM_T_82573:
3186 case WM_T_82574:
3187 case WM_T_82583:
3188 /* generic */
3189 delay(10*1000);
3190 break;
3191 case WM_T_80003:
3192 case WM_T_82571:
3193 case WM_T_82572:
3194 case WM_T_82575:
3195 case WM_T_82576:
3196 case WM_T_82580:
3197 case WM_T_I350:
3198 case WM_T_I354:
3199 case WM_T_I210:
3200 case WM_T_I211:
3201 if (sc->sc_type == WM_T_82571) {
3202 /* Only 82571 shares port 0 */
3203 mask = EEMNGCTL_CFGDONE_0;
3204 } else
3205 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3206 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3207 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3208 break;
3209 delay(1000);
3210 }
3211 if (i >= WM_PHY_CFG_TIMEOUT) {
3212 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3213 device_xname(sc->sc_dev), __func__));
3214 }
3215 break;
3216 case WM_T_ICH8:
3217 case WM_T_ICH9:
3218 case WM_T_ICH10:
3219 case WM_T_PCH:
3220 case WM_T_PCH2:
3221 case WM_T_PCH_LPT:
3222 delay(10*1000);
3223 if (sc->sc_type >= WM_T_ICH10)
3224 wm_lan_init_done(sc);
3225 else
3226 wm_get_auto_rd_done(sc);
3227
3228 reg = CSR_READ(sc, WMREG_STATUS);
3229 if ((reg & STATUS_PHYRA) != 0)
3230 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3231 break;
3232 default:
3233 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3234 __func__);
3235 break;
3236 }
3237 }
3238
3239 /* Init hardware bits */
3240 void
3241 wm_initialize_hardware_bits(struct wm_softc *sc)
3242 {
3243 uint32_t tarc0, tarc1, reg;
3244
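	/*
	 * TARC0/TARC1 are the Tx arbitration count registers; most of the
	 * bit twiddling below follows Intel's published errata and
	 * initialization recommendations.
	 */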
3245 /* For 82571 variant, 80003 and ICHs */
3246 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3247 || (sc->sc_type >= WM_T_80003)) {
3248
3249 /* Transmit Descriptor Control 0 */
3250 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3251 reg |= TXDCTL_COUNT_DESC;
3252 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3253
3254 /* Transmit Descriptor Control 1 */
3255 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3256 reg |= TXDCTL_COUNT_DESC;
3257 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3258
3259 /* TARC0 */
3260 tarc0 = CSR_READ(sc, WMREG_TARC0);
3261 switch (sc->sc_type) {
3262 case WM_T_82571:
3263 case WM_T_82572:
3264 case WM_T_82573:
3265 case WM_T_82574:
3266 case WM_T_82583:
3267 case WM_T_80003:
3268 /* Clear bits 30..27 */
3269 tarc0 &= ~__BITS(30, 27);
3270 break;
3271 default:
3272 break;
3273 }
3274
3275 switch (sc->sc_type) {
3276 case WM_T_82571:
3277 case WM_T_82572:
3278 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3279
3280 tarc1 = CSR_READ(sc, WMREG_TARC1);
3281 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3282 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3283 /* 8257[12] Errata No.7 */
			tarc1 |= __BIT(22);	/* TARC1 bit 22 */
3285
3286 /* TARC1 bit 28 */
3287 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3288 tarc1 &= ~__BIT(28);
3289 else
3290 tarc1 |= __BIT(28);
3291 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3292
3293 /*
3294 * 8257[12] Errata No.13
			 * Disable Dynamic Clock Gating.
3296 */
3297 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3298 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3299 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3300 break;
3301 case WM_T_82573:
3302 case WM_T_82574:
3303 case WM_T_82583:
3304 if ((sc->sc_type == WM_T_82574)
3305 || (sc->sc_type == WM_T_82583))
3306 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3307
3308 /* Extended Device Control */
3309 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3310 reg &= ~__BIT(23); /* Clear bit 23 */
3311 reg |= __BIT(22); /* Set bit 22 */
3312 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3313
3314 /* Device Control */
3315 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3316 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3317
3318 /* PCIe Control Register */
3319 if ((sc->sc_type == WM_T_82574)
3320 || (sc->sc_type == WM_T_82583)) {
3321 /*
3322 * Document says this bit must be set for
3323 * proper operation.
3324 */
3325 reg = CSR_READ(sc, WMREG_GCR);
3326 reg |= __BIT(22);
3327 CSR_WRITE(sc, WMREG_GCR, reg);
3328
				/*
				 * Apply a workaround for the hardware
				 * errata documented in the errata docs.
				 * This fixes an issue where some error-prone
				 * or unreliable PCIe completions occur,
				 * particularly with ASPM enabled.  Without
				 * the fix, the issue can cause Tx timeouts.
				 */
3337 reg = CSR_READ(sc, WMREG_GCR2);
3338 reg |= __BIT(0);
3339 CSR_WRITE(sc, WMREG_GCR2, reg);
3340 }
3341 break;
3342 case WM_T_80003:
3343 /* TARC0 */
3344 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3345 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3347
3348 /* TARC1 bit 28 */
3349 tarc1 = CSR_READ(sc, WMREG_TARC1);
3350 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3351 tarc1 &= ~__BIT(28);
3352 else
3353 tarc1 |= __BIT(28);
3354 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3355 break;
3356 case WM_T_ICH8:
3357 case WM_T_ICH9:
3358 case WM_T_ICH10:
3359 case WM_T_PCH:
3360 case WM_T_PCH2:
3361 case WM_T_PCH_LPT:
3362 /* TARC 0 */
3363 if (sc->sc_type == WM_T_ICH8) {
3364 /* Set TARC0 bits 29 and 28 */
3365 tarc0 |= __BITS(29, 28);
3366 }
3367 /* Set TARC0 bits 23,24,26,27 */
3368 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3369
3370 /* CTRL_EXT */
3371 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3372 reg |= __BIT(22); /* Set bit 22 */
3373 /*
3374 * Enable PHY low-power state when MAC is at D3
3375 * w/o WoL
3376 */
3377 if (sc->sc_type >= WM_T_PCH)
3378 reg |= CTRL_EXT_PHYPDEN;
3379 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3380
3381 /* TARC1 */
3382 tarc1 = CSR_READ(sc, WMREG_TARC1);
3383 /* bit 28 */
3384 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3385 tarc1 &= ~__BIT(28);
3386 else
3387 tarc1 |= __BIT(28);
3388 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3389 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3390
3391 /* Device Status */
3392 if (sc->sc_type == WM_T_ICH8) {
3393 reg = CSR_READ(sc, WMREG_STATUS);
3394 reg &= ~__BIT(31);
3395 CSR_WRITE(sc, WMREG_STATUS, reg);
3396
3397 }
3398
			/*
			 * To work around a descriptor data corruption issue
			 * during NFS v2 UDP traffic, just disable the NFS
			 * filtering capability.
			 */
3404 reg = CSR_READ(sc, WMREG_RFCTL);
3405 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3406 CSR_WRITE(sc, WMREG_RFCTL, reg);
3407 break;
3408 default:
3409 break;
3410 }
3411 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3412
3413 /*
3414 * 8257[12] Errata No.52 and some others.
3415 * Avoid RSS Hash Value bug.
3416 */
3417 switch (sc->sc_type) {
3418 case WM_T_82571:
3419 case WM_T_82572:
3420 case WM_T_82573:
3421 case WM_T_80003:
3422 case WM_T_ICH8:
3423 reg = CSR_READ(sc, WMREG_RFCTL);
3424 reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
3425 CSR_WRITE(sc, WMREG_RFCTL, reg);
3426 break;
3427 default:
3428 break;
3429 }
3430 }
3431 }
3432
3433 static uint32_t
3434 wm_rxpbs_adjust_82580(uint32_t val)
3435 {
3436 uint32_t rv = 0;
3437
3438 if (val < __arraycount(wm_82580_rxpbs_table))
3439 rv = wm_82580_rxpbs_table[val];
3440
3441 return rv;
3442 }
3443
3444 /*
3445 * wm_reset:
3446 *
3447 * Reset the i82542 chip.
3448 */
3449 static void
3450 wm_reset(struct wm_softc *sc)
3451 {
3452 int phy_reset = 0;
3453 int error = 0;
3454 uint32_t reg, mask;
3455
3456 /*
3457 * Allocate on-chip memory according to the MTU size.
3458 * The Packet Buffer Allocation register must be written
3459 * before the chip is reset.
3460 */
3461 switch (sc->sc_type) {
3462 case WM_T_82547:
3463 case WM_T_82547_2:
3464 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3465 PBA_22K : PBA_30K;
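		/*
		 * The on-chip memory not assigned to the Rx packet buffer
		 * becomes the Tx FIFO; record its offset and size for the
		 * Tx FIFO stall workaround.
		 */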
3466 sc->sc_txfifo_head = 0;
3467 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3468 sc->sc_txfifo_size =
3469 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3470 sc->sc_txfifo_stall = 0;
3471 break;
3472 case WM_T_82571:
3473 case WM_T_82572:
3474 case WM_T_82575: /* XXX need special handing for jumbo frames */
3475 case WM_T_80003:
3476 sc->sc_pba = PBA_32K;
3477 break;
3478 case WM_T_82573:
3479 sc->sc_pba = PBA_12K;
3480 break;
3481 case WM_T_82574:
3482 case WM_T_82583:
3483 sc->sc_pba = PBA_20K;
3484 break;
3485 case WM_T_82576:
3486 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3487 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3488 break;
3489 case WM_T_82580:
3490 case WM_T_I350:
3491 case WM_T_I354:
3492 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3493 break;
3494 case WM_T_I210:
3495 case WM_T_I211:
3496 sc->sc_pba = PBA_34K;
3497 break;
3498 case WM_T_ICH8:
3499 /* Workaround for a bit corruption issue in FIFO memory */
3500 sc->sc_pba = PBA_8K;
3501 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3502 break;
3503 case WM_T_ICH9:
3504 case WM_T_ICH10:
3505 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3506 PBA_14K : PBA_10K;
3507 break;
3508 case WM_T_PCH:
3509 case WM_T_PCH2:
3510 case WM_T_PCH_LPT:
3511 sc->sc_pba = PBA_26K;
3512 break;
3513 default:
3514 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3515 PBA_40K : PBA_48K;
3516 break;
3517 }
	/*
	 * Only old or non-multiqueue devices have the PBA register.
	 * XXX Need special handling for the 82575.
	 */
3522 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3523 || (sc->sc_type == WM_T_82575))
3524 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3525
3526 /* Prevent the PCI-E bus from sticking */
3527 if (sc->sc_flags & WM_F_PCIE) {
3528 int timeout = 800;
3529
3530 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3531 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3532
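		/* Poll up to 80ms for the GIO master disable to take effect. */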
3533 while (timeout--) {
3534 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3535 == 0)
3536 break;
3537 delay(100);
3538 }
3539 }
3540
3541 /* Set the completion timeout for interface */
3542 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3543 || (sc->sc_type == WM_T_82580)
3544 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3545 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3546 wm_set_pcie_completion_timeout(sc);
3547
3548 /* Clear interrupt */
3549 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3550
3551 /* Stop the transmit and receive processes. */
3552 CSR_WRITE(sc, WMREG_RCTL, 0);
3553 sc->sc_rctl &= ~RCTL_EN;
3554 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3555 CSR_WRITE_FLUSH(sc);
3556
3557 /* XXX set_tbi_sbp_82543() */
3558
3559 delay(10*1000);
3560
3561 /* Must acquire the MDIO ownership before MAC reset */
3562 switch (sc->sc_type) {
3563 case WM_T_82573:
3564 case WM_T_82574:
3565 case WM_T_82583:
3566 error = wm_get_hw_semaphore_82573(sc);
3567 break;
3568 default:
3569 break;
3570 }
3571
3572 /*
3573 	 * 82541 Errata 29? & 82547 Errata 28?
3574 	 * See also the description of the PHY_RST bit in the CTRL
3575 	 * register in 8254x_GBe_SDM.pdf.
3576 */
3577 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3578 CSR_WRITE(sc, WMREG_CTRL,
3579 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3580 CSR_WRITE_FLUSH(sc);
3581 delay(5000);
3582 }
3583
3584 switch (sc->sc_type) {
3585 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3586 case WM_T_82541:
3587 case WM_T_82541_2:
3588 case WM_T_82547:
3589 case WM_T_82547_2:
3590 /*
3591 * On some chipsets, a reset through a memory-mapped write
3592 * cycle can cause the chip to reset before completing the
3593 		 * write cycle. This causes a major headache that can be
3594 * avoided by issuing the reset via indirect register writes
3595 * through I/O space.
3596 *
3597 * So, if we successfully mapped the I/O BAR at attach time,
3598 * use that. Otherwise, try our luck with a memory-mapped
3599 * reset.
3600 */
3601 if (sc->sc_flags & WM_F_IOH_VALID)
3602 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3603 else
3604 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3605 break;
3606 case WM_T_82545_3:
3607 case WM_T_82546_3:
3608 /* Use the shadow control register on these chips. */
3609 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3610 break;
3611 case WM_T_80003:
3612 mask = swfwphysem[sc->sc_funcid];
3613 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3614 wm_get_swfw_semaphore(sc, mask);
3615 CSR_WRITE(sc, WMREG_CTRL, reg);
3616 wm_put_swfw_semaphore(sc, mask);
3617 break;
3618 case WM_T_ICH8:
3619 case WM_T_ICH9:
3620 case WM_T_ICH10:
3621 case WM_T_PCH:
3622 case WM_T_PCH2:
3623 case WM_T_PCH_LPT:
3624 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3625 if (wm_check_reset_block(sc) == 0) {
3626 /*
3627 * Gate automatic PHY configuration by hardware on
3628 * non-managed 82579
3629 */
3630 if ((sc->sc_type == WM_T_PCH2)
3631 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3632 != 0))
3633 wm_gate_hw_phy_config_ich8lan(sc, 1);
3634 
3636 reg |= CTRL_PHY_RESET;
3637 phy_reset = 1;
3638 }
3639 wm_get_swfwhw_semaphore(sc);
3640 CSR_WRITE(sc, WMREG_CTRL, reg);
3641 		/* Don't insert a completion barrier while resetting */
3642 delay(20*1000);
3643 wm_put_swfwhw_semaphore(sc);
3644 break;
3645 case WM_T_82580:
3646 case WM_T_I350:
3647 case WM_T_I354:
3648 case WM_T_I210:
3649 case WM_T_I211:
3650 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3651 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3652 CSR_WRITE_FLUSH(sc);
3653 delay(5000);
3654 break;
3655 case WM_T_82542_2_0:
3656 case WM_T_82542_2_1:
3657 case WM_T_82543:
3658 case WM_T_82540:
3659 case WM_T_82545:
3660 case WM_T_82546:
3661 case WM_T_82571:
3662 case WM_T_82572:
3663 case WM_T_82573:
3664 case WM_T_82574:
3665 case WM_T_82575:
3666 case WM_T_82576:
3667 case WM_T_82583:
3668 default:
3669 /* Everything else can safely use the documented method. */
3670 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3671 break;
3672 }
3673
3674 /* Must release the MDIO ownership after MAC reset */
3675 switch (sc->sc_type) {
3676 case WM_T_82573:
3677 case WM_T_82574:
3678 case WM_T_82583:
3679 if (error == 0)
3680 wm_put_hw_semaphore_82573(sc);
3681 break;
3682 default:
3683 break;
3684 }
3685
3686 if (phy_reset != 0)
3687 wm_get_cfg_done(sc);
3688
3689 /* reload EEPROM */
3690 switch (sc->sc_type) {
3691 case WM_T_82542_2_0:
3692 case WM_T_82542_2_1:
3693 case WM_T_82543:
3694 case WM_T_82544:
3695 delay(10);
3696 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3697 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3698 CSR_WRITE_FLUSH(sc);
3699 delay(2000);
3700 break;
3701 case WM_T_82540:
3702 case WM_T_82545:
3703 case WM_T_82545_3:
3704 case WM_T_82546:
3705 case WM_T_82546_3:
3706 delay(5*1000);
3707 /* XXX Disable HW ARPs on ASF enabled adapters */
3708 break;
3709 case WM_T_82541:
3710 case WM_T_82541_2:
3711 case WM_T_82547:
3712 case WM_T_82547_2:
3713 delay(20000);
3714 /* XXX Disable HW ARPs on ASF enabled adapters */
3715 break;
3716 case WM_T_82571:
3717 case WM_T_82572:
3718 case WM_T_82573:
3719 case WM_T_82574:
3720 case WM_T_82583:
3721 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3722 delay(10);
3723 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3724 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3725 CSR_WRITE_FLUSH(sc);
3726 }
3727 /* check EECD_EE_AUTORD */
3728 wm_get_auto_rd_done(sc);
3729 /*
3730 		 * PHY configuration from the NVM starts only after EECD_AUTO_RD
3731 * is set.
3732 */
3733 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3734 || (sc->sc_type == WM_T_82583))
3735 delay(25*1000);
3736 break;
3737 case WM_T_82575:
3738 case WM_T_82576:
3739 case WM_T_82580:
3740 case WM_T_I350:
3741 case WM_T_I354:
3742 case WM_T_I210:
3743 case WM_T_I211:
3744 case WM_T_80003:
3745 /* check EECD_EE_AUTORD */
3746 wm_get_auto_rd_done(sc);
3747 break;
3748 case WM_T_ICH8:
3749 case WM_T_ICH9:
3750 case WM_T_ICH10:
3751 case WM_T_PCH:
3752 case WM_T_PCH2:
3753 case WM_T_PCH_LPT:
3754 break;
3755 default:
3756 panic("%s: unknown type\n", __func__);
3757 }
3758
3759 /* Check whether EEPROM is present or not */
3760 switch (sc->sc_type) {
3761 case WM_T_82575:
3762 case WM_T_82576:
3763 #if 0 /* XXX */
3764 case WM_T_82580:
3765 #endif
3766 case WM_T_I350:
3767 case WM_T_I354:
3768 case WM_T_ICH8:
3769 case WM_T_ICH9:
3770 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3771 /* Not found */
3772 sc->sc_flags |= WM_F_EEPROM_INVALID;
3773 if ((sc->sc_type == WM_T_82575)
3774 || (sc->sc_type == WM_T_82576)
3775 || (sc->sc_type == WM_T_82580)
3776 || (sc->sc_type == WM_T_I350)
3777 || (sc->sc_type == WM_T_I354))
3778 wm_reset_init_script_82575(sc);
3779 }
3780 break;
3781 default:
3782 break;
3783 }
3784
3785 if ((sc->sc_type == WM_T_82580)
3786 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3787 /* clear global device reset status bit */
3788 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3789 }
3790
3791 /* Clear any pending interrupt events. */
3792 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3793 reg = CSR_READ(sc, WMREG_ICR);
3794
3795 /* reload sc_ctrl */
3796 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3797
3798 if (sc->sc_type == WM_T_I350)
3799 wm_set_eee_i350(sc);
3800
3801 /* dummy read from WUC */
3802 if (sc->sc_type == WM_T_PCH)
3803 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3804 /*
3805 * For PCH, this write will make sure that any noise will be detected
3806 * as a CRC error and be dropped rather than show up as a bad packet
3807 	 * to the DMA engine.
3808 */
3809 if (sc->sc_type == WM_T_PCH)
3810 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3811
3812 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3813 CSR_WRITE(sc, WMREG_WUC, 0);
3814
3815 /* XXX need special handling for 82580 */
3816 }
3817
3818 /*
3819 * wm_add_rxbuf:
3820 *
3821  *	Add a receive buffer to the indicated descriptor.
3822 */
3823 static int
3824 wm_add_rxbuf(struct wm_softc *sc, int idx)
3825 {
3826 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3827 struct mbuf *m;
3828 int error;
3829
3830 KASSERT(WM_RX_LOCKED(sc));
3831
3832 MGETHDR(m, M_DONTWAIT, MT_DATA);
3833 if (m == NULL)
3834 return ENOBUFS;
3835
3836 MCLGET(m, M_DONTWAIT);
3837 if ((m->m_flags & M_EXT) == 0) {
3838 m_freem(m);
3839 return ENOBUFS;
3840 }
3841
3842 if (rxs->rxs_mbuf != NULL)
3843 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3844
3845 rxs->rxs_mbuf = m;
3846
3847 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3848 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3849 BUS_DMA_READ|BUS_DMA_NOWAIT);
3850 if (error) {
3851 /* XXX XXX XXX */
3852 aprint_error_dev(sc->sc_dev,
3853 "unable to load rx DMA map %d, error = %d\n",
3854 idx, error);
3855 panic("wm_add_rxbuf");
3856 }
3857
3858 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3859 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3860
3861 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3862 if ((sc->sc_rctl & RCTL_EN) != 0)
3863 WM_INIT_RXDESC(sc, idx);
3864 } else
3865 WM_INIT_RXDESC(sc, idx);
3866
3867 return 0;
3868 }
3869
3870 /*
3871 * wm_rxdrain:
3872 *
3873 * Drain the receive queue.
3874 */
3875 static void
3876 wm_rxdrain(struct wm_softc *sc)
3877 {
3878 struct wm_rxsoft *rxs;
3879 int i;
3880
3881 KASSERT(WM_RX_LOCKED(sc));
3882
3883 for (i = 0; i < WM_NRXDESC; i++) {
3884 rxs = &sc->sc_rxsoft[i];
3885 if (rxs->rxs_mbuf != NULL) {
3886 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3887 m_freem(rxs->rxs_mbuf);
3888 rxs->rxs_mbuf = NULL;
3889 }
3890 }
3891 }
3892
3893 /*
3894 * wm_init: [ifnet interface function]
3895 *
3896 * Initialize the interface.
3897 */
3898 static int
3899 wm_init(struct ifnet *ifp)
3900 {
3901 struct wm_softc *sc = ifp->if_softc;
3902 int ret;
3903
3904 WM_BOTH_LOCK(sc);
3905 ret = wm_init_locked(ifp);
3906 WM_BOTH_UNLOCK(sc);
3907
3908 return ret;
3909 }
3910
3911 static int
3912 wm_init_locked(struct ifnet *ifp)
3913 {
3914 struct wm_softc *sc = ifp->if_softc;
3915 struct wm_rxsoft *rxs;
3916 int i, j, trynum, error = 0;
3917 uint32_t reg;
3918
3919 KASSERT(WM_BOTH_LOCKED(sc));
3920 /*
3921 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3922 	 * There is a small but measurable benefit to avoiding the adjustment
3923 * of the descriptor so that the headers are aligned, for normal mtu,
3924 * on such platforms. One possibility is that the DMA itself is
3925 * slightly more efficient if the front of the entire packet (instead
3926 * of the front of the headers) is aligned.
3927 *
3928 * Note we must always set align_tweak to 0 if we are using
3929 * jumbo frames.
3930 */
3931 #ifdef __NO_STRICT_ALIGNMENT
3932 sc->sc_align_tweak = 0;
3933 #else
3934 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3935 sc->sc_align_tweak = 0;
3936 else
3937 sc->sc_align_tweak = 2;
3938 #endif /* __NO_STRICT_ALIGNMENT */
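	/*
	 * Worked example: ETHER_HDR_LEN is 14, so an align_tweak of 2
	 * places the IP header at buffer offset 16, i.e. on a 4-byte
	 * boundary, which strict-alignment platforms require for the
	 * stack's header accesses.
	 */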
3939
3940 /* Cancel any pending I/O. */
3941 wm_stop_locked(ifp, 0);
3942
3943 /* update statistics before reset */
3944 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3945 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3946
3947 /* Reset the chip to a known state. */
3948 wm_reset(sc);
3949
3950 switch (sc->sc_type) {
3951 case WM_T_82571:
3952 case WM_T_82572:
3953 case WM_T_82573:
3954 case WM_T_82574:
3955 case WM_T_82583:
3956 case WM_T_80003:
3957 case WM_T_ICH8:
3958 case WM_T_ICH9:
3959 case WM_T_ICH10:
3960 case WM_T_PCH:
3961 case WM_T_PCH2:
3962 case WM_T_PCH_LPT:
3963 if (wm_check_mng_mode(sc) != 0)
3964 wm_get_hw_control(sc);
3965 break;
3966 default:
3967 break;
3968 }
3969
3970 /* Init hardware bits */
3971 wm_initialize_hardware_bits(sc);
3972
3973 /* Reset the PHY. */
3974 if (sc->sc_flags & WM_F_HAS_MII)
3975 wm_gmii_reset(sc);
3976
3977 /* Calculate (E)ITR value */
3978 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3979 sc->sc_itr = 450; /* For EITR */
3980 } else if (sc->sc_type >= WM_T_82543) {
3981 /*
3982 * Set up the interrupt throttling register (units of 256ns)
3983 * Note that a footnote in Intel's documentation says this
3984 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3985 * or 10Mbit mode. Empirically, it appears to be the case
3986 * that that is also true for the 1024ns units of the other
3987 * interrupt-related timer registers -- so, really, we ought
3988 * to divide this value by 4 when the link speed is low.
3989 *
3990 * XXX implement this division at link speed change!
3991 */
3992
3993 /*
3994 * For N interrupts/sec, set this value to:
3995 * 1000000000 / (N * 256). Note that we set the
3996 * absolute and packet timer values to this value
3997 * divided by 4 to get "simple timer" behavior.
3998 */
3999
4000 sc->sc_itr = 1500; /* 2604 ints/sec */
4001 }
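	/*
	 * Worked example of the formula above: sc_itr = 1500 gives
	 * N = 1000000000 / (1500 * 256) ~= 2604 interrupts/sec, and the
	 * "simple timer" registers (TIDV/TADV, RDTR/RADV) are then
	 * written as 1500 / 4 = 375 below.
	 */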
4002
4003 /* Initialize the transmit descriptor ring. */
4004 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4005 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4006 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4007 sc->sc_txfree = WM_NTXDESC(sc);
4008 sc->sc_txnext = 0;
4009
4010 if (sc->sc_type < WM_T_82543) {
4011 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4012 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4013 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4014 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4015 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4016 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4017 } else {
4018 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4019 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4020 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4021 CSR_WRITE(sc, WMREG_TDH, 0);
4022
4023 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4024 /*
4025 * Don't write TDT before TCTL.EN is set.
4026 			 * See the documentation.
4027 */
4028 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_QUEUE_ENABLE
4029 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4030 | TXDCTL_WTHRESH(0));
4031 else {
4032 /* ITR / 4 */
4033 CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
4034 if (sc->sc_type >= WM_T_82540) {
4035 /* should be same */
4036 CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
4037 }
4038
4039 CSR_WRITE(sc, WMREG_TDT, 0);
4040 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) |
4041 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4042 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4043 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4044 }
4045 }
4046
4047 /* Initialize the transmit job descriptors. */
4048 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4049 sc->sc_txsoft[i].txs_mbuf = NULL;
4050 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4051 sc->sc_txsnext = 0;
4052 sc->sc_txsdirty = 0;
4053
4054 /*
4055 * Initialize the receive descriptor and receive job
4056 * descriptor rings.
4057 */
4058 if (sc->sc_type < WM_T_82543) {
4059 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4060 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4061 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4062 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4063 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4064 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4065
4066 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4067 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4068 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4069 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4070 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4071 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4072 } else {
4073 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4074 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4075 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4076
4077 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4078 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4079 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
4080 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4081 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4082 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4083 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4084 | RXDCTL_WTHRESH(1));
4085 } else {
4086 CSR_WRITE(sc, WMREG_RDH, 0);
4087 CSR_WRITE(sc, WMREG_RDT, 0);
4088 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4089 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
4090 }
4091 }
4092 for (i = 0; i < WM_NRXDESC; i++) {
4093 rxs = &sc->sc_rxsoft[i];
4094 if (rxs->rxs_mbuf == NULL) {
4095 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4096 log(LOG_ERR, "%s: unable to allocate or map "
4097 "rx buffer %d, error = %d\n",
4098 device_xname(sc->sc_dev), i, error);
4099 /*
4100 * XXX Should attempt to run with fewer receive
4101 * XXX buffers instead of just failing.
4102 */
4103 wm_rxdrain(sc);
4104 goto out;
4105 }
4106 } else {
4107 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4108 WM_INIT_RXDESC(sc, i);
4109 /*
4110 			 * For 82575 and newer devices, the RX descriptors
4111 * must be initialized after the setting of RCTL.EN in
4112 * wm_set_filter()
4113 */
4114 }
4115 }
4116 sc->sc_rxptr = 0;
4117 sc->sc_rxdiscard = 0;
4118 WM_RXCHAIN_RESET(sc);
4119
4120 /*
4121 * Clear out the VLAN table -- we don't use it (yet).
4122 */
4123 CSR_WRITE(sc, WMREG_VET, 0);
4124 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4125 trynum = 10; /* Due to hw errata */
4126 else
4127 trynum = 1;
4128 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4129 for (j = 0; j < trynum; j++)
4130 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4131
4132 /*
4133 * Set up flow-control parameters.
4134 *
4135 * XXX Values could probably stand some tuning.
4136 */
4137 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4138 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4139 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4140 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4141 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4142 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4143 }
4144
4145 sc->sc_fcrtl = FCRTL_DFLT;
4146 if (sc->sc_type < WM_T_82543) {
4147 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4148 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4149 } else {
4150 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4151 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4152 }
4153
4154 if (sc->sc_type == WM_T_80003)
4155 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4156 else
4157 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4158
4159 /* Writes the control register. */
4160 wm_set_vlan(sc);
4161
4162 if (sc->sc_flags & WM_F_HAS_MII) {
4163 int val;
4164
4165 switch (sc->sc_type) {
4166 case WM_T_80003:
4167 case WM_T_ICH8:
4168 case WM_T_ICH9:
4169 case WM_T_ICH10:
4170 case WM_T_PCH:
4171 case WM_T_PCH2:
4172 case WM_T_PCH_LPT:
4173 /*
4174 			 * Set the MAC to wait the maximum time between each
4175 * iteration and increase the max iterations when
4176 * polling the phy; this fixes erroneous timeouts at
4177 * 10Mbps.
4178 */
4179 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4180 0xFFFF);
4181 val = wm_kmrn_readreg(sc,
4182 KUMCTRLSTA_OFFSET_INB_PARAM);
4183 val |= 0x3F;
4184 wm_kmrn_writereg(sc,
4185 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4186 break;
4187 default:
4188 break;
4189 }
4190
4191 if (sc->sc_type == WM_T_80003) {
4192 val = CSR_READ(sc, WMREG_CTRL_EXT);
4193 val &= ~CTRL_EXT_LINK_MODE_MASK;
4194 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4195
4196 			/* Bypass RX and TX FIFOs */
4197 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4198 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4199 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4200 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4201 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4202 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4203 }
4204 }
4205 #if 0
4206 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4207 #endif
4208
4209 /* Set up checksum offload parameters. */
4210 reg = CSR_READ(sc, WMREG_RXCSUM);
4211 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4212 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4213 reg |= RXCSUM_IPOFL;
4214 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4215 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4216 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4217 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4218 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4219
4220 /* Set up the interrupt registers. */
4221 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4222 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4223 ICR_RXO | ICR_RXT0;
4224 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4225
4226 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4227 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4228 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4229 reg = CSR_READ(sc, WMREG_KABGTXD);
4230 reg |= KABGTXD_BGSQLBIAS;
4231 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4232 }
4233
4234 /* Set up the inter-packet gap. */
4235 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4236
4237 if (sc->sc_type >= WM_T_82543) {
4238 /*
4239 		 * XXX 82574 has both ITR and EITR. Set EITR when we use
4240 * the multi queue function with MSI-X.
4241 */
4242 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4243 CSR_WRITE(sc, WMREG_EITR(0), sc->sc_itr);
4244 else
4245 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4246 }
4247
4248 /* Set the VLAN ethernetype. */
4249 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4250
4251 /*
4252 * Set up the transmit control register; we start out with
4253 	 * a collision distance suitable for FDX, but update it when
4254 * we resolve the media type.
4255 */
4256 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4257 | TCTL_CT(TX_COLLISION_THRESHOLD)
4258 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4259 if (sc->sc_type >= WM_T_82571)
4260 sc->sc_tctl |= TCTL_MULR;
4261 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4262
4263 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4264 		/* Write TDT after TCTL.EN is set. See the documentation. */
4265 CSR_WRITE(sc, WMREG_TDT, 0);
4266 }
4267
4268 if (sc->sc_type == WM_T_80003) {
4269 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4270 reg &= ~TCTL_EXT_GCEX_MASK;
4271 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4272 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4273 }
4274
4275 /* Set the media. */
4276 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4277 goto out;
4278
4279 /* Configure for OS presence */
4280 wm_init_manageability(sc);
4281
4282 /*
4283 * Set up the receive control register; we actually program
4284 * the register when we set the receive filter. Use multicast
4285 * address offset type 0.
4286 *
4287 * Only the i82544 has the ability to strip the incoming
4288 * CRC, so we don't enable that feature.
4289 */
4290 sc->sc_mchash_type = 0;
4291 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4292 | RCTL_MO(sc->sc_mchash_type);
4293
4294 /*
4295 * The I350 has a bug where it always strips the CRC whether
4296 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
4297 */
4298 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4299 || (sc->sc_type == WM_T_I210))
4300 sc->sc_rctl |= RCTL_SECRC;
4301
4302 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4303 && (ifp->if_mtu > ETHERMTU)) {
4304 sc->sc_rctl |= RCTL_LPE;
4305 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4306 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4307 }
4308
4309 if (MCLBYTES == 2048) {
4310 sc->sc_rctl |= RCTL_2k;
4311 } else {
4312 if (sc->sc_type >= WM_T_82543) {
4313 switch (MCLBYTES) {
4314 case 4096:
4315 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4316 break;
4317 case 8192:
4318 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4319 break;
4320 case 16384:
4321 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4322 break;
4323 default:
4324 panic("wm_init: MCLBYTES %d unsupported",
4325 MCLBYTES);
4326 break;
4327 }
4328 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4329 }
4330
4331 /* Set the receive filter. */
4332 wm_set_filter(sc);
4333
4334 /* Enable ECC */
4335 switch (sc->sc_type) {
4336 case WM_T_82571:
4337 reg = CSR_READ(sc, WMREG_PBA_ECC);
4338 reg |= PBA_ECC_CORR_EN;
4339 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4340 break;
4341 case WM_T_PCH_LPT:
4342 reg = CSR_READ(sc, WMREG_PBECCSTS);
4343 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4344 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4345
4346 reg = CSR_READ(sc, WMREG_CTRL);
4347 reg |= CTRL_MEHE;
4348 CSR_WRITE(sc, WMREG_CTRL, reg);
4349 break;
4350 default:
4351 break;
4352 }
4353
4354 	/* On 82575 and later, set RDT only if RX is enabled */
4355 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4356 for (i = 0; i < WM_NRXDESC; i++)
4357 WM_INIT_RXDESC(sc, i);
4358
4359 sc->sc_stopping = false;
4360
4361 /* Start the one second link check clock. */
4362 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4363
4364 /* ...all done! */
4365 ifp->if_flags |= IFF_RUNNING;
4366 ifp->if_flags &= ~IFF_OACTIVE;
4367
4368 out:
4369 sc->sc_if_flags = ifp->if_flags;
4370 if (error)
4371 log(LOG_ERR, "%s: interface not running\n",
4372 device_xname(sc->sc_dev));
4373 return error;
4374 }
4375
4376 /*
4377 * wm_stop: [ifnet interface function]
4378 *
4379 * Stop transmission on the interface.
4380 */
4381 static void
4382 wm_stop(struct ifnet *ifp, int disable)
4383 {
4384 struct wm_softc *sc = ifp->if_softc;
4385
4386 WM_BOTH_LOCK(sc);
4387 wm_stop_locked(ifp, disable);
4388 WM_BOTH_UNLOCK(sc);
4389 }
4390
4391 static void
4392 wm_stop_locked(struct ifnet *ifp, int disable)
4393 {
4394 struct wm_softc *sc = ifp->if_softc;
4395 struct wm_txsoft *txs;
4396 int i;
4397
4398 KASSERT(WM_BOTH_LOCKED(sc));
4399
4400 sc->sc_stopping = true;
4401
4402 /* Stop the one second clock. */
4403 callout_stop(&sc->sc_tick_ch);
4404
4405 /* Stop the 82547 Tx FIFO stall check timer. */
4406 if (sc->sc_type == WM_T_82547)
4407 callout_stop(&sc->sc_txfifo_ch);
4408
4409 if (sc->sc_flags & WM_F_HAS_MII) {
4410 /* Down the MII. */
4411 mii_down(&sc->sc_mii);
4412 } else {
4413 #if 0
4414 /* Should we clear PHY's status properly? */
4415 wm_reset(sc);
4416 #endif
4417 }
4418
4419 /* Stop the transmit and receive processes. */
4420 CSR_WRITE(sc, WMREG_TCTL, 0);
4421 CSR_WRITE(sc, WMREG_RCTL, 0);
4422 sc->sc_rctl &= ~RCTL_EN;
4423
4424 /*
4425 * Clear the interrupt mask to ensure the device cannot assert its
4426 * interrupt line.
4427 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4428 * any currently pending or shared interrupt.
4429 */
4430 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4431 sc->sc_icr = 0;
4432
4433 /* Release any queued transmit buffers. */
4434 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4435 txs = &sc->sc_txsoft[i];
4436 if (txs->txs_mbuf != NULL) {
4437 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4438 m_freem(txs->txs_mbuf);
4439 txs->txs_mbuf = NULL;
4440 }
4441 }
4442
4443 /* Mark the interface as down and cancel the watchdog timer. */
4444 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4445 ifp->if_timer = 0;
4446
4447 if (disable)
4448 wm_rxdrain(sc);
4449
4450 #if 0 /* notyet */
4451 if (sc->sc_type >= WM_T_82544)
4452 CSR_WRITE(sc, WMREG_WUC, 0);
4453 #endif
4454 }
4455
4456 /*
4457 * wm_tx_offload:
4458 *
4459 * Set up TCP/IP checksumming parameters for the
4460 * specified packet.
4461 */
4462 static int
4463 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4464 uint8_t *fieldsp)
4465 {
4466 struct mbuf *m0 = txs->txs_mbuf;
4467 struct livengood_tcpip_ctxdesc *t;
4468 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4469 uint32_t ipcse;
4470 struct ether_header *eh;
4471 int offset, iphl;
4472 uint8_t fields;
4473
4474 /*
4475 * XXX It would be nice if the mbuf pkthdr had offset
4476 * fields for the protocol headers.
4477 */
4478
4479 eh = mtod(m0, struct ether_header *);
4480 switch (htons(eh->ether_type)) {
4481 case ETHERTYPE_IP:
4482 case ETHERTYPE_IPV6:
4483 offset = ETHER_HDR_LEN;
4484 break;
4485
4486 case ETHERTYPE_VLAN:
4487 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4488 break;
4489
4490 default:
4491 /*
4492 * Don't support this protocol or encapsulation.
4493 */
4494 *fieldsp = 0;
4495 *cmdp = 0;
4496 return 0;
4497 }
4498
4499 if ((m0->m_pkthdr.csum_flags &
4500 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4501 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4502 } else {
4503 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4504 }
4505 ipcse = offset + iphl - 1;
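	/*
	 * Example: for an untagged IPv4 frame with no IP options,
	 * offset = ETHER_HDR_LEN = 14 and iphl = 20, so ipcse = 33,
	 * the offset of the last byte covered by the IP header checksum.
	 */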
4506
4507 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4508 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4509 seg = 0;
4510 fields = 0;
4511
4512 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4513 int hlen = offset + iphl;
4514 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4515
4516 if (__predict_false(m0->m_len <
4517 (hlen + sizeof(struct tcphdr)))) {
4518 /*
4519 * TCP/IP headers are not in the first mbuf; we need
4520 * to do this the slow and painful way. Let's just
4521 * hope this doesn't happen very often.
4522 */
4523 struct tcphdr th;
4524
4525 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4526
4527 m_copydata(m0, hlen, sizeof(th), &th);
4528 if (v4) {
4529 struct ip ip;
4530
4531 m_copydata(m0, offset, sizeof(ip), &ip);
4532 ip.ip_len = 0;
4533 m_copyback(m0,
4534 offset + offsetof(struct ip, ip_len),
4535 sizeof(ip.ip_len), &ip.ip_len);
4536 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4537 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4538 } else {
4539 struct ip6_hdr ip6;
4540
4541 m_copydata(m0, offset, sizeof(ip6), &ip6);
4542 ip6.ip6_plen = 0;
4543 m_copyback(m0,
4544 offset + offsetof(struct ip6_hdr, ip6_plen),
4545 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4546 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4547 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4548 }
4549 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4550 sizeof(th.th_sum), &th.th_sum);
4551
4552 hlen += th.th_off << 2;
4553 } else {
4554 /*
4555 * TCP/IP headers are in the first mbuf; we can do
4556 * this the easy way.
4557 */
4558 struct tcphdr *th;
4559
4560 if (v4) {
4561 struct ip *ip =
4562 (void *)(mtod(m0, char *) + offset);
4563 th = (void *)(mtod(m0, char *) + hlen);
4564
4565 ip->ip_len = 0;
4566 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4567 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4568 } else {
4569 struct ip6_hdr *ip6 =
4570 (void *)(mtod(m0, char *) + offset);
4571 th = (void *)(mtod(m0, char *) + hlen);
4572
4573 ip6->ip6_plen = 0;
4574 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4575 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4576 }
4577 hlen += th->th_off << 2;
4578 }
4579
4580 if (v4) {
4581 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4582 cmdlen |= WTX_TCPIP_CMD_IP;
4583 } else {
4584 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4585 ipcse = 0;
4586 }
4587 cmd |= WTX_TCPIP_CMD_TSE;
4588 cmdlen |= WTX_TCPIP_CMD_TSE |
4589 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4590 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4591 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
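		/*
		 * Example: an options-free TCP/IPv4 TSO frame has
		 * hlen = 14 (Ethernet) + 20 (IP) + 20 (TCP) = 54, so the
		 * hardware replicates those 54 bytes of headers in front
		 * of each MSS-sized chunk of the remaining payload.
		 */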
4592 }
4593
4594 /*
4595 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4596 * offload feature, if we load the context descriptor, we
4597 * MUST provide valid values for IPCSS and TUCSS fields.
4598 */
4599
4600 ipcs = WTX_TCPIP_IPCSS(offset) |
4601 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4602 WTX_TCPIP_IPCSE(ipcse);
4603 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
4604 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
4605 fields |= WTX_IXSM;
4606 }
4607
4608 offset += iphl;
4609
4610 if (m0->m_pkthdr.csum_flags &
4611 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
4612 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4613 fields |= WTX_TXSM;
4614 tucs = WTX_TCPIP_TUCSS(offset) |
4615 WTX_TCPIP_TUCSO(offset +
4616 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
4617 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4618 } else if ((m0->m_pkthdr.csum_flags &
4619 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
4620 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4621 fields |= WTX_TXSM;
4622 tucs = WTX_TCPIP_TUCSS(offset) |
4623 WTX_TCPIP_TUCSO(offset +
4624 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
4625 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4626 } else {
4627 /* Just initialize it to a valid TCP context. */
4628 tucs = WTX_TCPIP_TUCSS(offset) |
4629 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
4630 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4631 }
4632
4633 /* Fill in the context descriptor. */
4634 t = (struct livengood_tcpip_ctxdesc *)
4635 &sc->sc_txdescs[sc->sc_txnext];
4636 t->tcpip_ipcs = htole32(ipcs);
4637 t->tcpip_tucs = htole32(tucs);
4638 t->tcpip_cmdlen = htole32(cmdlen);
4639 t->tcpip_seg = htole32(seg);
4640 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4641
4642 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4643 txs->txs_ndesc++;
4644
4645 *cmdp = cmd;
4646 *fieldsp = fields;
4647
4648 return 0;
4649 }
4650
4651 static void
4652 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
4653 {
4654 struct mbuf *m;
4655 int i;
4656
4657 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
4658 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
4659 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
4660 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
4661 m->m_data, m->m_len, m->m_flags);
4662 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
4663 i, i == 1 ? "" : "s");
4664 }
4665
4666 /*
4667 * wm_82547_txfifo_stall:
4668 *
4669 * Callout used to wait for the 82547 Tx FIFO to drain,
4670 * reset the FIFO pointers, and restart packet transmission.
4671 */
4672 static void
4673 wm_82547_txfifo_stall(void *arg)
4674 {
4675 struct wm_softc *sc = arg;
4676 #ifndef WM_MPSAFE
4677 int s;
4678
4679 s = splnet();
4680 #endif
4681 WM_TX_LOCK(sc);
4682
4683 if (sc->sc_stopping)
4684 goto out;
4685
4686 if (sc->sc_txfifo_stall) {
4687 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
4688 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
4689 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
4690 /*
4691 * Packets have drained. Stop transmitter, reset
4692 * FIFO pointers, restart transmitter, and kick
4693 * the packet queue.
4694 */
4695 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
4696 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
4697 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
4698 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
4699 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
4700 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
4701 CSR_WRITE(sc, WMREG_TCTL, tctl);
4702 CSR_WRITE_FLUSH(sc);
4703
4704 sc->sc_txfifo_head = 0;
4705 sc->sc_txfifo_stall = 0;
4706 wm_start_locked(&sc->sc_ethercom.ec_if);
4707 } else {
4708 /*
4709 * Still waiting for packets to drain; try again in
4710 * another tick.
4711 */
4712 callout_schedule(&sc->sc_txfifo_ch, 1);
4713 }
4714 }
4715
4716 out:
4717 WM_TX_UNLOCK(sc);
4718 #ifndef WM_MPSAFE
4719 splx(s);
4720 #endif
4721 }
4722
4723 /*
4724 * wm_82547_txfifo_bugchk:
4725 *
4726 * Check for bug condition in the 82547 Tx FIFO. We need to
4727 * prevent enqueueing a packet that would wrap around the end
4728  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
4729 *
4730 * We do this by checking the amount of space before the end
4731 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
4732 * the Tx FIFO, wait for all remaining packets to drain, reset
4733 * the internal FIFO pointers to the beginning, and restart
4734 * transmission on the interface.
4735 */
4736 #define WM_FIFO_HDR 0x10
4737 #define WM_82547_PAD_LEN 0x3e0
4738 static int
4739 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
4740 {
4741 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
4742 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
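	/*
	 * Example: a 1514-byte frame needs
	 * len = roundup(1514 + 16, 16) = 1536 bytes of FIFO space:
	 * the packet plus its 16-byte FIFO header, rounded up to the
	 * FIFO's 16-byte granularity.
	 */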
4743
4744 /* Just return if already stalled. */
4745 if (sc->sc_txfifo_stall)
4746 return 1;
4747
4748 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4749 /* Stall only occurs in half-duplex mode. */
4750 goto send_packet;
4751 }
4752
4753 if (len >= WM_82547_PAD_LEN + space) {
4754 sc->sc_txfifo_stall = 1;
4755 callout_schedule(&sc->sc_txfifo_ch, 1);
4756 return 1;
4757 }
4758
4759 send_packet:
4760 sc->sc_txfifo_head += len;
4761 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
4762 sc->sc_txfifo_head -= sc->sc_txfifo_size;
4763
4764 return 0;
4765 }
4766
4767 /*
4768 * wm_start: [ifnet interface function]
4769 *
4770 * Start packet transmission on the interface.
4771 */
4772 static void
4773 wm_start(struct ifnet *ifp)
4774 {
4775 struct wm_softc *sc = ifp->if_softc;
4776
4777 WM_TX_LOCK(sc);
4778 if (!sc->sc_stopping)
4779 wm_start_locked(ifp);
4780 WM_TX_UNLOCK(sc);
4781 }
4782
4783 static void
4784 wm_start_locked(struct ifnet *ifp)
4785 {
4786 struct wm_softc *sc = ifp->if_softc;
4787 struct mbuf *m0;
4788 struct m_tag *mtag;
4789 struct wm_txsoft *txs;
4790 bus_dmamap_t dmamap;
4791 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
4792 bus_addr_t curaddr;
4793 bus_size_t seglen, curlen;
4794 uint32_t cksumcmd;
4795 uint8_t cksumfields;
4796
4797 KASSERT(WM_TX_LOCKED(sc));
4798
4799 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4800 return;
4801
4802 /* Remember the previous number of free descriptors. */
4803 ofree = sc->sc_txfree;
4804
4805 /*
4806 * Loop through the send queue, setting up transmit descriptors
4807 * until we drain the queue, or use up all available transmit
4808 * descriptors.
4809 */
4810 for (;;) {
4811 m0 = NULL;
4812
4813 /* Get a work queue entry. */
4814 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
4815 wm_txintr(sc);
4816 if (sc->sc_txsfree == 0) {
4817 DPRINTF(WM_DEBUG_TX,
4818 ("%s: TX: no free job descriptors\n",
4819 device_xname(sc->sc_dev)));
4820 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
4821 break;
4822 }
4823 }
4824
4825 /* Grab a packet off the queue. */
4826 IFQ_DEQUEUE(&ifp->if_snd, m0);
4827 if (m0 == NULL)
4828 break;
4829
4830 DPRINTF(WM_DEBUG_TX,
4831 ("%s: TX: have packet to transmit: %p\n",
4832 device_xname(sc->sc_dev), m0));
4833
4834 txs = &sc->sc_txsoft[sc->sc_txsnext];
4835 dmamap = txs->txs_dmamap;
4836
4837 use_tso = (m0->m_pkthdr.csum_flags &
4838 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
4839
4840 /*
4841 * So says the Linux driver:
4842 * The controller does a simple calculation to make sure
4843 * there is enough room in the FIFO before initiating the
4844 * DMA for each buffer. The calc is:
4845 * 4 = ceil(buffer len / MSS)
4846 * To make sure we don't overrun the FIFO, adjust the max
4847 * buffer len if the MSS drops.
4848 */
4849 dmamap->dm_maxsegsz =
4850 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
4851 ? m0->m_pkthdr.segsz << 2
4852 : WTX_MAX_LEN;
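		/*
		 * Example of the clamp above (assuming WTX_MAX_LEN is the
		 * chip's per-descriptor buffer length limit from
		 * if_wmreg.h): with an MSS of 1448, 4 * 1448 = 5792, so
		 * dm_maxsegsz becomes the smaller of 5792 and WTX_MAX_LEN.
		 */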
4853
4854 /*
4855 * Load the DMA map. If this fails, the packet either
4856 * didn't fit in the allotted number of segments, or we
4857 * were short on resources. For the too-many-segments
4858 * case, we simply report an error and drop the packet,
4859 * since we can't sanely copy a jumbo packet to a single
4860 * buffer.
4861 */
4862 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
4863 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
4864 if (error) {
4865 if (error == EFBIG) {
4866 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4867 log(LOG_ERR, "%s: Tx packet consumes too many "
4868 "DMA segments, dropping...\n",
4869 device_xname(sc->sc_dev));
4870 wm_dump_mbuf_chain(sc, m0);
4871 m_freem(m0);
4872 continue;
4873 }
4874 /* Short on resources, just stop for now. */
4875 DPRINTF(WM_DEBUG_TX,
4876 ("%s: TX: dmamap load failed: %d\n",
4877 device_xname(sc->sc_dev), error));
4878 break;
4879 }
4880
4881 segs_needed = dmamap->dm_nsegs;
4882 if (use_tso) {
4883 /* For sentinel descriptor; see below. */
4884 segs_needed++;
4885 }
4886
4887 /*
4888 * Ensure we have enough descriptors free to describe
4889 * the packet. Note, we always reserve one descriptor
4890 * at the end of the ring due to the semantics of the
4891 * TDT register, plus one more in the event we need
4892 * to load offload context.
4893 */
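		/*
		 * Example: a TSO packet mapped into 8 DMA segments has
		 * segs_needed = 9 (8 data + 1 sentinel), so we only
		 * proceed when sc_txfree >= 11, covering the reserved
		 * ring slot and a possible offload context descriptor.
		 */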
4894 if (segs_needed > sc->sc_txfree - 2) {
4895 /*
4896 * Not enough free descriptors to transmit this
4897 * packet. We haven't committed anything yet,
4898 * so just unload the DMA map, put the packet
4899 			 * back on the queue, and punt. Notify the upper
4900 * layer that there are no more slots left.
4901 */
4902 DPRINTF(WM_DEBUG_TX,
4903 ("%s: TX: need %d (%d) descriptors, have %d\n",
4904 device_xname(sc->sc_dev), dmamap->dm_nsegs,
4905 segs_needed, sc->sc_txfree - 1));
4906 ifp->if_flags |= IFF_OACTIVE;
4907 bus_dmamap_unload(sc->sc_dmat, dmamap);
4908 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
4909 break;
4910 }
4911
4912 /*
4913 * Check for 82547 Tx FIFO bug. We need to do this
4914 * once we know we can transmit the packet, since we
4915 * do some internal FIFO space accounting here.
4916 */
4917 if (sc->sc_type == WM_T_82547 &&
4918 wm_82547_txfifo_bugchk(sc, m0)) {
4919 DPRINTF(WM_DEBUG_TX,
4920 ("%s: TX: 82547 Tx FIFO bug detected\n",
4921 device_xname(sc->sc_dev)));
4922 ifp->if_flags |= IFF_OACTIVE;
4923 bus_dmamap_unload(sc->sc_dmat, dmamap);
4924 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
4925 break;
4926 }
4927
4928 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
4929
4930 DPRINTF(WM_DEBUG_TX,
4931 ("%s: TX: packet has %d (%d) DMA segments\n",
4932 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
4933
4934 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
4935
4936 /*
4937 * Store a pointer to the packet so that we can free it
4938 * later.
4939 *
4940 * Initially, we consider the number of descriptors the
4941 		 * packet uses to be the number of DMA segments. This may be
4942 * incremented by 1 if we do checksum offload (a descriptor
4943 * is used to set the checksum context).
4944 */
4945 txs->txs_mbuf = m0;
4946 txs->txs_firstdesc = sc->sc_txnext;
4947 txs->txs_ndesc = segs_needed;
4948
4949 /* Set up offload parameters for this packet. */
4950 if (m0->m_pkthdr.csum_flags &
4951 (M_CSUM_TSOv4|M_CSUM_TSOv6|
4952 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
4953 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
4954 if (wm_tx_offload(sc, txs, &cksumcmd,
4955 &cksumfields) != 0) {
4956 /* Error message already displayed. */
4957 bus_dmamap_unload(sc->sc_dmat, dmamap);
4958 continue;
4959 }
4960 } else {
4961 cksumcmd = 0;
4962 cksumfields = 0;
4963 }
4964
4965 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
4966
4967 /* Sync the DMA map. */
4968 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
4969 BUS_DMASYNC_PREWRITE);
4970
4971 /* Initialize the transmit descriptor. */
4972 for (nexttx = sc->sc_txnext, seg = 0;
4973 seg < dmamap->dm_nsegs; seg++) {
4974 for (seglen = dmamap->dm_segs[seg].ds_len,
4975 curaddr = dmamap->dm_segs[seg].ds_addr;
4976 seglen != 0;
4977 curaddr += curlen, seglen -= curlen,
4978 nexttx = WM_NEXTTX(sc, nexttx)) {
4979 curlen = seglen;
4980
4981 /*
4982 * So says the Linux driver:
4983 * Work around for premature descriptor
4984 * write-backs in TSO mode. Append a
4985 * 4-byte sentinel descriptor.
4986 */
4987 if (use_tso &&
4988 seg == dmamap->dm_nsegs - 1 &&
4989 curlen > 8)
4990 curlen -= 4;
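				/*
				 * Example: a final 60-byte segment is
				 * emitted as a 56-byte descriptor; the
				 * remaining 4 bytes then form the extra
				 * sentinel descriptor that segs_needed
				 * reserved above.
				 */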
4991
4992 wm_set_dma_addr(
4993 &sc->sc_txdescs[nexttx].wtx_addr,
4994 curaddr);
4995 sc->sc_txdescs[nexttx].wtx_cmdlen =
4996 htole32(cksumcmd | curlen);
4997 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
4998 0;
4999 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
5000 cksumfields;
5001 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5002 lasttx = nexttx;
5003
5004 DPRINTF(WM_DEBUG_TX,
5005 ("%s: TX: desc %d: low %#" PRIx64 ", "
5006 "len %#04zx\n",
5007 device_xname(sc->sc_dev), nexttx,
5008 (uint64_t)curaddr, curlen));
5009 }
5010 }
5011
5012 KASSERT(lasttx != -1);
5013
5014 /*
5015 * Set up the command byte on the last descriptor of
5016 * the packet. If we're in the interrupt delay window,
5017 * delay the interrupt.
5018 */
5019 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5020 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5021
5022 /*
5023 * If VLANs are enabled and the packet has a VLAN tag, set
5024 * up the descriptor to encapsulate the packet for us.
5025 *
5026 * This is only valid on the last descriptor of the packet.
5027 */
5028 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5029 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5030 htole32(WTX_CMD_VLE);
5031 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
5032 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5033 }
5034
5035 txs->txs_lastdesc = lasttx;
5036
5037 DPRINTF(WM_DEBUG_TX,
5038 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5039 device_xname(sc->sc_dev),
5040 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5041
5042 /* Sync the descriptors we're using. */
5043 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5044 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5045
5046 /* Give the packet to the chip. */
5047 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5048
5049 DPRINTF(WM_DEBUG_TX,
5050 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5051
5052 DPRINTF(WM_DEBUG_TX,
5053 ("%s: TX: finished transmitting packet, job %d\n",
5054 device_xname(sc->sc_dev), sc->sc_txsnext));
5055
5056 /* Advance the tx pointer. */
5057 sc->sc_txfree -= txs->txs_ndesc;
5058 sc->sc_txnext = nexttx;
5059
5060 sc->sc_txsfree--;
5061 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5062
5063 /* Pass the packet to any BPF listeners. */
5064 bpf_mtap(ifp, m0);
5065 }
5066
5067 if (m0 != NULL) {
5068 ifp->if_flags |= IFF_OACTIVE;
5069 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5070 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5071 m_freem(m0);
5072 }
5073
5074 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5075 /* No more slots; notify upper layer. */
5076 ifp->if_flags |= IFF_OACTIVE;
5077 }
5078
5079 if (sc->sc_txfree != ofree) {
5080 /* Set a watchdog timer in case the chip flakes out. */
5081 ifp->if_timer = 5;
5082 }
5083 }
5084
5085 /*
5086 * wm_nq_tx_offload:
5087 *
5088 * Set up TCP/IP checksumming parameters for the
5089 * specified packet, for NEWQUEUE devices
5090 */
5091 static int
5092 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
5093 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
5094 {
5095 struct mbuf *m0 = txs->txs_mbuf;
5096 struct m_tag *mtag;
5097 uint32_t vl_len, mssidx, cmdc;
5098 struct ether_header *eh;
5099 int offset, iphl;
5100
5101 /*
5102 * XXX It would be nice if the mbuf pkthdr had offset
5103 * fields for the protocol headers.
5104 */
5105 *cmdlenp = 0;
5106 *fieldsp = 0;
5107
5108 eh = mtod(m0, struct ether_header *);
5109 switch (htons(eh->ether_type)) {
5110 case ETHERTYPE_IP:
5111 case ETHERTYPE_IPV6:
5112 offset = ETHER_HDR_LEN;
5113 break;
5114
5115 case ETHERTYPE_VLAN:
5116 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5117 break;
5118
5119 default:
5120 /* Don't support this protocol or encapsulation. */
5121 *do_csum = false;
5122 return 0;
5123 }
5124 *do_csum = true;
5125 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
5126 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
5127
5128 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
5129 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
5130
5131 if ((m0->m_pkthdr.csum_flags &
5132 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
5133 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5134 } else {
5135 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5136 }
5137 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
5138 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
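	/*
	 * Example: an untagged IPv4 packet packs MACLEN = 14 and
	 * IPLEN = 20 into vl_len; the KASSERTs catch values that would
	 * overflow their bit-fields in the context descriptor.
	 */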
5139
5140 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5141 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
5142 << NQTXC_VLLEN_VLAN_SHIFT);
5143 *cmdlenp |= NQTX_CMD_VLE;
5144 }
5145
5146 mssidx = 0;
5147
5148 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5149 int hlen = offset + iphl;
5150 int tcp_hlen;
5151 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5152
5153 if (__predict_false(m0->m_len <
5154 (hlen + sizeof(struct tcphdr)))) {
5155 /*
5156 * TCP/IP headers are not in the first mbuf; we need
5157 * to do this the slow and painful way. Let's just
5158 * hope this doesn't happen very often.
5159 */
5160 struct tcphdr th;
5161
5162 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5163
5164 m_copydata(m0, hlen, sizeof(th), &th);
5165 if (v4) {
5166 struct ip ip;
5167
5168 m_copydata(m0, offset, sizeof(ip), &ip);
5169 ip.ip_len = 0;
5170 m_copyback(m0,
5171 offset + offsetof(struct ip, ip_len),
5172 sizeof(ip.ip_len), &ip.ip_len);
5173 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5174 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5175 } else {
5176 struct ip6_hdr ip6;
5177
5178 m_copydata(m0, offset, sizeof(ip6), &ip6);
5179 ip6.ip6_plen = 0;
5180 m_copyback(m0,
5181 offset + offsetof(struct ip6_hdr, ip6_plen),
5182 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5183 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5184 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5185 }
5186 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5187 sizeof(th.th_sum), &th.th_sum);
5188
5189 tcp_hlen = th.th_off << 2;
5190 } else {
5191 /*
5192 * TCP/IP headers are in the first mbuf; we can do
5193 * this the easy way.
5194 */
5195 struct tcphdr *th;
5196
5197 if (v4) {
5198 struct ip *ip =
5199 (void *)(mtod(m0, char *) + offset);
5200 th = (void *)(mtod(m0, char *) + hlen);
5201
5202 ip->ip_len = 0;
5203 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5204 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5205 } else {
5206 struct ip6_hdr *ip6 =
5207 (void *)(mtod(m0, char *) + offset);
5208 th = (void *)(mtod(m0, char *) + hlen);
5209
5210 ip6->ip6_plen = 0;
5211 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5212 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5213 }
5214 tcp_hlen = th->th_off << 2;
5215 }
5216 hlen += tcp_hlen;
5217 *cmdlenp |= NQTX_CMD_TSE;
5218
5219 if (v4) {
5220 WM_EVCNT_INCR(&sc->sc_ev_txtso);
5221 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
5222 } else {
5223 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5224 *fieldsp |= NQTXD_FIELDS_TUXSM;
5225 }
5226 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
5227 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5228 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
5229 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
5230 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
5231 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
5232 } else {
5233 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
5234 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5235 }
5236
5237 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
5238 *fieldsp |= NQTXD_FIELDS_IXSM;
5239 cmdc |= NQTXC_CMD_IP4;
5240 }
5241
5242 if (m0->m_pkthdr.csum_flags &
5243 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5244 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5245 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5246 cmdc |= NQTXC_CMD_TCP;
5247 } else {
5248 cmdc |= NQTXC_CMD_UDP;
5249 }
5250 cmdc |= NQTXC_CMD_IP4;
5251 *fieldsp |= NQTXD_FIELDS_TUXSM;
5252 }
5253 if (m0->m_pkthdr.csum_flags &
5254 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5255 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5256 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5257 cmdc |= NQTXC_CMD_TCP;
5258 } else {
5259 cmdc |= NQTXC_CMD_UDP;
5260 }
5261 cmdc |= NQTXC_CMD_IP6;
5262 *fieldsp |= NQTXD_FIELDS_TUXSM;
5263 }
5264
5265 /* Fill in the context descriptor. */
5266 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5267 htole32(vl_len);
5268 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5269 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5270 htole32(cmdc);
5271 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5272 htole32(mssidx);
5273 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5274 DPRINTF(WM_DEBUG_TX,
5275 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5276 sc->sc_txnext, 0, vl_len));
5277 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5278 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5279 txs->txs_ndesc++;
5280 return 0;
5281 }
5282
5283 /*
5284 * wm_nq_start: [ifnet interface function]
5285 *
5286 * Start packet transmission on the interface for NEWQUEUE devices
5287 */
5288 static void
5289 wm_nq_start(struct ifnet *ifp)
5290 {
5291 struct wm_softc *sc = ifp->if_softc;
5292
5293 WM_TX_LOCK(sc);
5294 if (!sc->sc_stopping)
5295 wm_nq_start_locked(ifp);
5296 WM_TX_UNLOCK(sc);
5297 }
5298
5299 static void
5300 wm_nq_start_locked(struct ifnet *ifp)
5301 {
5302 struct wm_softc *sc = ifp->if_softc;
5303 struct mbuf *m0;
5304 struct m_tag *mtag;
5305 struct wm_txsoft *txs;
5306 bus_dmamap_t dmamap;
5307 int error, nexttx, lasttx = -1, seg, segs_needed;
5308 bool do_csum, sent;
5309
5310 KASSERT(WM_TX_LOCKED(sc));
5311
5312 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5313 return;
5314
5315 sent = false;
5316
5317 /*
5318 * Loop through the send queue, setting up transmit descriptors
5319 * until we drain the queue, or use up all available transmit
5320 * descriptors.
5321 */
5322 for (;;) {
5323 m0 = NULL;
5324
5325 /* Get a work queue entry. */
5326 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5327 wm_txintr(sc);
5328 if (sc->sc_txsfree == 0) {
5329 DPRINTF(WM_DEBUG_TX,
5330 ("%s: TX: no free job descriptors\n",
5331 device_xname(sc->sc_dev)));
5332 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5333 break;
5334 }
5335 }
5336
5337 /* Grab a packet off the queue. */
5338 IFQ_DEQUEUE(&ifp->if_snd, m0);
5339 if (m0 == NULL)
5340 break;
5341
5342 DPRINTF(WM_DEBUG_TX,
5343 ("%s: TX: have packet to transmit: %p\n",
5344 device_xname(sc->sc_dev), m0));
5345
5346 txs = &sc->sc_txsoft[sc->sc_txsnext];
5347 dmamap = txs->txs_dmamap;
5348
5349 /*
5350 * Load the DMA map. If this fails, the packet either
5351 * didn't fit in the allotted number of segments, or we
5352 * were short on resources. For the too-many-segments
5353 * case, we simply report an error and drop the packet,
5354 * since we can't sanely copy a jumbo packet to a single
5355 * buffer.
5356 */
5357 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5358 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5359 if (error) {
5360 if (error == EFBIG) {
5361 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5362 log(LOG_ERR, "%s: Tx packet consumes too many "
5363 "DMA segments, dropping...\n",
5364 device_xname(sc->sc_dev));
5365 wm_dump_mbuf_chain(sc, m0);
5366 m_freem(m0);
5367 continue;
5368 }
5369 /* Short on resources, just stop for now. */
5370 DPRINTF(WM_DEBUG_TX,
5371 ("%s: TX: dmamap load failed: %d\n",
5372 device_xname(sc->sc_dev), error));
5373 break;
5374 }
5375
5376 segs_needed = dmamap->dm_nsegs;
5377
5378 /*
5379 * Ensure we have enough descriptors free to describe
5380 * the packet. Note, we always reserve one descriptor
5381 * at the end of the ring due to the semantics of the
5382 * TDT register, plus one more in the event we need
5383 * to load offload context.
5384 */
5385 if (segs_needed > sc->sc_txfree - 2) {
5386 /*
5387 * Not enough free descriptors to transmit this
5388 * packet. We haven't committed anything yet,
5389 * so just unload the DMA map, put the packet
5390 			 * back on the queue, and punt. Notify the upper
5391 * layer that there are no more slots left.
5392 */
5393 DPRINTF(WM_DEBUG_TX,
5394 ("%s: TX: need %d (%d) descriptors, have %d\n",
5395 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5396 segs_needed, sc->sc_txfree - 1));
5397 ifp->if_flags |= IFF_OACTIVE;
5398 bus_dmamap_unload(sc->sc_dmat, dmamap);
5399 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5400 break;
5401 }
5402
5403 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5404
5405 DPRINTF(WM_DEBUG_TX,
5406 ("%s: TX: packet has %d (%d) DMA segments\n",
5407 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5408
5409 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5410
5411 /*
5412 * Store a pointer to the packet so that we can free it
5413 * later.
5414 *
5415 * Initially, we consider the number of descriptors the
5416 		 * packet uses to be the number of DMA segments. This may be
5417 * incremented by 1 if we do checksum offload (a descriptor
5418 * is used to set the checksum context).
5419 */
5420 txs->txs_mbuf = m0;
5421 txs->txs_firstdesc = sc->sc_txnext;
5422 txs->txs_ndesc = segs_needed;
5423
5424 /* Set up offload parameters for this packet. */
5425 uint32_t cmdlen, fields, dcmdlen;
5426 if (m0->m_pkthdr.csum_flags &
5427 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5428 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5429 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5430 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5431 &do_csum) != 0) {
5432 /* Error message already displayed. */
5433 bus_dmamap_unload(sc->sc_dmat, dmamap);
5434 continue;
5435 }
5436 } else {
5437 do_csum = false;
5438 cmdlen = 0;
5439 fields = 0;
5440 }
5441
5442 /* Sync the DMA map. */
5443 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5444 BUS_DMASYNC_PREWRITE);
5445
5446 /* Initialize the first transmit descriptor. */
5447 nexttx = sc->sc_txnext;
5448 if (!do_csum) {
5449 			/* set up a legacy descriptor */
5450 wm_set_dma_addr(
5451 &sc->sc_txdescs[nexttx].wtx_addr,
5452 dmamap->dm_segs[0].ds_addr);
5453 sc->sc_txdescs[nexttx].wtx_cmdlen =
5454 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5455 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5456 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5457 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5458 NULL) {
5459 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5460 htole32(WTX_CMD_VLE);
5461 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5462 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5463 } else {
5464 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5465 }
5466 dcmdlen = 0;
5467 } else {
5468 			/* set up an advanced data descriptor */
5469 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5470 htole64(dmamap->dm_segs[0].ds_addr);
5471 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5472 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5473 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
5474 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5475 htole32(fields);
5476 DPRINTF(WM_DEBUG_TX,
5477 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5478 device_xname(sc->sc_dev), nexttx,
5479 (uint64_t)dmamap->dm_segs[0].ds_addr));
5480 DPRINTF(WM_DEBUG_TX,
5481 ("\t 0x%08x%08x\n", fields,
5482 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5483 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5484 }
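		/*
		 * To summarize the two formats above: a legacy descriptor
		 * carries the buffer address in split 32-bit words plus
		 * WTX_* command bits, while an advanced (NQ) data
		 * descriptor takes a 64-bit address and must be tagged
		 * NQTX_DTYP_D | NQTX_CMD_DEXT, hence the two values of
		 * dcmdlen.
		 */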
5485
5486 lasttx = nexttx;
5487 nexttx = WM_NEXTTX(sc, nexttx);
5488 /*
5489		 * Fill in the next descriptors.  The legacy and advanced
5490		 * formats are the same here.
5491 */
5492 for (seg = 1; seg < dmamap->dm_nsegs;
5493 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5494 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5495 htole64(dmamap->dm_segs[seg].ds_addr);
5496 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5497 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5498 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5499 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5500 lasttx = nexttx;
5501
5502 DPRINTF(WM_DEBUG_TX,
5503 ("%s: TX: desc %d: %#" PRIx64 ", "
5504 "len %#04zx\n",
5505 device_xname(sc->sc_dev), nexttx,
5506 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5507 dmamap->dm_segs[seg].ds_len));
5508 }
5509
5510 KASSERT(lasttx != -1);
5511
5512 /*
5513 * Set up the command byte on the last descriptor of
5514 * the packet. If we're in the interrupt delay window,
5515 * delay the interrupt.
5516 */
5517 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5518 (NQTX_CMD_EOP | NQTX_CMD_RS));
5519 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5520 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5521
5522 txs->txs_lastdesc = lasttx;
5523
5524 DPRINTF(WM_DEBUG_TX,
5525 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5526 device_xname(sc->sc_dev),
5527 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5528
5529 /* Sync the descriptors we're using. */
5530 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5531 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5532
5533 /* Give the packet to the chip. */
5534 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5535 sent = true;
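		/*
		 * Writing TDT hands every descriptor up to, but not
		 * including, the new tail over to the hardware, which is
		 * why one slot is always kept in reserve above.
		 */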
5536
5537 DPRINTF(WM_DEBUG_TX,
5538 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5539
5540 DPRINTF(WM_DEBUG_TX,
5541 ("%s: TX: finished transmitting packet, job %d\n",
5542 device_xname(sc->sc_dev), sc->sc_txsnext));
5543
5544 /* Advance the tx pointer. */
5545 sc->sc_txfree -= txs->txs_ndesc;
5546 sc->sc_txnext = nexttx;
5547
5548 sc->sc_txsfree--;
5549 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5550
5551 /* Pass the packet to any BPF listeners. */
5552 bpf_mtap(ifp, m0);
5553 }
5554
5555 if (m0 != NULL) {
5556 ifp->if_flags |= IFF_OACTIVE;
5557 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5558 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5559 m_freem(m0);
5560 }
5561
5562 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5563 /* No more slots; notify upper layer. */
5564 ifp->if_flags |= IFF_OACTIVE;
5565 }
5566
5567 if (sent) {
5568 /* Set a watchdog timer in case the chip flakes out. */
5569 ifp->if_timer = 5;
5570 }
5571 }
5572
5573 /* Interrupt */
5574
5575 /*
5576 * wm_txintr:
5577 *
5578 * Helper; handle transmit interrupts.
5579 */
5580 static void
5581 wm_txintr(struct wm_softc *sc)
5582 {
5583 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5584 struct wm_txsoft *txs;
5585 uint8_t status;
5586 int i;
5587
5588 if (sc->sc_stopping)
5589 return;
5590
5591 ifp->if_flags &= ~IFF_OACTIVE;
5592
5593 /*
5594 * Go through the Tx list and free mbufs for those
5595 * frames which have been transmitted.
5596 */
5597 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5598 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
5599 txs = &sc->sc_txsoft[i];
5600
5601 DPRINTF(WM_DEBUG_TX,
5602 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
5603
5604 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
5605 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5606
5607 status =
5608 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
5609 if ((status & WTX_ST_DD) == 0) {
5610 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
5611 BUS_DMASYNC_PREREAD);
5612 break;
5613 }
5614
5615 DPRINTF(WM_DEBUG_TX,
5616 ("%s: TX: job %d done: descs %d..%d\n",
5617 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
5618 txs->txs_lastdesc));
5619
5620 /*
5621 * XXX We should probably be using the statistics
5622 * XXX registers, but I don't know if they exist
5623 * XXX on chips before the i82544.
5624 */
5625
5626 #ifdef WM_EVENT_COUNTERS
5627 if (status & WTX_ST_TU)
5628 WM_EVCNT_INCR(&sc->sc_ev_tu);
5629 #endif /* WM_EVENT_COUNTERS */
5630
5631 if (status & (WTX_ST_EC|WTX_ST_LC)) {
5632 ifp->if_oerrors++;
5633 if (status & WTX_ST_LC)
5634 log(LOG_WARNING, "%s: late collision\n",
5635 device_xname(sc->sc_dev));
5636 else if (status & WTX_ST_EC) {
5637 ifp->if_collisions += 16;
5638 log(LOG_WARNING, "%s: excessive collisions\n",
5639 device_xname(sc->sc_dev));
5640 }
5641 } else
5642 ifp->if_opackets++;
5643
5644 sc->sc_txfree += txs->txs_ndesc;
5645 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
5646 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5647 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5648 m_freem(txs->txs_mbuf);
5649 txs->txs_mbuf = NULL;
5650 }
5651
5652 /* Update the dirty transmit buffer pointer. */
5653 sc->sc_txsdirty = i;
5654 DPRINTF(WM_DEBUG_TX,
5655 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
5656
5657 /*
5658 * If there are no more pending transmissions, cancel the watchdog
5659 * timer.
5660 */
5661 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
5662 ifp->if_timer = 0;
5663 }
5664
5665 /*
5666 * wm_rxintr:
5667 *
5668 * Helper; handle receive interrupts.
5669 */
5670 static void
5671 wm_rxintr(struct wm_softc *sc)
5672 {
5673 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5674 struct wm_rxsoft *rxs;
5675 struct mbuf *m;
5676 int i, len;
5677 uint8_t status, errors;
5678 uint16_t vlantag;
5679
5680 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
5681 rxs = &sc->sc_rxsoft[i];
5682
5683 DPRINTF(WM_DEBUG_RX,
5684 ("%s: RX: checking descriptor %d\n",
5685 device_xname(sc->sc_dev), i));
5686
5687 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5688
5689 status = sc->sc_rxdescs[i].wrx_status;
5690 errors = sc->sc_rxdescs[i].wrx_errors;
5691 len = le16toh(sc->sc_rxdescs[i].wrx_len);
5692 vlantag = sc->sc_rxdescs[i].wrx_special;
5693
5694 if ((status & WRX_ST_DD) == 0) {
5695 /* We have processed all of the receive descriptors. */
5696 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
5697 break;
5698 }
5699
5700 if (__predict_false(sc->sc_rxdiscard)) {
5701 DPRINTF(WM_DEBUG_RX,
5702 ("%s: RX: discarding contents of descriptor %d\n",
5703 device_xname(sc->sc_dev), i));
5704 WM_INIT_RXDESC(sc, i);
5705 if (status & WRX_ST_EOP) {
5706 /* Reset our state. */
5707 DPRINTF(WM_DEBUG_RX,
5708 ("%s: RX: resetting rxdiscard -> 0\n",
5709 device_xname(sc->sc_dev)));
5710 sc->sc_rxdiscard = 0;
5711 }
5712 continue;
5713 }
5714
5715 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5716 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
5717
5718 m = rxs->rxs_mbuf;
5719
5720 /*
5721 * Add a new receive buffer to the ring, unless of
5722 * course the length is zero. Treat the latter as a
5723 * failed mapping.
5724 */
5725 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
5726 /*
5727 * Failed, throw away what we've done so
5728 * far, and discard the rest of the packet.
5729 */
5730 ifp->if_ierrors++;
5731 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5732 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5733 WM_INIT_RXDESC(sc, i);
5734 if ((status & WRX_ST_EOP) == 0)
5735 sc->sc_rxdiscard = 1;
5736 if (sc->sc_rxhead != NULL)
5737 m_freem(sc->sc_rxhead);
5738 WM_RXCHAIN_RESET(sc);
5739 DPRINTF(WM_DEBUG_RX,
5740 ("%s: RX: Rx buffer allocation failed, "
5741 "dropping packet%s\n", device_xname(sc->sc_dev),
5742 sc->sc_rxdiscard ? " (discard)" : ""));
5743 continue;
5744 }
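		/*
		 * Note that when the allocation fails before EOP,
		 * sc_rxdiscard stays set and the remaining descriptors of
		 * this frame are recycled by the discard path at the top
		 * of the loop.
		 */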
5745
5746 m->m_len = len;
5747 sc->sc_rxlen += len;
5748 DPRINTF(WM_DEBUG_RX,
5749 ("%s: RX: buffer at %p len %d\n",
5750 device_xname(sc->sc_dev), m->m_data, len));
5751
5752 /* If this is not the end of the packet, keep looking. */
5753 if ((status & WRX_ST_EOP) == 0) {
5754 WM_RXCHAIN_LINK(sc, m);
5755 DPRINTF(WM_DEBUG_RX,
5756 ("%s: RX: not yet EOP, rxlen -> %d\n",
5757 device_xname(sc->sc_dev), sc->sc_rxlen));
5758 continue;
5759 }
5760
5761		/*
5762		 * Okay, we have the entire packet now.  The chip is
5763		 * configured to include the FCS except on the I350, I354
5764		 * and I21[01] (not all chips can be configured to strip
5765		 * it), so we need to trim it.  We may also need to adjust
5766		 * the length of the previous mbuf in the chain if the
5767		 * current mbuf is too short.  Due to an erratum, RCTL_SECRC
5768		 * is always set in the RCTL register on the I350, so the
5769		 * FCS is already stripped and we don't trim it there.
5770		 */
5771 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
5772 && (sc->sc_type != WM_T_I210)
5773 && (sc->sc_type != WM_T_I211)) {
5774 if (m->m_len < ETHER_CRC_LEN) {
5775 sc->sc_rxtail->m_len
5776 -= (ETHER_CRC_LEN - m->m_len);
5777 m->m_len = 0;
5778 } else
5779 m->m_len -= ETHER_CRC_LEN;
5780 len = sc->sc_rxlen - ETHER_CRC_LEN;
5781 } else
5782 len = sc->sc_rxlen;
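		/*
		 * E.g. if the final mbuf held only 2 of the FCS's
		 * ETHER_CRC_LEN (4) bytes, the other 2 ended the previous
		 * mbuf, so that one is shortened by 2 and the final
		 * mbuf's length becomes 0.
		 */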
5783
5784 WM_RXCHAIN_LINK(sc, m);
5785
5786 *sc->sc_rxtailp = NULL;
5787 m = sc->sc_rxhead;
5788
5789 WM_RXCHAIN_RESET(sc);
5790
5791 DPRINTF(WM_DEBUG_RX,
5792 ("%s: RX: have entire packet, len -> %d\n",
5793 device_xname(sc->sc_dev), len));
5794
5795 /* If an error occurred, update stats and drop the packet. */
5796 if (errors &
5797 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
5798 if (errors & WRX_ER_SE)
5799 log(LOG_WARNING, "%s: symbol error\n",
5800 device_xname(sc->sc_dev));
5801 else if (errors & WRX_ER_SEQ)
5802 log(LOG_WARNING, "%s: receive sequence error\n",
5803 device_xname(sc->sc_dev));
5804 else if (errors & WRX_ER_CE)
5805 log(LOG_WARNING, "%s: CRC error\n",
5806 device_xname(sc->sc_dev));
5807 m_freem(m);
5808 continue;
5809 }
5810
5811 /* No errors. Receive the packet. */
5812 m->m_pkthdr.rcvif = ifp;
5813 m->m_pkthdr.len = len;
5814
5815 /*
5816 * If VLANs are enabled, VLAN packets have been unwrapped
5817 * for us. Associate the tag with the packet.
5818 */
5819		/* XXX should check for I350 and I354 */
5820 if ((status & WRX_ST_VP) != 0) {
5821 VLAN_INPUT_TAG(ifp, m,
5822 le16toh(vlantag),
5823 continue);
5824 }
5825
5826 /* Set up checksum info for this packet. */
5827 if ((status & WRX_ST_IXSM) == 0) {
5828 if (status & WRX_ST_IPCS) {
5829 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
5830 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
5831 if (errors & WRX_ER_IPE)
5832 m->m_pkthdr.csum_flags |=
5833 M_CSUM_IPv4_BAD;
5834 }
5835 if (status & WRX_ST_TCPCS) {
5836 /*
5837 * Note: we don't know if this was TCP or UDP,
5838 * so we just set both bits, and expect the
5839 * upper layers to deal.
5840 */
5841 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
5842 m->m_pkthdr.csum_flags |=
5843 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
5844 M_CSUM_TCPv6 | M_CSUM_UDPv6;
5845 if (errors & WRX_ER_TCPE)
5846 m->m_pkthdr.csum_flags |=
5847 M_CSUM_TCP_UDP_BAD;
5848 }
5849 }
5850
5851 ifp->if_ipackets++;
5852
5853 WM_RX_UNLOCK(sc);
5854
5855 /* Pass this up to any BPF listeners. */
5856 bpf_mtap(ifp, m);
5857
5858 /* Pass it on. */
5859 (*ifp->if_input)(ifp, m);
5860
5861 WM_RX_LOCK(sc);
5862
5863 if (sc->sc_stopping)
5864 break;
5865 }
5866
5867 /* Update the receive pointer. */
5868 sc->sc_rxptr = i;
5869
5870 DPRINTF(WM_DEBUG_RX,
5871 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
5872 }
5873
5874 /*
5875 * wm_linkintr_gmii:
5876 *
5877 * Helper; handle link interrupts for GMII.
5878 */
5879 static void
5880 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
5881 {
5882
5883 KASSERT(WM_TX_LOCKED(sc));
5884
5885 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5886 __func__));
5887
5888 if (icr & ICR_LSC) {
5889 DPRINTF(WM_DEBUG_LINK,
5890 ("%s: LINK: LSC -> mii_pollstat\n",
5891 device_xname(sc->sc_dev)));
5892 mii_pollstat(&sc->sc_mii);
5893 if (sc->sc_type == WM_T_82543) {
5894 int miistatus, active;
5895
5896 /*
5897 * With 82543, we need to force speed and
5898 * duplex on the MAC equal to what the PHY
5899 * speed and duplex configuration is.
5900 */
5901 miistatus = sc->sc_mii.mii_media_status;
5902
5903 if (miistatus & IFM_ACTIVE) {
5904 active = sc->sc_mii.mii_media_active;
5905 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5906 switch (IFM_SUBTYPE(active)) {
5907 case IFM_10_T:
5908 sc->sc_ctrl |= CTRL_SPEED_10;
5909 break;
5910 case IFM_100_TX:
5911 sc->sc_ctrl |= CTRL_SPEED_100;
5912 break;
5913 case IFM_1000_T:
5914 sc->sc_ctrl |= CTRL_SPEED_1000;
5915 break;
5916 default:
5917 /*
5918					 * Fiber?
5919					 * Should not enter here.
5920 */
5921 printf("unknown media (%x)\n",
5922 active);
5923 break;
5924 }
5925 if (active & IFM_FDX)
5926 sc->sc_ctrl |= CTRL_FD;
5927 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5928 }
5929 } else if ((sc->sc_type == WM_T_ICH8)
5930 && (sc->sc_phytype == WMPHY_IGP_3)) {
5931 wm_kmrn_lock_loss_workaround_ich8lan(sc);
5932 } else if (sc->sc_type == WM_T_PCH) {
5933 wm_k1_gig_workaround_hv(sc,
5934 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
5935 }
5936
5937 if ((sc->sc_phytype == WMPHY_82578)
5938 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
5939 == IFM_1000_T)) {
5940
5941 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
5942 delay(200*1000); /* XXX too big */
5943
5944 /* Link stall fix for link up */
5945 wm_gmii_hv_writereg(sc->sc_dev, 1,
5946 HV_MUX_DATA_CTRL,
5947 HV_MUX_DATA_CTRL_GEN_TO_MAC
5948 | HV_MUX_DATA_CTRL_FORCE_SPEED);
5949 wm_gmii_hv_writereg(sc->sc_dev, 1,
5950 HV_MUX_DATA_CTRL,
5951 HV_MUX_DATA_CTRL_GEN_TO_MAC);
5952 }
5953 }
5954 } else if (icr & ICR_RXSEQ) {
5955 DPRINTF(WM_DEBUG_LINK,
5956 ("%s: LINK Receive sequence error\n",
5957 device_xname(sc->sc_dev)));
5958 }
5959 }
5960
5961 /*
5962 * wm_linkintr_tbi:
5963 *
5964 * Helper; handle link interrupts for TBI mode.
5965 */
5966 static void
5967 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
5968 {
5969 uint32_t status;
5970
5971 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5972 __func__));
5973
5974 status = CSR_READ(sc, WMREG_STATUS);
5975 if (icr & ICR_LSC) {
5976 if (status & STATUS_LU) {
5977 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
5978 device_xname(sc->sc_dev),
5979 (status & STATUS_FD) ? "FDX" : "HDX"));
5980 /*
5981			 * NOTE: the hardware updates TFCE and RFCE in CTRL
5982			 * automatically, so we re-read CTRL into sc->sc_ctrl.
5983 */
5984
5985 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5986 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5987 sc->sc_fcrtl &= ~FCRTL_XONE;
5988 if (status & STATUS_FD)
5989 sc->sc_tctl |=
5990 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5991 else
5992 sc->sc_tctl |=
5993 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5994 if (sc->sc_ctrl & CTRL_TFCE)
5995 sc->sc_fcrtl |= FCRTL_XONE;
5996 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5997 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5998 WMREG_OLD_FCRTL : WMREG_FCRTL,
5999 sc->sc_fcrtl);
6000 sc->sc_tbi_linkup = 1;
6001 } else {
6002 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
6003 device_xname(sc->sc_dev)));
6004 sc->sc_tbi_linkup = 0;
6005 }
6006 wm_tbi_set_linkled(sc);
6007 } else if (icr & ICR_RXSEQ) {
6008 DPRINTF(WM_DEBUG_LINK,
6009 ("%s: LINK: Receive sequence error\n",
6010 device_xname(sc->sc_dev)));
6011 }
6012 }
6013
6014 /*
6015 * wm_linkintr:
6016 *
6017 * Helper; handle link interrupts.
6018 */
6019 static void
6020 wm_linkintr(struct wm_softc *sc, uint32_t icr)
6021 {
6022
6023 if (sc->sc_flags & WM_F_HAS_MII)
6024 wm_linkintr_gmii(sc, icr);
6025 else
6026 wm_linkintr_tbi(sc, icr);
6027 }
6028
6029 /*
6030 * wm_intr:
6031 *
6032 * Interrupt service routine.
6033 */
6034 static int
6035 wm_intr(void *arg)
6036 {
6037 struct wm_softc *sc = arg;
6038 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6039 uint32_t icr;
6040 int handled = 0;
6041
6042 while (1 /* CONSTCOND */) {
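		/*
		 * ICR is read-to-clear, so the read below both fetches
		 * and acknowledges the pending interrupt causes; loop
		 * until no cause bits we care about remain set.
		 */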
6043 icr = CSR_READ(sc, WMREG_ICR);
6044 if ((icr & sc->sc_icr) == 0)
6045 break;
6046 rnd_add_uint32(&sc->rnd_source, icr);
6047
6048 WM_RX_LOCK(sc);
6049
6050 if (sc->sc_stopping) {
6051 WM_RX_UNLOCK(sc);
6052 break;
6053 }
6054
6055 handled = 1;
6056
6057 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6058 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
6059 DPRINTF(WM_DEBUG_RX,
6060 ("%s: RX: got Rx intr 0x%08x\n",
6061 device_xname(sc->sc_dev),
6062 icr & (ICR_RXDMT0|ICR_RXT0)));
6063 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
6064 }
6065 #endif
6066 wm_rxintr(sc);
6067
6068 WM_RX_UNLOCK(sc);
6069 WM_TX_LOCK(sc);
6070
6071 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6072 if (icr & ICR_TXDW) {
6073 DPRINTF(WM_DEBUG_TX,
6074 ("%s: TX: got TXDW interrupt\n",
6075 device_xname(sc->sc_dev)));
6076 WM_EVCNT_INCR(&sc->sc_ev_txdw);
6077 }
6078 #endif
6079 wm_txintr(sc);
6080
6081 if (icr & (ICR_LSC|ICR_RXSEQ)) {
6082 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
6083 wm_linkintr(sc, icr);
6084 }
6085
6086 WM_TX_UNLOCK(sc);
6087
6088 if (icr & ICR_RXO) {
6089 #if defined(WM_DEBUG)
6090 log(LOG_WARNING, "%s: Receive overrun\n",
6091 device_xname(sc->sc_dev));
6092 #endif /* defined(WM_DEBUG) */
6093 }
6094 }
6095
6096 if (handled) {
6097 /* Try to get more packets going. */
6098 ifp->if_start(ifp);
6099 }
6100
6101 return handled;
6102 }
6103
6104 /*
6105 * Media related.
6106 * GMII, SGMII, TBI (and SERDES)
6107 */
6108
6109 /* GMII related */
6110
6111 /*
6112 * wm_gmii_reset:
6113 *
6114 * Reset the PHY.
6115 */
6116 static void
6117 wm_gmii_reset(struct wm_softc *sc)
6118 {
6119 uint32_t reg;
6120 int rv;
6121
6122 /* get phy semaphore */
6123 switch (sc->sc_type) {
6124 case WM_T_82571:
6125 case WM_T_82572:
6126 case WM_T_82573:
6127 case WM_T_82574:
6128 case WM_T_82583:
6129 /* XXX should get sw semaphore, too */
6130 rv = wm_get_swsm_semaphore(sc);
6131 break;
6132 case WM_T_82575:
6133 case WM_T_82576:
6134 case WM_T_82580:
6135 case WM_T_I350:
6136 case WM_T_I354:
6137 case WM_T_I210:
6138 case WM_T_I211:
6139 case WM_T_80003:
6140 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6141 break;
6142 case WM_T_ICH8:
6143 case WM_T_ICH9:
6144 case WM_T_ICH10:
6145 case WM_T_PCH:
6146 case WM_T_PCH2:
6147 case WM_T_PCH_LPT:
6148 rv = wm_get_swfwhw_semaphore(sc);
6149 break;
6150 default:
6151		/* nothing to do */
6152 rv = 0;
6153 break;
6154 }
6155 if (rv != 0) {
6156 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6157 __func__);
6158 return;
6159 }
6160
6161 switch (sc->sc_type) {
6162 case WM_T_82542_2_0:
6163 case WM_T_82542_2_1:
6164 /* null */
6165 break;
6166 case WM_T_82543:
6167 /*
6168 * With 82543, we need to force speed and duplex on the MAC
6169 * equal to what the PHY speed and duplex configuration is.
6170 * In addition, we need to perform a hardware reset on the PHY
6171 * to take it out of reset.
6172 */
6173 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6174 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6175
6176 /* The PHY reset pin is active-low. */
6177 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6178 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6179 CTRL_EXT_SWDPIN(4));
6180 reg |= CTRL_EXT_SWDPIO(4);
6181
6182 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6183 CSR_WRITE_FLUSH(sc);
6184 delay(10*1000);
6185
6186 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6187 CSR_WRITE_FLUSH(sc);
6188 delay(150);
6189 #if 0
6190 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6191 #endif
6192 delay(20*1000); /* XXX extra delay to get PHY ID? */
6193 break;
6194 case WM_T_82544: /* reset 10000us */
6195 case WM_T_82540:
6196 case WM_T_82545:
6197 case WM_T_82545_3:
6198 case WM_T_82546:
6199 case WM_T_82546_3:
6200 case WM_T_82541:
6201 case WM_T_82541_2:
6202 case WM_T_82547:
6203 case WM_T_82547_2:
6204 case WM_T_82571: /* reset 100us */
6205 case WM_T_82572:
6206 case WM_T_82573:
6207 case WM_T_82574:
6208 case WM_T_82575:
6209 case WM_T_82576:
6210 case WM_T_82580:
6211 case WM_T_I350:
6212 case WM_T_I354:
6213 case WM_T_I210:
6214 case WM_T_I211:
6215 case WM_T_82583:
6216 case WM_T_80003:
6217 /* generic reset */
6218 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6219 CSR_WRITE_FLUSH(sc);
6220 delay(20000);
6221 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6222 CSR_WRITE_FLUSH(sc);
6223 delay(20000);
6224
6225 if ((sc->sc_type == WM_T_82541)
6226 || (sc->sc_type == WM_T_82541_2)
6227 || (sc->sc_type == WM_T_82547)
6228 || (sc->sc_type == WM_T_82547_2)) {
6229			/* workarounds for igp are done in igp_reset() */
6230 /* XXX add code to set LED after phy reset */
6231 }
6232 break;
6233 case WM_T_ICH8:
6234 case WM_T_ICH9:
6235 case WM_T_ICH10:
6236 case WM_T_PCH:
6237 case WM_T_PCH2:
6238 case WM_T_PCH_LPT:
6239 /* generic reset */
6240 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6241 CSR_WRITE_FLUSH(sc);
6242 delay(100);
6243 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6244 CSR_WRITE_FLUSH(sc);
6245 delay(150);
6246 break;
6247 default:
6248 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6249 __func__);
6250 break;
6251 }
6252
6253 /* release PHY semaphore */
6254 switch (sc->sc_type) {
6255 case WM_T_82571:
6256 case WM_T_82572:
6257 case WM_T_82573:
6258 case WM_T_82574:
6259 case WM_T_82583:
6260 /* XXX should put sw semaphore, too */
6261 wm_put_swsm_semaphore(sc);
6262 break;
6263 case WM_T_82575:
6264 case WM_T_82576:
6265 case WM_T_82580:
6266 case WM_T_I350:
6267 case WM_T_I354:
6268 case WM_T_I210:
6269 case WM_T_I211:
6270 case WM_T_80003:
6271 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6272 break;
6273 case WM_T_ICH8:
6274 case WM_T_ICH9:
6275 case WM_T_ICH10:
6276 case WM_T_PCH:
6277 case WM_T_PCH2:
6278 case WM_T_PCH_LPT:
6279 wm_put_swfwhw_semaphore(sc);
6280 break;
6281 default:
6282		/* nothing to do */
6283 rv = 0;
6284 break;
6285 }
6286
6287 /* get_cfg_done */
6288 wm_get_cfg_done(sc);
6289
6290 /* extra setup */
6291 switch (sc->sc_type) {
6292 case WM_T_82542_2_0:
6293 case WM_T_82542_2_1:
6294 case WM_T_82543:
6295 case WM_T_82544:
6296 case WM_T_82540:
6297 case WM_T_82545:
6298 case WM_T_82545_3:
6299 case WM_T_82546:
6300 case WM_T_82546_3:
6301 case WM_T_82541_2:
6302 case WM_T_82547_2:
6303 case WM_T_82571:
6304 case WM_T_82572:
6305 case WM_T_82573:
6306 case WM_T_82574:
6307 case WM_T_82575:
6308 case WM_T_82576:
6309 case WM_T_82580:
6310 case WM_T_I350:
6311 case WM_T_I354:
6312 case WM_T_I210:
6313 case WM_T_I211:
6314 case WM_T_82583:
6315 case WM_T_80003:
6316 /* null */
6317 break;
6318 case WM_T_82541:
6319 case WM_T_82547:
6320		/* XXX Actively configure the LED after PHY reset */
6321 break;
6322 case WM_T_ICH8:
6323 case WM_T_ICH9:
6324 case WM_T_ICH10:
6325 case WM_T_PCH:
6326 case WM_T_PCH2:
6327 case WM_T_PCH_LPT:
6328		/* Allow time for h/w to get to a quiescent state after reset */
6329 delay(10*1000);
6330
6331 if (sc->sc_type == WM_T_PCH)
6332 wm_hv_phy_workaround_ich8lan(sc);
6333
6334 if (sc->sc_type == WM_T_PCH2)
6335 wm_lv_phy_workaround_ich8lan(sc);
6336
6337 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6338 /*
6339 * dummy read to clear the phy wakeup bit after lcd
6340 * reset
6341 */
6342 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6343 }
6344
6345 /*
6346		 * XXX Configure the LCD with the extended configuration region
6347 * in NVM
6348 */
6349
6350 /* Configure the LCD with the OEM bits in NVM */
6351 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6352 || (sc->sc_type == WM_T_PCH_LPT)) {
6353 /*
6354 * Disable LPLU.
6355 * XXX It seems that 82567 has LPLU, too.
6356 */
6357 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6358			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6359 reg |= HV_OEM_BITS_ANEGNOW;
6360 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6361 }
6362 break;
6363 default:
6364 panic("%s: unknown type\n", __func__);
6365 break;
6366 }
6367 }
6368
6369 /*
6370 * wm_get_phy_id_82575:
6371 *
6372 * Return PHY ID. Return -1 if it failed.
6373 */
6374 static int
6375 wm_get_phy_id_82575(struct wm_softc *sc)
6376 {
6377 uint32_t reg;
6378 int phyid = -1;
6379
6380 /* XXX */
6381 if ((sc->sc_flags & WM_F_SGMII) == 0)
6382 return -1;
6383
6384 if (wm_sgmii_uses_mdio(sc)) {
6385 switch (sc->sc_type) {
6386 case WM_T_82575:
6387 case WM_T_82576:
6388 reg = CSR_READ(sc, WMREG_MDIC);
6389 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6390 break;
6391 case WM_T_82580:
6392 case WM_T_I350:
6393 case WM_T_I354:
6394 case WM_T_I210:
6395 case WM_T_I211:
6396 reg = CSR_READ(sc, WMREG_MDICNFG);
6397 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6398 break;
6399 default:
6400 return -1;
6401 }
6402 }
6403
6404 return phyid;
6405 }
6406
6407
6408 /*
6409 * wm_gmii_mediainit:
6410 *
6411 * Initialize media for use on 1000BASE-T devices.
6412 */
6413 static void
6414 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6415 {
6416 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6417 struct mii_data *mii = &sc->sc_mii;
6418 uint32_t reg;
6419
6420 /* We have GMII. */
6421 sc->sc_flags |= WM_F_HAS_MII;
6422
6423 if (sc->sc_type == WM_T_80003)
6424 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6425 else
6426 sc->sc_tipg = TIPG_1000T_DFLT;
6427
6428 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
6429 if ((sc->sc_type == WM_T_82580)
6430 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6431 || (sc->sc_type == WM_T_I211)) {
6432 reg = CSR_READ(sc, WMREG_PHPM);
6433 reg &= ~PHPM_GO_LINK_D;
6434 CSR_WRITE(sc, WMREG_PHPM, reg);
6435 }
6436
6437 /*
6438 * Let the chip set speed/duplex on its own based on
6439 * signals from the PHY.
6440 * XXXbouyer - I'm not sure this is right for the 80003,
6441 * the em driver only sets CTRL_SLU here - but it seems to work.
6442 */
6443 sc->sc_ctrl |= CTRL_SLU;
6444 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6445
6446 /* Initialize our media structures and probe the GMII. */
6447 mii->mii_ifp = ifp;
6448
6449 /*
6450 * Determine the PHY access method.
6451 *
6452 * For SGMII, use SGMII specific method.
6453 *
6454 * For some devices, we can determine the PHY access method
6455 * from sc_type.
6456 *
6457 * For ICH and PCH variants, it's difficult to determine the PHY
6458 * access method by sc_type, so use the PCI product ID for some
6459 * devices.
6460	 * For other ICH8 variants, try igp's method first.  If the PHY
6461	 * can't be detected, then use bm's method.
6462 */
6463 switch (prodid) {
6464 case PCI_PRODUCT_INTEL_PCH_M_LM:
6465 case PCI_PRODUCT_INTEL_PCH_M_LC:
6466 /* 82577 */
6467 sc->sc_phytype = WMPHY_82577;
6468 break;
6469 case PCI_PRODUCT_INTEL_PCH_D_DM:
6470 case PCI_PRODUCT_INTEL_PCH_D_DC:
6471 /* 82578 */
6472 sc->sc_phytype = WMPHY_82578;
6473 break;
6474 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6475 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6476 /* 82579 */
6477 sc->sc_phytype = WMPHY_82579;
6478 break;
6479 case PCI_PRODUCT_INTEL_82801I_BM:
6480 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6481 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6482 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6483 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6484 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6485 /* 82567 */
6486 sc->sc_phytype = WMPHY_BM;
6487 mii->mii_readreg = wm_gmii_bm_readreg;
6488 mii->mii_writereg = wm_gmii_bm_writereg;
6489 break;
6490 default:
6491 if (((sc->sc_flags & WM_F_SGMII) != 0)
6492		    && !wm_sgmii_uses_mdio(sc)) {
6493 mii->mii_readreg = wm_sgmii_readreg;
6494 mii->mii_writereg = wm_sgmii_writereg;
6495 } else if (sc->sc_type >= WM_T_80003) {
6496 mii->mii_readreg = wm_gmii_i80003_readreg;
6497 mii->mii_writereg = wm_gmii_i80003_writereg;
6498 } else if (sc->sc_type >= WM_T_I210) {
6499 mii->mii_readreg = wm_gmii_i82544_readreg;
6500 mii->mii_writereg = wm_gmii_i82544_writereg;
6501 } else if (sc->sc_type >= WM_T_82580) {
6502 sc->sc_phytype = WMPHY_82580;
6503 mii->mii_readreg = wm_gmii_82580_readreg;
6504 mii->mii_writereg = wm_gmii_82580_writereg;
6505 } else if (sc->sc_type >= WM_T_82544) {
6506 mii->mii_readreg = wm_gmii_i82544_readreg;
6507 mii->mii_writereg = wm_gmii_i82544_writereg;
6508 } else {
6509 mii->mii_readreg = wm_gmii_i82543_readreg;
6510 mii->mii_writereg = wm_gmii_i82543_writereg;
6511 }
6512 break;
6513 }
6514 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
6515 /* All PCH* use _hv_ */
6516 mii->mii_readreg = wm_gmii_hv_readreg;
6517 mii->mii_writereg = wm_gmii_hv_writereg;
6518 }
6519 mii->mii_statchg = wm_gmii_statchg;
6520
6521 wm_gmii_reset(sc);
6522
6523 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6524 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6525 wm_gmii_mediastatus);
6526
6527 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6528 || (sc->sc_type == WM_T_82580)
6529 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6530 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6531 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6532 /* Attach only one port */
6533 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6534 MII_OFFSET_ANY, MIIF_DOPAUSE);
6535 } else {
6536 int i, id;
6537 uint32_t ctrl_ext;
6538
6539 id = wm_get_phy_id_82575(sc);
6540 if (id != -1) {
6541 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6542 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6543 }
6544 if ((id == -1)
6545 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6546 /* Power on sgmii phy if it is disabled */
6547 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6548 CSR_WRITE(sc, WMREG_CTRL_EXT,
6549				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
6550 CSR_WRITE_FLUSH(sc);
6551 delay(300*1000); /* XXX too long */
6552
6553				/* try PHY addresses 1 through 7 */
6554 for (i = 1; i < 8; i++)
6555 mii_attach(sc->sc_dev, &sc->sc_mii,
6556 0xffffffff, i, MII_OFFSET_ANY,
6557 MIIF_DOPAUSE);
6558
6559 /* restore previous sfp cage power state */
6560 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6561 }
6562 }
6563 } else {
6564 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6565 MII_OFFSET_ANY, MIIF_DOPAUSE);
6566 }
6567
6568 /*
6569 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6570 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6571 */
6572 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6573 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6574 wm_set_mdio_slow_mode_hv(sc);
6575 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6576 MII_OFFSET_ANY, MIIF_DOPAUSE);
6577 }
6578
6579 /*
6580 * (For ICH8 variants)
6581 * If PHY detection failed, use BM's r/w function and retry.
6582 */
6583 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6584 /* if failed, retry with *_bm_* */
6585 mii->mii_readreg = wm_gmii_bm_readreg;
6586 mii->mii_writereg = wm_gmii_bm_writereg;
6587
6588 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6589 MII_OFFSET_ANY, MIIF_DOPAUSE);
6590 }
6591
6592 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6593		/* No PHY was found */
6594 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6595 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6596 sc->sc_phytype = WMPHY_NONE;
6597 } else {
6598 /*
6599 * PHY Found!
6600 * Check PHY type.
6601 */
6602 uint32_t model;
6603 struct mii_softc *child;
6604
6605 child = LIST_FIRST(&mii->mii_phys);
6606 if (device_is_a(child->mii_dev, "igphy")) {
6607 struct igphy_softc *isc = (struct igphy_softc *)child;
6608
6609 model = isc->sc_mii.mii_mpd_model;
6610 if (model == MII_MODEL_yyINTEL_I82566)
6611 sc->sc_phytype = WMPHY_IGP_3;
6612 }
6613
6614 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6615 }
6616 }
6617
6618 /*
6619 * wm_gmii_mediastatus: [ifmedia interface function]
6620 *
6621 * Get the current interface media status on a 1000BASE-T device.
6622 */
6623 static void
6624 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6625 {
6626 struct wm_softc *sc = ifp->if_softc;
6627
6628 ether_mediastatus(ifp, ifmr);
6629 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6630 | sc->sc_flowflags;
6631 }
6632
6633 /*
6634 * wm_gmii_mediachange: [ifmedia interface function]
6635 *
6636 * Set hardware to newly-selected media on a 1000BASE-T device.
6637 */
6638 static int
6639 wm_gmii_mediachange(struct ifnet *ifp)
6640 {
6641 struct wm_softc *sc = ifp->if_softc;
6642 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6643 int rc;
6644
6645 if ((ifp->if_flags & IFF_UP) == 0)
6646 return 0;
6647
6648 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6649 sc->sc_ctrl |= CTRL_SLU;
6650 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6651 || (sc->sc_type > WM_T_82543)) {
6652 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6653 } else {
6654 sc->sc_ctrl &= ~CTRL_ASDE;
6655 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6656 if (ife->ifm_media & IFM_FDX)
6657 sc->sc_ctrl |= CTRL_FD;
6658 switch (IFM_SUBTYPE(ife->ifm_media)) {
6659 case IFM_10_T:
6660 sc->sc_ctrl |= CTRL_SPEED_10;
6661 break;
6662 case IFM_100_TX:
6663 sc->sc_ctrl |= CTRL_SPEED_100;
6664 break;
6665 case IFM_1000_T:
6666 sc->sc_ctrl |= CTRL_SPEED_1000;
6667 break;
6668 default:
6669 panic("wm_gmii_mediachange: bad media 0x%x",
6670 ife->ifm_media);
6671 }
6672 }
6673 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6674 if (sc->sc_type <= WM_T_82543)
6675 wm_gmii_reset(sc);
6676
6677 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6678 return 0;
6679 return rc;
6680 }
6681
6682 #define MDI_IO CTRL_SWDPIN(2)
6683 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6684 #define MDI_CLK CTRL_SWDPIN(3)
6685
6686 static void
6687 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6688 {
6689 uint32_t i, v;
6690
6691 v = CSR_READ(sc, WMREG_CTRL);
6692 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6693 v |= MDI_DIR | CTRL_SWDPIO(3);
6694
6695 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6696 if (data & i)
6697 v |= MDI_IO;
6698 else
6699 v &= ~MDI_IO;
6700 CSR_WRITE(sc, WMREG_CTRL, v);
6701 CSR_WRITE_FLUSH(sc);
6702 delay(10);
6703 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6704 CSR_WRITE_FLUSH(sc);
6705 delay(10);
6706 CSR_WRITE(sc, WMREG_CTRL, v);
6707 CSR_WRITE_FLUSH(sc);
6708 delay(10);
6709 }
6710 }
6711
6712 static uint32_t
6713 wm_i82543_mii_recvbits(struct wm_softc *sc)
6714 {
6715 uint32_t v, i, data = 0;
6716
6717 v = CSR_READ(sc, WMREG_CTRL);
6718 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6719 v |= CTRL_SWDPIO(3);
6720
6721 CSR_WRITE(sc, WMREG_CTRL, v);
6722 CSR_WRITE_FLUSH(sc);
6723 delay(10);
6724 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6725 CSR_WRITE_FLUSH(sc);
6726 delay(10);
6727 CSR_WRITE(sc, WMREG_CTRL, v);
6728 CSR_WRITE_FLUSH(sc);
6729 delay(10);
6730
6731 for (i = 0; i < 16; i++) {
6732 data <<= 1;
6733 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6734 CSR_WRITE_FLUSH(sc);
6735 delay(10);
6736 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6737 data |= 1;
6738 CSR_WRITE(sc, WMREG_CTRL, v);
6739 CSR_WRITE_FLUSH(sc);
6740 delay(10);
6741 }
6742
6743 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6744 CSR_WRITE_FLUSH(sc);
6745 delay(10);
6746 CSR_WRITE(sc, WMREG_CTRL, v);
6747 CSR_WRITE_FLUSH(sc);
6748 delay(10);
6749
6750 return data;
6751 }
6752
6753 #undef MDI_IO
6754 #undef MDI_DIR
6755 #undef MDI_CLK
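/*
 * The bit-bang routines above implement the IEEE 802.3 clause 22 MDIO
 * frame: a 32-bit preamble of ones, a 2-bit start (01), a 2-bit opcode
 * (10 = read, 01 = write), 5 bits of PHY address, 5 bits of register
 * address, a turnaround and 16 bits of data, each bit clocked by
 * pulsing MDI_CLK with roughly 10us setup and hold times.
 */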
6756
6757 /*
6758 * wm_gmii_i82543_readreg: [mii interface function]
6759 *
6760 * Read a PHY register on the GMII (i82543 version).
6761 */
6762 static int
6763 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6764 {
6765 struct wm_softc *sc = device_private(self);
6766 int rv;
6767
6768 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6769 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
6770 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6771 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
6772
6773 DPRINTF(WM_DEBUG_GMII,
6774 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6775 device_xname(sc->sc_dev), phy, reg, rv));
6776
6777 return rv;
6778 }
6779
6780 /*
6781 * wm_gmii_i82543_writereg: [mii interface function]
6782 *
6783 * Write a PHY register on the GMII (i82543 version).
6784 */
6785 static void
6786 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6787 {
6788 struct wm_softc *sc = device_private(self);
6789
6790 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6791 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6792 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6793 (MII_COMMAND_START << 30), 32);
6794 }
6795
6796 /*
6797 * wm_gmii_i82544_readreg: [mii interface function]
6798 *
6799 * Read a PHY register on the GMII.
6800 */
6801 static int
6802 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6803 {
6804 struct wm_softc *sc = device_private(self);
6805 uint32_t mdic = 0;
6806 int i, rv;
6807
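	/*
	 * The MDIC register implements MDIO in hardware: write the
	 * opcode, PHY address and register address, then poll until the
	 * chip sets MDIC_READY.  MDIC_E reports an error; on a read the
	 * data appears in the low 16 bits.
	 */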
6808 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6809 MDIC_REGADD(reg));
6810
6811 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6812 mdic = CSR_READ(sc, WMREG_MDIC);
6813 if (mdic & MDIC_READY)
6814 break;
6815 delay(50);
6816 }
6817
6818 if ((mdic & MDIC_READY) == 0) {
6819 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6820 device_xname(sc->sc_dev), phy, reg);
6821 rv = 0;
6822 } else if (mdic & MDIC_E) {
6823 #if 0 /* This is normal if no PHY is present. */
6824 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6825 device_xname(sc->sc_dev), phy, reg);
6826 #endif
6827 rv = 0;
6828 } else {
6829 rv = MDIC_DATA(mdic);
6830 if (rv == 0xffff)
6831 rv = 0;
6832 }
6833
6834 return rv;
6835 }
6836
6837 /*
6838 * wm_gmii_i82544_writereg: [mii interface function]
6839 *
6840 * Write a PHY register on the GMII.
6841 */
6842 static void
6843 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6844 {
6845 struct wm_softc *sc = device_private(self);
6846 uint32_t mdic = 0;
6847 int i;
6848
6849 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6850 MDIC_REGADD(reg) | MDIC_DATA(val));
6851
6852 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6853 mdic = CSR_READ(sc, WMREG_MDIC);
6854 if (mdic & MDIC_READY)
6855 break;
6856 delay(50);
6857 }
6858
6859 if ((mdic & MDIC_READY) == 0)
6860 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6861 device_xname(sc->sc_dev), phy, reg);
6862 else if (mdic & MDIC_E)
6863 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6864 device_xname(sc->sc_dev), phy, reg);
6865 }
6866
6867 /*
6868 * wm_gmii_i80003_readreg: [mii interface function]
6869 *
6870 * Read a PHY register on the kumeran bus.
6871 * This could be handled by the PHY layer if we didn't have to lock
6872 * the resource ...
6873 */
6874 static int
6875 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6876 {
6877 struct wm_softc *sc = device_private(self);
6878 int sem;
6879 int rv;
6880
6881 if (phy != 1) /* only one PHY on kumeran bus */
6882 return 0;
6883
6884 sem = swfwphysem[sc->sc_funcid];
6885 if (wm_get_swfw_semaphore(sc, sem)) {
6886 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6887 __func__);
6888 return 0;
6889 }
6890
6891 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6892 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6893 reg >> GG82563_PAGE_SHIFT);
6894 } else {
6895 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6896 reg >> GG82563_PAGE_SHIFT);
6897 }
6898	/* Wait another 200us to work around a bug in the MDIC ready bit */
6899 delay(200);
6900 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6901 delay(200);
6902
6903 wm_put_swfw_semaphore(sc, sem);
6904 return rv;
6905 }
6906
6907 /*
6908 * wm_gmii_i80003_writereg: [mii interface function]
6909 *
6910 * Write a PHY register on the kumeran.
6911 * This could be handled by the PHY layer if we didn't have to lock
6912 * the resource ...
6913 */
6914 static void
6915 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6916 {
6917 struct wm_softc *sc = device_private(self);
6918 int sem;
6919
6920 if (phy != 1) /* only one PHY on kumeran bus */
6921 return;
6922
6923 sem = swfwphysem[sc->sc_funcid];
6924 if (wm_get_swfw_semaphore(sc, sem)) {
6925 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6926 __func__);
6927 return;
6928 }
6929
6930 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6931 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6932 reg >> GG82563_PAGE_SHIFT);
6933 } else {
6934 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6935 reg >> GG82563_PAGE_SHIFT);
6936 }
6937	/* Wait another 200us to work around a bug in the MDIC ready bit */
6938 delay(200);
6939 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6940 delay(200);
6941
6942 wm_put_swfw_semaphore(sc, sem);
6943 }
6944
6945 /*
6946 * wm_gmii_bm_readreg: [mii interface function]
6947 *
6948 * Read a PHY register on the BM PHY.
6949 * This could be handled by the PHY layer if we didn't have to lock
6950 * the resource ...
6951 */
6952 static int
6953 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6954 {
6955 struct wm_softc *sc = device_private(self);
6956 int sem;
6957 int rv;
6958
6959 sem = swfwphysem[sc->sc_funcid];
6960 if (wm_get_swfw_semaphore(sc, sem)) {
6961 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6962 __func__);
6963 return 0;
6964 }
6965
6966 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6967 if (phy == 1)
6968 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6969 reg);
6970 else
6971 wm_gmii_i82544_writereg(self, phy,
6972 GG82563_PHY_PAGE_SELECT,
6973 reg >> GG82563_PAGE_SHIFT);
6974 }
6975
6976 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6977 wm_put_swfw_semaphore(sc, sem);
6978 return rv;
6979 }
6980
6981 /*
6982 * wm_gmii_bm_writereg: [mii interface function]
6983 *
6984 * Write a PHY register on the BM PHY.
6985 * This could be handled by the PHY layer if we didn't have to lock
6986 * the resource ...
6987 */
6988 static void
6989 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6990 {
6991 struct wm_softc *sc = device_private(self);
6992 int sem;
6993
6994 sem = swfwphysem[sc->sc_funcid];
6995 if (wm_get_swfw_semaphore(sc, sem)) {
6996 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6997 __func__);
6998 return;
6999 }
7000
7001 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7002 if (phy == 1)
7003 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7004 reg);
7005 else
7006 wm_gmii_i82544_writereg(self, phy,
7007 GG82563_PHY_PAGE_SELECT,
7008 reg >> GG82563_PAGE_SHIFT);
7009 }
7010
7011 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7012 wm_put_swfw_semaphore(sc, sem);
7013 }
7014
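/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register (page 800).  Page 800 is
 *	reached through an indirection: select page 769 and set the WUC
 *	enable bit, select page 800, write the register number through
 *	the address opcode register, transfer the data through the data
 *	opcode register, then restore the original WUC state.
 */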
7015 static void
7016 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
7017 {
7018 struct wm_softc *sc = device_private(self);
7019 uint16_t regnum = BM_PHY_REG_NUM(offset);
7020 uint16_t wuce;
7021
7022 /* XXX Gig must be disabled for MDIO accesses to page 800 */
7023 if (sc->sc_type == WM_T_PCH) {
7024		/* XXX the e1000 driver does nothing here... why? */
7025 }
7026
7027 /* Set page 769 */
7028 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7029 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7030
7031 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
7032
7033 wuce &= ~BM_WUC_HOST_WU_BIT;
7034 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
7035 wuce | BM_WUC_ENABLE_BIT);
7036
7037 /* Select page 800 */
7038 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7039 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
7040
7041 /* Write page 800 */
7042 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
7043
7044 if (rd)
7045 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7046 else
7047 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7048
7049 /* Set page 769 */
7050 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7051 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7052
7053 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7054 }
7055
7056 /*
7057 * wm_gmii_hv_readreg: [mii interface function]
7058 *
7059 * Read a PHY register on the HV (PCH family) PHY.
7060 * This could be handled by the PHY layer if we didn't have to lock
7061 * the resource ...
7062 */
7063 static int
7064 wm_gmii_hv_readreg(device_t self, int phy, int reg)
7065 {
7066 struct wm_softc *sc = device_private(self);
7067 uint16_t page = BM_PHY_REG_PAGE(reg);
7068 uint16_t regnum = BM_PHY_REG_NUM(reg);
7069 uint16_t val;
7070 int rv;
7071
7072 if (wm_get_swfwhw_semaphore(sc)) {
7073 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7074 __func__);
7075 return 0;
7076 }
7077
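	/*
	 * BM/HV register numbers encode the page in the upper bits
	 * (BM_PHY_REG_PAGE) and the offset within the page in the lower
	 * bits (BM_PHY_REG_NUM); registers above
	 * BME1000_MAX_MULTI_PAGE_REG require an explicit page select
	 * write first.
	 */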
7078 /* XXX Workaround failure in MDIO access while cable is disconnected */
7079 if (sc->sc_phytype == WMPHY_82577) {
7080 /* XXX must write */
7081 }
7082
7083 /* Page 800 works differently than the rest so it has its own func */
7084 if (page == BM_WUC_PAGE) {
7085 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
7086 return val;
7087 }
7088
7089 /*
7090 * Pages lower than 768 work differently from the rest and are
7091 * not handled here yet.
7092 */
7093 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7094 printf("gmii_hv_readreg!!!\n");
7095 return 0;
7096 }
7097
7098 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7099 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7100 page << BME1000_PAGE_SHIFT);
7101 }
7102
7103 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7104 wm_put_swfwhw_semaphore(sc);
7105 return rv;
7106 }
7107
7108 /*
7109 * wm_gmii_hv_writereg: [mii interface function]
7110 *
7111 * Write a PHY register on the HV (PCH family) PHY.
7112 * This could be handled by the PHY layer if we didn't have to lock
7113 * the resource ...
7114 */
7115 static void
7116 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7117 {
7118 struct wm_softc *sc = device_private(self);
7119 uint16_t page = BM_PHY_REG_PAGE(reg);
7120 uint16_t regnum = BM_PHY_REG_NUM(reg);
7121
7122 if (wm_get_swfwhw_semaphore(sc)) {
7123 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7124 __func__);
7125 return;
7126 }
7127
7128 /* XXX Workaround failure in MDIO access while cable is disconnected */
7129
7130 /* Page 800 works differently than the rest so it has its own func */
7131 if (page == BM_WUC_PAGE) {
7132 uint16_t tmp;
7133
7134 tmp = val;
7135 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
7136 return;
7137 }
7138
7139 /*
7140	 * Pages lower than 768 work differently from the rest and are
7141	 * not handled here yet.
7142 */
7143 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7144 printf("gmii_hv_writereg!!!\n");
7145 return;
7146 }
7147
7148 /*
7149 * XXX Workaround MDIO accesses being disabled after entering IEEE
7150 * Power Down (whenever bit 11 of the PHY control register is set)
7151 */
7152
7153 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7154 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7155 page << BME1000_PAGE_SHIFT);
7156 }
7157
7158 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7159 wm_put_swfwhw_semaphore(sc);
7160 }
7161
7162 /*
7163 * wm_gmii_82580_readreg: [mii interface function]
7164 *
7165 * Read a PHY register on the 82580 and I350.
7166 * This could be handled by the PHY layer if we didn't have to lock the
7167 * resource ...
7168 */
7169 static int
7170 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7171 {
7172 struct wm_softc *sc = device_private(self);
7173 int sem;
7174 int rv;
7175
7176 sem = swfwphysem[sc->sc_funcid];
7177 if (wm_get_swfw_semaphore(sc, sem)) {
7178 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7179 __func__);
7180 return 0;
7181 }
7182
7183 rv = wm_gmii_i82544_readreg(self, phy, reg);
7184
7185 wm_put_swfw_semaphore(sc, sem);
7186 return rv;
7187 }
7188
7189 /*
7190 * wm_gmii_82580_writereg: [mii interface function]
7191 *
7192 * Write a PHY register on the 82580 and I350.
7193 * This could be handled by the PHY layer if we didn't have to lock the
7194 * resource ...
7195 */
7196 static void
7197 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7198 {
7199 struct wm_softc *sc = device_private(self);
7200 int sem;
7201
7202 sem = swfwphysem[sc->sc_funcid];
7203 if (wm_get_swfw_semaphore(sc, sem)) {
7204 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7205 __func__);
7206 return;
7207 }
7208
7209 wm_gmii_i82544_writereg(self, phy, reg, val);
7210
7211 wm_put_swfw_semaphore(sc, sem);
7212 }
7213
7214 /*
7215 * wm_gmii_statchg: [mii interface function]
7216 *
7217 * Callback from MII layer when media changes.
7218 */
7219 static void
7220 wm_gmii_statchg(struct ifnet *ifp)
7221 {
7222 struct wm_softc *sc = ifp->if_softc;
7223 struct mii_data *mii = &sc->sc_mii;
7224
7225 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7226 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7227 sc->sc_fcrtl &= ~FCRTL_XONE;
7228
7229 /*
7230 * Get flow control negotiation result.
7231 */
7232 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7233 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7234 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7235 mii->mii_media_active &= ~IFM_ETH_FMASK;
7236 }
7237
7238 if (sc->sc_flowflags & IFM_FLOW) {
7239 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7240 sc->sc_ctrl |= CTRL_TFCE;
7241 sc->sc_fcrtl |= FCRTL_XONE;
7242 }
7243 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7244 sc->sc_ctrl |= CTRL_RFCE;
7245 }
7246
7247 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7248 DPRINTF(WM_DEBUG_LINK,
7249 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7250 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7251 } else {
7252 DPRINTF(WM_DEBUG_LINK,
7253 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7254 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7255 }
7256
7257 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7258 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7259 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7260 : WMREG_FCRTL, sc->sc_fcrtl);
7261 if (sc->sc_type == WM_T_80003) {
7262 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7263 case IFM_1000_T:
7264 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7265 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7266 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7267 break;
7268 default:
7269 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7270 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7271 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7272 break;
7273 }
7274 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7275 }
7276 }
7277
7278 /*
7279 * wm_kmrn_readreg:
7280 *
7281 * Read a kumeran register
7282 */
7283 static int
7284 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7285 {
7286 int rv;
7287
7288	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
7289 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7290 aprint_error_dev(sc->sc_dev,
7291 "%s: failed to get semaphore\n", __func__);
7292 return 0;
7293 }
7294	} else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
7295 if (wm_get_swfwhw_semaphore(sc)) {
7296 aprint_error_dev(sc->sc_dev,
7297 "%s: failed to get semaphore\n", __func__);
7298 return 0;
7299 }
7300 }
7301
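	/*
	 * To read, write the register offset to KUMCTRLSTA with the REN
	 * (read enable) bit set; after a short delay the data is
	 * available in the low 16 bits of the same register.
	 */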
7302 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7303 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7304 KUMCTRLSTA_REN);
7305 CSR_WRITE_FLUSH(sc);
7306 delay(2);
7307
7308 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7309
7310	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
7311		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7312	else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
7313 wm_put_swfwhw_semaphore(sc);
7314
7315 return rv;
7316 }
7317
7318 /*
7319 * wm_kmrn_writereg:
7320 *
7321 * Write a kumeran register
7322 */
7323 static void
7324 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7325 {
7326
7327	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
7328 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7329 aprint_error_dev(sc->sc_dev,
7330 "%s: failed to get semaphore\n", __func__);
7331 return;
7332 }
7333	} else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
7334 if (wm_get_swfwhw_semaphore(sc)) {
7335 aprint_error_dev(sc->sc_dev,
7336 "%s: failed to get semaphore\n", __func__);
7337 return;
7338 }
7339 }
7340
7341 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7342 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7343 (val & KUMCTRLSTA_MASK));
7344
7345	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
7346		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7347	else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
7348 wm_put_swfwhw_semaphore(sc);
7349 }
7350
7351 /* SGMII related */
7352
7353 /*
7354 * wm_sgmii_uses_mdio
7355 *
7356 * Check whether the transaction is to the internal PHY or the external
7357 * MDIO interface. Return true if it's MDIO.
7358 */
7359 static bool
7360 wm_sgmii_uses_mdio(struct wm_softc *sc)
7361 {
7362 uint32_t reg;
7363 bool ismdio = false;
7364
7365 switch (sc->sc_type) {
7366 case WM_T_82575:
7367 case WM_T_82576:
7368 reg = CSR_READ(sc, WMREG_MDIC);
7369 ismdio = ((reg & MDIC_DEST) != 0);
7370 break;
7371 case WM_T_82580:
7372 case WM_T_I350:
7373 case WM_T_I354:
7374 case WM_T_I210:
7375 case WM_T_I211:
7376 reg = CSR_READ(sc, WMREG_MDICNFG);
7377 ismdio = ((reg & MDICNFG_DEST) != 0);
7378 break;
7379 default:
7380 break;
7381 }
7382
7383 return ismdio;
7384 }
7385
7386 /*
7387 * wm_sgmii_readreg: [mii interface function]
7388 *
7389 * Read a PHY register on the SGMII
7390 * This could be handled by the PHY layer if we didn't have to lock the
7391 * resource ...
7392 */
7393 static int
7394 wm_sgmii_readreg(device_t self, int phy, int reg)
7395 {
7396 struct wm_softc *sc = device_private(self);
7397 uint32_t i2ccmd;
7398 int i, rv;
7399
7400 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7401 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7402 __func__);
7403 return 0;
7404 }
7405
7406 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7407 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7408 | I2CCMD_OPCODE_READ;
7409 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7410
7411 /* Poll the ready bit */
7412 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7413 delay(50);
7414 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7415 if (i2ccmd & I2CCMD_READY)
7416 break;
7417 }
7418 if ((i2ccmd & I2CCMD_READY) == 0)
7419 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7420 if ((i2ccmd & I2CCMD_ERROR) != 0)
7421 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7422
7423 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
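	/*
	 * The 16-bit datum arrives byte-swapped in I2CCMD, presumably
	 * because the I2C interface transfers the high byte first; e.g.
	 * a register value of 0x1234 shows up as 0x3412 and is swapped
	 * back here.
	 */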
7424
7425 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7426 return rv;
7427 }
7428
7429 /*
7430 * wm_sgmii_writereg: [mii interface function]
7431 *
7432 * Write a PHY register on the SGMII.
7433 * This could be handled by the PHY layer if we didn't have to lock the
7434 * resource ...
7435 */
7436 static void
7437 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7438 {
7439 struct wm_softc *sc = device_private(self);
7440 uint32_t i2ccmd;
7441 int i;
7442 int val_swapped;
7443
7444 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7445 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7446 __func__);
7447 return;
7448 }
7449 /* Swap the data bytes for the I2C interface */
7450 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
7451 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7452 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7453 | I2CCMD_OPCODE_WRITE | val_swapped;
7454 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7455
7456 /* Poll the ready bit */
7457 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7458 delay(50);
7459 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7460 if (i2ccmd & I2CCMD_READY)
7461 break;
7462 }
7463 if ((i2ccmd & I2CCMD_READY) == 0)
7464 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7465 if ((i2ccmd & I2CCMD_ERROR) != 0)
7466 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7467
7468	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7469 }
7470
7471 /* TBI related */
7472
7473 /* XXX Currently TBI only */
7474 static int
7475 wm_check_for_link(struct wm_softc *sc)
7476 {
7477 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7478 uint32_t rxcw;
7479 uint32_t ctrl;
7480 uint32_t status;
7481 uint32_t sig;
7482
7483 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
7484 sc->sc_tbi_linkup = 1;
7485 return 0;
7486 }
7487
7488 rxcw = CSR_READ(sc, WMREG_RXCW);
7489 ctrl = CSR_READ(sc, WMREG_CTRL);
7490 status = CSR_READ(sc, WMREG_STATUS);
7491
7492 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7493
7494 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7495 device_xname(sc->sc_dev), __func__,
7496 ((ctrl & CTRL_SWDPIN(1)) == sig),
7497 ((status & STATUS_LU) != 0),
7498 ((rxcw & RXCW_C) != 0)
7499 ));
7500
7501 /*
7502 * SWDPIN LU RXCW
7503 * 0 0 0
7504 * 0 0 1 (should not happen)
7505 * 0 1 0 (should not happen)
7506 * 0 1 1 (should not happen)
7507 * 1 0 0 Disable autonego and force linkup
7508 * 1 0 1 got /C/ but not linkup yet
7509 * 1 1 0 (linkup)
7510 * 1 1 1 If IFM_AUTO, back to autonego
7511 *
7512 */
7513 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7514 && ((status & STATUS_LU) == 0)
7515 && ((rxcw & RXCW_C) == 0)) {
7516 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7517 __func__));
7518 sc->sc_tbi_linkup = 0;
7519 /* Disable auto-negotiation in the TXCW register */
7520 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7521
7522 /*
7523 * Force link-up and also force full-duplex.
7524 *
7525		 * NOTE: TFCE and RFCE in CTRL were updated by the hardware
7526		 * automatically, so update sc->sc_ctrl from the value read.
7527 */
7528 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7529 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7530 } else if (((status & STATUS_LU) != 0)
7531 && ((rxcw & RXCW_C) != 0)
7532 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7533 sc->sc_tbi_linkup = 1;
7534 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7535 __func__));
7536 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7537 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7538 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7539 && ((rxcw & RXCW_C) != 0)) {
7540 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7541 } else {
7542 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7543 status));
7544 }
7545
7546 return 0;
7547 }
7548
7549 /*
7550 * wm_tbi_mediainit:
7551 *
7552 * Initialize media for use on 1000BASE-X devices.
7553 */
7554 static void
7555 wm_tbi_mediainit(struct wm_softc *sc)
7556 {
7557 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7558 const char *sep = "";
7559
7560 if (sc->sc_type < WM_T_82543)
7561 sc->sc_tipg = TIPG_WM_DFLT;
7562 else
7563 sc->sc_tipg = TIPG_LG_DFLT;
7564
7565 sc->sc_tbi_anegticks = 5;
7566
7567 /* Initialize our media structures */
7568 sc->sc_mii.mii_ifp = ifp;
7569
7570 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7571 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
7572 wm_tbi_mediastatus);
7573
7574 /*
7575 * SWD Pins:
7576 *
7577 * 0 = Link LED (output)
7578 * 1 = Loss Of Signal (input)
7579 */
7580 sc->sc_ctrl |= CTRL_SWDPIO(0);
7581 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
7582 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7583 sc->sc_ctrl &= ~CTRL_LRST;
7584
7585 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7586
7587 #define ADD(ss, mm, dd) \
7588 do { \
7589 aprint_normal("%s%s", sep, ss); \
7590 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
7591 sep = ", "; \
7592 } while (/*CONSTCOND*/0)
7593
7594 aprint_normal_dev(sc->sc_dev, "");
7595
7596 /* Only 82545 is LX */
7597 if (sc->sc_type == WM_T_82545) {
7598 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
7599 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
7600 } else {
7601 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
7602 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
7603 }
7604 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
7605 aprint_normal("\n");
7606
7607 #undef ADD
7608
7609 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
7610 }
7611
7612 /*
7613 * wm_tbi_mediastatus: [ifmedia interface function]
7614 *
7615 * Get the current interface media status on a 1000BASE-X device.
7616 */
7617 static void
7618 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7619 {
7620 struct wm_softc *sc = ifp->if_softc;
7621 uint32_t ctrl, status;
7622
7623 ifmr->ifm_status = IFM_AVALID;
7624 ifmr->ifm_active = IFM_ETHER;
7625
7626 status = CSR_READ(sc, WMREG_STATUS);
7627 if ((status & STATUS_LU) == 0) {
7628 ifmr->ifm_active |= IFM_NONE;
7629 return;
7630 }
7631
7632 ifmr->ifm_status |= IFM_ACTIVE;
7633 /* Only 82545 is LX */
7634 if (sc->sc_type == WM_T_82545)
7635 ifmr->ifm_active |= IFM_1000_LX;
7636 else
7637 ifmr->ifm_active |= IFM_1000_SX;
7638 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
7639 ifmr->ifm_active |= IFM_FDX;
7640 else
7641 ifmr->ifm_active |= IFM_HDX;
7642 ctrl = CSR_READ(sc, WMREG_CTRL);
7643 if (ctrl & CTRL_RFCE)
7644 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
7645 if (ctrl & CTRL_TFCE)
7646 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
7647 }
7648
7649 /*
7650 * wm_tbi_mediachange: [ifmedia interface function]
7651 *
7652 * Set hardware to newly-selected media on a 1000BASE-X device.
7653 */
7654 static int
7655 wm_tbi_mediachange(struct ifnet *ifp)
7656 {
7657 struct wm_softc *sc = ifp->if_softc;
7658 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7659 uint32_t status;
7660 int i;
7661
7662 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7663 return 0;
7664
7665 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
7666 || (sc->sc_type >= WM_T_82575))
7667 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
7668
7669 /* XXX power_up_serdes_link_82575() */
7670
7671 sc->sc_ctrl &= ~CTRL_LRST;
7672 sc->sc_txcw = TXCW_ANE;
7673 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7674 sc->sc_txcw |= TXCW_FD | TXCW_HD;
7675 else if (ife->ifm_media & IFM_FDX)
7676 sc->sc_txcw |= TXCW_FD;
7677 else
7678 sc->sc_txcw |= TXCW_HD;
7679
7680 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
7681 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
7682
7683 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
7684 device_xname(sc->sc_dev), sc->sc_txcw));
7685 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7686 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7687 CSR_WRITE_FLUSH(sc);
7688 delay(1000);
7689
7690 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
7691 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
7692
7693 /*
7694	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set if
7695	 * the optics detect a signal; on the 82544 and earlier the sense is
7696	 * inverted, so the pin reads 0 when there is a signal.
7697 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
7698 /* Have signal; wait for the link to come up. */
7699 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
7700 delay(10000);
7701 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
7702 break;
7703 }
7704
7705 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
7706 device_xname(sc->sc_dev),i));
7707
7708 status = CSR_READ(sc, WMREG_STATUS);
7709 DPRINTF(WM_DEBUG_LINK,
7710 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
7711 device_xname(sc->sc_dev),status, STATUS_LU));
7712 if (status & STATUS_LU) {
7713 /* Link is up. */
7714 DPRINTF(WM_DEBUG_LINK,
7715 ("%s: LINK: set media -> link up %s\n",
7716 device_xname(sc->sc_dev),
7717 (status & STATUS_FD) ? "FDX" : "HDX"));
7718
7719 /*
7720			 * NOTE: the hardware updates TFCE and RFCE in CTRL
7721			 * automatically, so re-read it into sc->sc_ctrl.
7722 */
7723 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7724 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7725 sc->sc_fcrtl &= ~FCRTL_XONE;
7726 if (status & STATUS_FD)
7727 sc->sc_tctl |=
7728 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7729 else
7730 sc->sc_tctl |=
7731 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7732 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
7733 sc->sc_fcrtl |= FCRTL_XONE;
7734 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7735 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7736 WMREG_OLD_FCRTL : WMREG_FCRTL,
7737 sc->sc_fcrtl);
7738 sc->sc_tbi_linkup = 1;
7739 } else {
7740 if (i == WM_LINKUP_TIMEOUT)
7741 wm_check_for_link(sc);
7742 /* Link is down. */
7743 DPRINTF(WM_DEBUG_LINK,
7744 ("%s: LINK: set media -> link down\n",
7745 device_xname(sc->sc_dev)));
7746 sc->sc_tbi_linkup = 0;
7747 }
7748 } else {
7749 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
7750 device_xname(sc->sc_dev)));
7751 sc->sc_tbi_linkup = 0;
7752 }
7753
7754 wm_tbi_set_linkled(sc);
7755
7756 return 0;
7757 }
7758
7759 /*
7760 * wm_tbi_set_linkled:
7761 *
7762 * Update the link LED on 1000BASE-X devices.
7763 */
7764 static void
7765 wm_tbi_set_linkled(struct wm_softc *sc)
7766 {
7767
7768 if (sc->sc_tbi_linkup)
7769 sc->sc_ctrl |= CTRL_SWDPIN(0);
7770 else
7771 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7772
7773 /* 82540 or newer devices are active low */
7774 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7775
7776 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7777 }
7778
7779 /*
7780 * wm_tbi_check_link:
7781 *
7782 * Check the link on 1000BASE-X devices.
7783 */
7784 static void
7785 wm_tbi_check_link(struct wm_softc *sc)
7786 {
7787 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7788 uint32_t status;
7789
7790 KASSERT(WM_TX_LOCKED(sc));
7791
7792 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
7793 sc->sc_tbi_linkup = 1;
7794 return;
7795 }
7796
7797 status = CSR_READ(sc, WMREG_STATUS);
7798
7799 /* XXX is this needed? */
7800 (void)CSR_READ(sc, WMREG_RXCW);
7801 (void)CSR_READ(sc, WMREG_CTRL);
7802
7803 /* set link status */
7804 if ((status & STATUS_LU) == 0) {
7805 DPRINTF(WM_DEBUG_LINK,
7806 ("%s: LINK: checklink -> down\n",
7807 device_xname(sc->sc_dev)));
7808 sc->sc_tbi_linkup = 0;
7809 } else if (sc->sc_tbi_linkup == 0) {
7810 DPRINTF(WM_DEBUG_LINK,
7811 ("%s: LINK: checklink -> up %s\n",
7812 device_xname(sc->sc_dev),
7813 (status & STATUS_FD) ? "FDX" : "HDX"));
7814 sc->sc_tbi_linkup = 1;
7815 }
7816
7817 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
7818 && ((status & STATUS_LU) == 0)) {
7819 sc->sc_tbi_linkup = 0;
7820 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7821 /* If the timer expired, retry autonegotiation */
7822 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
7823 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
7824 sc->sc_tbi_ticks = 0;
7825 /*
7826 * Reset the link, and let autonegotiation do
7827 * its thing
7828 */
7829 sc->sc_ctrl |= CTRL_LRST;
7830 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7831 CSR_WRITE_FLUSH(sc);
7832 delay(1000);
7833 sc->sc_ctrl &= ~CTRL_LRST;
7834 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7835 CSR_WRITE_FLUSH(sc);
7836 delay(1000);
7837 CSR_WRITE(sc, WMREG_TXCW,
7838 sc->sc_txcw & ~TXCW_ANE);
7839 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7840 }
7841 }
7842 }
7843
7844 wm_tbi_set_linkled(sc);
7845 }
7846
7847 /* SFP related */
7848
7849 static int
7850 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
7851 {
7852 uint32_t i2ccmd;
7853 int i;
7854
7855 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
7856 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7857
7858 /* Poll the ready bit */
7859 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7860 delay(50);
7861 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7862 if (i2ccmd & I2CCMD_READY)
7863 break;
7864 }
7865 if ((i2ccmd & I2CCMD_READY) == 0)
7866 return -1;
7867 if ((i2ccmd & I2CCMD_ERROR) != 0)
7868 return -1;
7869
7870 *data = i2ccmd & 0x00ff;
7871
7872 return 0;
7873 }
7874
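/*
 * Editor's note: the offsets used below are assumed to follow the
 * SFF-8472 module EEPROM layout, where SFF_SFP_ID_OFF is the identifier
 * byte and SFF_SFP_ETH_FLAGS_OFF holds the Ethernet compliance codes.
 */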
7875 static uint32_t
7876 wm_sfp_get_media_type(struct wm_softc *sc)
7877 {
7878 uint32_t ctrl_ext;
7879 uint8_t val = 0;
7880 int timeout = 3;
7881 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
7882 int rv = -1;
7883
7884 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7885 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
7886 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
7887 CSR_WRITE_FLUSH(sc);
7888
7889 /* Read SFP module data */
7890 while (timeout) {
7891 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
7892 if (rv == 0)
7893 break;
7894 delay(100*1000); /* XXX too big */
7895 timeout--;
7896 }
7897 if (rv != 0)
7898 goto out;
7899 switch (val) {
7900 case SFF_SFP_ID_SFF:
7901 aprint_normal_dev(sc->sc_dev,
7902 "Module/Connector soldered to board\n");
7903 break;
7904 case SFF_SFP_ID_SFP:
7905 aprint_normal_dev(sc->sc_dev, "SFP\n");
7906 break;
7907 case SFF_SFP_ID_UNKNOWN:
7908 goto out;
7909 default:
7910 break;
7911 }
7912
7913 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
7914 if (rv != 0) {
7915 goto out;
7916 }
7917
7918 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
7919 mediatype = WM_MEDIATYPE_SERDES;
7920	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
7921 sc->sc_flags |= WM_F_SGMII;
7922 mediatype = WM_MEDIATYPE_COPPER;
7923	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
7924 sc->sc_flags |= WM_F_SGMII;
7925 mediatype = WM_MEDIATYPE_SERDES;
7926 }
7927
7928 out:
7929 /* Restore I2C interface setting */
7930 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7931
7932 return mediatype;
7933 }
7934 /*
7935 * NVM related.
7936 * Microwire, SPI (w/wo EERD) and Flash.
7937 */
7938
7939 /* Both spi and uwire */
7940
7941 /*
7942 * wm_eeprom_sendbits:
7943 *
7944 * Send a series of bits to the EEPROM.
7945 */
7946 static void
7947 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
7948 {
7949 uint32_t reg;
7950 int x;
7951
7952 reg = CSR_READ(sc, WMREG_EECD);
7953
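	/*
	 * For each bit, MSB first: present the bit on DI, then pulse SK
	 * high and low, with ~2us per phase, to clock it into the EEPROM.
	 */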
7954 for (x = nbits; x > 0; x--) {
7955 if (bits & (1U << (x - 1)))
7956 reg |= EECD_DI;
7957 else
7958 reg &= ~EECD_DI;
7959 CSR_WRITE(sc, WMREG_EECD, reg);
7960 CSR_WRITE_FLUSH(sc);
7961 delay(2);
7962 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7963 CSR_WRITE_FLUSH(sc);
7964 delay(2);
7965 CSR_WRITE(sc, WMREG_EECD, reg);
7966 CSR_WRITE_FLUSH(sc);
7967 delay(2);
7968 }
7969 }
7970
7971 /*
7972 * wm_eeprom_recvbits:
7973 *
7974 * Receive a series of bits from the EEPROM.
7975 */
7976 static void
7977 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
7978 {
7979 uint32_t reg, val;
7980 int x;
7981
7982 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
7983
7984 val = 0;
7985 for (x = nbits; x > 0; x--) {
7986 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7987 CSR_WRITE_FLUSH(sc);
7988 delay(2);
7989 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
7990 val |= (1U << (x - 1));
7991 CSR_WRITE(sc, WMREG_EECD, reg);
7992 CSR_WRITE_FLUSH(sc);
7993 delay(2);
7994 }
7995 *valp = val;
7996 }
7997
7998 /* Microwire */
7999
8000 /*
8001 * wm_nvm_read_uwire:
8002 *
8003 * Read a word from the EEPROM using the MicroWire protocol.
8004 */
8005 static int
8006 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8007 {
8008 uint32_t reg, val;
8009 int i;
8010
8011 for (i = 0; i < wordcnt; i++) {
8012 /* Clear SK and DI. */
8013 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
8014 CSR_WRITE(sc, WMREG_EECD, reg);
8015
8016 /*
8017		 * XXX: workaround for a bug in qemu-0.12.x and prior,
8018		 * and in Xen.
8019		 *
8020		 * We use this workaround only for the 82540 because qemu's
8021		 * e1000 acts as an 82540.
8022 */
8023 if (sc->sc_type == WM_T_82540) {
8024 reg |= EECD_SK;
8025 CSR_WRITE(sc, WMREG_EECD, reg);
8026 reg &= ~EECD_SK;
8027 CSR_WRITE(sc, WMREG_EECD, reg);
8028 CSR_WRITE_FLUSH(sc);
8029 delay(2);
8030 }
8031 /* XXX: end of workaround */
8032
8033 /* Set CHIP SELECT. */
8034 reg |= EECD_CS;
8035 CSR_WRITE(sc, WMREG_EECD, reg);
8036 CSR_WRITE_FLUSH(sc);
8037 delay(2);
8038
8039 /* Shift in the READ command. */
8040 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
8041
8042 /* Shift in address. */
8043 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
8044
8045 /* Shift out the data. */
8046 wm_eeprom_recvbits(sc, &val, 16);
8047 data[i] = val & 0xffff;
8048
8049 /* Clear CHIP SELECT. */
8050 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
8051 CSR_WRITE(sc, WMREG_EECD, reg);
8052 CSR_WRITE_FLUSH(sc);
8053 delay(2);
8054 }
8055
8056 return 0;
8057 }
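/*
 * Editor's note: one Microwire READ transaction, as driven above and
 * assuming a 64-word part with 6 address bits, looks like:
 *
 *	raise CS; shift out the 3-bit READ opcode (UWIRE_OPC_READ);
 *	shift out A5..A0; clock 16 data bits in from DO, MSB first;
 *	drop CS.
 */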
8058
8059 /* SPI */
8060
8061 /*
8062 * Set SPI and FLASH related information from the EECD register.
8063 * For 82541 and 82547, the word size is taken from EEPROM.
8064 */
8065 static int
8066 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
8067 {
8068 int size;
8069 uint32_t reg;
8070 uint16_t data;
8071
8072 reg = CSR_READ(sc, WMREG_EECD);
8073 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
8074
8075 /* Read the size of NVM from EECD by default */
8076 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8077 switch (sc->sc_type) {
8078 case WM_T_82541:
8079 case WM_T_82541_2:
8080 case WM_T_82547:
8081 case WM_T_82547_2:
8082 /* Set dummy value to access EEPROM */
8083 sc->sc_nvm_wordsize = 64;
8084 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
8085 reg = data;
8086 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8087 if (size == 0)
8088 size = 6; /* 64 word size */
8089 else
8090 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
8091 break;
8092 case WM_T_80003:
8093 case WM_T_82571:
8094 case WM_T_82572:
8095 case WM_T_82573: /* SPI case */
8096 case WM_T_82574: /* SPI case */
8097 case WM_T_82583: /* SPI case */
8098 size += NVM_WORD_SIZE_BASE_SHIFT;
8099 if (size > 14)
8100 size = 14;
8101 break;
8102 case WM_T_82575:
8103 case WM_T_82576:
8104 case WM_T_82580:
8105 case WM_T_I350:
8106 case WM_T_I354:
8107 case WM_T_I210:
8108 case WM_T_I211:
8109 size += NVM_WORD_SIZE_BASE_SHIFT;
8110 if (size > 15)
8111 size = 15;
8112 break;
8113 default:
8114 aprint_error_dev(sc->sc_dev,
8115 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
8116 return -1;
8118 }
8119
8120 sc->sc_nvm_wordsize = 1 << size;
8121
8122 return 0;
8123 }
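/*
 * Editor's note: a worked example of the computation above, assuming
 * NVM_WORD_SIZE_BASE_SHIFT is 6 as in if_wmreg.h: an EECD size field of
 * 2 gives size = 2 + 6 = 8, so sc_nvm_wordsize = 1 << 8 = 256 words.
 */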
8124
8125 /*
8126 * wm_nvm_ready_spi:
8127 *
8128 * Wait for a SPI EEPROM to be ready for commands.
8129 */
8130 static int
8131 wm_nvm_ready_spi(struct wm_softc *sc)
8132 {
8133 uint32_t val;
8134 int usec;
8135
8136 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
8137 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
8138 wm_eeprom_recvbits(sc, &val, 8);
8139 if ((val & SPI_SR_RDY) == 0)
8140 break;
8141 }
8142 if (usec >= SPI_MAX_RETRIES) {
8143 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
8144 return 1;
8145 }
8146 return 0;
8147 }
8148
8149 /*
8150 * wm_nvm_read_spi:
8151 *
8152 *	Read a word from the EEPROM using the SPI protocol.
8153 */
8154 static int
8155 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8156 {
8157 uint32_t reg, val;
8158 int i;
8159 uint8_t opc;
8160
8161 /* Clear SK and CS. */
8162 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
8163 CSR_WRITE(sc, WMREG_EECD, reg);
8164 CSR_WRITE_FLUSH(sc);
8165 delay(2);
8166
8167 if (wm_nvm_ready_spi(sc))
8168 return 1;
8169
8170 /* Toggle CS to flush commands. */
8171 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
8172 CSR_WRITE_FLUSH(sc);
8173 delay(2);
8174 CSR_WRITE(sc, WMREG_EECD, reg);
8175 CSR_WRITE_FLUSH(sc);
8176 delay(2);
8177
8178 opc = SPI_OPC_READ;
8179 if (sc->sc_nvm_addrbits == 8 && word >= 128)
8180 opc |= SPI_OPC_A8;
8181
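	/* Shift out the READ opcode, then the byte address (word * 2). */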
8182 wm_eeprom_sendbits(sc, opc, 8);
8183 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
8184
8185 for (i = 0; i < wordcnt; i++) {
8186 wm_eeprom_recvbits(sc, &val, 16);
8187 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
8188 }
8189
8190 /* Raise CS and clear SK. */
8191 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
8192 CSR_WRITE(sc, WMREG_EECD, reg);
8193 CSR_WRITE_FLUSH(sc);
8194 delay(2);
8195
8196 return 0;
8197 }
8198
8199 /* Reading via the EERD register */
8200
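/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll the EERD or EEWR register (selected by "rw") until the DONE
 *	bit is set.  Returns 0 on success, -1 if it never completes.
 */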
8201 static int
8202 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
8203 {
8204 uint32_t attempts = 100000;
8205 uint32_t i, reg = 0;
8206 int32_t done = -1;
8207
8208 for (i = 0; i < attempts; i++) {
8209 reg = CSR_READ(sc, rw);
8210
8211 if (reg & EERD_DONE) {
8212 done = 0;
8213 break;
8214 }
8215 delay(5);
8216 }
8217
8218 return done;
8219 }
8220
8221 static int
8222 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
8223 uint16_t *data)
8224 {
8225 int i, eerd = 0;
8226 int error = 0;
8227
8228 for (i = 0; i < wordcnt; i++) {
8229 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
8230
8231 CSR_WRITE(sc, WMREG_EERD, eerd);
8232 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
8233 if (error != 0)
8234 break;
8235
8236 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
8237 }
8238
8239 return error;
8240 }
8241
8242 /* Flash */
8243
8244 static int
8245 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
8246 {
8247 uint32_t eecd;
8248 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
8249 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
8250 uint8_t sig_byte = 0;
8251
8252 switch (sc->sc_type) {
8253 case WM_T_ICH8:
8254 case WM_T_ICH9:
8255 eecd = CSR_READ(sc, WMREG_EECD);
8256 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
8257 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
8258 return 0;
8259 }
8260 /* FALLTHROUGH */
8261 default:
8262 /* Default to 0 */
8263 *bank = 0;
8264
8265 /* Check bank 0 */
8266 wm_read_ich8_byte(sc, act_offset, &sig_byte);
8267 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8268 *bank = 0;
8269 return 0;
8270 }
8271
8272 /* Check bank 1 */
8273 wm_read_ich8_byte(sc, act_offset + bank1_offset,
8274 &sig_byte);
8275 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8276 *bank = 1;
8277 return 0;
8278 }
8279 }
8280
8281 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
8282 device_xname(sc->sc_dev)));
8283 return -1;
8284 }
8285
8286 /******************************************************************************
8287 * This function does initial flash setup so that a new read/write/erase cycle
8288 * can be started.
8289 *
8290 * sc - The pointer to the hw structure
8291 ****************************************************************************/
8292 static int32_t
8293 wm_ich8_cycle_init(struct wm_softc *sc)
8294 {
8295 uint16_t hsfsts;
8296 int32_t error = 1;
8297 int32_t i = 0;
8298
8299 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8300
8301	/* Check that the Flash Descriptor Valid bit is set in HW status */
8302 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
8303 return error;
8304 }
8305
8306	/* Clear FCERR and DAEL in HW status by writing 1s */
8308 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
8309
8310 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8311
8312 /*
8313	 * Either we should have a hardware SPI cycle-in-progress bit to
8314	 * check against before starting a new cycle, or the FDONE bit
8315	 * should read as 1 after a hardware reset, so that it could be
8316	 * used to tell whether a cycle is in progress or has completed.
8317	 * We should also have a software semaphore mechanism guarding
8318	 * FDONE or the cycle-in-progress bit, so that accesses from two
8319	 * threads are serialized and two threads don't start a cycle at
8320	 * the same time.
8321 */
8322
8323 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8324 /*
8325 * There is no cycle running at present, so we can start a
8326 * cycle
8327 */
8328
8329 /* Begin by setting Flash Cycle Done. */
8330 hsfsts |= HSFSTS_DONE;
8331 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8332 error = 0;
8333 } else {
8334 /*
8335		 * Otherwise poll for some time so the current cycle has a
8336 * chance to end before giving up.
8337 */
8338 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
8339 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8340 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8341 error = 0;
8342 break;
8343 }
8344 delay(1);
8345 }
8346 if (error == 0) {
8347 /*
8348			 * The previous cycle ended within the timeout, so
8349			 * now set the Flash Cycle Done.
8350 */
8351 hsfsts |= HSFSTS_DONE;
8352 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8353 }
8354 }
8355 return error;
8356 }
8357
8358 /******************************************************************************
8359 * This function starts a flash cycle and waits for its completion
8360 *
8361 * sc - The pointer to the hw structure
8362 ****************************************************************************/
8363 static int32_t
8364 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
8365 {
8366 uint16_t hsflctl;
8367 uint16_t hsfsts;
8368 int32_t error = 1;
8369 uint32_t i = 0;
8370
8371 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
8372 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8373 hsflctl |= HSFCTL_GO;
8374 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8375
8376 /* Wait till FDONE bit is set to 1 */
8377 do {
8378 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8379 if (hsfsts & HSFSTS_DONE)
8380 break;
8381 delay(1);
8382 i++;
8383 } while (i < timeout);
8384	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
8385 error = 0;
8386
8387 return error;
8388 }
8389
8390 /******************************************************************************
8391 * Reads a byte or word from the NVM using the ICH8 flash access registers.
8392 *
8393 * sc - The pointer to the hw structure
8394 * index - The index of the byte or word to read.
8395 * size - Size of data to read, 1=byte 2=word
8396 * data - Pointer to the word to store the value read.
8397 *****************************************************************************/
8398 static int32_t
8399 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
8400 uint32_t size, uint16_t *data)
8401 {
8402 uint16_t hsfsts;
8403 uint16_t hsflctl;
8404 uint32_t flash_linear_address;
8405 uint32_t flash_data = 0;
8406 int32_t error = 1;
8407 int32_t count = 0;
8408
8409	if (size < 1 || size > 2 || data == NULL ||
8410 index > ICH_FLASH_LINEAR_ADDR_MASK)
8411 return error;
8412
8413 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
8414 sc->sc_ich8_flash_base;
8415
8416 do {
8417 delay(1);
8418 /* Steps */
8419 error = wm_ich8_cycle_init(sc);
8420 if (error)
8421 break;
8422
8423 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8424 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
8425 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
8426 & HSFCTL_BCOUNT_MASK;
8427 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
8428 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8429
8430 /*
8431 * Write the last 24 bits of index into Flash Linear address
8432 * field in Flash Address
8433 */
8434		/* TODO: maybe check the index against the size of the flash */
8435
8436 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
8437
8438 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
8439
8440 /*
8441		 * If FCERR is set, clear it and retry the whole sequence a
8442		 * few more times; otherwise read the result out of Flash
8443		 * Data0, least significant byte first.
8445 */
8446 if (error == 0) {
8447 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
8448 if (size == 1)
8449 *data = (uint8_t)(flash_data & 0x000000FF);
8450 else if (size == 2)
8451 *data = (uint16_t)(flash_data & 0x0000FFFF);
8452 break;
8453 } else {
8454 /*
8455 * If we've gotten here, then things are probably
8456 * completely hosed, but if the error condition is
8457 * detected, it won't hurt to give it another try...
8458 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
8459 */
8460 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8461 if (hsfsts & HSFSTS_ERR) {
8462 /* Repeat for some time before giving up. */
8463 continue;
8464 } else if ((hsfsts & HSFSTS_DONE) == 0)
8465 break;
8466 }
8467 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8468
8469 return error;
8470 }
8471
8472 /******************************************************************************
8473 * Reads a single byte from the NVM using the ICH8 flash access registers.
8474 *
8475 * sc - pointer to the wm_softc structure
8476 * index - The index of the byte to read.
8477 * data - Pointer to a byte to store the value read.
8478 *****************************************************************************/
8479 static int32_t
8480 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
8481 {
8482 int32_t status;
8483 uint16_t word = 0;
8484
8485 status = wm_read_ich8_data(sc, index, 1, &word);
8486 if (status == 0)
8487 *data = (uint8_t)word;
8488 else
8489 *data = 0;
8490
8491 return status;
8492 }
8493
8494 /******************************************************************************
8495 * Reads a word from the NVM using the ICH8 flash access registers.
8496 *
8497 * sc - pointer to the wm_softc structure
8498 * index - The starting byte index of the word to read.
8499 * data - Pointer to a word to store the value read.
8500 *****************************************************************************/
8501 static int32_t
8502 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8503 {
8504 int32_t status;
8505
8506 status = wm_read_ich8_data(sc, index, 2, data);
8507 return status;
8508 }
8509
8510 /******************************************************************************
8511 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
8512 * register.
8513 *
8514 * sc - Struct containing variables accessed by shared code
8515 * offset - offset of word in the EEPROM to read
8516 * data - word read from the EEPROM
8517 * words - number of words to read
8518 *****************************************************************************/
8519 static int
8520 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
8521 {
8522 int32_t error = 0;
8523 uint32_t flash_bank = 0;
8524 uint32_t act_offset = 0;
8525 uint32_t bank_offset = 0;
8526 uint16_t word = 0;
8527 uint16_t i = 0;
8528
8529 /*
8530 * We need to know which is the valid flash bank. In the event
8531 * that we didn't allocate eeprom_shadow_ram, we may not be
8532 * managing flash_bank. So it cannot be trusted and needs
8533 * to be updated with each read.
8534 */
8535 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
8536 if (error) {
8537 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
8538 device_xname(sc->sc_dev)));
8539 flash_bank = 0;
8540 }
8541
8542 /*
8543	 * Adjust the offset if we're on bank 1, accounting for the word
8544	 * size.
8545 */
8546 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
8547
8548 error = wm_get_swfwhw_semaphore(sc);
8549 if (error) {
8550 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8551 __func__);
8552 return error;
8553 }
8554
8555 for (i = 0; i < words; i++) {
8556 /* The NVM part needs a byte offset, hence * 2 */
8557 act_offset = bank_offset + ((offset + i) * 2);
8558 error = wm_read_ich8_word(sc, act_offset, &word);
8559 if (error) {
8560 aprint_error_dev(sc->sc_dev,
8561 "%s: failed to read NVM\n", __func__);
8562 break;
8563 }
8564 data[i] = word;
8565 }
8566
8567 wm_put_swfwhw_semaphore(sc);
8568 return error;
8569 }
8570
8571 /* Lock, detecting NVM type, validate checksum and read */
8572
8573 /*
8574 * wm_nvm_acquire:
8575 *
8576 * Perform the EEPROM handshake required on some chips.
8577 */
8578 static int
8579 wm_nvm_acquire(struct wm_softc *sc)
8580 {
8581 uint32_t reg;
8582 int x;
8583 int ret = 0;
8584
8585	/* Flash-type NVM needs no handshake; always succeeds */
8586 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8587 return 0;
8588
8589 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8590 ret = wm_get_swfwhw_semaphore(sc);
8591 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
8592 /* This will also do wm_get_swsm_semaphore() if needed */
8593 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
8594 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
8595 ret = wm_get_swsm_semaphore(sc);
8596 }
8597
8598 if (ret) {
8599 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8600 __func__);
8601 return 1;
8602 }
8603
8604 if (sc->sc_flags & WM_F_LOCK_EECD) {
8605 reg = CSR_READ(sc, WMREG_EECD);
8606
8607 /* Request EEPROM access. */
8608 reg |= EECD_EE_REQ;
8609 CSR_WRITE(sc, WMREG_EECD, reg);
8610
8611 /* ..and wait for it to be granted. */
8612 for (x = 0; x < 1000; x++) {
8613 reg = CSR_READ(sc, WMREG_EECD);
8614 if (reg & EECD_EE_GNT)
8615 break;
8616 delay(5);
8617 }
8618 if ((reg & EECD_EE_GNT) == 0) {
8619 aprint_error_dev(sc->sc_dev,
8620 "could not acquire EEPROM GNT\n");
8621 reg &= ~EECD_EE_REQ;
8622 CSR_WRITE(sc, WMREG_EECD, reg);
8623 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8624 wm_put_swfwhw_semaphore(sc);
8625 if (sc->sc_flags & WM_F_LOCK_SWFW)
8626 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8627 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8628 wm_put_swsm_semaphore(sc);
8629 return 1;
8630 }
8631 }
8632
8633 return 0;
8634 }
8635
8636 /*
8637 * wm_nvm_release:
8638 *
8639 * Release the EEPROM mutex.
8640 */
8641 static void
8642 wm_nvm_release(struct wm_softc *sc)
8643 {
8644 uint32_t reg;
8645
8646	/* Flash-type NVM needs no handshake; nothing to release */
8647 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8648 return;
8649
8650 if (sc->sc_flags & WM_F_LOCK_EECD) {
8651 reg = CSR_READ(sc, WMREG_EECD);
8652 reg &= ~EECD_EE_REQ;
8653 CSR_WRITE(sc, WMREG_EECD, reg);
8654 }
8655
8656 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8657 wm_put_swfwhw_semaphore(sc);
8658 if (sc->sc_flags & WM_F_LOCK_SWFW)
8659 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8660 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8661 wm_put_swsm_semaphore(sc);
8662 }
8663
8664 static int
8665 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
8666 {
8667 uint32_t eecd = 0;
8668
8669 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
8670 || sc->sc_type == WM_T_82583) {
8671 eecd = CSR_READ(sc, WMREG_EECD);
8672
8673 /* Isolate bits 15 & 16 */
8674 eecd = ((eecd >> 15) & 0x03);
8675
8676 /* If both bits are set, device is Flash type */
8677 if (eecd == 0x03)
8678 return 0;
8679 }
8680 return 1;
8681 }
8682
8683 /*
8684 * wm_nvm_validate_checksum
8685 *
8686 * The checksum is defined as the 16-bit sum of the first 64 words; it must equal NVM_CHECKSUM.
8687 */
8688 static int
8689 wm_nvm_validate_checksum(struct wm_softc *sc)
8690 {
8691 uint16_t checksum;
8692 uint16_t eeprom_data;
8693 #ifdef WM_DEBUG
8694 uint16_t csum_wordaddr, valid_checksum;
8695 #endif
8696 int i;
8697
8698 checksum = 0;
8699
8700 /* Don't check for I211 */
8701 if (sc->sc_type == WM_T_I211)
8702 return 0;
8703
8704 #ifdef WM_DEBUG
8705 if (sc->sc_type == WM_T_PCH_LPT) {
8706 csum_wordaddr = NVM_OFF_COMPAT;
8707 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
8708 } else {
8709 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
8710 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
8711 }
8712
8713 /* Dump EEPROM image for debug */
8714 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8715 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8716 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
8717 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
8718 if ((eeprom_data & valid_checksum) == 0) {
8719 DPRINTF(WM_DEBUG_NVM,
8720 ("%s: NVM need to be updated (%04x != %04x)\n",
8721 device_xname(sc->sc_dev), eeprom_data,
8722 valid_checksum));
8723 }
8724 }
8725
8726 if ((wm_debug & WM_DEBUG_NVM) != 0) {
8727 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
8728 for (i = 0; i < NVM_SIZE; i++) {
8729 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8730 printf("XXXX ");
8731 else
8732 printf("%04hx ", eeprom_data);
8733 if (i % 8 == 7)
8734 printf("\n");
8735 }
8736 }
8737
8738 #endif /* WM_DEBUG */
8739
8740 for (i = 0; i < NVM_SIZE; i++) {
8741 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8742 return 1;
8743 checksum += eeprom_data;
8744 }
8745
8746 if (checksum != (uint16_t) NVM_CHECKSUM) {
8747 #ifdef WM_DEBUG
8748 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
8749 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
8750 #endif
8751 }
8752
8753 return 0;
8754 }
8755
8756 /*
8757 * wm_nvm_read:
8758 *
8759 * Read data from the serial EEPROM.
8760 */
8761 static int
8762 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8763 {
8764 int rv;
8765
8766 if (sc->sc_flags & WM_F_EEPROM_INVALID)
8767 return 1;
8768
8769 if (wm_nvm_acquire(sc))
8770 return 1;
8771
8772 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8773 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8774 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
8775 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
8776 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
8777 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
8778 else if (sc->sc_flags & WM_F_EEPROM_SPI)
8779 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
8780 else
8781 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
8782
8783 wm_nvm_release(sc);
8784 return rv;
8785 }
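/*
 * Editor's note: a minimal usage sketch for wm_nvm_read(), assuming the
 * station address lives at NVM_OFF_MACADDR as elsewhere in this driver
 * (hypothetical debug fragment for use inside a function, not part of
 * the driver):
 */
#if 0	/* illustrative only */
	uint16_t myea[3];

	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, myea) == 0)
		printf("ea: %04x %04x %04x\n", myea[0], myea[1], myea[2]);
#endif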
8786
8787 /*
8788 * Hardware semaphores.
8789 * Very complex...
8790 */
8791
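/*
 * The SWSM handshake has two stages: first take the software semaphore
 * (SMBI reads back as 0), then claim the software/firmware semaphore by
 * setting SWESMBI and reading it back to see whether the write stuck.
 */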
8792 static int
8793 wm_get_swsm_semaphore(struct wm_softc *sc)
8794 {
8795 int32_t timeout;
8796 uint32_t swsm;
8797
8798 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8799 /* Get the SW semaphore. */
8800 timeout = sc->sc_nvm_wordsize + 1;
8801 while (timeout) {
8802 swsm = CSR_READ(sc, WMREG_SWSM);
8803
8804 if ((swsm & SWSM_SMBI) == 0)
8805 break;
8806
8807 delay(50);
8808 timeout--;
8809 }
8810
8811 if (timeout == 0) {
8812 aprint_error_dev(sc->sc_dev,
8813 "could not acquire SWSM SMBI\n");
8814 return 1;
8815 }
8816 }
8817
8818 /* Get the FW semaphore. */
8819 timeout = sc->sc_nvm_wordsize + 1;
8820 while (timeout) {
8821 swsm = CSR_READ(sc, WMREG_SWSM);
8822 swsm |= SWSM_SWESMBI;
8823 CSR_WRITE(sc, WMREG_SWSM, swsm);
8824 /* If we managed to set the bit we got the semaphore. */
8825 swsm = CSR_READ(sc, WMREG_SWSM);
8826 if (swsm & SWSM_SWESMBI)
8827 break;
8828
8829 delay(50);
8830 timeout--;
8831 }
8832
8833 if (timeout == 0) {
8834 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
8835 /* Release semaphores */
8836 wm_put_swsm_semaphore(sc);
8837 return 1;
8838 }
8839 return 0;
8840 }
8841
8842 static void
8843 wm_put_swsm_semaphore(struct wm_softc *sc)
8844 {
8845 uint32_t swsm;
8846
8847 swsm = CSR_READ(sc, WMREG_SWSM);
8848 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
8849 CSR_WRITE(sc, WMREG_SWSM, swsm);
8850 }
8851
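/*
 * SW_FW_SYNC holds one bit per resource for the driver (low half, via
 * SWFW_SOFT_SHIFT) and one for the firmware (high half, via
 * SWFW_FIRM_SHIFT).  A resource is free only when both bits are clear,
 * and the register itself is guarded by the SWSM semaphore.
 */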
8852 static int
8853 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8854 {
8855 uint32_t swfw_sync;
8856 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
8857 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
8858	int timeout;
8859
8860 for (timeout = 0; timeout < 200; timeout++) {
8861 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8862 if (wm_get_swsm_semaphore(sc)) {
8863 aprint_error_dev(sc->sc_dev,
8864 "%s: failed to get semaphore\n",
8865 __func__);
8866 return 1;
8867 }
8868 }
8869 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8870 if ((swfw_sync & (swmask | fwmask)) == 0) {
8871 swfw_sync |= swmask;
8872 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8873 if (sc->sc_flags & WM_F_LOCK_SWSM)
8874 wm_put_swsm_semaphore(sc);
8875 return 0;
8876 }
8877 if (sc->sc_flags & WM_F_LOCK_SWSM)
8878 wm_put_swsm_semaphore(sc);
8879 delay(5000);
8880 }
8881 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
8882 device_xname(sc->sc_dev), mask, swfw_sync);
8883 return 1;
8884 }
8885
8886 static void
8887 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8888 {
8889 uint32_t swfw_sync;
8890
8891 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8892 while (wm_get_swsm_semaphore(sc) != 0)
8893 continue;
8894 }
8895 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8896 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
8897 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8898 if (sc->sc_flags & WM_F_LOCK_SWSM)
8899 wm_put_swsm_semaphore(sc);
8900 }
8901
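/*
 * The EXTCNFCTR software flag behaves like SWESMBI: set SWFLAG and read
 * it back; the bit sticks only if the hardware granted us the semaphore.
 */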
8902 static int
8903 wm_get_swfwhw_semaphore(struct wm_softc *sc)
8904 {
8905 uint32_t ext_ctrl;
8906	int timeout;
8907
8908 for (timeout = 0; timeout < 200; timeout++) {
8909 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8910 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
8911 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8912
8913 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8914 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
8915 return 0;
8916 delay(5000);
8917 }
8918 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
8919 device_xname(sc->sc_dev), ext_ctrl);
8920 return 1;
8921 }
8922
8923 static void
8924 wm_put_swfwhw_semaphore(struct wm_softc *sc)
8925 {
8926 uint32_t ext_ctrl;
8927 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8928 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
8929 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8930 }
8931
8932 static int
8933 wm_get_hw_semaphore_82573(struct wm_softc *sc)
8934 {
8935 int i = 0;
8936 uint32_t reg;
8937
8938 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8939 do {
8940 CSR_WRITE(sc, WMREG_EXTCNFCTR,
8941 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
8942 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8943 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
8944 break;
8945 delay(2*1000);
8946 i++;
8947 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
8948
8949 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
8950 wm_put_hw_semaphore_82573(sc);
8951 log(LOG_ERR, "%s: Driver can't access the PHY\n",
8952 device_xname(sc->sc_dev));
8953 return -1;
8954 }
8955
8956 return 0;
8957 }
8958
8959 static void
8960 wm_put_hw_semaphore_82573(struct wm_softc *sc)
8961 {
8962 uint32_t reg;
8963
8964 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8965 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
8966 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8967 }
8968
8969 /*
8970 * Management mode and power management related subroutines.
8971 * BMC, AMT, suspend/resume and EEE.
8972 */
8973
8974 static int
8975 wm_check_mng_mode(struct wm_softc *sc)
8976 {
8977 int rv;
8978
8979 switch (sc->sc_type) {
8980 case WM_T_ICH8:
8981 case WM_T_ICH9:
8982 case WM_T_ICH10:
8983 case WM_T_PCH:
8984 case WM_T_PCH2:
8985 case WM_T_PCH_LPT:
8986 rv = wm_check_mng_mode_ich8lan(sc);
8987 break;
8988 case WM_T_82574:
8989 case WM_T_82583:
8990 rv = wm_check_mng_mode_82574(sc);
8991 break;
8992 case WM_T_82571:
8993 case WM_T_82572:
8994 case WM_T_82573:
8995 case WM_T_80003:
8996 rv = wm_check_mng_mode_generic(sc);
8997 break;
8998 default:
8999		/* nothing to do */
9000 rv = 0;
9001 break;
9002 }
9003
9004 return rv;
9005 }
9006
9007 static int
9008 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
9009 {
9010 uint32_t fwsm;
9011
9012 fwsm = CSR_READ(sc, WMREG_FWSM);
9013
9014 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
9015 return 1;
9016
9017 return 0;
9018 }
9019
9020 static int
9021 wm_check_mng_mode_82574(struct wm_softc *sc)
9022 {
9023 uint16_t data;
9024
9025 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
9026
9027 if ((data & NVM_CFG2_MNGM_MASK) != 0)
9028 return 1;
9029
9030 return 0;
9031 }
9032
9033 static int
9034 wm_check_mng_mode_generic(struct wm_softc *sc)
9035 {
9036 uint32_t fwsm;
9037
9038 fwsm = CSR_READ(sc, WMREG_FWSM);
9039
9040 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
9041 return 1;
9042
9043 return 0;
9044 }
9045
9046 static int
9047 wm_enable_mng_pass_thru(struct wm_softc *sc)
9048 {
9049 uint32_t manc, fwsm, factps;
9050
9051 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
9052 return 0;
9053
9054 manc = CSR_READ(sc, WMREG_MANC);
9055
9056 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
9057 device_xname(sc->sc_dev), manc));
9058 if ((manc & MANC_RECV_TCO_EN) == 0)
9059 return 0;
9060
9061 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
9062 fwsm = CSR_READ(sc, WMREG_FWSM);
9063 factps = CSR_READ(sc, WMREG_FACTPS);
9064 if (((factps & FACTPS_MNGCG) == 0)
9065 && ((fwsm & FWSM_MODE_MASK)
9066 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
9067 return 1;
9068	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
9069 uint16_t data;
9070
9071 factps = CSR_READ(sc, WMREG_FACTPS);
9072 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
9073 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
9074 device_xname(sc->sc_dev), factps, data));
9075 if (((factps & FACTPS_MNGCG) == 0)
9076 && ((data & NVM_CFG2_MNGM_MASK)
9077 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
9078 return 1;
9079 } else if (((manc & MANC_SMBUS_EN) != 0)
9080 && ((manc & MANC_ASF_EN) == 0))
9081 return 1;
9082
9083 return 0;
9084 }
9085
9086 static int
9087 wm_check_reset_block(struct wm_softc *sc)
9088 {
9089 uint32_t reg;
9090
9091 switch (sc->sc_type) {
9092 case WM_T_ICH8:
9093 case WM_T_ICH9:
9094 case WM_T_ICH10:
9095 case WM_T_PCH:
9096 case WM_T_PCH2:
9097 case WM_T_PCH_LPT:
9098 reg = CSR_READ(sc, WMREG_FWSM);
9099 if ((reg & FWSM_RSPCIPHY) != 0)
9100 return 0;
9101 else
9102 return -1;
9104 case WM_T_82571:
9105 case WM_T_82572:
9106 case WM_T_82573:
9107 case WM_T_82574:
9108 case WM_T_82583:
9109 case WM_T_80003:
9110 reg = CSR_READ(sc, WMREG_MANC);
9111 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
9112 return -1;
9113 else
9114 return 0;
9116 default:
9117 /* no problem */
9118 break;
9119 }
9120
9121 return 0;
9122 }
9123
9124 static void
9125 wm_get_hw_control(struct wm_softc *sc)
9126 {
9127 uint32_t reg;
9128
9129 switch (sc->sc_type) {
9130 case WM_T_82573:
9131 reg = CSR_READ(sc, WMREG_SWSM);
9132 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
9133 break;
9134 case WM_T_82571:
9135 case WM_T_82572:
9136 case WM_T_82574:
9137 case WM_T_82583:
9138 case WM_T_80003:
9139 case WM_T_ICH8:
9140 case WM_T_ICH9:
9141 case WM_T_ICH10:
9142 case WM_T_PCH:
9143 case WM_T_PCH2:
9144 case WM_T_PCH_LPT:
9145 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9146 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
9147 break;
9148 default:
9149 break;
9150 }
9151 }
9152
9153 static void
9154 wm_release_hw_control(struct wm_softc *sc)
9155 {
9156 uint32_t reg;
9157
9158 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
9159 return;
9160
9161 if (sc->sc_type == WM_T_82573) {
9162 reg = CSR_READ(sc, WMREG_SWSM);
9163 reg &= ~SWSM_DRV_LOAD;
9164		CSR_WRITE(sc, WMREG_SWSM, reg);
9165 } else {
9166 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9167 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
9168 }
9169 }
9170
9171 static void
9172 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
9173 {
9174 uint32_t reg;
9175
9176 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
9177
9178 if (on != 0)
9179 reg |= EXTCNFCTR_GATE_PHY_CFG;
9180 else
9181 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
9182
9183 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
9184 }
9185
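/*
 * wm_smbustopci:
 *
 *	Toggle LANPHYPC to switch the PHY connection from SMBus to PCIe
 *	when no valid firmware is present and resets are not blocked.
 */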
9186 static void
9187 wm_smbustopci(struct wm_softc *sc)
9188 {
9189 uint32_t fwsm;
9190
9191 fwsm = CSR_READ(sc, WMREG_FWSM);
9192 if (((fwsm & FWSM_FW_VALID) == 0)
9193 && ((wm_check_reset_block(sc) == 0))) {
9194 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
9195 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
9196 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9197 CSR_WRITE_FLUSH(sc);
9198 delay(10);
9199 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
9200 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9201 CSR_WRITE_FLUSH(sc);
9202 delay(50*1000);
9203
9204 /*
9205 * Gate automatic PHY configuration by hardware on non-managed
9206 * 82579
9207 */
9208 if (sc->sc_type == WM_T_PCH2)
9209 wm_gate_hw_phy_config_ich8lan(sc, 1);
9210 }
9211 }
9212
9213 static void
9214 wm_init_manageability(struct wm_softc *sc)
9215 {
9216
9217 if (sc->sc_flags & WM_F_HAS_MANAGE) {
9218 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
9219 uint32_t manc = CSR_READ(sc, WMREG_MANC);
9220
9221 /* Disable hardware interception of ARP */
9222 manc &= ~MANC_ARP_EN;
9223
9224 /* Enable receiving management packets to the host */
9225 if (sc->sc_type >= WM_T_82571) {
9226 manc |= MANC_EN_MNG2HOST;
9227			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
9228			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
9230 }
9231
9232 CSR_WRITE(sc, WMREG_MANC, manc);
9233 }
9234 }
9235
9236 static void
9237 wm_release_manageability(struct wm_softc *sc)
9238 {
9239
9240 if (sc->sc_flags & WM_F_HAS_MANAGE) {
9241 uint32_t manc = CSR_READ(sc, WMREG_MANC);
9242
9243 manc |= MANC_ARP_EN;
9244 if (sc->sc_type >= WM_T_82571)
9245 manc &= ~MANC_EN_MNG2HOST;
9246
9247 CSR_WRITE(sc, WMREG_MANC, manc);
9248 }
9249 }
9250
9251 static void
9252 wm_get_wakeup(struct wm_softc *sc)
9253 {
9254
9255 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
9256 switch (sc->sc_type) {
9257 case WM_T_82573:
9258 case WM_T_82583:
9259 sc->sc_flags |= WM_F_HAS_AMT;
9260 /* FALLTHROUGH */
9261 case WM_T_80003:
9262 case WM_T_82541:
9263 case WM_T_82547:
9264 case WM_T_82571:
9265 case WM_T_82572:
9266 case WM_T_82574:
9267 case WM_T_82575:
9268 case WM_T_82576:
9269 case WM_T_82580:
9270 case WM_T_I350:
9271 case WM_T_I354:
9272 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
9273 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
9274 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9275 break;
9276 case WM_T_ICH8:
9277 case WM_T_ICH9:
9278 case WM_T_ICH10:
9279 case WM_T_PCH:
9280 case WM_T_PCH2:
9281 case WM_T_PCH_LPT:
9282 sc->sc_flags |= WM_F_HAS_AMT;
9283 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9284 break;
9285 default:
9286 break;
9287 }
9288
9289 /* 1: HAS_MANAGE */
9290 if (wm_enable_mng_pass_thru(sc) != 0)
9291 sc->sc_flags |= WM_F_HAS_MANAGE;
9292
9293 #ifdef WM_DEBUG
9294 printf("\n");
9295 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
9296 printf("HAS_AMT,");
9297 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
9298 printf("ARC_SUBSYS_VALID,");
9299 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
9300 printf("ASF_FIRMWARE_PRES,");
9301 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
9302 printf("HAS_MANAGE,");
9303 printf("\n");
9304 #endif
9305 /*
9306	 * Note that the WOL flag is set later, after the EEPROM/NVM
9307	 * contents have been read.
9308 */
9309 }
9310
9311 #ifdef WM_WOL
9312 /* WOL in the newer chipset interfaces (pchlan) */
9313 static void
9314 wm_enable_phy_wakeup(struct wm_softc *sc)
9315 {
9316 #if 0
9317 uint16_t preg;
9318
9319 /* Copy MAC RARs to PHY RARs */
9320
9321 /* Copy MAC MTA to PHY MTA */
9322
9323 /* Configure PHY Rx Control register */
9324
9325 /* Enable PHY wakeup in MAC register */
9326
9327 /* Configure and enable PHY wakeup in PHY registers */
9328
9329 /* Activate PHY wakeup */
9330
9331 /* XXX */
9332 #endif
9333 }
9334
9335 /* Power down workaround on D3 */
9336 static void
9337 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
9338 {
9339 uint32_t reg;
9340 int i;
9341
9342 for (i = 0; i < 2; i++) {
9343 /* Disable link */
9344 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9345 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9346 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9347
9348 /*
9349 * Call gig speed drop workaround on Gig disable before
9350 * accessing any PHY registers
9351 */
9352 if (sc->sc_type == WM_T_ICH8)
9353 wm_gig_downshift_workaround_ich8lan(sc);
9354
9355 /* Write VR power-down enable */
9356 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9357 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9358 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
9359 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
9360
9361 /* Read it back and test */
9362 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9363 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9364 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
9365 break;
9366
9367 /* Issue PHY reset and repeat at most one more time */
9368 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9369 }
9370 }
9371
9372 static void
9373 wm_enable_wakeup(struct wm_softc *sc)
9374 {
9375 uint32_t reg, pmreg;
9376 pcireg_t pmode;
9377
9378 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
9379 &pmreg, NULL) == 0)
9380 return;
9381
9382 /* Advertise the wakeup capability */
9383 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
9384 | CTRL_SWDPIN(3));
9385 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
9386
9387 /* ICH workaround */
9388 switch (sc->sc_type) {
9389 case WM_T_ICH8:
9390 case WM_T_ICH9:
9391 case WM_T_ICH10:
9392 case WM_T_PCH:
9393 case WM_T_PCH2:
9394 case WM_T_PCH_LPT:
9395 /* Disable gig during WOL */
9396 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9397 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
9398 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9399 if (sc->sc_type == WM_T_PCH)
9400 wm_gmii_reset(sc);
9401
9402 /* Power down workaround */
9403 if (sc->sc_phytype == WMPHY_82577) {
9404 struct mii_softc *child;
9405
9406 /* Assume that the PHY is copper */
9407 child = LIST_FIRST(&sc->sc_mii.mii_phys);
9408 if (child->mii_mpd_rev <= 2)
9409 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
9410 (768 << 5) | 25, 0x0444); /* magic num */
9411 }
9412 break;
9413 default:
9414 break;
9415 }
9416
9417 /* Keep the laser running on fiber adapters */
9418 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
9419 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
9420 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9421 reg |= CTRL_EXT_SWDPIN(3);
9422 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9423 }
9424
9425 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
9426 #if 0 /* for the multicast packet */
9427 reg |= WUFC_MC;
9428 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
9429 #endif
9430
9431 if (sc->sc_type == WM_T_PCH) {
9432 wm_enable_phy_wakeup(sc);
9433 } else {
9434 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
9435 CSR_WRITE(sc, WMREG_WUFC, reg);
9436 }
9437
9438 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9439 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9440 || (sc->sc_type == WM_T_PCH2))
9441 && (sc->sc_phytype == WMPHY_IGP_3))
9442 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
9443
9444 /* Request PME */
9445 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
9446 #if 0
9447 /* Disable WOL */
9448 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
9449 #else
9450 /* For WOL */
9451 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
9452 #endif
9453 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
9454 }
9455 #endif /* WM_WOL */
9456
9457 /* EEE */
9458
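/*
 * EEE (IEEE 802.3az, Energy Efficient Ethernet) lets a link idle in a
 * low-power (LPI) state between packets.  For the I350 we advertise EEE
 * at 1G and 100M and enable transmit and receive LPI together.
 */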
9459 static void
9460 wm_set_eee_i350(struct wm_softc *sc)
9461 {
9462 uint32_t ipcnfg, eeer;
9463
9464 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
9465 eeer = CSR_READ(sc, WMREG_EEER);
9466
9467 if ((sc->sc_flags & WM_F_EEE) != 0) {
9468 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9469 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
9470 | EEER_LPI_FC);
9471 } else {
9472 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9473 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
9474 | EEER_LPI_FC);
9475 }
9476
9477 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
9478 CSR_WRITE(sc, WMREG_EEER, eeer);
9479 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
9480 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
9481 }
9482
9483 /*
9484 * Workarounds (mainly PHY related).
9485 * Basically, PHY's workarounds are in the PHY drivers.
9486 */
9487
9488 /* Work-around for 82566 Kumeran PCS lock loss */
9489 static void
9490 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
9491 {
9492 int miistatus, active, i;
9493 int reg;
9494
9495 miistatus = sc->sc_mii.mii_media_status;
9496
9497 /* If the link is not up, do nothing */
9498	if ((miistatus & IFM_ACTIVE) == 0)
9499 return;
9500
9501 active = sc->sc_mii.mii_media_active;
9502
9503 /* Nothing to do if the link is other than 1Gbps */
9504 if (IFM_SUBTYPE(active) != IFM_1000_T)
9505 return;
9506
9507 for (i = 0; i < 10; i++) {
9508 /* read twice */
9509 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9510 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9511 if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) != 0)
9512 goto out; /* GOOD! */
9513
9514 /* Reset the PHY */
9515 wm_gmii_reset(sc);
9516 delay(5*1000);
9517 }
9518
9519 /* Disable GigE link negotiation */
9520 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9521 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9522 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9523
9524 /*
9525 * Call gig speed drop workaround on Gig disable before accessing
9526 * any PHY registers.
9527 */
9528 wm_gig_downshift_workaround_ich8lan(sc);
9529
9530 out:
9531 return;
9532 }
9533
9534 /* WOL from S5 stops working */
9535 static void
9536 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
9537 {
9538 uint16_t kmrn_reg;
9539
9540 /* Only for igp3 */
9541 if (sc->sc_phytype == WMPHY_IGP_3) {
9542 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
9543 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
9544 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9545 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
9546 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9547 }
9548 }
9549
9550 /*
9551 * Workaround for pch's PHYs
9552 * XXX should be moved to new PHY driver?
9553 */
9554 static void
9555 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
9556 {
9557 if (sc->sc_phytype == WMPHY_82577)
9558 wm_set_mdio_slow_mode_hv(sc);
9559
9560 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
9561
9562 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
9563
9564 /* 82578 */
9565 if (sc->sc_phytype == WMPHY_82578) {
9566 /* PCH rev. < 3 */
9567 if (sc->sc_rev < 3) {
9568 /* XXX 6 bit shift? Why? Is it page2? */
9569 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
9570 0x66c0);
9571 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
9572 0xffff);
9573 }
9574
9575 /* XXX phy rev. < 2 */
9576 }
9577
9578 /* Select page 0 */
9579
9580 /* XXX acquire semaphore */
9581 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
9582 /* XXX release semaphore */
9583
9584 /*
9585 * Configure the K1 Si workaround during phy reset assuming there is
9586 * link so that it disables K1 if link is in 1Gbps.
9587 */
9588 wm_k1_gig_workaround_hv(sc, 1);
9589 }
9590
9591 static void
9592 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
9593 {
9594
9595 wm_set_mdio_slow_mode_hv(sc);
9596 }
9597
9598 static void
9599 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
9600 {
9601 int k1_enable = sc->sc_nvm_k1_enabled;
9602
9603 /* XXX acquire semaphore */
9604
9605 if (link) {
9606 k1_enable = 0;
9607
9608 /* Link stall fix for link up */
9609 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
9610 } else {
9611 /* Link stall fix for link down */
9612 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
9613 }
9614
9615 wm_configure_k1_ich8lan(sc, k1_enable);
9616
9617 /* XXX release semaphore */
9618 }
9619
9620 static void
9621 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
9622 {
9623 uint32_t reg;
9624
9625 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
9626 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
9627 reg | HV_KMRN_MDIO_SLOW);
9628 }
9629
9630 static void
9631 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
9632 {
9633 uint32_t ctrl, ctrl_ext, tmp;
9634 uint16_t kmrn_reg;
9635
9636 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
9637
9638 if (k1_enable)
9639 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
9640 else
9641 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
9642
9643 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
9644
9645 delay(20);
9646
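	/*
	 * Temporarily force the MAC speed (SPD_BYPS) so that the K1
	 * setting latches, then restore CTRL and CTRL_EXT below.
	 */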
9647 ctrl = CSR_READ(sc, WMREG_CTRL);
9648 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9649
9650 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
9651 tmp |= CTRL_FRCSPD;
9652
9653 CSR_WRITE(sc, WMREG_CTRL, tmp);
9654 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
9655 CSR_WRITE_FLUSH(sc);
9656 delay(20);
9657
9658 CSR_WRITE(sc, WMREG_CTRL, ctrl);
9659 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9660 CSR_WRITE_FLUSH(sc);
9661 delay(20);
9662 }
9663
9664 /* Special case: the 82575 needs to do manual init ... */
9665 static void
9666 wm_reset_init_script_82575(struct wm_softc *sc)
9667 {
9668 /*
9669	 * Remark: this is untested code - we have no board without EEPROM.
9670	 * Same setup as mentioned in the FreeBSD driver for the i82575.
9671 */
9672
9673 /* SerDes configuration via SERDESCTRL */
9674 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
9675 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
9676 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
9677 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
9678
9679 /* CCM configuration via CCMCTL register */
9680 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
9681 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
9682
9683 /* PCIe lanes configuration */
9684 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
9685 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
9686 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
9687 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
9688
9689 /* PCIe PLL Configuration */
9690 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
9691 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
9692 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
9693 }
9694