/*	$NetBSD: if_wm.c,v 1.310 2015/01/29 02:59:17 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- EEE (Energy Efficient Ethernet)
 *	- MSI/MSI-X
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.310 2015/01/29 02:59:17 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
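
/*
 * Usage sketch (illustrative, not a verbatim excerpt from this file):
 * the second DPRINTF() argument is a complete, parenthesized printf()
 * argument list, selected by the debug-class bits in the first, e.g.
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n",
 *	    device_xname(sc->sc_dev)));
 */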

#ifdef NET_MPSAFE
#define	WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define	WM_TX_LOCK(_sc)		if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define	WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define	WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define	WM_RX_LOCK(_sc)		if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define	WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define	WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define	WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define	WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define	WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
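
/*
 * Note (added commentary): WM_BOTH_LOCK always takes the Tx lock
 * before the Rx lock, and WM_BOTH_UNLOCK releases them in the reverse
 * order, so every path that needs both locks uses one consistent
 * ordering and cannot deadlock against another such path.
 */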

#ifdef WM_MPSAFE
#define	CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define	CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
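
/*
 * Usage sketch (illustrative, not a verbatim excerpt): the Rx path
 * starts a fresh chain with WM_RXCHAIN_RESET(sc), then calls
 * WM_RXCHAIN_LINK(sc, m) once per filled buffer; sc_rxhead then
 * points at the completed packet's mbuf chain.
 */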

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
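
/*
 * Worked example (illustrative): with WM_NTXDESC(sc) == 4096, the
 * call WM_CDTXSYNC(sc, 4090, 10, ops) would run past the end of the
 * ring, so the macro first syncs descriptors 4090..4095 (6 entries),
 * then wraps and syncs descriptors 0..3 (the remaining 4 entries).
 */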

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
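
/*
 * Worked example (illustrative): a standard Ethernet frame fits in a
 * 2048-byte buffer even when shifted forward by 2, so sc_align_tweak
 * is 2 and the 14-byte Ethernet header leaves the IP header 4-byte
 * aligned.  A jumbo frame may need the full 2048 bytes, so on
 * strict-alignment platforms sc_align_tweak must then be 0 and the
 * upper layers copy the headers into alignment instead.
 */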

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_mediainit(struct wm_softc *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		0x00
#define	WMP_F_FIBER		0x01
#define	WMP_F_COPPER		0x02
#define	WMP_F_SERDES		0x03 /* Internal SERDES */
#define	WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
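
/*
 * Note (added commentary): the I/O BAR is an address/data window pair:
 * the CSR offset is written at I/O offset 0, and the register value is
 * then read or written through I/O offset 4.
 */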

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the
			 * 82580 (and newer?) chips have no
			 * PCI_MAPREG_TYPE_IO; that's not a problem,
			 * since those chips don't have the bugs this
			 * mapping works around.
			 *
			 * The i8254x apparently doesn't respond when
			 * the I/O BAR is 0, which looks somewhat like
			 * it hasn't been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
#ifdef WM_MPSAFE
	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
#endif
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1533 /*
1534 		 * CSA (Communication Streaming Architecture) is about as fast
1535 		 * as a 32-bit 66MHz PCI bus.
1536 */
1537 sc->sc_flags |= WM_F_CSA;
1538 sc->sc_bus_speed = 66;
1539 aprint_verbose_dev(sc->sc_dev,
1540 "Communication Streaming Architecture\n");
1541 if (sc->sc_type == WM_T_82547) {
1542 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1543 callout_setfunc(&sc->sc_txfifo_ch,
1544 wm_82547_txfifo_stall, sc);
1545 aprint_verbose_dev(sc->sc_dev,
1546 "using 82547 Tx FIFO stall work-around\n");
1547 }
1548 } else if (sc->sc_type >= WM_T_82571) {
1549 sc->sc_flags |= WM_F_PCIE;
1550 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1551 && (sc->sc_type != WM_T_ICH10)
1552 && (sc->sc_type != WM_T_PCH)
1553 && (sc->sc_type != WM_T_PCH2)
1554 && (sc->sc_type != WM_T_PCH_LPT)) {
1555 /* ICH* and PCH* have no PCIe capability registers */
1556 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1557 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1558 NULL) == 0)
1559 aprint_error_dev(sc->sc_dev,
1560 "unable to find PCIe capability\n");
1561 }
1562 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1563 } else {
1564 reg = CSR_READ(sc, WMREG_STATUS);
1565 if (reg & STATUS_BUS64)
1566 sc->sc_flags |= WM_F_BUS64;
1567 if ((reg & STATUS_PCIX_MODE) != 0) {
1568 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1569
1570 sc->sc_flags |= WM_F_PCIX;
1571 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1572 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1573 aprint_error_dev(sc->sc_dev,
1574 "unable to find PCIX capability\n");
1575 else if (sc->sc_type != WM_T_82545_3 &&
1576 sc->sc_type != WM_T_82546_3) {
1577 /*
1578 * Work around a problem caused by the BIOS
1579 * setting the max memory read byte count
1580 * incorrectly.
1581 */
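				/*
				 * A worked example of the encoding: MMRBC is
				 * stored as 512 << n, so bytecnt/maxb values
				 * 0..3 mean 512, 1024, 2048 and 4096 bytes,
				 * matching "512 << bytecnt" in the message
				 * below.
				 */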
1582 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1583 sc->sc_pcixe_capoff + PCIX_CMD);
1584 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1585 sc->sc_pcixe_capoff + PCIX_STATUS);
1586
1587 bytecnt =
1588 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1589 PCIX_CMD_BYTECNT_SHIFT;
1590 maxb =
1591 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1592 PCIX_STATUS_MAXB_SHIFT;
1593 if (bytecnt > maxb) {
1594 aprint_verbose_dev(sc->sc_dev,
1595 "resetting PCI-X MMRBC: %d -> %d\n",
1596 512 << bytecnt, 512 << maxb);
1597 pcix_cmd = (pcix_cmd &
1598 ~PCIX_CMD_BYTECNT_MASK) |
1599 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1600 pci_conf_write(pa->pa_pc, pa->pa_tag,
1601 sc->sc_pcixe_capoff + PCIX_CMD,
1602 pcix_cmd);
1603 }
1604 }
1605 }
1606 /*
1607 * The quad port adapter is special; it has a PCIX-PCIX
1608 * bridge on the board, and can run the secondary bus at
1609 * a higher speed.
1610 */
1611 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1612 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1613 : 66;
1614 } else if (sc->sc_flags & WM_F_PCIX) {
1615 switch (reg & STATUS_PCIXSPD_MASK) {
1616 case STATUS_PCIXSPD_50_66:
1617 sc->sc_bus_speed = 66;
1618 break;
1619 case STATUS_PCIXSPD_66_100:
1620 sc->sc_bus_speed = 100;
1621 break;
1622 case STATUS_PCIXSPD_100_133:
1623 sc->sc_bus_speed = 133;
1624 break;
1625 default:
1626 aprint_error_dev(sc->sc_dev,
1627 "unknown PCIXSPD %d; assuming 66MHz\n",
1628 reg & STATUS_PCIXSPD_MASK);
1629 sc->sc_bus_speed = 66;
1630 break;
1631 }
1632 } else
1633 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1634 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1635 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1636 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1637 }
1638
1639 /*
1640 * Allocate the control data structures, and create and load the
1641 * DMA map for it.
1642 *
1643 * NOTE: All Tx descriptors must be in the same 4G segment of
1644 * memory. So must Rx descriptors. We simplify by allocating
1645 * both sets within the same 4G segment.
1646 */
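	/*
	 * (The fourth argument to bus_dmamem_alloc() below is the DMA
	 * boundary; passing 0x100000000 requests memory that does not
	 * cross a 4GB line, which enforces the constraint above.)
	 */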
1647 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1648 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1649 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1650 sizeof(struct wm_control_data_82542) :
1651 sizeof(struct wm_control_data_82544);
1652 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1653 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1654 &sc->sc_cd_rseg, 0)) != 0) {
1655 aprint_error_dev(sc->sc_dev,
1656 "unable to allocate control data, error = %d\n",
1657 error);
1658 goto fail_0;
1659 }
1660
1661 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1662 sc->sc_cd_rseg, sc->sc_cd_size,
1663 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1664 aprint_error_dev(sc->sc_dev,
1665 "unable to map control data, error = %d\n", error);
1666 goto fail_1;
1667 }
1668
1669 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1670 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1671 aprint_error_dev(sc->sc_dev,
1672 "unable to create control data DMA map, error = %d\n",
1673 error);
1674 goto fail_2;
1675 }
1676
1677 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1678 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1679 aprint_error_dev(sc->sc_dev,
1680 "unable to load control data DMA map, error = %d\n",
1681 error);
1682 goto fail_3;
1683 }
1684
1685 /* Create the transmit buffer DMA maps. */
1686 WM_TXQUEUELEN(sc) =
1687 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1688 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1689 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1690 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1691 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1692 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1693 aprint_error_dev(sc->sc_dev,
1694 "unable to create Tx DMA map %d, error = %d\n",
1695 i, error);
1696 goto fail_4;
1697 }
1698 }
1699
1700 /* Create the receive buffer DMA maps. */
1701 for (i = 0; i < WM_NRXDESC; i++) {
1702 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1703 MCLBYTES, 0, 0,
1704 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1705 aprint_error_dev(sc->sc_dev,
1706 "unable to create Rx DMA map %d error = %d\n",
1707 i, error);
1708 goto fail_5;
1709 }
1710 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1711 }
1712
1713 /* clear interesting stat counters */
1714 CSR_READ(sc, WMREG_COLC);
1715 CSR_READ(sc, WMREG_RXERRC);
1716
1717 	/* Switch PHY control from SMBus to PCIe */
1718 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1719 || (sc->sc_type == WM_T_PCH_LPT))
1720 wm_smbustopci(sc);
1721
1722 /* Reset the chip to a known state. */
1723 wm_reset(sc);
1724
1725 /* Get some information about the EEPROM. */
1726 switch (sc->sc_type) {
1727 case WM_T_82542_2_0:
1728 case WM_T_82542_2_1:
1729 case WM_T_82543:
1730 case WM_T_82544:
1731 /* Microwire */
1732 sc->sc_nvm_wordsize = 64;
1733 sc->sc_nvm_addrbits = 6;
1734 break;
1735 case WM_T_82540:
1736 case WM_T_82545:
1737 case WM_T_82545_3:
1738 case WM_T_82546:
1739 case WM_T_82546_3:
1740 /* Microwire */
1741 reg = CSR_READ(sc, WMREG_EECD);
1742 if (reg & EECD_EE_SIZE) {
1743 sc->sc_nvm_wordsize = 256;
1744 sc->sc_nvm_addrbits = 8;
1745 } else {
1746 sc->sc_nvm_wordsize = 64;
1747 sc->sc_nvm_addrbits = 6;
1748 }
1749 sc->sc_flags |= WM_F_LOCK_EECD;
1750 break;
1751 case WM_T_82541:
1752 case WM_T_82541_2:
1753 case WM_T_82547:
1754 case WM_T_82547_2:
1755 reg = CSR_READ(sc, WMREG_EECD);
1756 if (reg & EECD_EE_TYPE) {
1757 /* SPI */
1758 sc->sc_flags |= WM_F_EEPROM_SPI;
1759 wm_nvm_set_addrbits_size_eecd(sc);
1760 } else {
1761 /* Microwire */
1762 if ((reg & EECD_EE_ABITS) != 0) {
1763 sc->sc_nvm_wordsize = 256;
1764 sc->sc_nvm_addrbits = 8;
1765 } else {
1766 sc->sc_nvm_wordsize = 64;
1767 sc->sc_nvm_addrbits = 6;
1768 }
1769 }
1770 sc->sc_flags |= WM_F_LOCK_EECD;
1771 break;
1772 case WM_T_82571:
1773 case WM_T_82572:
1774 /* SPI */
1775 sc->sc_flags |= WM_F_EEPROM_SPI;
1776 wm_nvm_set_addrbits_size_eecd(sc);
1777 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1778 break;
1779 case WM_T_82573:
1780 sc->sc_flags |= WM_F_LOCK_SWSM;
1781 /* FALLTHROUGH */
1782 case WM_T_82574:
1783 case WM_T_82583:
1784 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1785 sc->sc_flags |= WM_F_EEPROM_FLASH;
1786 sc->sc_nvm_wordsize = 2048;
1787 } else {
1788 /* SPI */
1789 sc->sc_flags |= WM_F_EEPROM_SPI;
1790 wm_nvm_set_addrbits_size_eecd(sc);
1791 }
1792 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1793 break;
1794 case WM_T_82575:
1795 case WM_T_82576:
1796 case WM_T_82580:
1797 case WM_T_I350:
1798 case WM_T_I354:
1799 case WM_T_80003:
1800 /* SPI */
1801 sc->sc_flags |= WM_F_EEPROM_SPI;
1802 wm_nvm_set_addrbits_size_eecd(sc);
1803 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1804 | WM_F_LOCK_SWSM;
1805 break;
1806 case WM_T_ICH8:
1807 case WM_T_ICH9:
1808 case WM_T_ICH10:
1809 case WM_T_PCH:
1810 case WM_T_PCH2:
1811 case WM_T_PCH_LPT:
1812 /* FLASH */
1813 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1814 sc->sc_nvm_wordsize = 2048;
1815 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1816 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1817 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1818 aprint_error_dev(sc->sc_dev,
1819 "can't map FLASH registers\n");
1820 goto fail_5;
1821 }
1822 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1823 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1824 ICH_FLASH_SECTOR_SIZE;
1825 sc->sc_ich8_flash_bank_size =
1826 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1827 sc->sc_ich8_flash_bank_size -=
1828 (reg & ICH_GFPREG_BASE_MASK);
1829 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1830 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
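		/*
		 * A worked example, assuming ICH_FLASH_SECTOR_SIZE is 4 KiB:
		 * GFPREG holds the flash region base and limit in sector
		 * units, so (limit + 1 - base) sectors * 4096 is the region
		 * size in bytes; dividing by 2 * sizeof(uint16_t) converts
		 * that to 16-bit words per bank, the region being split into
		 * two NVM banks.
		 */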
1831 break;
1832 case WM_T_I210:
1833 case WM_T_I211:
1834 wm_nvm_set_addrbits_size_eecd(sc);
1835 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1836 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1837 break;
1838 default:
1839 break;
1840 }
1841
1842 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1843 switch (sc->sc_type) {
1844 case WM_T_82571:
1845 case WM_T_82572:
1846 reg = CSR_READ(sc, WMREG_SWSM2);
1847 if ((reg & SWSM2_LOCK) == 0) {
1848 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1849 force_clear_smbi = true;
1850 } else
1851 force_clear_smbi = false;
1852 break;
1853 case WM_T_82573:
1854 case WM_T_82574:
1855 case WM_T_82583:
1856 force_clear_smbi = true;
1857 break;
1858 default:
1859 force_clear_smbi = false;
1860 break;
1861 }
1862 if (force_clear_smbi) {
1863 reg = CSR_READ(sc, WMREG_SWSM);
1864 if ((reg & SWSM_SMBI) != 0)
1865 aprint_error_dev(sc->sc_dev,
1866 "Please update the Bootagent\n");
1867 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1868 }
1869
1870 /*
1871 	 * Defer printing the EEPROM type until after verifying the checksum.
1872 * This allows the EEPROM type to be printed correctly in the case
1873 * that no EEPROM is attached.
1874 */
1875 /*
1876 * Validate the EEPROM checksum. If the checksum fails, flag
1877 * this for later, so we can fail future reads from the EEPROM.
1878 */
1879 if (wm_nvm_validate_checksum(sc)) {
1880 /*
1881 		 * Validate the checksum again, because some PCI-e parts
1882 		 * fail the first check due to the link being in a sleep state.
1883 */
1884 if (wm_nvm_validate_checksum(sc))
1885 sc->sc_flags |= WM_F_EEPROM_INVALID;
1886 }
1887
1888 /* Set device properties (macflags) */
1889 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1890
1891 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1892 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1893 else {
1894 aprint_verbose_dev(sc->sc_dev, "%u words ",
1895 sc->sc_nvm_wordsize);
1896 if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1897 aprint_verbose("FLASH(HW)\n");
1898 } else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1899 aprint_verbose("FLASH\n");
1900 } else {
1901 if (sc->sc_flags & WM_F_EEPROM_SPI)
1902 eetype = "SPI";
1903 else
1904 eetype = "MicroWire";
1905 aprint_verbose("(%d address bits) %s EEPROM\n",
1906 sc->sc_nvm_addrbits, eetype);
1907 }
1908 }
1909
1910 switch (sc->sc_type) {
1911 case WM_T_82571:
1912 case WM_T_82572:
1913 case WM_T_82573:
1914 case WM_T_82574:
1915 case WM_T_82583:
1916 case WM_T_80003:
1917 case WM_T_ICH8:
1918 case WM_T_ICH9:
1919 case WM_T_ICH10:
1920 case WM_T_PCH:
1921 case WM_T_PCH2:
1922 case WM_T_PCH_LPT:
1923 if (wm_check_mng_mode(sc) != 0)
1924 wm_get_hw_control(sc);
1925 break;
1926 default:
1927 break;
1928 }
1929 wm_get_wakeup(sc);
1930 /*
1931 * Read the Ethernet address from the EEPROM, if not first found
1932 * in device properties.
1933 */
1934 ea = prop_dictionary_get(dict, "mac-address");
1935 if (ea != NULL) {
1936 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1937 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1938 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1939 } else {
1940 if (wm_read_mac_addr(sc, enaddr) != 0) {
1941 aprint_error_dev(sc->sc_dev,
1942 "unable to read Ethernet address\n");
1943 goto fail_5;
1944 }
1945 }
1946
1947 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1948 ether_sprintf(enaddr));
1949
1950 /*
1951 * Read the config info from the EEPROM, and set up various
1952 * bits in the control registers based on their contents.
1953 */
1954 pn = prop_dictionary_get(dict, "i82543-cfg1");
1955 if (pn != NULL) {
1956 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1957 cfg1 = (uint16_t) prop_number_integer_value(pn);
1958 } else {
1959 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
1960 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1961 goto fail_5;
1962 }
1963 }
1964
1965 pn = prop_dictionary_get(dict, "i82543-cfg2");
1966 if (pn != NULL) {
1967 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1968 cfg2 = (uint16_t) prop_number_integer_value(pn);
1969 } else {
1970 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
1971 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1972 goto fail_5;
1973 }
1974 }
1975
1976 /* check for WM_F_WOL */
1977 switch (sc->sc_type) {
1978 case WM_T_82542_2_0:
1979 case WM_T_82542_2_1:
1980 case WM_T_82543:
1981 /* dummy? */
1982 eeprom_data = 0;
1983 apme_mask = NVM_CFG3_APME;
1984 break;
1985 case WM_T_82544:
1986 apme_mask = NVM_CFG2_82544_APM_EN;
1987 eeprom_data = cfg2;
1988 break;
1989 case WM_T_82546:
1990 case WM_T_82546_3:
1991 case WM_T_82571:
1992 case WM_T_82572:
1993 case WM_T_82573:
1994 case WM_T_82574:
1995 case WM_T_82583:
1996 case WM_T_80003:
1997 default:
1998 apme_mask = NVM_CFG3_APME;
1999 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2000 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2001 break;
2002 case WM_T_82575:
2003 case WM_T_82576:
2004 case WM_T_82580:
2005 case WM_T_I350:
2006 case WM_T_I354: /* XXX ok? */
2007 case WM_T_ICH8:
2008 case WM_T_ICH9:
2009 case WM_T_ICH10:
2010 case WM_T_PCH:
2011 case WM_T_PCH2:
2012 case WM_T_PCH_LPT:
2013 /* XXX The funcid should be checked on some devices */
2014 apme_mask = WUC_APME;
2015 eeprom_data = CSR_READ(sc, WMREG_WUC);
2016 break;
2017 }
2018
2019 	/* Check for the WM_F_WOL flag after reading the EEPROM/register data above */
2020 if ((eeprom_data & apme_mask) != 0)
2021 sc->sc_flags |= WM_F_WOL;
2022 #ifdef WM_DEBUG
2023 if ((sc->sc_flags & WM_F_WOL) != 0)
2024 printf("WOL\n");
2025 #endif
2026
2027 /*
2028 	 * XXX need special handling for some multiple-port cards
2029 	 * to disable a particular port.
2030 */
2031
2032 if (sc->sc_type >= WM_T_82544) {
2033 pn = prop_dictionary_get(dict, "i82543-swdpin");
2034 if (pn != NULL) {
2035 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2036 swdpin = (uint16_t) prop_number_integer_value(pn);
2037 } else {
2038 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2039 aprint_error_dev(sc->sc_dev,
2040 "unable to read SWDPIN\n");
2041 goto fail_5;
2042 }
2043 }
2044 }
2045
2046 if (cfg1 & NVM_CFG1_ILOS)
2047 sc->sc_ctrl |= CTRL_ILOS;
2048 if (sc->sc_type >= WM_T_82544) {
2049 sc->sc_ctrl |=
2050 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2051 CTRL_SWDPIO_SHIFT;
2052 sc->sc_ctrl |=
2053 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2054 CTRL_SWDPINS_SHIFT;
2055 } else {
2056 sc->sc_ctrl |=
2057 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2058 CTRL_SWDPIO_SHIFT;
2059 }
2060
2061 #if 0
2062 if (sc->sc_type >= WM_T_82544) {
2063 if (cfg1 & NVM_CFG1_IPS0)
2064 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2065 if (cfg1 & NVM_CFG1_IPS1)
2066 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2067 sc->sc_ctrl_ext |=
2068 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2069 CTRL_EXT_SWDPIO_SHIFT;
2070 sc->sc_ctrl_ext |=
2071 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2072 CTRL_EXT_SWDPINS_SHIFT;
2073 } else {
2074 sc->sc_ctrl_ext |=
2075 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2076 CTRL_EXT_SWDPIO_SHIFT;
2077 }
2078 #endif
2079
2080 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2081 #if 0
2082 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2083 #endif
2084
2085 /*
2086 * Set up some register offsets that are different between
2087 * the i82542 and the i82543 and later chips.
2088 */
2089 if (sc->sc_type < WM_T_82543) {
2090 sc->sc_rdt_reg = WMREG_OLD_RDT0;
2091 sc->sc_tdt_reg = WMREG_OLD_TDT;
2092 } else {
2093 sc->sc_rdt_reg = WMREG_RDT;
2094 sc->sc_tdt_reg = WMREG_TDT;
2095 }
2096
2097 if (sc->sc_type == WM_T_PCH) {
2098 uint16_t val;
2099
2100 /* Save the NVM K1 bit setting */
2101 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2102
2103 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2104 sc->sc_nvm_k1_enabled = 1;
2105 else
2106 sc->sc_nvm_k1_enabled = 0;
2107 }
2108
2109 /*
2110 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
2111 * media structures accordingly.
2112 */
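	/*
	 * (Terminology, roughly: GMII means a copper PHY managed over
	 * MDIO, TBI is the ten-bit interface to an internal SERDES for
	 * fiber, and SGMII is a serialized GMII link to an external PHY
	 * or SFP module.)
	 */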
2113 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2114 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2115 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2116 || sc->sc_type == WM_T_82573
2117 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2118 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2119 wm_gmii_mediainit(sc, wmp->wmp_product);
2120 } else if (sc->sc_type < WM_T_82543 ||
2121 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2122 if (sc->sc_mediatype & WMP_F_COPPER) {
2123 aprint_error_dev(sc->sc_dev,
2124 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2125 sc->sc_mediatype = WMP_F_FIBER;
2126 }
2127 wm_tbi_mediainit(sc);
2128 } else {
2129 switch (sc->sc_type) {
2130 case WM_T_82575:
2131 case WM_T_82576:
2132 case WM_T_82580:
2133 case WM_T_I350:
2134 case WM_T_I354:
2135 case WM_T_I210:
2136 case WM_T_I211:
2137 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2138 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2139 switch (link_mode) {
2140 case CTRL_EXT_LINK_MODE_1000KX:
2141 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2142 sc->sc_mediatype = WMP_F_SERDES;
2143 break;
2144 case CTRL_EXT_LINK_MODE_SGMII:
2145 if (wm_sgmii_uses_mdio(sc)) {
2146 aprint_verbose_dev(sc->sc_dev,
2147 "SGMII(MDIO)\n");
2148 sc->sc_flags |= WM_F_SGMII;
2149 sc->sc_mediatype = WMP_F_COPPER;
2150 break;
2151 }
2152 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2153 /*FALLTHROUGH*/
2154 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2155 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2156 if (sc->sc_mediatype == WMP_F_UNKNOWN) {
2157 if (link_mode
2158 == CTRL_EXT_LINK_MODE_SGMII) {
2159 sc->sc_mediatype
2160 = WMP_F_COPPER;
2161 sc->sc_flags |= WM_F_SGMII;
2162 } else {
2163 sc->sc_mediatype
2164 = WMP_F_SERDES;
2165 aprint_verbose_dev(sc->sc_dev,
2166 "SERDES\n");
2167 }
2168 break;
2169 }
2170 if (sc->sc_mediatype == WMP_F_SERDES)
2171 aprint_verbose_dev(sc->sc_dev,
2172 "SERDES\n");
2173
2174 /* Change current link mode setting */
2175 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2176 switch (sc->sc_mediatype) {
2177 case WMP_F_COPPER:
2178 reg |= CTRL_EXT_LINK_MODE_SGMII;
2179 break;
2180 case WMP_F_SERDES:
2181 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2182 break;
2183 default:
2184 break;
2185 }
2186 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2187 break;
2188 case CTRL_EXT_LINK_MODE_GMII:
2189 default:
2190 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2191 sc->sc_mediatype = WMP_F_COPPER;
2192 break;
2193 }
2194
2195 		/* Enable the I2C interface only when SGMII is in use */
2196 		if ((sc->sc_flags & WM_F_SGMII) != 0)
2197 			reg |= CTRL_EXT_I2C_ENA;
2198 		else
2199 			reg &= ~CTRL_EXT_I2C_ENA;
2200 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2201
2202 if (sc->sc_mediatype == WMP_F_COPPER)
2203 wm_gmii_mediainit(sc, wmp->wmp_product);
2204 else
2205 wm_tbi_mediainit(sc);
2206 break;
2207 default:
2208 if (sc->sc_mediatype & WMP_F_FIBER)
2209 aprint_error_dev(sc->sc_dev,
2210 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2211 sc->sc_mediatype = WMP_F_COPPER;
2212 wm_gmii_mediainit(sc, wmp->wmp_product);
2213 }
2214 }
2215
2216 ifp = &sc->sc_ethercom.ec_if;
2217 xname = device_xname(sc->sc_dev);
2218 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2219 ifp->if_softc = sc;
2220 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2221 ifp->if_ioctl = wm_ioctl;
2222 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2223 ifp->if_start = wm_nq_start;
2224 else
2225 ifp->if_start = wm_start;
2226 ifp->if_watchdog = wm_watchdog;
2227 ifp->if_init = wm_init;
2228 ifp->if_stop = wm_stop;
2229 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2230 IFQ_SET_READY(&ifp->if_snd);
2231
2232 /* Check for jumbo frame */
2233 switch (sc->sc_type) {
2234 case WM_T_82573:
2235 /* XXX limited to 9234 if ASPM is disabled */
2236 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &io3);
2237 if ((io3 & NVM_3GIO_3_ASPM_MASK) != 0)
2238 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2239 break;
2240 case WM_T_82571:
2241 case WM_T_82572:
2242 case WM_T_82574:
2243 case WM_T_82575:
2244 case WM_T_82576:
2245 case WM_T_82580:
2246 case WM_T_I350:
2247 case WM_T_I354: /* XXXX ok? */
2248 case WM_T_I210:
2249 case WM_T_I211:
2250 case WM_T_80003:
2251 case WM_T_ICH9:
2252 case WM_T_ICH10:
2253 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2254 case WM_T_PCH_LPT:
2255 /* XXX limited to 9234 */
2256 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2257 break;
2258 case WM_T_PCH:
2259 /* XXX limited to 4096 */
2260 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2261 break;
2262 case WM_T_82542_2_0:
2263 case WM_T_82542_2_1:
2264 case WM_T_82583:
2265 case WM_T_ICH8:
2266 /* No support for jumbo frame */
2267 break;
2268 default:
2269 /* ETHER_MAX_LEN_JUMBO */
2270 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2271 break;
2272 }
2273
2274 	/* If we're an i82543 or greater, we can support VLANs. */
2275 if (sc->sc_type >= WM_T_82543)
2276 sc->sc_ethercom.ec_capabilities |=
2277 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2278
2279 /*
2280 	 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
2281 * on i82543 and later.
2282 */
2283 if (sc->sc_type >= WM_T_82543) {
2284 ifp->if_capabilities |=
2285 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2286 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2287 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2288 IFCAP_CSUM_TCPv6_Tx |
2289 IFCAP_CSUM_UDPv6_Tx;
2290 }
2291
2292 /*
2293 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2294 *
2295 * 82541GI (8086:1076) ... no
2296 * 82572EI (8086:10b9) ... yes
2297 */
2298 if (sc->sc_type >= WM_T_82571) {
2299 ifp->if_capabilities |=
2300 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2301 }
2302
2303 /*
2304 	 * If we're an i82544 or greater (except i82547), we can do
2305 * TCP segmentation offload.
2306 */
2307 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2308 ifp->if_capabilities |= IFCAP_TSOv4;
2309 }
2310
2311 if (sc->sc_type >= WM_T_82571) {
2312 ifp->if_capabilities |= IFCAP_TSOv6;
2313 }
2314
2315 #ifdef WM_MPSAFE
2316 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2317 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2318 #else
2319 sc->sc_tx_lock = NULL;
2320 sc->sc_rx_lock = NULL;
2321 #endif
2322
2323 /* Attach the interface. */
2324 if_attach(ifp);
2325 ether_ifattach(ifp, enaddr);
2326 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2327 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2328 RND_FLAG_DEFAULT);
2329
2330 #ifdef WM_EVENT_COUNTERS
2331 /* Attach event counters. */
2332 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2333 NULL, xname, "txsstall");
2334 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2335 NULL, xname, "txdstall");
2336 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2337 NULL, xname, "txfifo_stall");
2338 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2339 NULL, xname, "txdw");
2340 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2341 NULL, xname, "txqe");
2342 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2343 NULL, xname, "rxintr");
2344 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2345 NULL, xname, "linkintr");
2346
2347 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2348 NULL, xname, "rxipsum");
2349 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2350 NULL, xname, "rxtusum");
2351 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2352 NULL, xname, "txipsum");
2353 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2354 NULL, xname, "txtusum");
2355 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2356 NULL, xname, "txtusum6");
2357
2358 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2359 NULL, xname, "txtso");
2360 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2361 NULL, xname, "txtso6");
2362 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2363 NULL, xname, "txtsopain");
2364
2365 for (i = 0; i < WM_NTXSEGS; i++) {
2366 snprintf(wm_txseg_evcnt_names[i],
2367 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2368 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2369 NULL, xname, wm_txseg_evcnt_names[i]);
2370 }
2371
2372 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2373 NULL, xname, "txdrop");
2374
2375 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2376 NULL, xname, "tu");
2377
2378 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2379 NULL, xname, "tx_xoff");
2380 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2381 NULL, xname, "tx_xon");
2382 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2383 NULL, xname, "rx_xoff");
2384 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2385 NULL, xname, "rx_xon");
2386 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2387 NULL, xname, "rx_macctl");
2388 #endif /* WM_EVENT_COUNTERS */
2389
2390 if (pmf_device_register(self, wm_suspend, wm_resume))
2391 pmf_class_network_register(self, ifp);
2392 else
2393 aprint_error_dev(self, "couldn't establish power handler\n");
2394
2395 sc->sc_flags |= WM_F_ATTACHED;
2396 return;
2397
2398 /*
2399 * Free any resources we've allocated during the failed attach
2400 * attempt. Do this in reverse order and fall through.
2401 */
2402 fail_5:
2403 for (i = 0; i < WM_NRXDESC; i++) {
2404 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2405 bus_dmamap_destroy(sc->sc_dmat,
2406 sc->sc_rxsoft[i].rxs_dmamap);
2407 }
2408 fail_4:
2409 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2410 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2411 bus_dmamap_destroy(sc->sc_dmat,
2412 sc->sc_txsoft[i].txs_dmamap);
2413 }
2414 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2415 fail_3:
2416 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2417 fail_2:
2418 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2419 sc->sc_cd_size);
2420 fail_1:
2421 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2422 fail_0:
2423 return;
2424 }
2425
2426 /* The detach function (ca_detach) */
2427 static int
2428 wm_detach(device_t self, int flags __unused)
2429 {
2430 struct wm_softc *sc = device_private(self);
2431 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2432 int i;
2433 #ifndef WM_MPSAFE
2434 int s;
2435 #endif
2436
2437 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2438 return 0;
2439
2440 #ifndef WM_MPSAFE
2441 s = splnet();
2442 #endif
2443 	/* Stop the interface; callouts are stopped inside wm_stop(). */
2444 wm_stop(ifp, 1);
2445
2446 #ifndef WM_MPSAFE
2447 splx(s);
2448 #endif
2449
2450 pmf_device_deregister(self);
2451
2452 /* Tell the firmware about the release */
2453 WM_BOTH_LOCK(sc);
2454 wm_release_manageability(sc);
2455 wm_release_hw_control(sc);
2456 WM_BOTH_UNLOCK(sc);
2457
2458 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2459
2460 /* Delete all remaining media. */
2461 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2462
2463 ether_ifdetach(ifp);
2464 if_detach(ifp);
2465 
2467 /* Unload RX dmamaps and free mbufs */
2468 WM_RX_LOCK(sc);
2469 wm_rxdrain(sc);
2470 WM_RX_UNLOCK(sc);
2471 /* Must unlock here */
2472
2473 	/* Free the DMA maps; this mirrors the cleanup at the end of wm_attach() */
2474 for (i = 0; i < WM_NRXDESC; i++) {
2475 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2476 bus_dmamap_destroy(sc->sc_dmat,
2477 sc->sc_rxsoft[i].rxs_dmamap);
2478 }
2479 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2480 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2481 bus_dmamap_destroy(sc->sc_dmat,
2482 sc->sc_txsoft[i].txs_dmamap);
2483 }
2484 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2485 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2486 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2487 sc->sc_cd_size);
2488 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2489
2490 /* Disestablish the interrupt handler */
2491 if (sc->sc_ih != NULL) {
2492 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2493 sc->sc_ih = NULL;
2494 }
2495
2496 /* Unmap the registers */
2497 if (sc->sc_ss) {
2498 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2499 sc->sc_ss = 0;
2500 }
2501
2502 if (sc->sc_ios) {
2503 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2504 sc->sc_ios = 0;
2505 }
2506
2507 if (sc->sc_tx_lock)
2508 mutex_obj_free(sc->sc_tx_lock);
2509 if (sc->sc_rx_lock)
2510 mutex_obj_free(sc->sc_rx_lock);
2511
2512 return 0;
2513 }
2514
2515 static bool
2516 wm_suspend(device_t self, const pmf_qual_t *qual)
2517 {
2518 struct wm_softc *sc = device_private(self);
2519
2520 wm_release_manageability(sc);
2521 wm_release_hw_control(sc);
2522 #ifdef WM_WOL
2523 wm_enable_wakeup(sc);
2524 #endif
2525
2526 return true;
2527 }
2528
2529 static bool
2530 wm_resume(device_t self, const pmf_qual_t *qual)
2531 {
2532 struct wm_softc *sc = device_private(self);
2533
2534 wm_init_manageability(sc);
2535
2536 return true;
2537 }
2538
2539 /*
2540 * wm_watchdog: [ifnet interface function]
2541 *
2542 * Watchdog timer handler.
2543 */
2544 static void
2545 wm_watchdog(struct ifnet *ifp)
2546 {
2547 struct wm_softc *sc = ifp->if_softc;
2548
2549 /*
2550 * Since we're using delayed interrupts, sweep up
2551 * before we report an error.
2552 */
2553 WM_TX_LOCK(sc);
2554 wm_txintr(sc);
2555 WM_TX_UNLOCK(sc);
2556
2557 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2558 #ifdef WM_DEBUG
2559 int i, j;
2560 struct wm_txsoft *txs;
2561 #endif
2562 log(LOG_ERR,
2563 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2564 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2565 sc->sc_txnext);
2566 ifp->if_oerrors++;
2567 #ifdef WM_DEBUG
2568 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2569 i = WM_NEXTTXS(sc, i)) {
2570 txs = &sc->sc_txsoft[i];
2571 printf("txs %d tx %d -> %d\n",
2572 i, txs->txs_firstdesc, txs->txs_lastdesc);
2573 for (j = txs->txs_firstdesc; ;
2574 j = WM_NEXTTX(sc, j)) {
2575 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2576 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2577 printf("\t %#08x%08x\n",
2578 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2579 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2580 if (j == txs->txs_lastdesc)
2581 break;
2582 }
2583 }
2584 #endif
2585 /* Reset the interface. */
2586 (void) wm_init(ifp);
2587 }
2588
2589 /* Try to get more packets going. */
2590 ifp->if_start(ifp);
2591 }
2592
2593 /*
2594 * wm_tick:
2595 *
2596 * One second timer, used to check link status, sweep up
2597 * completed transmit jobs, etc.
2598 */
2599 static void
2600 wm_tick(void *arg)
2601 {
2602 struct wm_softc *sc = arg;
2603 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2604 #ifndef WM_MPSAFE
2605 int s;
2606
2607 s = splnet();
2608 #endif
2609
2610 WM_TX_LOCK(sc);
2611
2612 if (sc->sc_stopping)
2613 goto out;
2614
2615 if (sc->sc_type >= WM_T_82542_2_1) {
2616 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2617 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2618 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2619 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2620 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2621 }
2622
2623 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2624 ifp->if_ierrors += 0ULL + /* ensure quad_t */
2625 + CSR_READ(sc, WMREG_CRCERRS)
2626 + CSR_READ(sc, WMREG_ALGNERRC)
2627 + CSR_READ(sc, WMREG_SYMERRC)
2628 + CSR_READ(sc, WMREG_RXERRC)
2629 + CSR_READ(sc, WMREG_SEC)
2630 + CSR_READ(sc, WMREG_CEXTERR)
2631 + CSR_READ(sc, WMREG_RLEC);
2632 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2633
2634 if (sc->sc_flags & WM_F_HAS_MII)
2635 mii_tick(&sc->sc_mii);
2636 else
2637 wm_tbi_check_link(sc);
2638
2639 out:
2640 WM_TX_UNLOCK(sc);
2641 #ifndef WM_MPSAFE
2642 splx(s);
2643 #endif
2644
2645 if (!sc->sc_stopping)
2646 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2647 }
2648
2649 static int
2650 wm_ifflags_cb(struct ethercom *ec)
2651 {
2652 struct ifnet *ifp = &ec->ec_if;
2653 struct wm_softc *sc = ifp->if_softc;
2654 int change = ifp->if_flags ^ sc->sc_if_flags;
2655 int rc = 0;
2656
2657 WM_BOTH_LOCK(sc);
2658
2659 if (change != 0)
2660 sc->sc_if_flags = ifp->if_flags;
2661
2662 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2663 rc = ENETRESET;
2664 goto out;
2665 }
2666
2667 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2668 wm_set_filter(sc);
2669
2670 wm_set_vlan(sc);
2671
2672 out:
2673 WM_BOTH_UNLOCK(sc);
2674
2675 return rc;
2676 }
2677
2678 /*
2679 * wm_ioctl: [ifnet interface function]
2680 *
2681 * Handle control requests from the operator.
2682 */
2683 static int
2684 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2685 {
2686 struct wm_softc *sc = ifp->if_softc;
2687 struct ifreq *ifr = (struct ifreq *) data;
2688 struct ifaddr *ifa = (struct ifaddr *)data;
2689 struct sockaddr_dl *sdl;
2690 int s, error;
2691
2692 #ifndef WM_MPSAFE
2693 s = splnet();
2694 #endif
2695 switch (cmd) {
2696 case SIOCSIFMEDIA:
2697 case SIOCGIFMEDIA:
2698 WM_BOTH_LOCK(sc);
2699 /* Flow control requires full-duplex mode. */
2700 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2701 (ifr->ifr_media & IFM_FDX) == 0)
2702 ifr->ifr_media &= ~IFM_ETH_FMASK;
2703 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2704 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2705 /* We can do both TXPAUSE and RXPAUSE. */
2706 ifr->ifr_media |=
2707 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2708 }
2709 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2710 }
2711 WM_BOTH_UNLOCK(sc);
2712 #ifdef WM_MPSAFE
2713 s = splnet();
2714 #endif
2715 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2716 #ifdef WM_MPSAFE
2717 splx(s);
2718 #endif
2719 break;
2720 case SIOCINITIFADDR:
2721 WM_BOTH_LOCK(sc);
2722 if (ifa->ifa_addr->sa_family == AF_LINK) {
2723 sdl = satosdl(ifp->if_dl->ifa_addr);
2724 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2725 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2726 /* unicast address is first multicast entry */
2727 wm_set_filter(sc);
2728 error = 0;
2729 WM_BOTH_UNLOCK(sc);
2730 break;
2731 }
2732 WM_BOTH_UNLOCK(sc);
2733 /*FALLTHROUGH*/
2734 default:
2735 #ifdef WM_MPSAFE
2736 s = splnet();
2737 #endif
2738 /* It may call wm_start, so unlock here */
2739 error = ether_ioctl(ifp, cmd, data);
2740 #ifdef WM_MPSAFE
2741 splx(s);
2742 #endif
2743 if (error != ENETRESET)
2744 break;
2745
2746 error = 0;
2747
2748 if (cmd == SIOCSIFCAP) {
2749 error = (*ifp->if_init)(ifp);
2750 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2751 ;
2752 else if (ifp->if_flags & IFF_RUNNING) {
2753 /*
2754 * Multicast list has changed; set the hardware filter
2755 * accordingly.
2756 */
2757 WM_BOTH_LOCK(sc);
2758 wm_set_filter(sc);
2759 WM_BOTH_UNLOCK(sc);
2760 }
2761 break;
2762 }
2763
2764 /* Try to get more packets going. */
2765 ifp->if_start(ifp);
2766
2767 #ifndef WM_MPSAFE
2768 splx(s);
2769 #endif
2770 return error;
2771 }
2772
2773 /* MAC address related */
2774
2775 /*
2776  * Get the offset of the alternative MAC address and return it.
2777  * Return 0 if an error occurs or no alternative address is present.
2778 */
2779 static uint16_t
2780 wm_check_alt_mac_addr(struct wm_softc *sc)
2781 {
2782 uint16_t myea[ETHER_ADDR_LEN / 2];
2783 uint16_t offset = NVM_OFF_MACADDR;
2784
2785 /* Try to read alternative MAC address pointer */
2786 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2787 return 0;
2788
2789 	/* Check whether the pointer is valid. */
2790 if ((offset == 0x0000) || (offset == 0xffff))
2791 return 0;
2792
2793 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
2794 /*
2795 	 * Check whether the alternative MAC address is valid.
2796 	 * Some cards have a non-0xffff pointer but don't actually
2797 	 * use an alternative MAC address.
2798 	 *
2799 	 * Do this by checking that the multicast (group) bit is clear.
2800 */
2801 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2802 if (((myea[0] & 0xff) & 0x01) == 0)
2803 return offset; /* Found */
2804
2805 /* Not found */
2806 return 0;
2807 }
2808
2809 static int
2810 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2811 {
2812 uint16_t myea[ETHER_ADDR_LEN / 2];
2813 uint16_t offset = NVM_OFF_MACADDR;
2814 int do_invert = 0;
2815
2816 switch (sc->sc_type) {
2817 case WM_T_82580:
2818 case WM_T_I350:
2819 case WM_T_I354:
2820 /* EEPROM Top Level Partitioning */
2821 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
2822 break;
2823 case WM_T_82571:
2824 case WM_T_82575:
2825 case WM_T_82576:
2826 case WM_T_80003:
2827 case WM_T_I210:
2828 case WM_T_I211:
2829 offset = wm_check_alt_mac_addr(sc);
2830 if (offset == 0)
2831 if ((sc->sc_funcid & 0x01) == 1)
2832 do_invert = 1;
2833 break;
2834 default:
2835 if ((sc->sc_funcid & 0x01) == 1)
2836 do_invert = 1;
2837 break;
2838 }
2839
2840 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2841 myea) != 0)
2842 goto bad;
2843
2844 enaddr[0] = myea[0] & 0xff;
2845 enaddr[1] = myea[0] >> 8;
2846 enaddr[2] = myea[1] & 0xff;
2847 enaddr[3] = myea[1] >> 8;
2848 enaddr[4] = myea[2] & 0xff;
2849 enaddr[5] = myea[2] >> 8;
2850
2851 /*
2852 * Toggle the LSB of the MAC address on the second port
2853 * of some dual port cards.
2854 */
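	/* E.g. an address ending in 0x54 on port 0 becomes 0x55 on port 1. */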
2855 if (do_invert != 0)
2856 enaddr[5] ^= 1;
2857
2858 return 0;
2859
2860 bad:
2861 return -1;
2862 }
2863
2864 /*
2865 * wm_set_ral:
2866 *
2867  *	Set an entry in the receive address list.
2868 */
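/*
 * The 6-byte address is packed little-endian: ral_lo gets the first
 * four bytes and ral_hi the last two plus the Address Valid bit, so
 * e.g. 00:11:22:33:44:55 yields ral_lo = 0x33221100 and
 * ral_hi = RAL_AV | 0x5544.
 */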
2869 static void
2870 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2871 {
2872 uint32_t ral_lo, ral_hi;
2873
2874 if (enaddr != NULL) {
2875 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2876 (enaddr[3] << 24);
2877 ral_hi = enaddr[4] | (enaddr[5] << 8);
2878 ral_hi |= RAL_AV;
2879 } else {
2880 ral_lo = 0;
2881 ral_hi = 0;
2882 }
2883
2884 if (sc->sc_type >= WM_T_82544) {
2885 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2886 ral_lo);
2887 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2888 ral_hi);
2889 } else {
2890 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2891 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2892 }
2893 }
2894
2895 /*
2896 * wm_mchash:
2897 *
2898 * Compute the hash of the multicast address for the 4096-bit
2899 * multicast filter.
2900 */
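/*
 * A worked example, assuming sc_mchash_type is 0 (the common case) on
 * a non-ICH part: an address ending in ...:12:34 hashes to
 * (0x12 >> 4) | (0x34 << 4) = 0x341; wm_set_filter() then splits that
 * into MTA register 0x341 >> 5 = 0x1a and bit 0x341 & 0x1f = 1.
 */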
2901 static uint32_t
2902 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2903 {
2904 static const int lo_shift[4] = { 4, 3, 2, 0 };
2905 static const int hi_shift[4] = { 4, 5, 6, 8 };
2906 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2907 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
2908 uint32_t hash;
2909
2910 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2911 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2912 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
2913 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
2914 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
2915 return (hash & 0x3ff);
2916 }
2917 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2918 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2919
2920 return (hash & 0xfff);
2921 }
2922
2923 /*
2924 * wm_set_filter:
2925 *
2926 * Set up the receive filter.
2927 */
2928 static void
2929 wm_set_filter(struct wm_softc *sc)
2930 {
2931 struct ethercom *ec = &sc->sc_ethercom;
2932 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2933 struct ether_multi *enm;
2934 struct ether_multistep step;
2935 bus_addr_t mta_reg;
2936 uint32_t hash, reg, bit;
2937 int i, size;
2938
2939 if (sc->sc_type >= WM_T_82544)
2940 mta_reg = WMREG_CORDOVA_MTA;
2941 else
2942 mta_reg = WMREG_MTA;
2943
2944 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2945
2946 if (ifp->if_flags & IFF_BROADCAST)
2947 sc->sc_rctl |= RCTL_BAM;
2948 if (ifp->if_flags & IFF_PROMISC) {
2949 sc->sc_rctl |= RCTL_UPE;
2950 goto allmulti;
2951 }
2952
2953 /*
2954 * Set the station address in the first RAL slot, and
2955 * clear the remaining slots.
2956 */
2957 if (sc->sc_type == WM_T_ICH8)
2958 		size = WM_RAL_TABSIZE_ICH8 - 1;
2959 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
2960 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
2961 || (sc->sc_type == WM_T_PCH_LPT))
2962 size = WM_RAL_TABSIZE_ICH8;
2963 else if (sc->sc_type == WM_T_82575)
2964 size = WM_RAL_TABSIZE_82575;
2965 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
2966 size = WM_RAL_TABSIZE_82576;
2967 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2968 size = WM_RAL_TABSIZE_I350;
2969 else
2970 size = WM_RAL_TABSIZE;
2971 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
2972 for (i = 1; i < size; i++)
2973 wm_set_ral(sc, NULL, i);
2974
2975 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2976 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2977 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
2978 size = WM_ICH8_MC_TABSIZE;
2979 else
2980 size = WM_MC_TABSIZE;
2981 /* Clear out the multicast table. */
2982 for (i = 0; i < size; i++)
2983 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2984
2985 ETHER_FIRST_MULTI(step, ec, enm);
2986 while (enm != NULL) {
2987 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2988 /*
2989 * We must listen to a range of multicast addresses.
2990 * For now, just accept all multicasts, rather than
2991 * trying to set only those filter bits needed to match
2992 * the range. (At this time, the only use of address
2993 * ranges is for IP multicast routing, for which the
2994 * range is big enough to require all bits set.)
2995 */
2996 goto allmulti;
2997 }
2998
2999 hash = wm_mchash(sc, enm->enm_addrlo);
3000
3001 reg = (hash >> 5);
3002 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3003 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3004 || (sc->sc_type == WM_T_PCH2)
3005 || (sc->sc_type == WM_T_PCH_LPT))
3006 reg &= 0x1f;
3007 else
3008 reg &= 0x7f;
3009 bit = hash & 0x1f;
3010
3011 hash = CSR_READ(sc, mta_reg + (reg << 2));
3012 hash |= 1U << bit;
3013
3014 /* XXX Hardware bug?? */
3015 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3016 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3017 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3018 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3019 } else
3020 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3021
3022 ETHER_NEXT_MULTI(step, enm);
3023 }
3024
3025 ifp->if_flags &= ~IFF_ALLMULTI;
3026 goto setit;
3027
3028 allmulti:
3029 ifp->if_flags |= IFF_ALLMULTI;
3030 sc->sc_rctl |= RCTL_MPE;
3031
3032 setit:
3033 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3034 }
3035
3036 /* Reset and init related */
3037
3038 static void
3039 wm_set_vlan(struct wm_softc *sc)
3040 {
3041 /* Deal with VLAN enables. */
3042 if (VLAN_ATTACHED(&sc->sc_ethercom))
3043 sc->sc_ctrl |= CTRL_VME;
3044 else
3045 sc->sc_ctrl &= ~CTRL_VME;
3046
3047 /* Write the control registers. */
3048 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3049 }
3050
3051 static void
3052 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3053 {
3054 uint32_t gcr;
3055 pcireg_t ctrl2;
3056
3057 gcr = CSR_READ(sc, WMREG_GCR);
3058
3059 /* Only take action if timeout value is defaulted to 0 */
3060 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3061 goto out;
3062
3063 if ((gcr & GCR_CAP_VER2) == 0) {
3064 gcr |= GCR_CMPL_TMOUT_10MS;
3065 goto out;
3066 }
3067
3068 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3069 sc->sc_pcixe_capoff + PCIE_DCSR2);
3070 ctrl2 |= WM_PCIE_DCSR2_16MS;
3071 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3072 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3073
3074 out:
3075 /* Disable completion timeout resend */
3076 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3077
3078 CSR_WRITE(sc, WMREG_GCR, gcr);
3079 }
3080
3081 void
3082 wm_get_auto_rd_done(struct wm_softc *sc)
3083 {
3084 int i;
3085
3086 /* wait for eeprom to reload */
3087 switch (sc->sc_type) {
3088 case WM_T_82571:
3089 case WM_T_82572:
3090 case WM_T_82573:
3091 case WM_T_82574:
3092 case WM_T_82583:
3093 case WM_T_82575:
3094 case WM_T_82576:
3095 case WM_T_82580:
3096 case WM_T_I350:
3097 case WM_T_I354:
3098 case WM_T_I210:
3099 case WM_T_I211:
3100 case WM_T_80003:
3101 case WM_T_ICH8:
3102 case WM_T_ICH9:
3103 for (i = 0; i < 10; i++) {
3104 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3105 break;
3106 delay(1000);
3107 }
3108 if (i == 10) {
3109 log(LOG_ERR, "%s: auto read from eeprom failed to "
3110 "complete\n", device_xname(sc->sc_dev));
3111 }
3112 break;
3113 default:
3114 break;
3115 }
3116 }
3117
3118 void
3119 wm_lan_init_done(struct wm_softc *sc)
3120 {
3121 uint32_t reg = 0;
3122 int i;
3123
3124 	/* Wait for LAN init to complete after the EEPROM reload */
3125 switch (sc->sc_type) {
3126 case WM_T_ICH10:
3127 case WM_T_PCH:
3128 case WM_T_PCH2:
3129 case WM_T_PCH_LPT:
3130 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3131 reg = CSR_READ(sc, WMREG_STATUS);
3132 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3133 break;
3134 delay(100);
3135 }
3136 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3137 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3138 "complete\n", device_xname(sc->sc_dev), __func__);
3139 }
3140 break;
3141 default:
3142 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3143 __func__);
3144 break;
3145 }
3146
3147 reg &= ~STATUS_LAN_INIT_DONE;
3148 CSR_WRITE(sc, WMREG_STATUS, reg);
3149 }
3150
3151 void
3152 wm_get_cfg_done(struct wm_softc *sc)
3153 {
3154 int mask;
3155 uint32_t reg;
3156 int i;
3157
3158 	/* Wait for the post-reset configuration to complete */
3159 switch (sc->sc_type) {
3160 case WM_T_82542_2_0:
3161 case WM_T_82542_2_1:
3162 /* null */
3163 break;
3164 case WM_T_82543:
3165 case WM_T_82544:
3166 case WM_T_82540:
3167 case WM_T_82545:
3168 case WM_T_82545_3:
3169 case WM_T_82546:
3170 case WM_T_82546_3:
3171 case WM_T_82541:
3172 case WM_T_82541_2:
3173 case WM_T_82547:
3174 case WM_T_82547_2:
3175 case WM_T_82573:
3176 case WM_T_82574:
3177 case WM_T_82583:
3178 /* generic */
3179 delay(10*1000);
3180 break;
3181 case WM_T_80003:
3182 case WM_T_82571:
3183 case WM_T_82572:
3184 case WM_T_82575:
3185 case WM_T_82576:
3186 case WM_T_82580:
3187 case WM_T_I350:
3188 case WM_T_I354:
3189 case WM_T_I210:
3190 case WM_T_I211:
3191 if (sc->sc_type == WM_T_82571) {
3192 /* Only 82571 shares port 0 */
3193 mask = EEMNGCTL_CFGDONE_0;
3194 } else
3195 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3196 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3197 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3198 break;
3199 delay(1000);
3200 }
3201 if (i >= WM_PHY_CFG_TIMEOUT) {
3202 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3203 device_xname(sc->sc_dev), __func__));
3204 }
3205 break;
3206 case WM_T_ICH8:
3207 case WM_T_ICH9:
3208 case WM_T_ICH10:
3209 case WM_T_PCH:
3210 case WM_T_PCH2:
3211 case WM_T_PCH_LPT:
3212 delay(10*1000);
3213 if (sc->sc_type >= WM_T_ICH10)
3214 wm_lan_init_done(sc);
3215 else
3216 wm_get_auto_rd_done(sc);
3217
3218 reg = CSR_READ(sc, WMREG_STATUS);
3219 if ((reg & STATUS_PHYRA) != 0)
3220 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3221 break;
3222 default:
3223 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3224 __func__);
3225 break;
3226 }
3227 }
3228
3229 /*
3230 * wm_reset:
3231 *
3232 * Reset the i82542 chip.
3233 */
3234 static void
3235 wm_reset(struct wm_softc *sc)
3236 {
3237 int phy_reset = 0;
3238 int error = 0;
3239 uint32_t reg, mask;
3240
3241 /*
3242 * Allocate on-chip memory according to the MTU size.
3243 * The Packet Buffer Allocation register must be written
3244 * before the chip is reset.
3245 */
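	/*
	 * A worked example, assuming the PBA_* values are in KB units and
	 * PBA_BYTE_SHIFT is 10: the 82547 case below with a standard MTU
	 * gives Rx 30 KB and leaves (40 - 30) KB = 10 KB of the packet
	 * buffer for the Tx FIFO managed by the stall workaround.
	 */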
3246 switch (sc->sc_type) {
3247 case WM_T_82547:
3248 case WM_T_82547_2:
3249 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3250 PBA_22K : PBA_30K;
3251 sc->sc_txfifo_head = 0;
3252 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3253 sc->sc_txfifo_size =
3254 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3255 sc->sc_txfifo_stall = 0;
3256 break;
3257 case WM_T_82571:
3258 case WM_T_82572:
3259 	case WM_T_82575: /* XXX need special handling for jumbo frames */
3260 case WM_T_I350:
3261 case WM_T_I354:
3262 case WM_T_80003:
3263 sc->sc_pba = PBA_32K;
3264 break;
3265 case WM_T_82580:
3266 sc->sc_pba = PBA_35K;
3267 break;
3268 case WM_T_I210:
3269 case WM_T_I211:
3270 sc->sc_pba = PBA_34K;
3271 break;
3272 case WM_T_82576:
3273 sc->sc_pba = PBA_64K;
3274 break;
3275 case WM_T_82573:
3276 sc->sc_pba = PBA_12K;
3277 break;
3278 case WM_T_82574:
3279 case WM_T_82583:
3280 sc->sc_pba = PBA_20K;
3281 break;
3282 case WM_T_ICH8:
3283 sc->sc_pba = PBA_8K;
3284 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3285 break;
3286 case WM_T_ICH9:
3287 case WM_T_ICH10:
3288 sc->sc_pba = PBA_10K;
3289 break;
3290 case WM_T_PCH:
3291 case WM_T_PCH2:
3292 case WM_T_PCH_LPT:
3293 sc->sc_pba = PBA_26K;
3294 break;
3295 default:
3296 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3297 PBA_40K : PBA_48K;
3298 break;
3299 }
3300 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3301
3302 	/* Prevent the PCI-E bus from sticking: disable master requests and wait for pending ones to drain */
3303 if (sc->sc_flags & WM_F_PCIE) {
3304 int timeout = 800;
3305
3306 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3307 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3308
3309 while (timeout--) {
3310 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3311 == 0)
3312 break;
3313 delay(100);
3314 }
3315 }
3316
3317 /* Set the completion timeout for interface */
3318 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3319 || (sc->sc_type == WM_T_82580)
3320 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3321 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3322 wm_set_pcie_completion_timeout(sc);
3323
3324 /* Clear interrupt */
3325 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3326
3327 /* Stop the transmit and receive processes. */
3328 CSR_WRITE(sc, WMREG_RCTL, 0);
3329 sc->sc_rctl &= ~RCTL_EN;
3330 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3331 CSR_WRITE_FLUSH(sc);
3332
3333 /* XXX set_tbi_sbp_82543() */
3334
3335 delay(10*1000);
3336
3337 /* Must acquire the MDIO ownership before MAC reset */
3338 switch (sc->sc_type) {
3339 case WM_T_82573:
3340 case WM_T_82574:
3341 case WM_T_82583:
3342 error = wm_get_hw_semaphore_82573(sc);
3343 break;
3344 default:
3345 break;
3346 }
3347
3348 /*
3349 * 82541 Errata 29? & 82547 Errata 28?
3350 * See also the description about PHY_RST bit in CTRL register
3351 * in 8254x_GBe_SDM.pdf.
3352 */
3353 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3354 CSR_WRITE(sc, WMREG_CTRL,
3355 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3356 CSR_WRITE_FLUSH(sc);
3357 delay(5000);
3358 }
3359
3360 switch (sc->sc_type) {
3361 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3362 case WM_T_82541:
3363 case WM_T_82541_2:
3364 case WM_T_82547:
3365 case WM_T_82547_2:
3366 /*
3367 * On some chipsets, a reset through a memory-mapped write
3368 * cycle can cause the chip to reset before completing the
3369 		 * write cycle. This causes a major headache that can be
3370 * avoided by issuing the reset via indirect register writes
3371 * through I/O space.
3372 *
3373 * So, if we successfully mapped the I/O BAR at attach time,
3374 * use that. Otherwise, try our luck with a memory-mapped
3375 * reset.
3376 */
3377 if (sc->sc_flags & WM_F_IOH_VALID)
3378 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3379 else
3380 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3381 break;
3382 case WM_T_82545_3:
3383 case WM_T_82546_3:
3384 /* Use the shadow control register on these chips. */
3385 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3386 break;
3387 case WM_T_80003:
3388 mask = swfwphysem[sc->sc_funcid];
3389 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3390 wm_get_swfw_semaphore(sc, mask);
3391 CSR_WRITE(sc, WMREG_CTRL, reg);
3392 wm_put_swfw_semaphore(sc, mask);
3393 break;
3394 case WM_T_ICH8:
3395 case WM_T_ICH9:
3396 case WM_T_ICH10:
3397 case WM_T_PCH:
3398 case WM_T_PCH2:
3399 case WM_T_PCH_LPT:
3400 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3401 if (wm_check_reset_block(sc) == 0) {
3402 /*
3403 * Gate automatic PHY configuration by hardware on
3404 * non-managed 82579
3405 */
3406 if ((sc->sc_type == WM_T_PCH2)
3407 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3408 != 0))
3409 wm_gate_hw_phy_config_ich8lan(sc, 1);
3410 
3412 reg |= CTRL_PHY_RESET;
3413 phy_reset = 1;
3414 }
3415 wm_get_swfwhw_semaphore(sc);
3416 CSR_WRITE(sc, WMREG_CTRL, reg);
3417 		/* Don't insert a completion barrier during the reset */
3418 delay(20*1000);
3419 wm_put_swfwhw_semaphore(sc);
3420 break;
3421 case WM_T_82580:
3422 case WM_T_I350:
3423 case WM_T_I354:
3424 case WM_T_I210:
3425 case WM_T_I211:
3426 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3427 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3428 CSR_WRITE_FLUSH(sc);
3429 delay(5000);
3430 break;
3431 case WM_T_82542_2_0:
3432 case WM_T_82542_2_1:
3433 case WM_T_82543:
3434 case WM_T_82540:
3435 case WM_T_82545:
3436 case WM_T_82546:
3437 case WM_T_82571:
3438 case WM_T_82572:
3439 case WM_T_82573:
3440 case WM_T_82574:
3441 case WM_T_82575:
3442 case WM_T_82576:
3443 case WM_T_82583:
3444 default:
3445 /* Everything else can safely use the documented method. */
3446 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3447 break;
3448 }
3449
3450 /* Must release the MDIO ownership after MAC reset */
3451 switch (sc->sc_type) {
3452 case WM_T_82573:
3453 case WM_T_82574:
3454 case WM_T_82583:
3455 if (error == 0)
3456 wm_put_hw_semaphore_82573(sc);
3457 break;
3458 default:
3459 break;
3460 }
3461
3462 if (phy_reset != 0)
3463 wm_get_cfg_done(sc);
3464
3465 /* reload EEPROM */
3466 switch (sc->sc_type) {
3467 case WM_T_82542_2_0:
3468 case WM_T_82542_2_1:
3469 case WM_T_82543:
3470 case WM_T_82544:
3471 delay(10);
3472 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3473 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3474 CSR_WRITE_FLUSH(sc);
3475 delay(2000);
3476 break;
3477 case WM_T_82540:
3478 case WM_T_82545:
3479 case WM_T_82545_3:
3480 case WM_T_82546:
3481 case WM_T_82546_3:
3482 delay(5*1000);
3483 /* XXX Disable HW ARPs on ASF enabled adapters */
3484 break;
3485 case WM_T_82541:
3486 case WM_T_82541_2:
3487 case WM_T_82547:
3488 case WM_T_82547_2:
3489 delay(20000);
3490 /* XXX Disable HW ARPs on ASF enabled adapters */
3491 break;
3492 case WM_T_82571:
3493 case WM_T_82572:
3494 case WM_T_82573:
3495 case WM_T_82574:
3496 case WM_T_82583:
3497 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3498 delay(10);
3499 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3500 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3501 CSR_WRITE_FLUSH(sc);
3502 }
3503 /* check EECD_EE_AUTORD */
3504 wm_get_auto_rd_done(sc);
3505 /*
3506 		 * PHY configuration from the NVM starts only after EECD_AUTO_RD
3507 * is set.
3508 */
3509 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3510 || (sc->sc_type == WM_T_82583))
3511 delay(25*1000);
3512 break;
3513 case WM_T_82575:
3514 case WM_T_82576:
3515 case WM_T_82580:
3516 case WM_T_I350:
3517 case WM_T_I354:
3518 case WM_T_I210:
3519 case WM_T_I211:
3520 case WM_T_80003:
3521 /* check EECD_EE_AUTORD */
3522 wm_get_auto_rd_done(sc);
3523 break;
3524 case WM_T_ICH8:
3525 case WM_T_ICH9:
3526 case WM_T_ICH10:
3527 case WM_T_PCH:
3528 case WM_T_PCH2:
3529 case WM_T_PCH_LPT:
3530 break;
3531 default:
3532 panic("%s: unknown type\n", __func__);
3533 }
3534
3535 /* Check whether EEPROM is present or not */
3536 switch (sc->sc_type) {
3537 case WM_T_82575:
3538 case WM_T_82576:
3539 #if 0 /* XXX */
3540 case WM_T_82580:
3541 #endif
3542 case WM_T_I350:
3543 case WM_T_I354:
3544 case WM_T_ICH8:
3545 case WM_T_ICH9:
3546 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3547 /* Not found */
3548 sc->sc_flags |= WM_F_EEPROM_INVALID;
3549 if ((sc->sc_type == WM_T_82575)
3550 || (sc->sc_type == WM_T_82576)
3551 || (sc->sc_type == WM_T_82580)
3552 || (sc->sc_type == WM_T_I350)
3553 || (sc->sc_type == WM_T_I354))
3554 wm_reset_init_script_82575(sc);
3555 }
3556 break;
3557 default:
3558 break;
3559 }
3560
3561 if ((sc->sc_type == WM_T_82580)
3562 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3563 /* clear global device reset status bit */
3564 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3565 }
3566
3567 /* Clear any pending interrupt events. */
3568 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3569 reg = CSR_READ(sc, WMREG_ICR);
3570
3571 /* reload sc_ctrl */
3572 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3573
3574 if (sc->sc_type == WM_T_I350)
3575 wm_set_eee_i350(sc);
3576
3577 /* dummy read from WUC */
3578 if (sc->sc_type == WM_T_PCH)
3579 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3580 /*
3581 * For PCH, this write will make sure that any noise will be detected
3582 * as a CRC error and be dropped rather than show up as a bad packet
3583 * to the DMA engine
3584 */
3585 if (sc->sc_type == WM_T_PCH)
3586 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3587
3588 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3589 CSR_WRITE(sc, WMREG_WUC, 0);
3590
3591 /* XXX need special handling for 82580 */
3592 }
3593
3594 /*
3595 * wm_add_rxbuf:
3596 *
3597  *	Add a receive buffer to the indicated descriptor.
3598 */
3599 static int
3600 wm_add_rxbuf(struct wm_softc *sc, int idx)
3601 {
3602 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3603 struct mbuf *m;
3604 int error;
3605
3606 KASSERT(WM_RX_LOCKED(sc));
3607
3608 MGETHDR(m, M_DONTWAIT, MT_DATA);
3609 if (m == NULL)
3610 return ENOBUFS;
3611
3612 MCLGET(m, M_DONTWAIT);
3613 if ((m->m_flags & M_EXT) == 0) {
3614 m_freem(m);
3615 return ENOBUFS;
3616 }
3617
3618 if (rxs->rxs_mbuf != NULL)
3619 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3620
3621 rxs->rxs_mbuf = m;
3622
3623 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3624 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3625 BUS_DMA_READ|BUS_DMA_NOWAIT);
3626 if (error) {
3627 /* XXX XXX XXX */
3628 aprint_error_dev(sc->sc_dev,
3629 "unable to load rx DMA map %d, error = %d\n",
3630 idx, error);
3631 panic("wm_add_rxbuf");
3632 }
3633
3634 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3635 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3636
3637 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3638 if ((sc->sc_rctl & RCTL_EN) != 0)
3639 WM_INIT_RXDESC(sc, idx);
3640 } else
3641 WM_INIT_RXDESC(sc, idx);
3642
3643 return 0;
3644 }
3645
3646 /*
3647 * wm_rxdrain:
3648 *
3649 * Drain the receive queue.
3650 */
3651 static void
3652 wm_rxdrain(struct wm_softc *sc)
3653 {
3654 struct wm_rxsoft *rxs;
3655 int i;
3656
3657 KASSERT(WM_RX_LOCKED(sc));
3658
3659 for (i = 0; i < WM_NRXDESC; i++) {
3660 rxs = &sc->sc_rxsoft[i];
3661 if (rxs->rxs_mbuf != NULL) {
3662 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3663 m_freem(rxs->rxs_mbuf);
3664 rxs->rxs_mbuf = NULL;
3665 }
3666 }
3667 }
3668
3669 /*
3670 * wm_init: [ifnet interface function]
3671 *
3672 * Initialize the interface.
3673 */
3674 static int
3675 wm_init(struct ifnet *ifp)
3676 {
3677 struct wm_softc *sc = ifp->if_softc;
3678 int ret;
3679
3680 WM_BOTH_LOCK(sc);
3681 ret = wm_init_locked(ifp);
3682 WM_BOTH_UNLOCK(sc);
3683
3684 return ret;
3685 }
3686
3687 static int
3688 wm_init_locked(struct ifnet *ifp)
3689 {
3690 struct wm_softc *sc = ifp->if_softc;
3691 struct wm_rxsoft *rxs;
3692 int i, j, trynum, error = 0;
3693 uint32_t reg;
3694
3695 KASSERT(WM_BOTH_LOCKED(sc));
3696 /*
3697 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3698 	 * There is a small but measurable benefit to avoiding the adjustment
3699 * of the descriptor so that the headers are aligned, for normal mtu,
3700 * on such platforms. One possibility is that the DMA itself is
3701 * slightly more efficient if the front of the entire packet (instead
3702 * of the front of the headers) is aligned.
3703 *
3704 * Note we must always set align_tweak to 0 if we are using
3705 * jumbo frames.
3706 */
3707 #ifdef __NO_STRICT_ALIGNMENT
3708 sc->sc_align_tweak = 0;
3709 #else
3710 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3711 sc->sc_align_tweak = 0;
3712 else
3713 sc->sc_align_tweak = 2;
3714 #endif /* __NO_STRICT_ALIGNMENT */
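	/*
	 * Illustrative sketch of the tweak (a comment only, not compiled):
	 * with the 14-byte Ethernet header, a 2-byte tweak places the IP
	 * header on a 4-byte boundary:
	 *
	 *	buf + 0:  2-byte pad (sc_align_tweak)
	 *	buf + 2:  14-byte Ethernet header
	 *	buf + 16: IP header, 4-byte aligned
	 */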
3715
3716 /* Cancel any pending I/O. */
3717 wm_stop_locked(ifp, 0);
3718
3719 /* update statistics before reset */
3720 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3721 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3722
3723 /* Reset the chip to a known state. */
3724 wm_reset(sc);
3725
3726 switch (sc->sc_type) {
3727 case WM_T_82571:
3728 case WM_T_82572:
3729 case WM_T_82573:
3730 case WM_T_82574:
3731 case WM_T_82583:
3732 case WM_T_80003:
3733 case WM_T_ICH8:
3734 case WM_T_ICH9:
3735 case WM_T_ICH10:
3736 case WM_T_PCH:
3737 case WM_T_PCH2:
3738 case WM_T_PCH_LPT:
3739 if (wm_check_mng_mode(sc) != 0)
3740 wm_get_hw_control(sc);
3741 break;
3742 default:
3743 break;
3744 }
3745
3746 /* Reset the PHY. */
3747 if (sc->sc_flags & WM_F_HAS_MII)
3748 wm_gmii_reset(sc);
3749
3750 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3751 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3752 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3753 || (sc->sc_type == WM_T_PCH_LPT))
3754 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3755
3756 /* Initialize the transmit descriptor ring. */
3757 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3758 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3759 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3760 sc->sc_txfree = WM_NTXDESC(sc);
3761 sc->sc_txnext = 0;
3762
3763 if (sc->sc_type < WM_T_82543) {
3764 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
3765 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
3766 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3767 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3768 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3769 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3770 } else {
3771 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
3772 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
3773 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3774 CSR_WRITE(sc, WMREG_TDH, 0);
3775 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3776 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3777
3778 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3779 /*
3780 * Don't write TDT before TCTL.EN is set.
3781 			 * See the documentation.
3782 */
3783 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3784 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3785 | TXDCTL_WTHRESH(0));
3786 else {
3787 CSR_WRITE(sc, WMREG_TDT, 0);
3788 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3789 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3790 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3791 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3792 }
3793 }
3794 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3795 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3796
3797 /* Initialize the transmit job descriptors. */
3798 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3799 sc->sc_txsoft[i].txs_mbuf = NULL;
3800 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3801 sc->sc_txsnext = 0;
3802 sc->sc_txsdirty = 0;
3803
3804 /*
3805 * Initialize the receive descriptor and receive job
3806 * descriptor rings.
3807 */
3808 if (sc->sc_type < WM_T_82543) {
3809 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3810 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3811 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3812 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3813 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3814 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3815
3816 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3817 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3818 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3819 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3820 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3821 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3822 } else {
3823 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3824 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3825 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3826 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3827 CSR_WRITE(sc, WMREG_EITR(0), 450);
3828 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
3829 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
3830 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3831 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
3832 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3833 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3834 | RXDCTL_WTHRESH(1));
3835 } else {
3836 CSR_WRITE(sc, WMREG_RDH, 0);
3837 CSR_WRITE(sc, WMREG_RDT, 0);
3838 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3839 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3840 }
3841 }
3842 for (i = 0; i < WM_NRXDESC; i++) {
3843 rxs = &sc->sc_rxsoft[i];
3844 if (rxs->rxs_mbuf == NULL) {
3845 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3846 log(LOG_ERR, "%s: unable to allocate or map "
3847 "rx buffer %d, error = %d\n",
3848 device_xname(sc->sc_dev), i, error);
3849 /*
3850 * XXX Should attempt to run with fewer receive
3851 * XXX buffers instead of just failing.
3852 */
3853 wm_rxdrain(sc);
3854 goto out;
3855 }
3856 } else {
3857 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3858 WM_INIT_RXDESC(sc, i);
3859 /*
3860 			 * For 82575 and newer devices, the RX descriptors
3861 			 * must be initialized after RCTL.EN is set in
3862 			 * wm_set_filter().
3863 */
3864 }
3865 }
3866 sc->sc_rxptr = 0;
3867 sc->sc_rxdiscard = 0;
3868 WM_RXCHAIN_RESET(sc);
3869
3870 /*
3871 * Clear out the VLAN table -- we don't use it (yet).
3872 */
3873 CSR_WRITE(sc, WMREG_VET, 0);
3874 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3875 trynum = 10; /* Due to hw errata */
3876 else
3877 trynum = 1;
3878 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3879 for (j = 0; j < trynum; j++)
3880 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3881
3882 /*
3883 * Set up flow-control parameters.
3884 *
3885 * XXX Values could probably stand some tuning.
3886 */
3887 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3888 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
3889 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
3890 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3891 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3892 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3893 }
3894
3895 sc->sc_fcrtl = FCRTL_DFLT;
3896 if (sc->sc_type < WM_T_82543) {
3897 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3898 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3899 } else {
3900 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3901 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3902 }
3903
3904 if (sc->sc_type == WM_T_80003)
3905 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3906 else
3907 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3908
3909 /* Writes the control register. */
3910 wm_set_vlan(sc);
3911
3912 if (sc->sc_flags & WM_F_HAS_MII) {
3913 int val;
3914
3915 switch (sc->sc_type) {
3916 case WM_T_80003:
3917 case WM_T_ICH8:
3918 case WM_T_ICH9:
3919 case WM_T_ICH10:
3920 case WM_T_PCH:
3921 case WM_T_PCH2:
3922 case WM_T_PCH_LPT:
3923 /*
3924 * Set the mac to wait the maximum time between each
3925 * iteration and increase the max iterations when
3926 * polling the phy; this fixes erroneous timeouts at
3927 * 10Mbps.
3928 */
3929 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3930 0xFFFF);
3931 val = wm_kmrn_readreg(sc,
3932 KUMCTRLSTA_OFFSET_INB_PARAM);
3933 val |= 0x3F;
3934 wm_kmrn_writereg(sc,
3935 KUMCTRLSTA_OFFSET_INB_PARAM, val);
3936 break;
3937 default:
3938 break;
3939 }
3940
3941 if (sc->sc_type == WM_T_80003) {
3942 val = CSR_READ(sc, WMREG_CTRL_EXT);
3943 val &= ~CTRL_EXT_LINK_MODE_MASK;
3944 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3945
3946 			/* Bypass RX and TX FIFOs */
3947 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3948 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
3949 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3950 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3951 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3952 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3953 }
3954 }
3955 #if 0
3956 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3957 #endif
3958
3959 /* Set up checksum offload parameters. */
3960 reg = CSR_READ(sc, WMREG_RXCSUM);
3961 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3962 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3963 reg |= RXCSUM_IPOFL;
3964 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3965 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3966 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3967 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3968 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3969
3970 /* Set up the interrupt registers. */
3971 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3972 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3973 ICR_RXO | ICR_RXT0;
3974 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3975
3976 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3977 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3978 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3979 reg = CSR_READ(sc, WMREG_KABGTXD);
3980 reg |= KABGTXD_BGSQLBIAS;
3981 CSR_WRITE(sc, WMREG_KABGTXD, reg);
3982 }
3983
3984 /* Set up the inter-packet gap. */
3985 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3986
3987 if (sc->sc_type >= WM_T_82543) {
3988 /*
3989 * Set up the interrupt throttling register (units of 256ns)
3990 * Note that a footnote in Intel's documentation says this
3991 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3992 		 * or 10Mbit mode.  Empirically, it appears that the same
3993 		 * is also true for the 1024ns units of the other
3994 * interrupt-related timer registers -- so, really, we ought
3995 * to divide this value by 4 when the link speed is low.
3996 *
3997 * XXX implement this division at link speed change!
3998 */
3999
4000 /*
4001 * For N interrupts/sec, set this value to:
4002 * 1000000000 / (N * 256). Note that we set the
4003 * absolute and packet timer values to this value
4004 * divided by 4 to get "simple timer" behavior.
4005 */
4006
4007 sc->sc_itr = 1500; /* 2604 ints/sec */
4008 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4009 }
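	/*
	 * Worked example (a sketch based only on the formula above): for a
	 * target of N interrupts/sec the register value is 10^9 / (N * 256),
	 * so the 1500 programmed here corresponds to roughly
	 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec, matching the
	 * comment on sc_itr.
	 */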
4010
4011 /* Set the VLAN ethernetype. */
4012 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4013
4014 /*
4015 * Set up the transmit control register; we start out with
4016 	 * a collision distance suitable for FDX, but update it when
4017 * we resolve the media type.
4018 */
4019 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4020 | TCTL_CT(TX_COLLISION_THRESHOLD)
4021 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4022 if (sc->sc_type >= WM_T_82571)
4023 sc->sc_tctl |= TCTL_MULR;
4024 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4025
4026 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4027 		/* Write TDT after TCTL.EN is set. See the documentation. */
4028 CSR_WRITE(sc, WMREG_TDT, 0);
4029 }
4030
4031 if (sc->sc_type == WM_T_80003) {
4032 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4033 reg &= ~TCTL_EXT_GCEX_MASK;
4034 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4035 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4036 }
4037
4038 /* Set the media. */
4039 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4040 goto out;
4041
4042 /* Configure for OS presence */
4043 wm_init_manageability(sc);
4044
4045 /*
4046 * Set up the receive control register; we actually program
4047 * the register when we set the receive filter. Use multicast
4048 * address offset type 0.
4049 *
4050 * Only the i82544 has the ability to strip the incoming
4051 * CRC, so we don't enable that feature.
4052 */
4053 sc->sc_mchash_type = 0;
4054 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4055 | RCTL_MO(sc->sc_mchash_type);
4056
4057 /*
4058 * The I350 has a bug where it always strips the CRC whether
4059 	 * asked to or not. So ask for stripped CRC here and cope with it in wm_rxintr().
4060 */
4061 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4062 || (sc->sc_type == WM_T_I210))
4063 sc->sc_rctl |= RCTL_SECRC;
4064
4065 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4066 && (ifp->if_mtu > ETHERMTU)) {
4067 sc->sc_rctl |= RCTL_LPE;
4068 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4069 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4070 }
4071
4072 if (MCLBYTES == 2048) {
4073 sc->sc_rctl |= RCTL_2k;
4074 } else {
4075 if (sc->sc_type >= WM_T_82543) {
4076 switch (MCLBYTES) {
4077 case 4096:
4078 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4079 break;
4080 case 8192:
4081 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4082 break;
4083 case 16384:
4084 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4085 break;
4086 default:
4087 panic("wm_init: MCLBYTES %d unsupported",
4088 MCLBYTES);
4089 break;
4090 }
4091 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4092 }
4093
4094 /* Set the receive filter. */
4095 wm_set_filter(sc);
4096
4097 /* Enable ECC */
4098 switch (sc->sc_type) {
4099 case WM_T_82571:
4100 reg = CSR_READ(sc, WMREG_PBA_ECC);
4101 reg |= PBA_ECC_CORR_EN;
4102 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4103 break;
4104 case WM_T_PCH_LPT:
4105 reg = CSR_READ(sc, WMREG_PBECCSTS);
4106 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4107 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4108
4109 reg = CSR_READ(sc, WMREG_CTRL);
4110 reg |= CTRL_MEHE;
4111 CSR_WRITE(sc, WMREG_CTRL, reg);
4112 break;
4113 default:
4114 break;
4115 }
4116
4117 	/* On 82575 and later, set RDT only if RX is enabled */
4118 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4119 for (i = 0; i < WM_NRXDESC; i++)
4120 WM_INIT_RXDESC(sc, i);
4121
4122 sc->sc_stopping = false;
4123
4124 /* Start the one second link check clock. */
4125 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4126
4127 /* ...all done! */
4128 ifp->if_flags |= IFF_RUNNING;
4129 ifp->if_flags &= ~IFF_OACTIVE;
4130
4131 out:
4132 sc->sc_if_flags = ifp->if_flags;
4133 if (error)
4134 log(LOG_ERR, "%s: interface not running\n",
4135 device_xname(sc->sc_dev));
4136 return error;
4137 }
4138
4139 /*
4140 * wm_stop: [ifnet interface function]
4141 *
4142 * Stop transmission on the interface.
4143 */
4144 static void
4145 wm_stop(struct ifnet *ifp, int disable)
4146 {
4147 struct wm_softc *sc = ifp->if_softc;
4148
4149 WM_BOTH_LOCK(sc);
4150 wm_stop_locked(ifp, disable);
4151 WM_BOTH_UNLOCK(sc);
4152 }
4153
4154 static void
4155 wm_stop_locked(struct ifnet *ifp, int disable)
4156 {
4157 struct wm_softc *sc = ifp->if_softc;
4158 struct wm_txsoft *txs;
4159 int i;
4160
4161 KASSERT(WM_BOTH_LOCKED(sc));
4162
4163 sc->sc_stopping = true;
4164
4165 /* Stop the one second clock. */
4166 callout_stop(&sc->sc_tick_ch);
4167
4168 /* Stop the 82547 Tx FIFO stall check timer. */
4169 if (sc->sc_type == WM_T_82547)
4170 callout_stop(&sc->sc_txfifo_ch);
4171
4172 if (sc->sc_flags & WM_F_HAS_MII) {
4173 /* Down the MII. */
4174 mii_down(&sc->sc_mii);
4175 } else {
4176 #if 0
4177 /* Should we clear PHY's status properly? */
4178 wm_reset(sc);
4179 #endif
4180 }
4181
4182 /* Stop the transmit and receive processes. */
4183 CSR_WRITE(sc, WMREG_TCTL, 0);
4184 CSR_WRITE(sc, WMREG_RCTL, 0);
4185 sc->sc_rctl &= ~RCTL_EN;
4186
4187 /*
4188 * Clear the interrupt mask to ensure the device cannot assert its
4189 * interrupt line.
4190 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4191 * any currently pending or shared interrupt.
4192 */
4193 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4194 sc->sc_icr = 0;
4195
4196 /* Release any queued transmit buffers. */
4197 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4198 txs = &sc->sc_txsoft[i];
4199 if (txs->txs_mbuf != NULL) {
4200 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4201 m_freem(txs->txs_mbuf);
4202 txs->txs_mbuf = NULL;
4203 }
4204 }
4205
4206 /* Mark the interface as down and cancel the watchdog timer. */
4207 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4208 ifp->if_timer = 0;
4209
4210 if (disable)
4211 wm_rxdrain(sc);
4212
4213 #if 0 /* notyet */
4214 if (sc->sc_type >= WM_T_82544)
4215 CSR_WRITE(sc, WMREG_WUC, 0);
4216 #endif
4217 }
4218
4219 /*
4220 * wm_tx_offload:
4221 *
4222 * Set up TCP/IP checksumming parameters for the
4223 * specified packet.
4224 */
4225 static int
4226 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4227 uint8_t *fieldsp)
4228 {
4229 struct mbuf *m0 = txs->txs_mbuf;
4230 struct livengood_tcpip_ctxdesc *t;
4231 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4232 uint32_t ipcse;
4233 struct ether_header *eh;
4234 int offset, iphl;
4235 uint8_t fields;
4236
4237 /*
4238 * XXX It would be nice if the mbuf pkthdr had offset
4239 * fields for the protocol headers.
4240 */
4241
4242 eh = mtod(m0, struct ether_header *);
4243 switch (htons(eh->ether_type)) {
4244 case ETHERTYPE_IP:
4245 case ETHERTYPE_IPV6:
4246 offset = ETHER_HDR_LEN;
4247 break;
4248
4249 case ETHERTYPE_VLAN:
4250 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4251 break;
4252
4253 default:
4254 /*
4255 * Don't support this protocol or encapsulation.
4256 */
4257 *fieldsp = 0;
4258 *cmdp = 0;
4259 return 0;
4260 }
4261
4262 if ((m0->m_pkthdr.csum_flags &
4263 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4264 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4265 } else {
4266 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4267 }
4268 ipcse = offset + iphl - 1;
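	/*
	 * Worked example (a sketch): for an untagged IPv4 packet with a
	 * 20-byte header, offset = ETHER_HDR_LEN (14) and iphl = 20, so
	 * ipcse = 14 + 20 - 1 = 33, the offset of the last byte covered
	 * by the IP checksum.
	 */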
4269
4270 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4271 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4272 seg = 0;
4273 fields = 0;
4274
4275 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4276 int hlen = offset + iphl;
4277 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4278
4279 if (__predict_false(m0->m_len <
4280 (hlen + sizeof(struct tcphdr)))) {
4281 /*
4282 * TCP/IP headers are not in the first mbuf; we need
4283 * to do this the slow and painful way. Let's just
4284 * hope this doesn't happen very often.
4285 */
4286 struct tcphdr th;
4287
4288 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4289
4290 m_copydata(m0, hlen, sizeof(th), &th);
4291 if (v4) {
4292 struct ip ip;
4293
4294 m_copydata(m0, offset, sizeof(ip), &ip);
4295 ip.ip_len = 0;
4296 m_copyback(m0,
4297 offset + offsetof(struct ip, ip_len),
4298 sizeof(ip.ip_len), &ip.ip_len);
4299 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4300 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4301 } else {
4302 struct ip6_hdr ip6;
4303
4304 m_copydata(m0, offset, sizeof(ip6), &ip6);
4305 ip6.ip6_plen = 0;
4306 m_copyback(m0,
4307 offset + offsetof(struct ip6_hdr, ip6_plen),
4308 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4309 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4310 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4311 }
4312 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4313 sizeof(th.th_sum), &th.th_sum);
4314
4315 hlen += th.th_off << 2;
4316 } else {
4317 /*
4318 * TCP/IP headers are in the first mbuf; we can do
4319 * this the easy way.
4320 */
4321 struct tcphdr *th;
4322
4323 if (v4) {
4324 struct ip *ip =
4325 (void *)(mtod(m0, char *) + offset);
4326 th = (void *)(mtod(m0, char *) + hlen);
4327
4328 ip->ip_len = 0;
4329 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4330 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4331 } else {
4332 struct ip6_hdr *ip6 =
4333 (void *)(mtod(m0, char *) + offset);
4334 th = (void *)(mtod(m0, char *) + hlen);
4335
4336 ip6->ip6_plen = 0;
4337 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4338 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4339 }
4340 hlen += th->th_off << 2;
4341 }
4342
4343 if (v4) {
4344 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4345 cmdlen |= WTX_TCPIP_CMD_IP;
4346 } else {
4347 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4348 ipcse = 0;
4349 }
4350 cmd |= WTX_TCPIP_CMD_TSE;
4351 cmdlen |= WTX_TCPIP_CMD_TSE |
4352 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4353 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4354 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4355 }
4356
4357 /*
4358 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4359 * offload feature, if we load the context descriptor, we
4360 * MUST provide valid values for IPCSS and TUCSS fields.
4361 */
4362
4363 ipcs = WTX_TCPIP_IPCSS(offset) |
4364 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4365 WTX_TCPIP_IPCSE(ipcse);
4366 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
4367 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
4368 fields |= WTX_IXSM;
4369 }
4370
4371 offset += iphl;
4372
4373 if (m0->m_pkthdr.csum_flags &
4374 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
4375 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4376 fields |= WTX_TXSM;
4377 tucs = WTX_TCPIP_TUCSS(offset) |
4378 WTX_TCPIP_TUCSO(offset +
4379 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
4380 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4381 } else if ((m0->m_pkthdr.csum_flags &
4382 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
4383 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4384 fields |= WTX_TXSM;
4385 tucs = WTX_TCPIP_TUCSS(offset) |
4386 WTX_TCPIP_TUCSO(offset +
4387 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
4388 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4389 } else {
4390 /* Just initialize it to a valid TCP context. */
4391 tucs = WTX_TCPIP_TUCSS(offset) |
4392 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
4393 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4394 }
4395
4396 /* Fill in the context descriptor. */
4397 t = (struct livengood_tcpip_ctxdesc *)
4398 &sc->sc_txdescs[sc->sc_txnext];
4399 t->tcpip_ipcs = htole32(ipcs);
4400 t->tcpip_tucs = htole32(tucs);
4401 t->tcpip_cmdlen = htole32(cmdlen);
4402 t->tcpip_seg = htole32(seg);
4403 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4404
4405 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4406 txs->txs_ndesc++;
4407
4408 *cmdp = cmd;
4409 *fieldsp = fields;
4410
4411 return 0;
4412 }
4413
4414 static void
4415 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
4416 {
4417 struct mbuf *m;
4418 int i;
4419
4420 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
4421 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
4422 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
4423 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
4424 m->m_data, m->m_len, m->m_flags);
4425 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
4426 i, i == 1 ? "" : "s");
4427 }
4428
4429 /*
4430 * wm_82547_txfifo_stall:
4431 *
4432 * Callout used to wait for the 82547 Tx FIFO to drain,
4433 * reset the FIFO pointers, and restart packet transmission.
4434 */
4435 static void
4436 wm_82547_txfifo_stall(void *arg)
4437 {
4438 struct wm_softc *sc = arg;
4439 #ifndef WM_MPSAFE
4440 int s;
4441
4442 s = splnet();
4443 #endif
4444 WM_TX_LOCK(sc);
4445
4446 if (sc->sc_stopping)
4447 goto out;
4448
4449 if (sc->sc_txfifo_stall) {
4450 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
4451 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
4452 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
4453 /*
4454 * Packets have drained. Stop transmitter, reset
4455 * FIFO pointers, restart transmitter, and kick
4456 * the packet queue.
4457 */
4458 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
4459 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
4460 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
4461 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
4462 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
4463 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
4464 CSR_WRITE(sc, WMREG_TCTL, tctl);
4465 CSR_WRITE_FLUSH(sc);
4466
4467 sc->sc_txfifo_head = 0;
4468 sc->sc_txfifo_stall = 0;
4469 wm_start_locked(&sc->sc_ethercom.ec_if);
4470 } else {
4471 /*
4472 * Still waiting for packets to drain; try again in
4473 * another tick.
4474 */
4475 callout_schedule(&sc->sc_txfifo_ch, 1);
4476 }
4477 }
4478
4479 out:
4480 WM_TX_UNLOCK(sc);
4481 #ifndef WM_MPSAFE
4482 splx(s);
4483 #endif
4484 }
4485
4486 /*
4487 * wm_82547_txfifo_bugchk:
4488 *
4489 * Check for bug condition in the 82547 Tx FIFO. We need to
4490 * prevent enqueueing a packet that would wrap around the end
4491  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
4492 *
4493 * We do this by checking the amount of space before the end
4494 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
4495 * the Tx FIFO, wait for all remaining packets to drain, reset
4496 * the internal FIFO pointers to the beginning, and restart
4497 * transmission on the interface.
4498 */
4499 #define WM_FIFO_HDR 0x10
4500 #define WM_82547_PAD_LEN 0x3e0
4501 static int
4502 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
4503 {
4504 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
4505 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
4506
4507 /* Just return if already stalled. */
4508 if (sc->sc_txfifo_stall)
4509 return 1;
4510
4511 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4512 /* Stall only occurs in half-duplex mode. */
4513 goto send_packet;
4514 }
4515
4516 if (len >= WM_82547_PAD_LEN + space) {
4517 sc->sc_txfifo_stall = 1;
4518 callout_schedule(&sc->sc_txfifo_ch, 1);
4519 return 1;
4520 }
4521
4522 send_packet:
4523 sc->sc_txfifo_head += len;
4524 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
4525 sc->sc_txfifo_head -= sc->sc_txfifo_size;
4526
4527 return 0;
4528 }
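/*
 * Worked example of the wrap check above (a sketch; the 8 KB FIFO size
 * and the helper below are illustrative assumptions, not part of the
 * driver).
 */
#if 0
static int
wm_82547_would_wrap(int fifo_size, int fifo_head, int pktlen)
{
	int space = fifo_size - fifo_head;
	int len = roundup(pktlen + WM_FIFO_HDR, WM_FIFO_HDR);

	/*
	 * E.g. size 0x2000, head 0x1e00, pktlen 1518: space = 0x200,
	 * len = 0x600, threshold = 0x3e0 + 0x200 = 0x5e0, so the
	 * packet would wrap and the FIFO must be stalled first.
	 */
	return len >= WM_82547_PAD_LEN + space;
}
#endif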
4529
4530 /*
4531 * wm_start: [ifnet interface function]
4532 *
4533 * Start packet transmission on the interface.
4534 */
4535 static void
4536 wm_start(struct ifnet *ifp)
4537 {
4538 struct wm_softc *sc = ifp->if_softc;
4539
4540 WM_TX_LOCK(sc);
4541 if (!sc->sc_stopping)
4542 wm_start_locked(ifp);
4543 WM_TX_UNLOCK(sc);
4544 }
4545
4546 static void
4547 wm_start_locked(struct ifnet *ifp)
4548 {
4549 struct wm_softc *sc = ifp->if_softc;
4550 struct mbuf *m0;
4551 struct m_tag *mtag;
4552 struct wm_txsoft *txs;
4553 bus_dmamap_t dmamap;
4554 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
4555 bus_addr_t curaddr;
4556 bus_size_t seglen, curlen;
4557 uint32_t cksumcmd;
4558 uint8_t cksumfields;
4559
4560 KASSERT(WM_TX_LOCKED(sc));
4561
4562 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4563 return;
4564
4565 /* Remember the previous number of free descriptors. */
4566 ofree = sc->sc_txfree;
4567
4568 /*
4569 * Loop through the send queue, setting up transmit descriptors
4570 * until we drain the queue, or use up all available transmit
4571 * descriptors.
4572 */
4573 for (;;) {
4574 m0 = NULL;
4575
4576 /* Get a work queue entry. */
4577 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
4578 wm_txintr(sc);
4579 if (sc->sc_txsfree == 0) {
4580 DPRINTF(WM_DEBUG_TX,
4581 ("%s: TX: no free job descriptors\n",
4582 device_xname(sc->sc_dev)));
4583 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
4584 break;
4585 }
4586 }
4587
4588 /* Grab a packet off the queue. */
4589 IFQ_DEQUEUE(&ifp->if_snd, m0);
4590 if (m0 == NULL)
4591 break;
4592
4593 DPRINTF(WM_DEBUG_TX,
4594 ("%s: TX: have packet to transmit: %p\n",
4595 device_xname(sc->sc_dev), m0));
4596
4597 txs = &sc->sc_txsoft[sc->sc_txsnext];
4598 dmamap = txs->txs_dmamap;
4599
4600 use_tso = (m0->m_pkthdr.csum_flags &
4601 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
4602
4603 /*
4604 * So says the Linux driver:
4605 * The controller does a simple calculation to make sure
4606 * there is enough room in the FIFO before initiating the
4607 * DMA for each buffer. The calc is:
4608 * 4 = ceil(buffer len / MSS)
4609 * To make sure we don't overrun the FIFO, adjust the max
4610 * buffer len if the MSS drops.
4611 */
4612 dmamap->dm_maxsegsz =
4613 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
4614 ? m0->m_pkthdr.segsz << 2
4615 : WTX_MAX_LEN;
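		/*
		 * Worked example (a sketch): for a TSO packet with an
		 * MSS of 1460, segsz << 2 = 5840, so each DMA segment
		 * is capped at 5840 bytes if that is below WTX_MAX_LEN;
		 * otherwise WTX_MAX_LEN is used as the cap.
		 */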
4616
4617 /*
4618 * Load the DMA map. If this fails, the packet either
4619 * didn't fit in the allotted number of segments, or we
4620 * were short on resources. For the too-many-segments
4621 * case, we simply report an error and drop the packet,
4622 * since we can't sanely copy a jumbo packet to a single
4623 * buffer.
4624 */
4625 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
4626 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
4627 if (error) {
4628 if (error == EFBIG) {
4629 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4630 log(LOG_ERR, "%s: Tx packet consumes too many "
4631 "DMA segments, dropping...\n",
4632 device_xname(sc->sc_dev));
4633 wm_dump_mbuf_chain(sc, m0);
4634 m_freem(m0);
4635 continue;
4636 }
4637 /* Short on resources, just stop for now. */
4638 DPRINTF(WM_DEBUG_TX,
4639 ("%s: TX: dmamap load failed: %d\n",
4640 device_xname(sc->sc_dev), error));
4641 break;
4642 }
4643
4644 segs_needed = dmamap->dm_nsegs;
4645 if (use_tso) {
4646 /* For sentinel descriptor; see below. */
4647 segs_needed++;
4648 }
4649
4650 /*
4651 * Ensure we have enough descriptors free to describe
4652 * the packet. Note, we always reserve one descriptor
4653 * at the end of the ring due to the semantics of the
4654 * TDT register, plus one more in the event we need
4655 * to load offload context.
4656 */
4657 if (segs_needed > sc->sc_txfree - 2) {
4658 /*
4659 * Not enough free descriptors to transmit this
4660 * packet. We haven't committed anything yet,
4661 * so just unload the DMA map, put the packet
4662 * pack on the queue, and punt. Notify the upper
4663 * layer that there are no more slots left.
4664 */
4665 DPRINTF(WM_DEBUG_TX,
4666 ("%s: TX: need %d (%d) descriptors, have %d\n",
4667 device_xname(sc->sc_dev), dmamap->dm_nsegs,
4668 segs_needed, sc->sc_txfree - 1));
4669 ifp->if_flags |= IFF_OACTIVE;
4670 bus_dmamap_unload(sc->sc_dmat, dmamap);
4671 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
4672 break;
4673 }
4674
4675 /*
4676 * Check for 82547 Tx FIFO bug. We need to do this
4677 * once we know we can transmit the packet, since we
4678 * do some internal FIFO space accounting here.
4679 */
4680 if (sc->sc_type == WM_T_82547 &&
4681 wm_82547_txfifo_bugchk(sc, m0)) {
4682 DPRINTF(WM_DEBUG_TX,
4683 ("%s: TX: 82547 Tx FIFO bug detected\n",
4684 device_xname(sc->sc_dev)));
4685 ifp->if_flags |= IFF_OACTIVE;
4686 bus_dmamap_unload(sc->sc_dmat, dmamap);
4687 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
4688 break;
4689 }
4690
4691 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
4692
4693 DPRINTF(WM_DEBUG_TX,
4694 ("%s: TX: packet has %d (%d) DMA segments\n",
4695 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
4696
4697 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
4698
4699 /*
4700 * Store a pointer to the packet so that we can free it
4701 * later.
4702 *
4703 * Initially, we consider the number of descriptors the
4704 		 * packet uses to be the number of DMA segments. This may be
4705 * incremented by 1 if we do checksum offload (a descriptor
4706 * is used to set the checksum context).
4707 */
4708 txs->txs_mbuf = m0;
4709 txs->txs_firstdesc = sc->sc_txnext;
4710 txs->txs_ndesc = segs_needed;
4711
4712 /* Set up offload parameters for this packet. */
4713 if (m0->m_pkthdr.csum_flags &
4714 (M_CSUM_TSOv4|M_CSUM_TSOv6|
4715 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
4716 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
4717 if (wm_tx_offload(sc, txs, &cksumcmd,
4718 &cksumfields) != 0) {
4719 /* Error message already displayed. */
4720 bus_dmamap_unload(sc->sc_dmat, dmamap);
4721 continue;
4722 }
4723 } else {
4724 cksumcmd = 0;
4725 cksumfields = 0;
4726 }
4727
4728 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
4729
4730 /* Sync the DMA map. */
4731 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
4732 BUS_DMASYNC_PREWRITE);
4733
4734 /* Initialize the transmit descriptor. */
4735 for (nexttx = sc->sc_txnext, seg = 0;
4736 seg < dmamap->dm_nsegs; seg++) {
4737 for (seglen = dmamap->dm_segs[seg].ds_len,
4738 curaddr = dmamap->dm_segs[seg].ds_addr;
4739 seglen != 0;
4740 curaddr += curlen, seglen -= curlen,
4741 nexttx = WM_NEXTTX(sc, nexttx)) {
4742 curlen = seglen;
4743
4744 /*
4745 * So says the Linux driver:
4746 * Work around for premature descriptor
4747 * write-backs in TSO mode. Append a
4748 * 4-byte sentinel descriptor.
4749 */
4750 if (use_tso &&
4751 seg == dmamap->dm_nsegs - 1 &&
4752 curlen > 8)
4753 curlen -= 4;
4754
4755 wm_set_dma_addr(
4756 &sc->sc_txdescs[nexttx].wtx_addr,
4757 curaddr);
4758 sc->sc_txdescs[nexttx].wtx_cmdlen =
4759 htole32(cksumcmd | curlen);
4760 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
4761 0;
4762 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
4763 cksumfields;
4764 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
4765 lasttx = nexttx;
4766
4767 DPRINTF(WM_DEBUG_TX,
4768 ("%s: TX: desc %d: low %#" PRIx64 ", "
4769 "len %#04zx\n",
4770 device_xname(sc->sc_dev), nexttx,
4771 (uint64_t)curaddr, curlen));
4772 }
4773 }
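		/*
		 * Worked example of the sentinel above (a sketch): if the
		 * last TSO segment is 100 bytes, curlen drops to 96,
		 * leaving seglen = 4, so the inner loop emits one more
		 * 4-byte descriptor -- the sentinel.
		 */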
4774
4775 KASSERT(lasttx != -1);
4776
4777 /*
4778 * Set up the command byte on the last descriptor of
4779 * the packet. If we're in the interrupt delay window,
4780 * delay the interrupt.
4781 */
4782 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4783 htole32(WTX_CMD_EOP | WTX_CMD_RS);
4784
4785 /*
4786 * If VLANs are enabled and the packet has a VLAN tag, set
4787 * up the descriptor to encapsulate the packet for us.
4788 *
4789 * This is only valid on the last descriptor of the packet.
4790 */
4791 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4792 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4793 htole32(WTX_CMD_VLE);
4794 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
4795 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
4796 }
4797
4798 txs->txs_lastdesc = lasttx;
4799
4800 DPRINTF(WM_DEBUG_TX,
4801 ("%s: TX: desc %d: cmdlen 0x%08x\n",
4802 device_xname(sc->sc_dev),
4803 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
4804
4805 /* Sync the descriptors we're using. */
4806 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
4807 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4808
4809 /* Give the packet to the chip. */
4810 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
4811
4812 DPRINTF(WM_DEBUG_TX,
4813 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
4814
4815 DPRINTF(WM_DEBUG_TX,
4816 ("%s: TX: finished transmitting packet, job %d\n",
4817 device_xname(sc->sc_dev), sc->sc_txsnext));
4818
4819 /* Advance the tx pointer. */
4820 sc->sc_txfree -= txs->txs_ndesc;
4821 sc->sc_txnext = nexttx;
4822
4823 sc->sc_txsfree--;
4824 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
4825
4826 /* Pass the packet to any BPF listeners. */
4827 bpf_mtap(ifp, m0);
4828 }
4829
4830 if (m0 != NULL) {
4831 ifp->if_flags |= IFF_OACTIVE;
4832 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4833 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
4834 m_freem(m0);
4835 }
4836
4837 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
4838 /* No more slots; notify upper layer. */
4839 ifp->if_flags |= IFF_OACTIVE;
4840 }
4841
4842 if (sc->sc_txfree != ofree) {
4843 /* Set a watchdog timer in case the chip flakes out. */
4844 ifp->if_timer = 5;
4845 }
4846 }
4847
4848 /*
4849 * wm_nq_tx_offload:
4850 *
4851 * Set up TCP/IP checksumming parameters for the
4852 * specified packet, for NEWQUEUE devices
4853 */
4854 static int
4855 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
4856 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
4857 {
4858 struct mbuf *m0 = txs->txs_mbuf;
4859 struct m_tag *mtag;
4860 uint32_t vl_len, mssidx, cmdc;
4861 struct ether_header *eh;
4862 int offset, iphl;
4863
4864 /*
4865 * XXX It would be nice if the mbuf pkthdr had offset
4866 * fields for the protocol headers.
4867 */
4868 *cmdlenp = 0;
4869 *fieldsp = 0;
4870
4871 eh = mtod(m0, struct ether_header *);
4872 switch (htons(eh->ether_type)) {
4873 case ETHERTYPE_IP:
4874 case ETHERTYPE_IPV6:
4875 offset = ETHER_HDR_LEN;
4876 break;
4877
4878 case ETHERTYPE_VLAN:
4879 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4880 break;
4881
4882 default:
4883 /* Don't support this protocol or encapsulation. */
4884 *do_csum = false;
4885 return 0;
4886 }
4887 *do_csum = true;
4888 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
4889 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
4890
4891 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
4892 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
4893
4894 if ((m0->m_pkthdr.csum_flags &
4895 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
4896 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4897 } else {
4898 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4899 }
4900 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
4901 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
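	/*
	 * Worked example (a sketch): for an untagged IPv4/TCP packet with
	 * a 20-byte IP header, offset = 14 and iphl = 20, so at this point
	 * vl_len = (14 << NQTXC_VLLEN_MACLEN_SHIFT) |
	 * (20 << NQTXC_VLLEN_IPLEN_SHIFT).
	 */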
4902
4903 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4904 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
4905 << NQTXC_VLLEN_VLAN_SHIFT);
4906 *cmdlenp |= NQTX_CMD_VLE;
4907 }
4908
4909 mssidx = 0;
4910
4911 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4912 int hlen = offset + iphl;
4913 int tcp_hlen;
4914 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4915
4916 if (__predict_false(m0->m_len <
4917 (hlen + sizeof(struct tcphdr)))) {
4918 /*
4919 * TCP/IP headers are not in the first mbuf; we need
4920 * to do this the slow and painful way. Let's just
4921 * hope this doesn't happen very often.
4922 */
4923 struct tcphdr th;
4924
4925 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4926
4927 m_copydata(m0, hlen, sizeof(th), &th);
4928 if (v4) {
4929 struct ip ip;
4930
4931 m_copydata(m0, offset, sizeof(ip), &ip);
4932 ip.ip_len = 0;
4933 m_copyback(m0,
4934 offset + offsetof(struct ip, ip_len),
4935 sizeof(ip.ip_len), &ip.ip_len);
4936 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4937 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4938 } else {
4939 struct ip6_hdr ip6;
4940
4941 m_copydata(m0, offset, sizeof(ip6), &ip6);
4942 ip6.ip6_plen = 0;
4943 m_copyback(m0,
4944 offset + offsetof(struct ip6_hdr, ip6_plen),
4945 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4946 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4947 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4948 }
4949 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4950 sizeof(th.th_sum), &th.th_sum);
4951
4952 tcp_hlen = th.th_off << 2;
4953 } else {
4954 /*
4955 * TCP/IP headers are in the first mbuf; we can do
4956 * this the easy way.
4957 */
4958 struct tcphdr *th;
4959
4960 if (v4) {
4961 struct ip *ip =
4962 (void *)(mtod(m0, char *) + offset);
4963 th = (void *)(mtod(m0, char *) + hlen);
4964
4965 ip->ip_len = 0;
4966 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4967 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4968 } else {
4969 struct ip6_hdr *ip6 =
4970 (void *)(mtod(m0, char *) + offset);
4971 th = (void *)(mtod(m0, char *) + hlen);
4972
4973 ip6->ip6_plen = 0;
4974 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4975 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4976 }
4977 tcp_hlen = th->th_off << 2;
4978 }
4979 hlen += tcp_hlen;
4980 *cmdlenp |= NQTX_CMD_TSE;
4981
4982 if (v4) {
4983 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4984 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
4985 } else {
4986 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4987 *fieldsp |= NQTXD_FIELDS_TUXSM;
4988 }
4989 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
4990 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
4991 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
4992 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
4993 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
4994 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
4995 } else {
4996 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
4997 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
4998 }
4999
5000 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
5001 *fieldsp |= NQTXD_FIELDS_IXSM;
5002 cmdc |= NQTXC_CMD_IP4;
5003 }
5004
5005 if (m0->m_pkthdr.csum_flags &
5006 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5007 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5008 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5009 cmdc |= NQTXC_CMD_TCP;
5010 } else {
5011 cmdc |= NQTXC_CMD_UDP;
5012 }
5013 cmdc |= NQTXC_CMD_IP4;
5014 *fieldsp |= NQTXD_FIELDS_TUXSM;
5015 }
5016 if (m0->m_pkthdr.csum_flags &
5017 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5018 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5019 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5020 cmdc |= NQTXC_CMD_TCP;
5021 } else {
5022 cmdc |= NQTXC_CMD_UDP;
5023 }
5024 cmdc |= NQTXC_CMD_IP6;
5025 *fieldsp |= NQTXD_FIELDS_TUXSM;
5026 }
5027
5028 /* Fill in the context descriptor. */
5029 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5030 htole32(vl_len);
5031 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5032 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5033 htole32(cmdc);
5034 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5035 htole32(mssidx);
5036 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5037 DPRINTF(WM_DEBUG_TX,
5038 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5039 sc->sc_txnext, 0, vl_len));
5040 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5041 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5042 txs->txs_ndesc++;
5043 return 0;
5044 }
5045
5046 /*
5047 * wm_nq_start: [ifnet interface function]
5048 *
5049 * Start packet transmission on the interface for NEWQUEUE devices
5050 */
5051 static void
5052 wm_nq_start(struct ifnet *ifp)
5053 {
5054 struct wm_softc *sc = ifp->if_softc;
5055
5056 WM_TX_LOCK(sc);
5057 if (!sc->sc_stopping)
5058 wm_nq_start_locked(ifp);
5059 WM_TX_UNLOCK(sc);
5060 }
5061
5062 static void
5063 wm_nq_start_locked(struct ifnet *ifp)
5064 {
5065 struct wm_softc *sc = ifp->if_softc;
5066 struct mbuf *m0;
5067 struct m_tag *mtag;
5068 struct wm_txsoft *txs;
5069 bus_dmamap_t dmamap;
5070 int error, nexttx, lasttx = -1, seg, segs_needed;
5071 bool do_csum, sent;
5072
5073 KASSERT(WM_TX_LOCKED(sc));
5074
5075 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5076 return;
5077
5078 sent = false;
5079
5080 /*
5081 * Loop through the send queue, setting up transmit descriptors
5082 * until we drain the queue, or use up all available transmit
5083 * descriptors.
5084 */
5085 for (;;) {
5086 m0 = NULL;
5087
5088 /* Get a work queue entry. */
5089 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5090 wm_txintr(sc);
5091 if (sc->sc_txsfree == 0) {
5092 DPRINTF(WM_DEBUG_TX,
5093 ("%s: TX: no free job descriptors\n",
5094 device_xname(sc->sc_dev)));
5095 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5096 break;
5097 }
5098 }
5099
5100 /* Grab a packet off the queue. */
5101 IFQ_DEQUEUE(&ifp->if_snd, m0);
5102 if (m0 == NULL)
5103 break;
5104
5105 DPRINTF(WM_DEBUG_TX,
5106 ("%s: TX: have packet to transmit: %p\n",
5107 device_xname(sc->sc_dev), m0));
5108
5109 txs = &sc->sc_txsoft[sc->sc_txsnext];
5110 dmamap = txs->txs_dmamap;
5111
5112 /*
5113 * Load the DMA map. If this fails, the packet either
5114 * didn't fit in the allotted number of segments, or we
5115 * were short on resources. For the too-many-segments
5116 * case, we simply report an error and drop the packet,
5117 * since we can't sanely copy a jumbo packet to a single
5118 * buffer.
5119 */
5120 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5121 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5122 if (error) {
5123 if (error == EFBIG) {
5124 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5125 log(LOG_ERR, "%s: Tx packet consumes too many "
5126 "DMA segments, dropping...\n",
5127 device_xname(sc->sc_dev));
5128 wm_dump_mbuf_chain(sc, m0);
5129 m_freem(m0);
5130 continue;
5131 }
5132 /* Short on resources, just stop for now. */
5133 DPRINTF(WM_DEBUG_TX,
5134 ("%s: TX: dmamap load failed: %d\n",
5135 device_xname(sc->sc_dev), error));
5136 break;
5137 }
5138
5139 segs_needed = dmamap->dm_nsegs;
5140
5141 /*
5142 * Ensure we have enough descriptors free to describe
5143 * the packet. Note, we always reserve one descriptor
5144 * at the end of the ring due to the semantics of the
5145 * TDT register, plus one more in the event we need
5146 * to load offload context.
5147 */
5148 if (segs_needed > sc->sc_txfree - 2) {
5149 /*
5150 * Not enough free descriptors to transmit this
5151 * packet. We haven't committed anything yet,
5152 * so just unload the DMA map, put the packet
5153 			 * back on the queue, and punt. Notify the upper
5154 * layer that there are no more slots left.
5155 */
5156 DPRINTF(WM_DEBUG_TX,
5157 ("%s: TX: need %d (%d) descriptors, have %d\n",
5158 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5159 segs_needed, sc->sc_txfree - 1));
5160 ifp->if_flags |= IFF_OACTIVE;
5161 bus_dmamap_unload(sc->sc_dmat, dmamap);
5162 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5163 break;
5164 }
5165
5166 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5167
5168 DPRINTF(WM_DEBUG_TX,
5169 ("%s: TX: packet has %d (%d) DMA segments\n",
5170 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5171
5172 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5173
5174 /*
5175 * Store a pointer to the packet so that we can free it
5176 * later.
5177 *
5178 * Initially, we consider the number of descriptors the
5179 		 * packet uses to be the number of DMA segments. This may be
5180 * incremented by 1 if we do checksum offload (a descriptor
5181 * is used to set the checksum context).
5182 */
5183 txs->txs_mbuf = m0;
5184 txs->txs_firstdesc = sc->sc_txnext;
5185 txs->txs_ndesc = segs_needed;
5186
5187 /* Set up offload parameters for this packet. */
5188 uint32_t cmdlen, fields, dcmdlen;
5189 if (m0->m_pkthdr.csum_flags &
5190 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5191 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5192 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5193 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5194 &do_csum) != 0) {
5195 /* Error message already displayed. */
5196 bus_dmamap_unload(sc->sc_dmat, dmamap);
5197 continue;
5198 }
5199 } else {
5200 do_csum = false;
5201 cmdlen = 0;
5202 fields = 0;
5203 }
5204
5205 /* Sync the DMA map. */
5206 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5207 BUS_DMASYNC_PREWRITE);
5208
5209 /* Initialize the first transmit descriptor. */
5210 nexttx = sc->sc_txnext;
5211 if (!do_csum) {
5212 /* setup a legacy descriptor */
5213 wm_set_dma_addr(
5214 &sc->sc_txdescs[nexttx].wtx_addr,
5215 dmamap->dm_segs[0].ds_addr);
5216 sc->sc_txdescs[nexttx].wtx_cmdlen =
5217 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5218 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5219 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5220 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5221 NULL) {
5222 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5223 htole32(WTX_CMD_VLE);
5224 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5225 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5226 } else {
5227 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5228 }
5229 dcmdlen = 0;
5230 } else {
5231 /* setup an advanced data descriptor */
5232 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5233 htole64(dmamap->dm_segs[0].ds_addr);
5234 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5235 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5236 htole32(dmamap->dm_segs[0].ds_len | cmdlen );
5237 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5238 htole32(fields);
5239 DPRINTF(WM_DEBUG_TX,
5240 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5241 device_xname(sc->sc_dev), nexttx,
5242 (uint64_t)dmamap->dm_segs[0].ds_addr));
5243 DPRINTF(WM_DEBUG_TX,
5244 ("\t 0x%08x%08x\n", fields,
5245 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5246 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5247 }
5248
5249 lasttx = nexttx;
5250 nexttx = WM_NEXTTX(sc, nexttx);
5251 /*
5252 		 * Fill in the next descriptors. The legacy and advanced
5253 		 * formats are the same here.
5254 */
5255 for (seg = 1; seg < dmamap->dm_nsegs;
5256 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5257 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5258 htole64(dmamap->dm_segs[seg].ds_addr);
5259 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5260 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5261 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5262 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5263 lasttx = nexttx;
5264
5265 DPRINTF(WM_DEBUG_TX,
5266 ("%s: TX: desc %d: %#" PRIx64 ", "
5267 "len %#04zx\n",
5268 device_xname(sc->sc_dev), nexttx,
5269 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5270 dmamap->dm_segs[seg].ds_len));
5271 }
5272
5273 KASSERT(lasttx != -1);
5274
5275 /*
5276 * Set up the command byte on the last descriptor of
5277 * the packet. If we're in the interrupt delay window,
5278 * delay the interrupt.
5279 */
5280 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5281 (NQTX_CMD_EOP | NQTX_CMD_RS));
5282 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5283 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5284
5285 txs->txs_lastdesc = lasttx;
5286
5287 DPRINTF(WM_DEBUG_TX,
5288 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5289 device_xname(sc->sc_dev),
5290 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5291
5292 /* Sync the descriptors we're using. */
5293 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5294 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5295
5296 /* Give the packet to the chip. */
5297 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5298 sent = true;
5299
5300 DPRINTF(WM_DEBUG_TX,
5301 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5302
5303 DPRINTF(WM_DEBUG_TX,
5304 ("%s: TX: finished transmitting packet, job %d\n",
5305 device_xname(sc->sc_dev), sc->sc_txsnext));
5306
5307 /* Advance the tx pointer. */
5308 sc->sc_txfree -= txs->txs_ndesc;
5309 sc->sc_txnext = nexttx;
5310
5311 sc->sc_txsfree--;
5312 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5313
5314 /* Pass the packet to any BPF listeners. */
5315 bpf_mtap(ifp, m0);
5316 }
5317
5318 if (m0 != NULL) {
5319 ifp->if_flags |= IFF_OACTIVE;
5320 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5321 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5322 m_freem(m0);
5323 }
5324
5325 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5326 /* No more slots; notify upper layer. */
5327 ifp->if_flags |= IFF_OACTIVE;
5328 }
5329
5330 if (sent) {
5331 /* Set a watchdog timer in case the chip flakes out. */
5332 ifp->if_timer = 5;
5333 }
5334 }
5335
5336 /* Interrupt */
5337
5338 /*
5339 * wm_txintr:
5340 *
5341 * Helper; handle transmit interrupts.
5342 */
5343 static void
5344 wm_txintr(struct wm_softc *sc)
5345 {
5346 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5347 struct wm_txsoft *txs;
5348 uint8_t status;
5349 int i;
5350
5351 if (sc->sc_stopping)
5352 return;
5353
5354 ifp->if_flags &= ~IFF_OACTIVE;
5355
5356 /*
5357 * Go through the Tx list and free mbufs for those
5358 * frames which have been transmitted.
5359 */
5360 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5361 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
5362 txs = &sc->sc_txsoft[i];
5363
5364 DPRINTF(WM_DEBUG_TX,
5365 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
5366
5367 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
5368 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5369
5370 status =
5371 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
5372 if ((status & WTX_ST_DD) == 0) {
5373 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
5374 BUS_DMASYNC_PREREAD);
5375 break;
5376 }
5377
5378 DPRINTF(WM_DEBUG_TX,
5379 ("%s: TX: job %d done: descs %d..%d\n",
5380 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
5381 txs->txs_lastdesc));
5382
5383 /*
5384 * XXX We should probably be using the statistics
5385 * XXX registers, but I don't know if they exist
5386 * XXX on chips before the i82544.
5387 */
5388
5389 #ifdef WM_EVENT_COUNTERS
5390 if (status & WTX_ST_TU)
5391 WM_EVCNT_INCR(&sc->sc_ev_tu);
5392 #endif /* WM_EVENT_COUNTERS */
5393
5394 if (status & (WTX_ST_EC|WTX_ST_LC)) {
5395 ifp->if_oerrors++;
5396 if (status & WTX_ST_LC)
5397 log(LOG_WARNING, "%s: late collision\n",
5398 device_xname(sc->sc_dev));
5399 else if (status & WTX_ST_EC) {
5400 ifp->if_collisions += 16;
5401 log(LOG_WARNING, "%s: excessive collisions\n",
5402 device_xname(sc->sc_dev));
5403 }
5404 } else
5405 ifp->if_opackets++;
5406
5407 sc->sc_txfree += txs->txs_ndesc;
5408 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
5409 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5410 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5411 m_freem(txs->txs_mbuf);
5412 txs->txs_mbuf = NULL;
5413 }
5414
5415 /* Update the dirty transmit buffer pointer. */
5416 sc->sc_txsdirty = i;
5417 DPRINTF(WM_DEBUG_TX,
5418 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
5419
5420 /*
5421 * If there are no more pending transmissions, cancel the watchdog
5422 * timer.
5423 */
5424 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
5425 ifp->if_timer = 0;
5426 }
5427
5428 /*
5429 * wm_rxintr:
5430 *
5431 * Helper; handle receive interrupts.
5432 */
5433 static void
5434 wm_rxintr(struct wm_softc *sc)
5435 {
5436 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5437 struct wm_rxsoft *rxs;
5438 struct mbuf *m;
5439 int i, len;
5440 uint8_t status, errors;
5441 uint16_t vlantag;
5442
5443 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
5444 rxs = &sc->sc_rxsoft[i];
5445
5446 DPRINTF(WM_DEBUG_RX,
5447 ("%s: RX: checking descriptor %d\n",
5448 device_xname(sc->sc_dev), i));
5449
5450 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5451
5452 status = sc->sc_rxdescs[i].wrx_status;
5453 errors = sc->sc_rxdescs[i].wrx_errors;
5454 len = le16toh(sc->sc_rxdescs[i].wrx_len);
5455 vlantag = sc->sc_rxdescs[i].wrx_special;
5456
5457 if ((status & WRX_ST_DD) == 0) {
5458 /* We have processed all of the receive descriptors. */
5459 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
5460 break;
5461 }
5462
5463 if (__predict_false(sc->sc_rxdiscard)) {
5464 DPRINTF(WM_DEBUG_RX,
5465 ("%s: RX: discarding contents of descriptor %d\n",
5466 device_xname(sc->sc_dev), i));
5467 WM_INIT_RXDESC(sc, i);
5468 if (status & WRX_ST_EOP) {
5469 /* Reset our state. */
5470 DPRINTF(WM_DEBUG_RX,
5471 ("%s: RX: resetting rxdiscard -> 0\n",
5472 device_xname(sc->sc_dev)));
5473 sc->sc_rxdiscard = 0;
5474 }
5475 continue;
5476 }
5477
5478 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5479 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
5480
5481 m = rxs->rxs_mbuf;
5482
5483 /*
5484 * Add a new receive buffer to the ring, unless of
5485 * course the length is zero. Treat the latter as a
5486 * failed mapping.
5487 */
5488 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
5489 /*
5490 * Failed, throw away what we've done so
5491 * far, and discard the rest of the packet.
5492 */
5493 ifp->if_ierrors++;
5494 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5495 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5496 WM_INIT_RXDESC(sc, i);
5497 if ((status & WRX_ST_EOP) == 0)
5498 sc->sc_rxdiscard = 1;
5499 if (sc->sc_rxhead != NULL)
5500 m_freem(sc->sc_rxhead);
5501 WM_RXCHAIN_RESET(sc);
5502 DPRINTF(WM_DEBUG_RX,
5503 ("%s: RX: Rx buffer allocation failed, "
5504 "dropping packet%s\n", device_xname(sc->sc_dev),
5505 sc->sc_rxdiscard ? " (discard)" : ""));
5506 continue;
5507 }
5508
5509 m->m_len = len;
5510 sc->sc_rxlen += len;
5511 DPRINTF(WM_DEBUG_RX,
5512 ("%s: RX: buffer at %p len %d\n",
5513 device_xname(sc->sc_dev), m->m_data, len));
5514
5515 /* If this is not the end of the packet, keep looking. */
5516 if ((status & WRX_ST_EOP) == 0) {
5517 WM_RXCHAIN_LINK(sc, m);
5518 DPRINTF(WM_DEBUG_RX,
5519 ("%s: RX: not yet EOP, rxlen -> %d\n",
5520 device_xname(sc->sc_dev), sc->sc_rxlen));
5521 continue;
5522 }
5523
5524 /*
5525 * Okay, we have the entire packet now. The chip is
5526 * configured to include the FCS except on the I350, I354
5527 * and I21[01] (not all chips can be configured to strip
5528 * it), so we need to trim it. We may also need to adjust
5529 * the length of the previous mbuf in the chain if the
5530 * current mbuf is too short. Due to an erratum, RCTL_SECRC
5531 * is always set in the RCTL register on the I350, so the
5532 * FCS is already stripped there and must not be trimmed.
5533 */
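/*
 * Example: with ETHER_CRC_LEN == 4, if the final mbuf holds only
 * 2 bytes, those 2 bytes are discarded and the remaining 2 CRC
 * bytes are trimmed from the previous mbuf in the chain, which is
 * what the adjustment below implements.
 */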
5534 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
5535 && (sc->sc_type != WM_T_I210)
5536 && (sc->sc_type != WM_T_I211)) {
5537 if (m->m_len < ETHER_CRC_LEN) {
5538 sc->sc_rxtail->m_len
5539 -= (ETHER_CRC_LEN - m->m_len);
5540 m->m_len = 0;
5541 } else
5542 m->m_len -= ETHER_CRC_LEN;
5543 len = sc->sc_rxlen - ETHER_CRC_LEN;
5544 } else
5545 len = sc->sc_rxlen;
5546
5547 WM_RXCHAIN_LINK(sc, m);
5548
5549 *sc->sc_rxtailp = NULL;
5550 m = sc->sc_rxhead;
5551
5552 WM_RXCHAIN_RESET(sc);
5553
5554 DPRINTF(WM_DEBUG_RX,
5555 ("%s: RX: have entire packet, len -> %d\n",
5556 device_xname(sc->sc_dev), len));
5557
5558 /* If an error occurred, update stats and drop the packet. */
5559 if (errors &
5560 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
5561 if (errors & WRX_ER_SE)
5562 log(LOG_WARNING, "%s: symbol error\n",
5563 device_xname(sc->sc_dev));
5564 else if (errors & WRX_ER_SEQ)
5565 log(LOG_WARNING, "%s: receive sequence error\n",
5566 device_xname(sc->sc_dev));
5567 else if (errors & WRX_ER_CE)
5568 log(LOG_WARNING, "%s: CRC error\n",
5569 device_xname(sc->sc_dev));
5570 m_freem(m);
5571 continue;
5572 }
5573
5574 /* No errors. Receive the packet. */
5575 m->m_pkthdr.rcvif = ifp;
5576 m->m_pkthdr.len = len;
5577
5578 /*
5579 * If VLANs are enabled, VLAN packets have been unwrapped
5580 * for us. Associate the tag with the packet.
5581 */
5582 /* XXX should check for I350 and I354 */
5583 if ((status & WRX_ST_VP) != 0) {
5584 VLAN_INPUT_TAG(ifp, m,
5585 le16toh(vlantag),
5586 continue);
5587 }
5588
5589 /* Set up checksum info for this packet. */
5590 if ((status & WRX_ST_IXSM) == 0) {
5591 if (status & WRX_ST_IPCS) {
5592 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
5593 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
5594 if (errors & WRX_ER_IPE)
5595 m->m_pkthdr.csum_flags |=
5596 M_CSUM_IPv4_BAD;
5597 }
5598 if (status & WRX_ST_TCPCS) {
5599 /*
5600 * Note: we don't know if this was TCP or UDP,
5601 * so we just set both bits, and expect the
5602 * upper layers to deal.
5603 */
5604 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
5605 m->m_pkthdr.csum_flags |=
5606 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
5607 M_CSUM_TCPv6 | M_CSUM_UDPv6;
5608 if (errors & WRX_ER_TCPE)
5609 m->m_pkthdr.csum_flags |=
5610 M_CSUM_TCP_UDP_BAD;
5611 }
5612 }
5613
5614 ifp->if_ipackets++;
5615
5616 WM_RX_UNLOCK(sc);
5617
5618 /* Pass this up to any BPF listeners. */
5619 bpf_mtap(ifp, m);
5620
5621 /* Pass it on. */
5622 (*ifp->if_input)(ifp, m);
5623
5624 WM_RX_LOCK(sc);
5625
5626 if (sc->sc_stopping)
5627 break;
5628 }
5629
5630 /* Update the receive pointer. */
5631 sc->sc_rxptr = i;
5632
5633 DPRINTF(WM_DEBUG_RX,
5634 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
5635 }
5636
5637 /*
5638 * wm_linkintr_gmii:
5639 *
5640 * Helper; handle link interrupts for GMII.
5641 */
5642 static void
5643 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
5644 {
5645
5646 KASSERT(WM_TX_LOCKED(sc));
5647
5648 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5649 __func__));
5650
5651 if (icr & ICR_LSC) {
5652 DPRINTF(WM_DEBUG_LINK,
5653 ("%s: LINK: LSC -> mii_pollstat\n",
5654 device_xname(sc->sc_dev)));
5655 mii_pollstat(&sc->sc_mii);
5656 if (sc->sc_type == WM_T_82543) {
5657 int miistatus, active;
5658
5659 /*
5660 * With 82543, we need to force speed and
5661 * duplex on the MAC equal to what the PHY
5662 * speed and duplex configuration is.
5663 */
5664 miistatus = sc->sc_mii.mii_media_status;
5665
5666 if (miistatus & IFM_ACTIVE) {
5667 active = sc->sc_mii.mii_media_active;
5668 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5669 switch (IFM_SUBTYPE(active)) {
5670 case IFM_10_T:
5671 sc->sc_ctrl |= CTRL_SPEED_10;
5672 break;
5673 case IFM_100_TX:
5674 sc->sc_ctrl |= CTRL_SPEED_100;
5675 break;
5676 case IFM_1000_T:
5677 sc->sc_ctrl |= CTRL_SPEED_1000;
5678 break;
5679 default:
5680 /*
5681 * fiber?
5682 * Should not enter here.
5683 */
5684 printf("unknown media (%x)\n",
5685 active);
5686 break;
5687 }
5688 if (active & IFM_FDX)
5689 sc->sc_ctrl |= CTRL_FD;
5690 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5691 }
5692 } else if ((sc->sc_type == WM_T_ICH8)
5693 && (sc->sc_phytype == WMPHY_IGP_3)) {
5694 wm_kmrn_lock_loss_workaround_ich8lan(sc);
5695 } else if (sc->sc_type == WM_T_PCH) {
5696 wm_k1_gig_workaround_hv(sc,
5697 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
5698 }
5699
5700 if ((sc->sc_phytype == WMPHY_82578)
5701 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
5702 == IFM_1000_T)) {
5703
5704 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
5705 delay(200*1000); /* XXX too big */
5706
5707 /* Link stall fix for link up */
5708 wm_gmii_hv_writereg(sc->sc_dev, 1,
5709 HV_MUX_DATA_CTRL,
5710 HV_MUX_DATA_CTRL_GEN_TO_MAC
5711 | HV_MUX_DATA_CTRL_FORCE_SPEED);
5712 wm_gmii_hv_writereg(sc->sc_dev, 1,
5713 HV_MUX_DATA_CTRL,
5714 HV_MUX_DATA_CTRL_GEN_TO_MAC);
5715 }
5716 }
5717 } else if (icr & ICR_RXSEQ) {
5718 DPRINTF(WM_DEBUG_LINK,
5719 ("%s: LINK Receive sequence error\n",
5720 device_xname(sc->sc_dev)));
5721 }
5722 }
5723
5724 /*
5725 * wm_linkintr_tbi:
5726 *
5727 * Helper; handle link interrupts for TBI mode.
5728 */
5729 static void
5730 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
5731 {
5732 uint32_t status;
5733
5734 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5735 __func__));
5736
5737 status = CSR_READ(sc, WMREG_STATUS);
5738 if (icr & ICR_LSC) {
5739 if (status & STATUS_LU) {
5740 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
5741 device_xname(sc->sc_dev),
5742 (status & STATUS_FD) ? "FDX" : "HDX"));
5743 /*
5744 * NOTE: CTRL will update TFCE and RFCE automatically,
5745 * so we should update sc->sc_ctrl
5746 */
5747
5748 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5749 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5750 sc->sc_fcrtl &= ~FCRTL_XONE;
5751 if (status & STATUS_FD)
5752 sc->sc_tctl |=
5753 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5754 else
5755 sc->sc_tctl |=
5756 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5757 if (sc->sc_ctrl & CTRL_TFCE)
5758 sc->sc_fcrtl |= FCRTL_XONE;
5759 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5760 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5761 WMREG_OLD_FCRTL : WMREG_FCRTL,
5762 sc->sc_fcrtl);
5763 sc->sc_tbi_linkup = 1;
5764 } else {
5765 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
5766 device_xname(sc->sc_dev)));
5767 sc->sc_tbi_linkup = 0;
5768 }
5769 wm_tbi_set_linkled(sc);
5770 } else if (icr & ICR_RXSEQ) {
5771 DPRINTF(WM_DEBUG_LINK,
5772 ("%s: LINK: Receive sequence error\n",
5773 device_xname(sc->sc_dev)));
5774 }
5775 }
5776
5777 /*
5778 * wm_linkintr:
5779 *
5780 * Helper; handle link interrupts.
5781 */
5782 static void
5783 wm_linkintr(struct wm_softc *sc, uint32_t icr)
5784 {
5785
5786 if (sc->sc_flags & WM_F_HAS_MII)
5787 wm_linkintr_gmii(sc, icr);
5788 else
5789 wm_linkintr_tbi(sc, icr);
5790 }
5791
5792 /*
5793 * wm_intr:
5794 *
5795 * Interrupt service routine.
5796 */
5797 static int
5798 wm_intr(void *arg)
5799 {
5800 struct wm_softc *sc = arg;
5801 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5802 uint32_t icr;
5803 int handled = 0;
5804
5805 while (1 /* CONSTCOND */) {
5806 icr = CSR_READ(sc, WMREG_ICR);
5807 if ((icr & sc->sc_icr) == 0)
5808 break;
5809 rnd_add_uint32(&sc->rnd_source, icr);
5810
5811 WM_RX_LOCK(sc);
5812
5813 if (sc->sc_stopping) {
5814 WM_RX_UNLOCK(sc);
5815 break;
5816 }
5817
5818 handled = 1;
5819
5820 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
5821 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
5822 DPRINTF(WM_DEBUG_RX,
5823 ("%s: RX: got Rx intr 0x%08x\n",
5824 device_xname(sc->sc_dev),
5825 icr & (ICR_RXDMT0|ICR_RXT0)));
5826 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
5827 }
5828 #endif
5829 wm_rxintr(sc);
5830
5831 WM_RX_UNLOCK(sc);
5832 WM_TX_LOCK(sc);
5833
5834 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
5835 if (icr & ICR_TXDW) {
5836 DPRINTF(WM_DEBUG_TX,
5837 ("%s: TX: got TXDW interrupt\n",
5838 device_xname(sc->sc_dev)));
5839 WM_EVCNT_INCR(&sc->sc_ev_txdw);
5840 }
5841 #endif
5842 wm_txintr(sc);
5843
5844 if (icr & (ICR_LSC|ICR_RXSEQ)) {
5845 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
5846 wm_linkintr(sc, icr);
5847 }
5848
5849 WM_TX_UNLOCK(sc);
5850
5851 if (icr & ICR_RXO) {
5852 #if defined(WM_DEBUG)
5853 log(LOG_WARNING, "%s: Receive overrun\n",
5854 device_xname(sc->sc_dev));
5855 #endif /* defined(WM_DEBUG) */
5856 }
5857 }
5858
5859 if (handled) {
5860 /* Try to get more packets going. */
5861 ifp->if_start(ifp);
5862 }
5863
5864 return handled;
5865 }
5866
5867 /*
5868 * Media related.
5869 * GMII, SGMII, TBI (and SERDES)
5870 */
5871
5872 /* GMII related */
5873
5874 /*
5875 * wm_gmii_reset:
5876 *
5877 * Reset the PHY.
5878 */
5879 static void
5880 wm_gmii_reset(struct wm_softc *sc)
5881 {
5882 uint32_t reg;
5883 int rv;
5884
5885 /* get phy semaphore */
5886 switch (sc->sc_type) {
5887 case WM_T_82571:
5888 case WM_T_82572:
5889 case WM_T_82573:
5890 case WM_T_82574:
5891 case WM_T_82583:
5892 /* XXX should get sw semaphore, too */
5893 rv = wm_get_swsm_semaphore(sc);
5894 break;
5895 case WM_T_82575:
5896 case WM_T_82576:
5897 case WM_T_82580:
5898 case WM_T_I350:
5899 case WM_T_I354:
5900 case WM_T_I210:
5901 case WM_T_I211:
5902 case WM_T_80003:
5903 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5904 break;
5905 case WM_T_ICH8:
5906 case WM_T_ICH9:
5907 case WM_T_ICH10:
5908 case WM_T_PCH:
5909 case WM_T_PCH2:
5910 case WM_T_PCH_LPT:
5911 rv = wm_get_swfwhw_semaphore(sc);
5912 break;
5913 default:
5914 /* nothing to do */
5915 rv = 0;
5916 break;
5917 }
5918 if (rv != 0) {
5919 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5920 __func__);
5921 return;
5922 }
5923
5924 switch (sc->sc_type) {
5925 case WM_T_82542_2_0:
5926 case WM_T_82542_2_1:
5927 /* null */
5928 break;
5929 case WM_T_82543:
5930 /*
5931 * With 82543, we need to force speed and duplex on the MAC
5932 * equal to what the PHY speed and duplex configuration is.
5933 * In addition, we need to perform a hardware reset on the PHY
5934 * to take it out of reset.
5935 */
5936 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5937 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5938
5939 /* The PHY reset pin is active-low. */
5940 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5941 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
5942 CTRL_EXT_SWDPIN(4));
5943 reg |= CTRL_EXT_SWDPIO(4);
5944
5945 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5946 CSR_WRITE_FLUSH(sc);
5947 delay(10*1000);
5948
5949 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
5950 CSR_WRITE_FLUSH(sc);
5951 delay(150);
5952 #if 0
5953 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
5954 #endif
5955 delay(20*1000); /* XXX extra delay to get PHY ID? */
5956 break;
5957 case WM_T_82544: /* reset 10000us */
5958 case WM_T_82540:
5959 case WM_T_82545:
5960 case WM_T_82545_3:
5961 case WM_T_82546:
5962 case WM_T_82546_3:
5963 case WM_T_82541:
5964 case WM_T_82541_2:
5965 case WM_T_82547:
5966 case WM_T_82547_2:
5967 case WM_T_82571: /* reset 100us */
5968 case WM_T_82572:
5969 case WM_T_82573:
5970 case WM_T_82574:
5971 case WM_T_82575:
5972 case WM_T_82576:
5973 case WM_T_82580:
5974 case WM_T_I350:
5975 case WM_T_I354:
5976 case WM_T_I210:
5977 case WM_T_I211:
5978 case WM_T_82583:
5979 case WM_T_80003:
5980 /* generic reset */
5981 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5982 CSR_WRITE_FLUSH(sc);
5983 delay(20000);
5984 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5985 CSR_WRITE_FLUSH(sc);
5986 delay(20000);
5987
5988 if ((sc->sc_type == WM_T_82541)
5989 || (sc->sc_type == WM_T_82541_2)
5990 || (sc->sc_type == WM_T_82547)
5991 || (sc->sc_type == WM_T_82547_2)) {
5992 /* workarounds for igp are done in igp_reset() */
5993 /* XXX add code to set LED after phy reset */
5994 }
5995 break;
5996 case WM_T_ICH8:
5997 case WM_T_ICH9:
5998 case WM_T_ICH10:
5999 case WM_T_PCH:
6000 case WM_T_PCH2:
6001 case WM_T_PCH_LPT:
6002 /* generic reset */
6003 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6004 CSR_WRITE_FLUSH(sc);
6005 delay(100);
6006 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6007 CSR_WRITE_FLUSH(sc);
6008 delay(150);
6009 break;
6010 default:
6011 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6012 __func__);
6013 break;
6014 }
6015
6016 /* release PHY semaphore */
6017 switch (sc->sc_type) {
6018 case WM_T_82571:
6019 case WM_T_82572:
6020 case WM_T_82573:
6021 case WM_T_82574:
6022 case WM_T_82583:
6023 /* XXX should put sw semaphore, too */
6024 wm_put_swsm_semaphore(sc);
6025 break;
6026 case WM_T_82575:
6027 case WM_T_82576:
6028 case WM_T_82580:
6029 case WM_T_I350:
6030 case WM_T_I354:
6031 case WM_T_I210:
6032 case WM_T_I211:
6033 case WM_T_80003:
6034 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6035 break;
6036 case WM_T_ICH8:
6037 case WM_T_ICH9:
6038 case WM_T_ICH10:
6039 case WM_T_PCH:
6040 case WM_T_PCH2:
6041 case WM_T_PCH_LPT:
6042 wm_put_swfwhw_semaphore(sc);
6043 break;
6044 default:
6045 /* nothing to do */
6047 break;
6048 }
6049
6050 /* get_cfg_done */
6051 wm_get_cfg_done(sc);
6052
6053 /* extra setup */
6054 switch (sc->sc_type) {
6055 case WM_T_82542_2_0:
6056 case WM_T_82542_2_1:
6057 case WM_T_82543:
6058 case WM_T_82544:
6059 case WM_T_82540:
6060 case WM_T_82545:
6061 case WM_T_82545_3:
6062 case WM_T_82546:
6063 case WM_T_82546_3:
6064 case WM_T_82541_2:
6065 case WM_T_82547_2:
6066 case WM_T_82571:
6067 case WM_T_82572:
6068 case WM_T_82573:
6069 case WM_T_82574:
6070 case WM_T_82575:
6071 case WM_T_82576:
6072 case WM_T_82580:
6073 case WM_T_I350:
6074 case WM_T_I354:
6075 case WM_T_I210:
6076 case WM_T_I211:
6077 case WM_T_82583:
6078 case WM_T_80003:
6079 /* null */
6080 break;
6081 case WM_T_82541:
6082 case WM_T_82547:
6083 /* XXX Actively configure the LED after PHY reset */
6084 break;
6085 case WM_T_ICH8:
6086 case WM_T_ICH9:
6087 case WM_T_ICH10:
6088 case WM_T_PCH:
6089 case WM_T_PCH2:
6090 case WM_T_PCH_LPT:
6091 /* Allow time for h/w to get to a quiescent state after reset */
6092 delay(10*1000);
6093
6094 if (sc->sc_type == WM_T_PCH)
6095 wm_hv_phy_workaround_ich8lan(sc);
6096
6097 if (sc->sc_type == WM_T_PCH2)
6098 wm_lv_phy_workaround_ich8lan(sc);
6099
6100 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6101 /*
6102 * dummy read to clear the phy wakeup bit after lcd
6103 * reset
6104 */
6105 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6106 }
6107
6108 /*
6109 * XXX Configure the LCD with the extended configuration region
6110 * in NVM
6111 */
6112
6113 /* Configure the LCD with the OEM bits in NVM */
6114 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6115 || (sc->sc_type == WM_T_PCH_LPT)) {
6116 /*
6117 * Disable LPLU.
6118 * XXX It seems that 82567 has LPLU, too.
6119 */
6120 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6121 reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6122 reg |= HV_OEM_BITS_ANEGNOW;
6123 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6124 }
6125 break;
6126 default:
6127 panic("%s: unknown type\n", __func__);
6128 break;
6129 }
6130 }
6131
6132 /*
6133 * wm_get_phy_id_82575:
6134 *
6135 * Return PHY ID. Return -1 if it failed.
6136 */
6137 static int
6138 wm_get_phy_id_82575(struct wm_softc *sc)
6139 {
6140 uint32_t reg;
6141 int phyid = -1;
6142
6143 /* XXX */
6144 if ((sc->sc_flags & WM_F_SGMII) == 0)
6145 return -1;
6146
6147 if (wm_sgmii_uses_mdio(sc)) {
6148 switch (sc->sc_type) {
6149 case WM_T_82575:
6150 case WM_T_82576:
6151 reg = CSR_READ(sc, WMREG_MDIC);
6152 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6153 break;
6154 case WM_T_82580:
6155 case WM_T_I350:
6156 case WM_T_I354:
6157 case WM_T_I210:
6158 case WM_T_I211:
6159 reg = CSR_READ(sc, WMREG_MDICNFG);
6160 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6161 break;
6162 default:
6163 return -1;
6164 }
6165 }
6166
6167 return phyid;
6168 }
6169
6170
6171 /*
6172 * wm_gmii_mediainit:
6173 *
6174 * Initialize media for use on 1000BASE-T devices.
6175 */
6176 static void
6177 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6178 {
6179 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6180 struct mii_data *mii = &sc->sc_mii;
6181 uint32_t reg;
6182
6183 /* We have GMII. */
6184 sc->sc_flags |= WM_F_HAS_MII;
6185
6186 if (sc->sc_type == WM_T_80003)
6187 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6188 else
6189 sc->sc_tipg = TIPG_1000T_DFLT;
6190
6191 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
6192 if ((sc->sc_type == WM_T_82580)
6193 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6194 || (sc->sc_type == WM_T_I211)) {
6195 reg = CSR_READ(sc, WMREG_PHPM);
6196 reg &= ~PHPM_GO_LINK_D;
6197 CSR_WRITE(sc, WMREG_PHPM, reg);
6198 }
6199
6200 /*
6201 * Let the chip set speed/duplex on its own based on
6202 * signals from the PHY.
6203 * XXXbouyer - I'm not sure this is right for the 80003,
6204 * the em driver only sets CTRL_SLU here - but it seems to work.
6205 */
6206 sc->sc_ctrl |= CTRL_SLU;
6207 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6208
6209 /* Initialize our media structures and probe the GMII. */
6210 mii->mii_ifp = ifp;
6211
6212 /*
6213 * Determine the PHY access method.
6214 *
6215 * For SGMII, use SGMII specific method.
6216 *
6217 * For some devices, we can determine the PHY access method
6218 * from sc_type.
6219 *
6220 * For ICH8 variants, it's difficult to determine the PHY access
6221 * method by sc_type, so use the PCI product ID for some devices.
6222 * For other ICH8 variants, try the igp method first. If the PHY
6223 * can't be detected that way, fall back to the bm method.
6224 */
6225 switch (prodid) {
6226 case PCI_PRODUCT_INTEL_PCH_M_LM:
6227 case PCI_PRODUCT_INTEL_PCH_M_LC:
6228 /* 82577 */
6229 sc->sc_phytype = WMPHY_82577;
6230 mii->mii_readreg = wm_gmii_hv_readreg;
6231 mii->mii_writereg = wm_gmii_hv_writereg;
6232 break;
6233 case PCI_PRODUCT_INTEL_PCH_D_DM:
6234 case PCI_PRODUCT_INTEL_PCH_D_DC:
6235 /* 82578 */
6236 sc->sc_phytype = WMPHY_82578;
6237 mii->mii_readreg = wm_gmii_hv_readreg;
6238 mii->mii_writereg = wm_gmii_hv_writereg;
6239 break;
6240 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6241 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6242 /* 82579 */
6243 sc->sc_phytype = WMPHY_82579;
6244 mii->mii_readreg = wm_gmii_hv_readreg;
6245 mii->mii_writereg = wm_gmii_hv_writereg;
6246 break;
6247 case PCI_PRODUCT_INTEL_I217_LM:
6248 case PCI_PRODUCT_INTEL_I217_V:
6249 case PCI_PRODUCT_INTEL_I218_LM:
6250 case PCI_PRODUCT_INTEL_I218_V:
6251 /* I21[78] */
6252 mii->mii_readreg = wm_gmii_hv_readreg;
6253 mii->mii_writereg = wm_gmii_hv_writereg;
6254 break;
6255 case PCI_PRODUCT_INTEL_82801I_BM:
6256 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6257 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6258 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6259 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6260 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6261 /* 82567 */
6262 sc->sc_phytype = WMPHY_BM;
6263 mii->mii_readreg = wm_gmii_bm_readreg;
6264 mii->mii_writereg = wm_gmii_bm_writereg;
6265 break;
6266 default:
6267 if (((sc->sc_flags & WM_F_SGMII) != 0)
6268 && !wm_sgmii_uses_mdio(sc)) {
6269 mii->mii_readreg = wm_sgmii_readreg;
6270 mii->mii_writereg = wm_sgmii_writereg;
6271 } else if (sc->sc_type >= WM_T_80003) {
6272 mii->mii_readreg = wm_gmii_i80003_readreg;
6273 mii->mii_writereg = wm_gmii_i80003_writereg;
6274 } else if (sc->sc_type >= WM_T_I210) {
6275 mii->mii_readreg = wm_gmii_i82544_readreg;
6276 mii->mii_writereg = wm_gmii_i82544_writereg;
6277 } else if (sc->sc_type >= WM_T_82580) {
6278 sc->sc_phytype = WMPHY_82580;
6279 mii->mii_readreg = wm_gmii_82580_readreg;
6280 mii->mii_writereg = wm_gmii_82580_writereg;
6281 } else if (sc->sc_type >= WM_T_82544) {
6282 mii->mii_readreg = wm_gmii_i82544_readreg;
6283 mii->mii_writereg = wm_gmii_i82544_writereg;
6284 } else {
6285 mii->mii_readreg = wm_gmii_i82543_readreg;
6286 mii->mii_writereg = wm_gmii_i82543_writereg;
6287 }
6288 break;
6289 }
6290 mii->mii_statchg = wm_gmii_statchg;
6291
6292 wm_gmii_reset(sc);
6293
6294 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6295 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6296 wm_gmii_mediastatus);
6297
6298 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6299 || (sc->sc_type == WM_T_82580)
6300 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6301 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6302 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6303 /* Attach only one port */
6304 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6305 MII_OFFSET_ANY, MIIF_DOPAUSE);
6306 } else {
6307 int i, id;
6308 uint32_t ctrl_ext;
6309
6310 id = wm_get_phy_id_82575(sc);
6311 if (id != -1) {
6312 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6313 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6314 }
6315 if ((id == -1)
6316 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6317 /* Power on sgmii phy if it is disabled */
6318 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6319 CSR_WRITE(sc, WMREG_CTRL_EXT,
6320 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6321 CSR_WRITE_FLUSH(sc);
6322 delay(300*1000); /* XXX too long */
6323
6324 /* Try PHY addresses 1 through 7 */
6325 for (i = 1; i < 8; i++)
6326 mii_attach(sc->sc_dev, &sc->sc_mii,
6327 0xffffffff, i, MII_OFFSET_ANY,
6328 MIIF_DOPAUSE);
6329
6330 /* restore previous sfp cage power state */
6331 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6332 }
6333 }
6334 } else {
6335 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6336 MII_OFFSET_ANY, MIIF_DOPAUSE);
6337 }
6338
6339 /*
6340 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6341 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6342 */
6343 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6344 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6345 wm_set_mdio_slow_mode_hv(sc);
6346 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6347 MII_OFFSET_ANY, MIIF_DOPAUSE);
6348 }
6349
6350 /*
6351 * (For ICH8 variants)
6352 * If PHY detection failed, use BM's r/w function and retry.
6353 */
6354 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6355 /* if failed, retry with *_bm_* */
6356 mii->mii_readreg = wm_gmii_bm_readreg;
6357 mii->mii_writereg = wm_gmii_bm_writereg;
6358
6359 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6360 MII_OFFSET_ANY, MIIF_DOPAUSE);
6361 }
6362
6363 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6364 /* No PHY was found */
6365 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6366 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6367 sc->sc_phytype = WMPHY_NONE;
6368 } else {
6369 /*
6370 * PHY Found!
6371 * Check PHY type.
6372 */
6373 uint32_t model;
6374 struct mii_softc *child;
6375
6376 child = LIST_FIRST(&mii->mii_phys);
6377 if (device_is_a(child->mii_dev, "igphy")) {
6378 struct igphy_softc *isc = (struct igphy_softc *)child;
6379
6380 model = isc->sc_mii.mii_mpd_model;
6381 if (model == MII_MODEL_yyINTEL_I82566)
6382 sc->sc_phytype = WMPHY_IGP_3;
6383 }
6384
6385 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6386 }
6387 }
6388
6389 /*
6390 * wm_gmii_mediastatus: [ifmedia interface function]
6391 *
6392 * Get the current interface media status on a 1000BASE-T device.
6393 */
6394 static void
6395 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6396 {
6397 struct wm_softc *sc = ifp->if_softc;
6398
6399 ether_mediastatus(ifp, ifmr);
6400 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6401 | sc->sc_flowflags;
6402 }
6403
6404 /*
6405 * wm_gmii_mediachange: [ifmedia interface function]
6406 *
6407 * Set hardware to newly-selected media on a 1000BASE-T device.
6408 */
6409 static int
6410 wm_gmii_mediachange(struct ifnet *ifp)
6411 {
6412 struct wm_softc *sc = ifp->if_softc;
6413 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6414 int rc;
6415
6416 if ((ifp->if_flags & IFF_UP) == 0)
6417 return 0;
6418
6419 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6420 sc->sc_ctrl |= CTRL_SLU;
6421 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6422 || (sc->sc_type > WM_T_82543)) {
6423 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6424 } else {
6425 sc->sc_ctrl &= ~CTRL_ASDE;
6426 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6427 if (ife->ifm_media & IFM_FDX)
6428 sc->sc_ctrl |= CTRL_FD;
6429 switch (IFM_SUBTYPE(ife->ifm_media)) {
6430 case IFM_10_T:
6431 sc->sc_ctrl |= CTRL_SPEED_10;
6432 break;
6433 case IFM_100_TX:
6434 sc->sc_ctrl |= CTRL_SPEED_100;
6435 break;
6436 case IFM_1000_T:
6437 sc->sc_ctrl |= CTRL_SPEED_1000;
6438 break;
6439 default:
6440 panic("wm_gmii_mediachange: bad media 0x%x",
6441 ife->ifm_media);
6442 }
6443 }
6444 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6445 if (sc->sc_type <= WM_T_82543)
6446 wm_gmii_reset(sc);
6447
6448 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6449 return 0;
6450 return rc;
6451 }
6452
6453 #define MDI_IO CTRL_SWDPIN(2)
6454 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6455 #define MDI_CLK CTRL_SWDPIN(3)
6456
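/*
 * wm_i82543_mii_sendbits:
 *
 *	Clock out the most significant 'nbits' bits of 'data' on the
 *	bit-banged MII management interface, MSB first.
 */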
6457 static void
6458 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6459 {
6460 uint32_t i, v;
6461
6462 v = CSR_READ(sc, WMREG_CTRL);
6463 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6464 v |= MDI_DIR | CTRL_SWDPIO(3);
6465
6466 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6467 if (data & i)
6468 v |= MDI_IO;
6469 else
6470 v &= ~MDI_IO;
6471 CSR_WRITE(sc, WMREG_CTRL, v);
6472 CSR_WRITE_FLUSH(sc);
6473 delay(10);
6474 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6475 CSR_WRITE_FLUSH(sc);
6476 delay(10);
6477 CSR_WRITE(sc, WMREG_CTRL, v);
6478 CSR_WRITE_FLUSH(sc);
6479 delay(10);
6480 }
6481 }
6482
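/*
 * wm_i82543_mii_recvbits:
 *
 *	Clock in 16 bits from the bit-banged MII management interface,
 *	bracketed by turnaround clock cycles.
 */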
6483 static uint32_t
6484 wm_i82543_mii_recvbits(struct wm_softc *sc)
6485 {
6486 uint32_t v, i, data = 0;
6487
6488 v = CSR_READ(sc, WMREG_CTRL);
6489 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6490 v |= CTRL_SWDPIO(3);
6491
6492 CSR_WRITE(sc, WMREG_CTRL, v);
6493 CSR_WRITE_FLUSH(sc);
6494 delay(10);
6495 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6496 CSR_WRITE_FLUSH(sc);
6497 delay(10);
6498 CSR_WRITE(sc, WMREG_CTRL, v);
6499 CSR_WRITE_FLUSH(sc);
6500 delay(10);
6501
6502 for (i = 0; i < 16; i++) {
6503 data <<= 1;
6504 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6505 CSR_WRITE_FLUSH(sc);
6506 delay(10);
6507 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6508 data |= 1;
6509 CSR_WRITE(sc, WMREG_CTRL, v);
6510 CSR_WRITE_FLUSH(sc);
6511 delay(10);
6512 }
6513
6514 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6515 CSR_WRITE_FLUSH(sc);
6516 delay(10);
6517 CSR_WRITE(sc, WMREG_CTRL, v);
6518 CSR_WRITE_FLUSH(sc);
6519 delay(10);
6520
6521 return data;
6522 }
6523
6524 #undef MDI_IO
6525 #undef MDI_DIR
6526 #undef MDI_CLK
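/*
 * Together, the helpers above bit-bang IEEE 802.3 clause 22
 * management frames on the software-definable pins: the caller
 * shifts out a 32-bit preamble of ones, then the start bits, the
 * opcode and the 5-bit PHY and register addresses (MSB first),
 * after which wm_i82543_mii_recvbits() idles through the turnaround
 * and clocks in the 16 data bits. Each bit cell is a low-high-low
 * pulse on MDI_CLK with roughly 10us per phase.
 */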
6527
6528 /*
6529 * wm_gmii_i82543_readreg: [mii interface function]
6530 *
6531 * Read a PHY register on the GMII (i82543 version).
6532 */
6533 static int
6534 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6535 {
6536 struct wm_softc *sc = device_private(self);
6537 int rv;
6538
6539 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6540 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
6541 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6542 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
6543
6544 DPRINTF(WM_DEBUG_GMII,
6545 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6546 device_xname(sc->sc_dev), phy, reg, rv));
6547
6548 return rv;
6549 }
6550
6551 /*
6552 * wm_gmii_i82543_writereg: [mii interface function]
6553 *
6554 * Write a PHY register on the GMII (i82543 version).
6555 */
6556 static void
6557 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6558 {
6559 struct wm_softc *sc = device_private(self);
6560
6561 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6562 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6563 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6564 (MII_COMMAND_START << 30), 32);
6565 }
6566
6567 /*
6568 * wm_gmii_i82544_readreg: [mii interface function]
6569 *
6570 * Read a PHY register on the GMII.
6571 */
6572 static int
6573 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6574 {
6575 struct wm_softc *sc = device_private(self);
6576 uint32_t mdic = 0;
6577 int i, rv;
6578
6579 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6580 MDIC_REGADD(reg));
6581
6582 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6583 mdic = CSR_READ(sc, WMREG_MDIC);
6584 if (mdic & MDIC_READY)
6585 break;
6586 delay(50);
6587 }
6588
6589 if ((mdic & MDIC_READY) == 0) {
6590 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6591 device_xname(sc->sc_dev), phy, reg);
6592 rv = 0;
6593 } else if (mdic & MDIC_E) {
6594 #if 0 /* This is normal if no PHY is present. */
6595 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6596 device_xname(sc->sc_dev), phy, reg);
6597 #endif
6598 rv = 0;
6599 } else {
6600 rv = MDIC_DATA(mdic);
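/* A read of all ones usually means that no PHY answered. */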
6601 if (rv == 0xffff)
6602 rv = 0;
6603 }
6604
6605 return rv;
6606 }
6607
6608 /*
6609 * wm_gmii_i82544_writereg: [mii interface function]
6610 *
6611 * Write a PHY register on the GMII.
6612 */
6613 static void
6614 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6615 {
6616 struct wm_softc *sc = device_private(self);
6617 uint32_t mdic = 0;
6618 int i;
6619
6620 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6621 MDIC_REGADD(reg) | MDIC_DATA(val));
6622
6623 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6624 mdic = CSR_READ(sc, WMREG_MDIC);
6625 if (mdic & MDIC_READY)
6626 break;
6627 delay(50);
6628 }
6629
6630 if ((mdic & MDIC_READY) == 0)
6631 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6632 device_xname(sc->sc_dev), phy, reg);
6633 else if (mdic & MDIC_E)
6634 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6635 device_xname(sc->sc_dev), phy, reg);
6636 }
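#if 0
/*
 * Illustrative sketch (not part of the driver): a read-modify-write
 * of a standard clause 22 PHY register through the MDIC accessors
 * above. MII_BMCR and BMCR_LOOP come from <dev/mii/mii.h>; the
 * function name and the loopback example are hypothetical, and a
 * real caller must hold the appropriate PHY semaphore if the chip
 * requires one.
 */
static void
wm_example_mdic_rmw(device_t self, int phy)
{
	int bmcr;

	/* Read the basic mode control register, set a bit, write back. */
	bmcr = wm_gmii_i82544_readreg(self, phy, MII_BMCR);
	bmcr |= BMCR_LOOP;
	wm_gmii_i82544_writereg(self, phy, MII_BMCR, bmcr);
}
#endif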
6637
6638 /*
6639 * wm_gmii_i80003_readreg: [mii interface function]
6640 *
6641 * Read a PHY register on the Kumeran interface (80003).
6642 * This could be handled by the PHY layer if we didn't have to lock
6643 * the resource ...
6644 */
6645 static int
6646 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6647 {
6648 struct wm_softc *sc = device_private(self);
6649 int sem;
6650 int rv;
6651
6652 if (phy != 1) /* only one PHY on kumeran bus */
6653 return 0;
6654
6655 sem = swfwphysem[sc->sc_funcid];
6656 if (wm_get_swfw_semaphore(sc, sem)) {
6657 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6658 __func__);
6659 return 0;
6660 }
6661
6662 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6663 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6664 reg >> GG82563_PAGE_SHIFT);
6665 } else {
6666 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6667 reg >> GG82563_PAGE_SHIFT);
6668 }
6669 /* Wait another 200us to work around a bug in the MDIC ready bit */
6670 delay(200);
6671 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6672 delay(200);
6673
6674 wm_put_swfw_semaphore(sc, sem);
6675 return rv;
6676 }
6677
6678 /*
6679 * wm_gmii_i80003_writereg: [mii interface function]
6680 *
6681 * Write a PHY register on the Kumeran interface (80003).
6682 * This could be handled by the PHY layer if we didn't have to lock
6683 * the resource ...
6684 */
6685 static void
6686 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6687 {
6688 struct wm_softc *sc = device_private(self);
6689 int sem;
6690
6691 if (phy != 1) /* only one PHY on kumeran bus */
6692 return;
6693
6694 sem = swfwphysem[sc->sc_funcid];
6695 if (wm_get_swfw_semaphore(sc, sem)) {
6696 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6697 __func__);
6698 return;
6699 }
6700
6701 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6702 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6703 reg >> GG82563_PAGE_SHIFT);
6704 } else {
6705 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6706 reg >> GG82563_PAGE_SHIFT);
6707 }
6708 /* Wait another 200us to work around a bug in the MDIC ready bit */
6709 delay(200);
6710 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6711 delay(200);
6712
6713 wm_put_swfw_semaphore(sc, sem);
6714 }
6715
6716 /*
6717 * wm_gmii_bm_readreg: [mii interface function]
6718 *
6719 * Read a PHY register on the BM PHY (82567 and related ICH8/9/10
6720 * variants). This could be handled by the PHY layer if we didn't
6721 * have to lock the resource ...
6722 */
6723 static int
6724 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6725 {
6726 struct wm_softc *sc = device_private(self);
6727 int sem;
6728 int rv;
6729
6730 sem = swfwphysem[sc->sc_funcid];
6731 if (wm_get_swfw_semaphore(sc, sem)) {
6732 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6733 __func__);
6734 return 0;
6735 }
6736
6737 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6738 if (phy == 1)
6739 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6740 reg);
6741 else
6742 wm_gmii_i82544_writereg(self, phy,
6743 GG82563_PHY_PAGE_SELECT,
6744 reg >> GG82563_PAGE_SHIFT);
6745 }
6746
6747 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6748 wm_put_swfw_semaphore(sc, sem);
6749 return rv;
6750 }
6751
6752 /*
6753 * wm_gmii_bm_writereg: [mii interface function]
6754 *
6755 * Write a PHY register on the BM PHY (82567 and related ICH8/9/10
6756 * variants). This could be handled by the PHY layer if we didn't
6757 * have to lock the resource ...
6758 */
6759 static void
6760 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6761 {
6762 struct wm_softc *sc = device_private(self);
6763 int sem;
6764
6765 sem = swfwphysem[sc->sc_funcid];
6766 if (wm_get_swfw_semaphore(sc, sem)) {
6767 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6768 __func__);
6769 return;
6770 }
6771
6772 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6773 if (phy == 1)
6774 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6775 reg);
6776 else
6777 wm_gmii_i82544_writereg(self, phy,
6778 GG82563_PHY_PAGE_SELECT,
6779 reg >> GG82563_PAGE_SHIFT);
6780 }
6781
6782 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6783 wm_put_swfw_semaphore(sc, sem);
6784 }
6785
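/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write (rd == 0) a BM PHY wakeup register.
 *	The wakeup registers live on page 800, which is only reachable
 *	after setting the enable bit in the page 769 enable register;
 *	that register is saved and restored around the access.
 */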
6786 static void
6787 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6788 {
6789 struct wm_softc *sc = device_private(self);
6790 uint16_t regnum = BM_PHY_REG_NUM(offset);
6791 uint16_t wuce;
6792
6793 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6794 if (sc->sc_type == WM_T_PCH) {
6795 /* XXX the e1000 driver does nothing here... why? */
6796 }
6797
6798 /* Set page 769 */
6799 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6800 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6801
6802 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6803
6804 wuce &= ~BM_WUC_HOST_WU_BIT;
6805 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6806 wuce | BM_WUC_ENABLE_BIT);
6807
6808 /* Select page 800 */
6809 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6810 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6811
6812 /* Write page 800 */
6813 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6814
6815 if (rd)
6816 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6817 else
6818 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6819
6820 /* Set page 769 */
6821 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6822 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6823
6824 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6825 }
6826
6827 /*
6828 * wm_gmii_hv_readreg: [mii interface function]
6829 *
6830 * Read a PHY register on the HV PHY (PCH and newer).
6831 * This could be handled by the PHY layer if we didn't have to lock
6832 * the resource ...
6833 */
6834 static int
6835 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6836 {
6837 struct wm_softc *sc = device_private(self);
6838 uint16_t page = BM_PHY_REG_PAGE(reg);
6839 uint16_t regnum = BM_PHY_REG_NUM(reg);
6840 uint16_t val;
6841 int rv;
6842
6843 if (wm_get_swfwhw_semaphore(sc)) {
6844 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6845 __func__);
6846 return 0;
6847 }
6848
6849 /* XXX Workaround failure in MDIO access while cable is disconnected */
6850 if (sc->sc_phytype == WMPHY_82577) {
6851 /* XXX must write */
6852 }
6853
6854 /* Page 800 works differently from the rest, so it has its own function */
6855 if (page == BM_WUC_PAGE) {
6856 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
wm_put_swfwhw_semaphore(sc);
6857 return val;
6858 }
6859
6860 /*
6861 * Pages lower than 768 (other than 0) work differently from the
6862 * rest and would need their own access function; unimplemented.
6863 */
6864 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6865 printf("gmii_hv_readreg!!!\n");
wm_put_swfwhw_semaphore(sc);
6866 return 0;
6867 }
6868
6869 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6870 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6871 page << BME1000_PAGE_SHIFT);
6872 }
6873
6874 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6875 wm_put_swfwhw_semaphore(sc);
6876 return rv;
6877 }
6878
6879 /*
6880 * wm_gmii_hv_writereg: [mii interface function]
6881 *
6882 * Write a PHY register on the HV PHY (PCH and newer).
6883 * This could be handled by the PHY layer if we didn't have to lock
6884 * the resource ...
6885 */
6886 static void
6887 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6888 {
6889 struct wm_softc *sc = device_private(self);
6890 uint16_t page = BM_PHY_REG_PAGE(reg);
6891 uint16_t regnum = BM_PHY_REG_NUM(reg);
6892
6893 if (wm_get_swfwhw_semaphore(sc)) {
6894 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6895 __func__);
6896 return;
6897 }
6898
6899 /* XXX Workaround failure in MDIO access while cable is disconnected */
6900
6901 /* Page 800 works differently from the rest, so it has its own function */
6902 if (page == BM_WUC_PAGE) {
6903 uint16_t tmp;
6904
6905 tmp = val;
6906 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
wm_put_swfwhw_semaphore(sc);
6907 return;
6908 }
6909
6910 /*
6911 * Pages lower than 768 (other than 0) work differently from the
6912 * rest and would need their own access function; unimplemented.
6913 */
6914 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6915 printf("gmii_hv_writereg!!!\n");
wm_put_swfwhw_semaphore(sc);
6916 return;
6917 }
6918
6919 /*
6920 * XXX Workaround MDIO accesses being disabled after entering IEEE
6921 * Power Down (whenever bit 11 of the PHY control register is set)
6922 */
6923
6924 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6925 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6926 page << BME1000_PAGE_SHIFT);
6927 }
6928
6929 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6930 wm_put_swfwhw_semaphore(sc);
6931 }
6932
6933 /*
6934 * wm_gmii_82580_readreg: [mii interface function]
6935 *
6936 * Read a PHY register on the 82580 and I350.
6937 * This could be handled by the PHY layer if we didn't have to lock the
6938 * resource ...
6939 */
6940 static int
6941 wm_gmii_82580_readreg(device_t self, int phy, int reg)
6942 {
6943 struct wm_softc *sc = device_private(self);
6944 int sem;
6945 int rv;
6946
6947 sem = swfwphysem[sc->sc_funcid];
6948 if (wm_get_swfw_semaphore(sc, sem)) {
6949 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6950 __func__);
6951 return 0;
6952 }
6953
6954 rv = wm_gmii_i82544_readreg(self, phy, reg);
6955
6956 wm_put_swfw_semaphore(sc, sem);
6957 return rv;
6958 }
6959
6960 /*
6961 * wm_gmii_82580_writereg: [mii interface function]
6962 *
6963 * Write a PHY register on the 82580 and I350.
6964 * This could be handled by the PHY layer if we didn't have to lock the
6965 * resource ...
6966 */
6967 static void
6968 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
6969 {
6970 struct wm_softc *sc = device_private(self);
6971 int sem;
6972
6973 sem = swfwphysem[sc->sc_funcid];
6974 if (wm_get_swfw_semaphore(sc, sem)) {
6975 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6976 __func__);
6977 return;
6978 }
6979
6980 wm_gmii_i82544_writereg(self, phy, reg, val);
6981
6982 wm_put_swfw_semaphore(sc, sem);
6983 }
6984
6985 /*
6986 * wm_gmii_statchg: [mii interface function]
6987 *
6988 * Callback from MII layer when media changes.
6989 */
6990 static void
6991 wm_gmii_statchg(struct ifnet *ifp)
6992 {
6993 struct wm_softc *sc = ifp->if_softc;
6994 struct mii_data *mii = &sc->sc_mii;
6995
6996 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6997 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6998 sc->sc_fcrtl &= ~FCRTL_XONE;
6999
7000 /*
7001 * Get flow control negotiation result.
7002 */
7003 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7004 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7005 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7006 mii->mii_media_active &= ~IFM_ETH_FMASK;
7007 }
7008
7009 if (sc->sc_flowflags & IFM_FLOW) {
7010 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7011 sc->sc_ctrl |= CTRL_TFCE;
7012 sc->sc_fcrtl |= FCRTL_XONE;
7013 }
7014 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7015 sc->sc_ctrl |= CTRL_RFCE;
7016 }
7017
7018 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7019 DPRINTF(WM_DEBUG_LINK,
7020 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7021 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7022 } else {
7023 DPRINTF(WM_DEBUG_LINK,
7024 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7025 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7026 }
7027
7028 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7029 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7030 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7031 : WMREG_FCRTL, sc->sc_fcrtl);
7032 if (sc->sc_type == WM_T_80003) {
7033 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7034 case IFM_1000_T:
7035 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7036 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7037 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7038 break;
7039 default:
7040 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7041 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7042 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7043 break;
7044 }
7045 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7046 }
7047 }
7048
7049 /*
7050 * wm_kmrn_readreg:
7051 *
7052 * Read a kumeran register
7053 */
7054 static int
7055 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7056 {
7057 int rv;
7058
7059 if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
7060 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7061 aprint_error_dev(sc->sc_dev,
7062 "%s: failed to get semaphore\n", __func__);
7063 return 0;
7064 }
7065 } else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
7066 if (wm_get_swfwhw_semaphore(sc)) {
7067 aprint_error_dev(sc->sc_dev,
7068 "%s: failed to get semaphore\n", __func__);
7069 return 0;
7070 }
7071 }
7072
7073 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7074 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7075 KUMCTRLSTA_REN);
7076 CSR_WRITE_FLUSH(sc);
7077 delay(2);
7078
7079 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7080
7081 if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
7082 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7083 else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
7084 wm_put_swfwhw_semaphore(sc);
7085
7086 return rv;
7087 }
7088
7089 /*
7090 * wm_kmrn_writereg:
7091 *
7092 * Write a kumeran register
7093 */
7094 static void
7095 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7096 {
7097
7098 if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
7099 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7100 aprint_error_dev(sc->sc_dev,
7101 "%s: failed to get semaphore\n", __func__);
7102 return;
7103 }
7104 } else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
7105 if (wm_get_swfwhw_semaphore(sc)) {
7106 aprint_error_dev(sc->sc_dev,
7107 "%s: failed to get semaphore\n", __func__);
7108 return;
7109 }
7110 }
7111
7112 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7113 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7114 (val & KUMCTRLSTA_MASK));
7115
7116 if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
7117 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7118 else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
7119 wm_put_swfwhw_semaphore(sc);
7120 }
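#if 0
/*
 * Illustrative sketch (not part of the driver): updating a Kumeran
 * register with a read-check first, along the lines of what
 * wm_gmii_statchg() does with the HD_CTRL offset. The locking is
 * handled inside the two accessors; the function name is
 * hypothetical.
 */
static void
wm_example_kmrn_set_hd_ctrl(struct wm_softc *sc)
{
	int val;

	val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL);
	if (val != KUMCTRLSTA_HD_CTRL_1000_DEFAULT)
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
		    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
}
#endif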
7121
7122 /* SGMII related */
7123
7124 /*
7125 * wm_sgmii_uses_mdio
7126 *
7127 * Check whether the transaction is to the internal PHY or the external
7128 * MDIO interface. Return true if it's MDIO.
7129 */
7130 static bool
7131 wm_sgmii_uses_mdio(struct wm_softc *sc)
7132 {
7133 uint32_t reg;
7134 bool ismdio = false;
7135
7136 switch (sc->sc_type) {
7137 case WM_T_82575:
7138 case WM_T_82576:
7139 reg = CSR_READ(sc, WMREG_MDIC);
7140 ismdio = ((reg & MDIC_DEST) != 0);
7141 break;
7142 case WM_T_82580:
7143 case WM_T_I350:
7144 case WM_T_I354:
7145 case WM_T_I210:
7146 case WM_T_I211:
7147 reg = CSR_READ(sc, WMREG_MDICNFG);
7148 ismdio = ((reg & MDICNFG_DEST) != 0);
7149 break;
7150 default:
7151 break;
7152 }
7153
7154 return ismdio;
7155 }
7156
7157 /*
7158 * wm_sgmii_readreg: [mii interface function]
7159 *
7160 * Read a PHY register on the SGMII
7161 * This could be handled by the PHY layer if we didn't have to lock the
7162 * resource ...
7163 */
7164 static int
7165 wm_sgmii_readreg(device_t self, int phy, int reg)
7166 {
7167 struct wm_softc *sc = device_private(self);
7168 uint32_t i2ccmd;
7169 int i, rv;
7170
7171 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7172 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7173 __func__);
7174 return 0;
7175 }
7176
7177 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7178 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7179 | I2CCMD_OPCODE_READ;
7180 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7181
7182 /* Poll the ready bit */
7183 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7184 delay(50);
7185 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7186 if (i2ccmd & I2CCMD_READY)
7187 break;
7188 }
7189 if ((i2ccmd & I2CCMD_READY) == 0)
7190 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7191 if ((i2ccmd & I2CCMD_ERROR) != 0)
7192 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7193
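/* The I2CCMD data field is byte-swapped relative to host order. */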
7194 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7195
7196 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7197 return rv;
7198 }
7199
7200 /*
7201 * wm_sgmii_writereg: [mii interface function]
7202 *
7203 * Write a PHY register on the SGMII.
7204 * This could be handled by the PHY layer if we didn't have to lock the
7205 * resource ...
7206 */
7207 static void
7208 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7209 {
7210 struct wm_softc *sc = device_private(self);
7211 uint32_t i2ccmd;
7212 int i;
7213
7214 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7215 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7216 __func__);
7217 return;
7218 }
7219
7220 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7221 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7222 | I2CCMD_OPCODE_WRITE;
7223 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7224
7225 /* Poll the ready bit */
7226 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7227 delay(50);
7228 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7229 if (i2ccmd & I2CCMD_READY)
7230 break;
7231 }
7232 if ((i2ccmd & I2CCMD_READY) == 0)
7233 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7234 if ((i2ccmd & I2CCMD_ERROR) != 0)
7235 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7236
7237 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7238 }
7239
7240 /* TBI related */
7241
7242 /* XXX Currently TBI only */
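/*
 * wm_check_for_link:
 *
 *	Check for link on TBI devices and force link-up or return to
 *	autonegotiation according to the SWDPIN/LU/RXCW state table
 *	inside the function.
 */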
7243 static int
7244 wm_check_for_link(struct wm_softc *sc)
7245 {
7246 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7247 uint32_t rxcw;
7248 uint32_t ctrl;
7249 uint32_t status;
7250 uint32_t sig;
7251
7252 if (sc->sc_mediatype & WMP_F_SERDES) {
7253 sc->sc_tbi_linkup = 1;
7254 return 0;
7255 }
7256
7257 rxcw = CSR_READ(sc, WMREG_RXCW);
7258 ctrl = CSR_READ(sc, WMREG_CTRL);
7259 status = CSR_READ(sc, WMREG_STATUS);
7260
7261 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7262
7263 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7264 device_xname(sc->sc_dev), __func__,
7265 ((ctrl & CTRL_SWDPIN(1)) == sig),
7266 ((status & STATUS_LU) != 0),
7267 ((rxcw & RXCW_C) != 0)
7268 ));
7269
7270 /*
7271 * SWDPIN LU RXCW
7272 * 0 0 0
7273 * 0 0 1 (should not happen)
7274 * 0 1 0 (should not happen)
7275 * 0 1 1 (should not happen)
7276 * 1 0 0 Disable autonego and force linkup
7277 * 1 0 1 got /C/ but not linkup yet
7278 * 1 1 0 (linkup)
7279 * 1 1 1 If IFM_AUTO, back to autonego
7280 *
7281 */
7282 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7283 && ((status & STATUS_LU) == 0)
7284 && ((rxcw & RXCW_C) == 0)) {
7285 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7286 __func__));
7287 sc->sc_tbi_linkup = 0;
7288 /* Disable auto-negotiation in the TXCW register */
7289 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7290
7291 /*
7292 * Force link-up and also force full-duplex.
7293 *
7294 * NOTE: TFCE and RFCE in CTRL were updated automatically,
7295 * so we should update sc->sc_ctrl
7296 */
7297 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7298 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7299 } else if (((status & STATUS_LU) != 0)
7300 && ((rxcw & RXCW_C) != 0)
7301 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7302 sc->sc_tbi_linkup = 1;
7303 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7304 __func__));
7305 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7306 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7307 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7308 && ((rxcw & RXCW_C) != 0)) {
7309 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7310 } else {
7311 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7312 status));
7313 }
7314
7315 return 0;
7316 }
7317
7318 /*
7319 * wm_tbi_mediainit:
7320 *
7321 * Initialize media for use on 1000BASE-X devices.
7322 */
7323 static void
7324 wm_tbi_mediainit(struct wm_softc *sc)
7325 {
7326 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7327 const char *sep = "";
7328
7329 if (sc->sc_type < WM_T_82543)
7330 sc->sc_tipg = TIPG_WM_DFLT;
7331 else
7332 sc->sc_tipg = TIPG_LG_DFLT;
7333
7334 sc->sc_tbi_anegticks = 5;
7335
7336 /* Initialize our media structures */
7337 sc->sc_mii.mii_ifp = ifp;
7338
7339 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7340 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
7341 wm_tbi_mediastatus);
7342
7343 /*
7344 * SWD Pins:
7345 *
7346 * 0 = Link LED (output)
7347 * 1 = Loss Of Signal (input)
7348 */
7349 sc->sc_ctrl |= CTRL_SWDPIO(0);
7350 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
7351 if (sc->sc_mediatype & WMP_F_SERDES)
7352 sc->sc_ctrl &= ~CTRL_LRST;
7353
7354 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7355
7356 #define ADD(ss, mm, dd) \
7357 do { \
7358 aprint_normal("%s%s", sep, ss); \
7359 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
7360 sep = ", "; \
7361 } while (/*CONSTCOND*/0)
7362
7363 aprint_normal_dev(sc->sc_dev, "");
7364
7365 /* Only 82545 is LX */
7366 if (sc->sc_type == WM_T_82545) {
7367 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
7368 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
7369 } else {
7370 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
7371 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
7372 }
7373 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
7374 aprint_normal("\n");
7375
7376 #undef ADD
7377
7378 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
7379 }
7380
7381 /*
7382 * wm_tbi_mediastatus: [ifmedia interface function]
7383 *
7384 * Get the current interface media status on a 1000BASE-X device.
7385 */
7386 static void
7387 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7388 {
7389 struct wm_softc *sc = ifp->if_softc;
7390 uint32_t ctrl, status;
7391
7392 ifmr->ifm_status = IFM_AVALID;
7393 ifmr->ifm_active = IFM_ETHER;
7394
7395 status = CSR_READ(sc, WMREG_STATUS);
7396 if ((status & STATUS_LU) == 0) {
7397 ifmr->ifm_active |= IFM_NONE;
7398 return;
7399 }
7400
7401 ifmr->ifm_status |= IFM_ACTIVE;
7402 /* Only 82545 is LX */
7403 if (sc->sc_type == WM_T_82545)
7404 ifmr->ifm_active |= IFM_1000_LX;
7405 else
7406 ifmr->ifm_active |= IFM_1000_SX;
7407 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
7408 ifmr->ifm_active |= IFM_FDX;
7409 else
7410 ifmr->ifm_active |= IFM_HDX;
7411 ctrl = CSR_READ(sc, WMREG_CTRL);
7412 if (ctrl & CTRL_RFCE)
7413 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
7414 if (ctrl & CTRL_TFCE)
7415 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
7416 }
7417
7418 /*
7419 * wm_tbi_mediachange: [ifmedia interface function]
7420 *
7421 * Set hardware to newly-selected media on a 1000BASE-X device.
7422 */
7423 static int
7424 wm_tbi_mediachange(struct ifnet *ifp)
7425 {
7426 struct wm_softc *sc = ifp->if_softc;
7427 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7428 uint32_t status;
7429 int i;
7430
7431 if (sc->sc_mediatype & WMP_F_SERDES)
7432 return 0;
7433
7434 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
7435 || (sc->sc_type >= WM_T_82575))
7436 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
7437
7438 /* XXX power_up_serdes_link_82575() */
7439
7440 sc->sc_ctrl &= ~CTRL_LRST;
7441 sc->sc_txcw = TXCW_ANE;
7442 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7443 sc->sc_txcw |= TXCW_FD | TXCW_HD;
7444 else if (ife->ifm_media & IFM_FDX)
7445 sc->sc_txcw |= TXCW_FD;
7446 else
7447 sc->sc_txcw |= TXCW_HD;
7448
7449 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
7450 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
7451
7452 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
7453 device_xname(sc->sc_dev), sc->sc_txcw));
7454 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7455 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7456 CSR_WRITE_FLUSH(sc);
7457 delay(1000);
7458
7459 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
7460 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
7461
7462 /*
7463 * CTRL_SWDPIN(1) is the Loss Of Signal input: only on the 82544 and
7464 * earlier does an asserted pin mean "no signal" in the test below.
7465 */
7466 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
7467 /* Have signal; wait for the link to come up. */
7468 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
7469 delay(10000);
7470 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
7471 break;
7472 }
7473
7474 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
7475 device_xname(sc->sc_dev),i));
7476
7477 status = CSR_READ(sc, WMREG_STATUS);
7478 DPRINTF(WM_DEBUG_LINK,
7479 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
7480 device_xname(sc->sc_dev),status, STATUS_LU));
7481 if (status & STATUS_LU) {
7482 /* Link is up. */
7483 DPRINTF(WM_DEBUG_LINK,
7484 ("%s: LINK: set media -> link up %s\n",
7485 device_xname(sc->sc_dev),
7486 (status & STATUS_FD) ? "FDX" : "HDX"));
7487
7488 /*
7489 * NOTE: CTRL will update TFCE and RFCE automatically,
7490 * so we should update sc->sc_ctrl
7491 */
7492 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7493 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7494 sc->sc_fcrtl &= ~FCRTL_XONE;
7495 if (status & STATUS_FD)
7496 sc->sc_tctl |=
7497 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7498 else
7499 sc->sc_tctl |=
7500 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7501 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
7502 sc->sc_fcrtl |= FCRTL_XONE;
7503 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7504 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7505 WMREG_OLD_FCRTL : WMREG_FCRTL,
7506 sc->sc_fcrtl);
7507 sc->sc_tbi_linkup = 1;
7508 } else {
7509 if (i == WM_LINKUP_TIMEOUT)
7510 wm_check_for_link(sc);
7511 /* Link is down. */
7512 DPRINTF(WM_DEBUG_LINK,
7513 ("%s: LINK: set media -> link down\n",
7514 device_xname(sc->sc_dev)));
7515 sc->sc_tbi_linkup = 0;
7516 }
7517 } else {
7518 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
7519 device_xname(sc->sc_dev)));
7520 sc->sc_tbi_linkup = 0;
7521 }
7522
7523 wm_tbi_set_linkled(sc);
7524
7525 return 0;
7526 }
7527
7528 /*
7529 * wm_tbi_set_linkled:
7530 *
7531 * Update the link LED on 1000BASE-X devices.
7532 */
7533 static void
7534 wm_tbi_set_linkled(struct wm_softc *sc)
7535 {
7536
7537 if (sc->sc_tbi_linkup)
7538 sc->sc_ctrl |= CTRL_SWDPIN(0);
7539 else
7540 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7541
7542 /* 82540 or newer devices are active low */
7543 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7544
7545 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7546 }
7547
7548 /*
7549 * wm_tbi_check_link:
7550 *
7551 * Check the link on 1000BASE-X devices.
7552 */
7553 static void
7554 wm_tbi_check_link(struct wm_softc *sc)
7555 {
7556 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7557 uint32_t status;
7558
7559 KASSERT(WM_TX_LOCKED(sc));
7560
7561 if (sc->sc_mediatype & WMP_F_SERDES) {
7562 sc->sc_tbi_linkup = 1;
7563 return;
7564 }
7565
7566 status = CSR_READ(sc, WMREG_STATUS);
7567
7568 /* XXX is this needed? */
7569 (void)CSR_READ(sc, WMREG_RXCW);
7570 (void)CSR_READ(sc, WMREG_CTRL);
7571
7572 /* set link status */
7573 if ((status & STATUS_LU) == 0) {
7574 DPRINTF(WM_DEBUG_LINK,
7575 ("%s: LINK: checklink -> down\n",
7576 device_xname(sc->sc_dev)));
7577 sc->sc_tbi_linkup = 0;
7578 } else if (sc->sc_tbi_linkup == 0) {
7579 DPRINTF(WM_DEBUG_LINK,
7580 ("%s: LINK: checklink -> up %s\n",
7581 device_xname(sc->sc_dev),
7582 (status & STATUS_FD) ? "FDX" : "HDX"));
7583 sc->sc_tbi_linkup = 1;
7584 }
7585
7586 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
7587 && ((status & STATUS_LU) == 0)) {
7588 sc->sc_tbi_linkup = 0;
7589 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7590 /* If the timer expired, retry autonegotiation */
7591 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
7592 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
7593 sc->sc_tbi_ticks = 0;
7594 /*
7595 * Reset the link, and let autonegotiation do
7596 * its thing
7597 */
7598 sc->sc_ctrl |= CTRL_LRST;
7599 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7600 CSR_WRITE_FLUSH(sc);
7601 delay(1000);
7602 sc->sc_ctrl &= ~CTRL_LRST;
7603 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7604 CSR_WRITE_FLUSH(sc);
7605 delay(1000);
7606 CSR_WRITE(sc, WMREG_TXCW,
7607 sc->sc_txcw & ~TXCW_ANE);
7608 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7609 }
7610 }
7611 }
7612
7613 wm_tbi_set_linkled(sc);
7614 }
7615
7616 /* SFP related */
7617
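/*
 * wm_sfp_read_data_byte:
 *
 *	Read one byte from the SFP module's EEPROM through the I2CCMD
 *	register, polling for completion.
 */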
7618 static int
7619 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
7620 {
7621 uint32_t i2ccmd;
7622 int i;
7623
7624 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
7625 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7626
7627 /* Poll the ready bit */
7628 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7629 delay(50);
7630 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7631 if (i2ccmd & I2CCMD_READY)
7632 break;
7633 }
7634 if ((i2ccmd & I2CCMD_READY) == 0)
7635 return -1;
7636 if ((i2ccmd & I2CCMD_ERROR) != 0)
7637 return -1;
7638
7639 *data = i2ccmd & 0x00ff;
7640
7641 return 0;
7642 }
7643
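/*
 * wm_sfp_get_media_type:
 *
 *	Enable the I2C interface, identify the SFP module and derive the
 *	media type (SERDES or copper/SGMII) from its Ethernet compliance
 *	flags.  Returns WMP_F_UNKNOWN on failure.
 */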
7644 static uint32_t
7645 wm_sfp_get_media_type(struct wm_softc *sc)
7646 {
7647 uint32_t ctrl_ext;
7648 uint8_t val = 0;
7649 int timeout = 3;
7650 uint32_t mediatype = WMP_F_UNKNOWN;
7651 int rv = -1;
7652
7653 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7654 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
7655 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
7656 CSR_WRITE_FLUSH(sc);
7657
7658 /* Read SFP module data */
7659 while (timeout) {
7660 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
7661 if (rv == 0)
7662 break;
7663 delay(100*1000); /* XXX too big */
7664 timeout--;
7665 }
7666 if (rv != 0)
7667 goto out;
7668 switch (val) {
7669 case SFF_SFP_ID_SFF:
7670 aprint_normal_dev(sc->sc_dev,
7671 "Module/Connector soldered to board\n");
7672 break;
7673 case SFF_SFP_ID_SFP:
7674 aprint_normal_dev(sc->sc_dev, "SFP\n");
7675 break;
7676 case SFF_SFP_ID_UNKNOWN:
7677 goto out;
7678 default:
7679 break;
7680 }
7681
7682 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
7683 if (rv != 0) {
7684 goto out;
7685 }
7686
7687 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
7688 mediatype = WMP_F_SERDES;
7689 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
7690 		sc->sc_flags |= WM_F_SGMII;
7691 		mediatype = WMP_F_COPPER;
7692 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
7693 sc->sc_flags |= WM_F_SGMII;
7694 mediatype = WMP_F_SERDES;
7695 }
7696
7697 out:
7698 /* Restore I2C interface setting */
7699 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7700
7701 return mediatype;
7702 }

7703 /*
7704 * NVM related.
7705 * Microwire, SPI (w/wo EERD) and Flash.
7706 */
7707
7708 /* Both spi and uwire */
7709
7710 /*
7711 * wm_eeprom_sendbits:
7712 *
7713 * Send a series of bits to the EEPROM.
7714 */
7715 static void
7716 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
7717 {
7718 uint32_t reg;
7719 int x;
7720
7721 reg = CSR_READ(sc, WMREG_EECD);
7722
7723 for (x = nbits; x > 0; x--) {
7724 if (bits & (1U << (x - 1)))
7725 reg |= EECD_DI;
7726 else
7727 reg &= ~EECD_DI;
7728 CSR_WRITE(sc, WMREG_EECD, reg);
7729 CSR_WRITE_FLUSH(sc);
7730 delay(2);
7731 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7732 CSR_WRITE_FLUSH(sc);
7733 delay(2);
7734 CSR_WRITE(sc, WMREG_EECD, reg);
7735 CSR_WRITE_FLUSH(sc);
7736 delay(2);
7737 }
7738 }
7739
7740 /*
7741 * wm_eeprom_recvbits:
7742 *
7743 * Receive a series of bits from the EEPROM.
7744 */
7745 static void
7746 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
7747 {
7748 uint32_t reg, val;
7749 int x;
7750
7751 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
7752
7753 val = 0;
7754 for (x = nbits; x > 0; x--) {
7755 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7756 CSR_WRITE_FLUSH(sc);
7757 delay(2);
7758 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
7759 val |= (1U << (x - 1));
7760 CSR_WRITE(sc, WMREG_EECD, reg);
7761 CSR_WRITE_FLUSH(sc);
7762 delay(2);
7763 }
7764 *valp = val;
7765 }
7766
7767 /* Microwire */
7768
7769 /*
7770 * wm_nvm_read_uwire:
7771 *
7772 * Read a word from the EEPROM using the MicroWire protocol.
7773 */
7774 static int
7775 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7776 {
7777 uint32_t reg, val;
7778 int i;
7779
7780 for (i = 0; i < wordcnt; i++) {
7781 /* Clear SK and DI. */
7782 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
7783 CSR_WRITE(sc, WMREG_EECD, reg);
7784
7785 /*
7786 * XXX: workaround for a bug in qemu-0.12.x and prior
7787 * and Xen.
7788 *
7789 		 * We use this workaround only for the 82540 because
7790 		 * qemu's e1000 acts as an 82540.
7791 */
7792 if (sc->sc_type == WM_T_82540) {
7793 reg |= EECD_SK;
7794 CSR_WRITE(sc, WMREG_EECD, reg);
7795 reg &= ~EECD_SK;
7796 CSR_WRITE(sc, WMREG_EECD, reg);
7797 CSR_WRITE_FLUSH(sc);
7798 delay(2);
7799 }
7800 /* XXX: end of workaround */
7801
7802 /* Set CHIP SELECT. */
7803 reg |= EECD_CS;
7804 CSR_WRITE(sc, WMREG_EECD, reg);
7805 CSR_WRITE_FLUSH(sc);
7806 delay(2);
7807
7808 /* Shift in the READ command. */
7809 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
7810
7811 /* Shift in address. */
7812 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
7813
7814 /* Shift out the data. */
7815 wm_eeprom_recvbits(sc, &val, 16);
7816 data[i] = val & 0xffff;
7817
7818 /* Clear CHIP SELECT. */
7819 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
7820 CSR_WRITE(sc, WMREG_EECD, reg);
7821 CSR_WRITE_FLUSH(sc);
7822 delay(2);
7823 }
7824
7825 return 0;
7826 }
7827
7828 /* SPI */
7829
7830 /*
7831 * Set SPI and FLASH related information from the EECD register.
7832 * For 82541 and 82547, the word size is taken from EEPROM.
7833 */
7834 static int
7835 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
7836 {
7837 int size;
7838 uint32_t reg;
7839 uint16_t data;
7840
7841 reg = CSR_READ(sc, WMREG_EECD);
7842 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
7843
7844 /* Read the size of NVM from EECD by default */
7845 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
7846 switch (sc->sc_type) {
7847 case WM_T_82541:
7848 case WM_T_82541_2:
7849 case WM_T_82547:
7850 case WM_T_82547_2:
7851 /* Set dummy value to access EEPROM */
7852 sc->sc_nvm_wordsize = 64;
7853 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
7854 reg = data;
7855 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
7856 if (size == 0)
7857 size = 6; /* 64 word size */
7858 else
7859 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
7860 break;
7861 case WM_T_80003:
7862 case WM_T_82571:
7863 case WM_T_82572:
7864 case WM_T_82573: /* SPI case */
7865 case WM_T_82574: /* SPI case */
7866 case WM_T_82583: /* SPI case */
7867 size += NVM_WORD_SIZE_BASE_SHIFT;
7868 if (size > 14)
7869 size = 14;
7870 break;
7871 case WM_T_82575:
7872 case WM_T_82576:
7873 case WM_T_82580:
7874 case WM_T_I350:
7875 case WM_T_I354:
7876 case WM_T_I210:
7877 case WM_T_I211:
7878 size += NVM_WORD_SIZE_BASE_SHIFT;
7879 if (size > 15)
7880 size = 15;
7881 break;
7882 default:
7883 aprint_error_dev(sc->sc_dev,
7884 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
7885 return -1;
7887 }
7888
7889 sc->sc_nvm_wordsize = 1 << size;
7890
7891 return 0;
7892 }
7893
7894 /*
7895 * wm_nvm_ready_spi:
7896 *
7897 * Wait for a SPI EEPROM to be ready for commands.
7898 */
7899 static int
7900 wm_nvm_ready_spi(struct wm_softc *sc)
7901 {
7902 uint32_t val;
7903 int usec;
7904
7905 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
7906 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
7907 wm_eeprom_recvbits(sc, &val, 8);
7908 if ((val & SPI_SR_RDY) == 0)
7909 break;
7910 }
7911 if (usec >= SPI_MAX_RETRIES) {
7912 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
7913 return 1;
7914 }
7915 return 0;
7916 }
7917
7918 /*
7919 * wm_nvm_read_spi:
7920 *
7921  * Read a word from the EEPROM using the SPI protocol.
7922 */
7923 static int
7924 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7925 {
7926 uint32_t reg, val;
7927 int i;
7928 uint8_t opc;
7929
7930 /* Clear SK and CS. */
7931 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
7932 CSR_WRITE(sc, WMREG_EECD, reg);
7933 CSR_WRITE_FLUSH(sc);
7934 delay(2);
7935
7936 if (wm_nvm_ready_spi(sc))
7937 return 1;
7938
7939 /* Toggle CS to flush commands. */
7940 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
7941 CSR_WRITE_FLUSH(sc);
7942 delay(2);
7943 CSR_WRITE(sc, WMREG_EECD, reg);
7944 CSR_WRITE_FLUSH(sc);
7945 delay(2);
7946
7947 opc = SPI_OPC_READ;
7948 if (sc->sc_nvm_addrbits == 8 && word >= 128)
7949 opc |= SPI_OPC_A8;
7950
7951 wm_eeprom_sendbits(sc, opc, 8);
7952 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
7953
7954 for (i = 0; i < wordcnt; i++) {
7955 wm_eeprom_recvbits(sc, &val, 16);
7956 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
7957 }
7958
7959 /* Raise CS and clear SK. */
7960 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
7961 CSR_WRITE(sc, WMREG_EECD, reg);
7962 CSR_WRITE_FLUSH(sc);
7963 delay(2);
7964
7965 return 0;
7966 }
7967
7968 /* Access using the EERD register */
7969
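/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll the EERD or EEWR register (selected by "rw") until the DONE
 *	bit is set.  Returns 0 on success, -1 on timeout.
 */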
7970 static int
7971 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
7972 {
7973 uint32_t attempts = 100000;
7974 uint32_t i, reg = 0;
7975 int32_t done = -1;
7976
7977 for (i = 0; i < attempts; i++) {
7978 reg = CSR_READ(sc, rw);
7979
7980 if (reg & EERD_DONE) {
7981 done = 0;
7982 break;
7983 }
7984 delay(5);
7985 }
7986
7987 return done;
7988 }
7989
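/*
 * wm_nvm_read_eerd:
 *
 *	Read word(s) from the EEPROM, one at a time, through the EERD
 *	register.
 */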
7990 static int
7991 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
7992 uint16_t *data)
7993 {
7994 int i, eerd = 0;
7995 int error = 0;
7996
7997 for (i = 0; i < wordcnt; i++) {
7998 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
7999
8000 CSR_WRITE(sc, WMREG_EERD, eerd);
8001 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
8002 if (error != 0)
8003 break;
8004
8005 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
8006 }
8007
8008 return error;
8009 }
8010
8011 /* Flash */
8012
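/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	Find the flash bank that holds the valid NVM image, either from
 *	EECD_SEC1VAL (ICH8/ICH9) or from the signature byte of each bank.
 *	Returns 0 on success with *bank set, -1 if no valid bank is found.
 */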
8013 static int
8014 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
8015 {
8016 uint32_t eecd;
8017 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
8018 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
8019 uint8_t sig_byte = 0;
8020
8021 switch (sc->sc_type) {
8022 case WM_T_ICH8:
8023 case WM_T_ICH9:
8024 eecd = CSR_READ(sc, WMREG_EECD);
8025 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
8026 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
8027 return 0;
8028 }
8029 /* FALLTHROUGH */
8030 default:
8031 /* Default to 0 */
8032 *bank = 0;
8033
8034 /* Check bank 0 */
8035 wm_read_ich8_byte(sc, act_offset, &sig_byte);
8036 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8037 *bank = 0;
8038 return 0;
8039 }
8040
8041 /* Check bank 1 */
8042 wm_read_ich8_byte(sc, act_offset + bank1_offset,
8043 &sig_byte);
8044 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8045 *bank = 1;
8046 return 0;
8047 }
8048 }
8049
8050 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
8051 device_xname(sc->sc_dev)));
8052 return -1;
8053 }
8054
8055 /******************************************************************************
8056 * This function does initial flash setup so that a new read/write/erase cycle
8057 * can be started.
8058 *
8059 * sc - The pointer to the hw structure
8060 ****************************************************************************/
8061 static int32_t
8062 wm_ich8_cycle_init(struct wm_softc *sc)
8063 {
8064 uint16_t hsfsts;
8065 int32_t error = 1;
8066 int32_t i = 0;
8067
8068 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8069
8070 	/* Maybe check the Flash Descriptor Valid bit in Hw status */
8071 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
8072 return error;
8073 }
8074
8075 /* Clear FCERR in Hw status by writing 1 */
8076 /* Clear DAEL in Hw status by writing a 1 */
8077 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
8078
8079 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8080
8081 /*
8082 	 * Either we should have a hardware SPI cycle-in-progress bit to
8083 	 * check against in order to start a new cycle, or the FDONE bit
8084 	 * should be set by hardware after a hardware reset, which can then
8085 	 * be used to tell whether a cycle is in progress or has completed.
8086 	 * We should also have a software semaphore mechanism to guard FDONE
8087 	 * or the cycle-in-progress bit so that access to those bits by two
8088 	 * threads is serialized, or some way to keep two threads from
8089 	 * starting a cycle at the same time.
8090 */
8091
8092 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8093 /*
8094 * There is no cycle running at present, so we can start a
8095 * cycle
8096 */
8097
8098 /* Begin by setting Flash Cycle Done. */
8099 hsfsts |= HSFSTS_DONE;
8100 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8101 error = 0;
8102 } else {
8103 /*
8104 		 * Otherwise poll for some time so the current cycle has a
8105 * chance to end before giving up.
8106 */
8107 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
8108 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8109 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8110 error = 0;
8111 break;
8112 }
8113 delay(1);
8114 }
8115 if (error == 0) {
8116 /*
8117 			 * The previous cycle ended within the timeout;
8118 			 * now set the Flash Cycle Done.
8119 */
8120 hsfsts |= HSFSTS_DONE;
8121 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8122 }
8123 }
8124 return error;
8125 }
8126
8127 /******************************************************************************
8128 * This function starts a flash cycle and waits for its completion
8129 *
8130 * sc - The pointer to the hw structure
8131 ****************************************************************************/
8132 static int32_t
8133 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
8134 {
8135 uint16_t hsflctl;
8136 uint16_t hsfsts;
8137 int32_t error = 1;
8138 uint32_t i = 0;
8139
8140 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
8141 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8142 hsflctl |= HSFCTL_GO;
8143 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8144
8145 /* Wait till FDONE bit is set to 1 */
8146 do {
8147 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8148 if (hsfsts & HSFSTS_DONE)
8149 break;
8150 delay(1);
8151 i++;
8152 } while (i < timeout);
8153 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
8154 error = 0;
8155
8156 return error;
8157 }
8158
8159 /******************************************************************************
8160 * Reads a byte or word from the NVM using the ICH8 flash access registers.
8161 *
8162 * sc - The pointer to the hw structure
8163 * index - The index of the byte or word to read.
8164 * size - Size of data to read, 1=byte 2=word
8165 * data - Pointer to the word to store the value read.
8166 *****************************************************************************/
8167 static int32_t
8168 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
8169 uint32_t size, uint16_t *data)
8170 {
8171 uint16_t hsfsts;
8172 uint16_t hsflctl;
8173 uint32_t flash_linear_address;
8174 uint32_t flash_data = 0;
8175 int32_t error = 1;
8176 int32_t count = 0;
8177
8178 	if (size < 1 || size > 2 || data == NULL ||
8179 index > ICH_FLASH_LINEAR_ADDR_MASK)
8180 return error;
8181
8182 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
8183 sc->sc_ich8_flash_base;
8184
8185 do {
8186 delay(1);
8187 /* Steps */
8188 error = wm_ich8_cycle_init(sc);
8189 if (error)
8190 break;
8191
8192 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8193 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
8194 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
8195 & HSFCTL_BCOUNT_MASK;
8196 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
8197 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8198
8199 /*
8200 * Write the last 24 bits of index into Flash Linear address
8201 * field in Flash Address
8202 */
8203 /* TODO: TBD maybe check the index against the size of flash */
8204
8205 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
8206
8207 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
8208
8209 /*
8210 		 * If FCERR is set, clear it and retry the whole
8211 		 * sequence a few more times; otherwise read in
8212 		 * (shift in) the Flash Data0 register, least
8213 		 * significant byte first.
8214 */
8215 if (error == 0) {
8216 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
8217 if (size == 1)
8218 *data = (uint8_t)(flash_data & 0x000000FF);
8219 else if (size == 2)
8220 *data = (uint16_t)(flash_data & 0x0000FFFF);
8221 break;
8222 } else {
8223 /*
8224 * If we've gotten here, then things are probably
8225 * completely hosed, but if the error condition is
8226 * detected, it won't hurt to give it another try...
8227 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
8228 */
8229 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8230 if (hsfsts & HSFSTS_ERR) {
8231 /* Repeat for some time before giving up. */
8232 continue;
8233 } else if ((hsfsts & HSFSTS_DONE) == 0)
8234 break;
8235 }
8236 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8237
8238 return error;
8239 }
8240
8241 /******************************************************************************
8242 * Reads a single byte from the NVM using the ICH8 flash access registers.
8243 *
8244 * sc - pointer to wm_hw structure
8245 * index - The index of the byte to read.
8246 * data - Pointer to a byte to store the value read.
8247 *****************************************************************************/
8248 static int32_t
8249 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
8250 {
8251 int32_t status;
8252 uint16_t word = 0;
8253
8254 status = wm_read_ich8_data(sc, index, 1, &word);
8255 if (status == 0)
8256 *data = (uint8_t)word;
8257 else
8258 *data = 0;
8259
8260 return status;
8261 }
8262
8263 /******************************************************************************
8264 * Reads a word from the NVM using the ICH8 flash access registers.
8265 *
8266 * sc - pointer to wm_hw structure
8267 * index - The starting byte index of the word to read.
8268 * data - Pointer to a word to store the value read.
8269 *****************************************************************************/
8270 static int32_t
8271 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8272 {
8273 int32_t status;
8274
8275 status = wm_read_ich8_data(sc, index, 2, data);
8276 return status;
8277 }
8278
8279 /******************************************************************************
8280 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
8281 * register.
8282 *
8283 * sc - Struct containing variables accessed by shared code
8284 * offset - offset of word in the EEPROM to read
8285  * words - number of words to read
8286  * data - word(s) read from the EEPROM
8287 *****************************************************************************/
8288 static int
8289 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
8290 {
8291 int32_t error = 0;
8292 uint32_t flash_bank = 0;
8293 uint32_t act_offset = 0;
8294 uint32_t bank_offset = 0;
8295 uint16_t word = 0;
8296 uint16_t i = 0;
8297
8298 /*
8299 * We need to know which is the valid flash bank. In the event
8300 * that we didn't allocate eeprom_shadow_ram, we may not be
8301 * managing flash_bank. So it cannot be trusted and needs
8302 * to be updated with each read.
8303 */
8304 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
8305 if (error) {
8306 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
8307 device_xname(sc->sc_dev)));
8308 flash_bank = 0;
8309 }
8310
8311 /*
8312 * Adjust offset appropriately if we're on bank 1 - adjust for word
8313 * size
8314 */
8315 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
8316
8317 error = wm_get_swfwhw_semaphore(sc);
8318 if (error) {
8319 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8320 __func__);
8321 return error;
8322 }
8323
8324 for (i = 0; i < words; i++) {
8325 /* The NVM part needs a byte offset, hence * 2 */
8326 act_offset = bank_offset + ((offset + i) * 2);
8327 error = wm_read_ich8_word(sc, act_offset, &word);
8328 if (error) {
8329 aprint_error_dev(sc->sc_dev,
8330 "%s: failed to read NVM\n", __func__);
8331 break;
8332 }
8333 data[i] = word;
8334 }
8335
8336 wm_put_swfwhw_semaphore(sc);
8337 return error;
8338 }
8339
8340 /* Lock, detect NVM type, validate checksum and read */
8341
8342 /*
8343 * wm_nvm_acquire:
8344 *
8345 * Perform the EEPROM handshake required on some chips.
8346 */
8347 static int
8348 wm_nvm_acquire(struct wm_softc *sc)
8349 {
8350 uint32_t reg;
8351 int x;
8352 int ret = 0;
8353
8354 	/* Always succeeds: flash needs no EEPROM handshake */
8355 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8356 return 0;
8357
8358 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8359 ret = wm_get_swfwhw_semaphore(sc);
8360 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
8361 /* This will also do wm_get_swsm_semaphore() if needed */
8362 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
8363 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
8364 ret = wm_get_swsm_semaphore(sc);
8365 }
8366
8367 if (ret) {
8368 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8369 __func__);
8370 return 1;
8371 }
8372
8373 if (sc->sc_flags & WM_F_LOCK_EECD) {
8374 reg = CSR_READ(sc, WMREG_EECD);
8375
8376 /* Request EEPROM access. */
8377 reg |= EECD_EE_REQ;
8378 CSR_WRITE(sc, WMREG_EECD, reg);
8379
8380 /* ..and wait for it to be granted. */
8381 for (x = 0; x < 1000; x++) {
8382 reg = CSR_READ(sc, WMREG_EECD);
8383 if (reg & EECD_EE_GNT)
8384 break;
8385 delay(5);
8386 }
8387 if ((reg & EECD_EE_GNT) == 0) {
8388 aprint_error_dev(sc->sc_dev,
8389 "could not acquire EEPROM GNT\n");
8390 reg &= ~EECD_EE_REQ;
8391 CSR_WRITE(sc, WMREG_EECD, reg);
8392 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8393 wm_put_swfwhw_semaphore(sc);
8394 if (sc->sc_flags & WM_F_LOCK_SWFW)
8395 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8396 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8397 wm_put_swsm_semaphore(sc);
8398 return 1;
8399 }
8400 }
8401
8402 return 0;
8403 }
8404
8405 /*
8406 * wm_nvm_release:
8407 *
8408 * Release the EEPROM mutex.
8409 */
8410 static void
8411 wm_nvm_release(struct wm_softc *sc)
8412 {
8413 uint32_t reg;
8414
8415 	/* Nothing to release: flash needs no EEPROM handshake */
8416 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8417 return;
8418
8419 if (sc->sc_flags & WM_F_LOCK_EECD) {
8420 reg = CSR_READ(sc, WMREG_EECD);
8421 reg &= ~EECD_EE_REQ;
8422 CSR_WRITE(sc, WMREG_EECD, reg);
8423 }
8424
8425 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8426 wm_put_swfwhw_semaphore(sc);
8427 if (sc->sc_flags & WM_F_LOCK_SWFW)
8428 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8429 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8430 wm_put_swsm_semaphore(sc);
8431 }
8432
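/*
 * wm_nvm_is_onboard_eeprom:
 *
 *	Return 1 if the NVM is an on-board EEPROM, or 0 if the EECD
 *	register of an 82573/82574/82583 indicates a Flash device.
 */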
8433 static int
8434 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
8435 {
8436 uint32_t eecd = 0;
8437
8438 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
8439 || sc->sc_type == WM_T_82583) {
8440 eecd = CSR_READ(sc, WMREG_EECD);
8441
8442 /* Isolate bits 15 & 16 */
8443 eecd = ((eecd >> 15) & 0x03);
8444
8445 /* If both bits are set, device is Flash type */
8446 if (eecd == 0x03)
8447 return 0;
8448 }
8449 return 1;
8450 }
8451
8452 /*
8453  * wm_nvm_validate_checksum:
8454  *
8455  * The sum of the first 64 (16 bit) words must equal NVM_CHECKSUM (0xBABA).
8456 */
8457 static int
8458 wm_nvm_validate_checksum(struct wm_softc *sc)
8459 {
8460 uint16_t checksum;
8461 uint16_t eeprom_data;
8462 #ifdef WM_DEBUG
8463 uint16_t csum_wordaddr, valid_checksum;
8464 #endif
8465 int i;
8466
8467 checksum = 0;
8468
8469 /* Don't check for I211 */
8470 if (sc->sc_type == WM_T_I211)
8471 return 0;
8472
8473 #ifdef WM_DEBUG
8474 if (sc->sc_type == WM_T_PCH_LPT) {
8475 csum_wordaddr = NVM_OFF_COMPAT;
8476 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
8477 } else {
8478 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
8479 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
8480 }
8481
8482 /* Dump EEPROM image for debug */
8483 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8484 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8485 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
8486 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
8487 if ((eeprom_data & valid_checksum) == 0) {
8488 DPRINTF(WM_DEBUG_NVM,
8489 ("%s: NVM need to be updated (%04x != %04x)\n",
8490 device_xname(sc->sc_dev), eeprom_data,
8491 valid_checksum));
8492 }
8493 }
8494
8495 if ((wm_debug & WM_DEBUG_NVM) != 0) {
8496 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
8497 for (i = 0; i < NVM_SIZE; i++) {
8498 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8499 printf("XXXX ");
8500 else
8501 printf("%04hx ", eeprom_data);
8502 if (i % 8 == 7)
8503 printf("\n");
8504 }
8505 }
8506
8507 #endif /* WM_DEBUG */
8508
8509 for (i = 0; i < NVM_SIZE; i++) {
8510 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8511 return 1;
8512 checksum += eeprom_data;
8513 }
8514
8515 if (checksum != (uint16_t) NVM_CHECKSUM) {
8516 #ifdef WM_DEBUG
8517 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
8518 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
8519 #endif
8520 }
8521
8522 return 0;
8523 }
8524
8525 /*
8526 * wm_nvm_read:
8527 *
8528 * Read data from the serial EEPROM.
8529 */
8530 static int
8531 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8532 {
8533 int rv;
8534
8535 if (sc->sc_flags & WM_F_EEPROM_INVALID)
8536 return 1;
8537
8538 if (wm_nvm_acquire(sc))
8539 return 1;
8540
8541 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8542 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8543 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
8544 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
8545 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
8546 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
8547 else if (sc->sc_flags & WM_F_EEPROM_SPI)
8548 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
8549 else
8550 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
8551
8552 wm_nvm_release(sc);
8553 return rv;
8554 }
8555
8556 /*
8557 * Hardware semaphores.
8558  * Very complex...
8559 */
8560
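/*
 * wm_get_swsm_semaphore:
 *
 *	Acquire the software (SMBI) and software/firmware (SWESMBI)
 *	semaphores in the SWSM register.  Returns 0 on success, 1 on
 *	timeout.
 */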
8561 static int
8562 wm_get_swsm_semaphore(struct wm_softc *sc)
8563 {
8564 int32_t timeout;
8565 uint32_t swsm;
8566
8567 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8568 /* Get the SW semaphore. */
8569 timeout = sc->sc_nvm_wordsize + 1;
8570 while (timeout) {
8571 swsm = CSR_READ(sc, WMREG_SWSM);
8572
8573 if ((swsm & SWSM_SMBI) == 0)
8574 break;
8575
8576 delay(50);
8577 timeout--;
8578 }
8579
8580 if (timeout == 0) {
8581 aprint_error_dev(sc->sc_dev,
8582 "could not acquire SWSM SMBI\n");
8583 return 1;
8584 }
8585 }
8586
8587 /* Get the FW semaphore. */
8588 timeout = sc->sc_nvm_wordsize + 1;
8589 while (timeout) {
8590 swsm = CSR_READ(sc, WMREG_SWSM);
8591 swsm |= SWSM_SWESMBI;
8592 CSR_WRITE(sc, WMREG_SWSM, swsm);
8593 /* If we managed to set the bit we got the semaphore. */
8594 swsm = CSR_READ(sc, WMREG_SWSM);
8595 if (swsm & SWSM_SWESMBI)
8596 break;
8597
8598 delay(50);
8599 timeout--;
8600 }
8601
8602 if (timeout == 0) {
8603 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
8604 /* Release semaphores */
8605 wm_put_swsm_semaphore(sc);
8606 return 1;
8607 }
8608 return 0;
8609 }
8610
8611 static void
8612 wm_put_swsm_semaphore(struct wm_softc *sc)
8613 {
8614 uint32_t swsm;
8615
8616 swsm = CSR_READ(sc, WMREG_SWSM);
8617 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
8618 CSR_WRITE(sc, WMREG_SWSM, swsm);
8619 }
8620
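/*
 * wm_get_swfw_semaphore:
 *
 *	Acquire the software half of the SW_FW_SYNC semaphore for the
 *	resources in "mask", retrying while the firmware holds them.
 */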
8621 static int
8622 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8623 {
8624 uint32_t swfw_sync;
8625 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
8626 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
8627 	int timeout;
8628
8629 for (timeout = 0; timeout < 200; timeout++) {
8630 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8631 if (wm_get_swsm_semaphore(sc)) {
8632 aprint_error_dev(sc->sc_dev,
8633 "%s: failed to get semaphore\n",
8634 __func__);
8635 return 1;
8636 }
8637 }
8638 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8639 if ((swfw_sync & (swmask | fwmask)) == 0) {
8640 swfw_sync |= swmask;
8641 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8642 if (sc->sc_flags & WM_F_LOCK_SWSM)
8643 wm_put_swsm_semaphore(sc);
8644 return 0;
8645 }
8646 if (sc->sc_flags & WM_F_LOCK_SWSM)
8647 wm_put_swsm_semaphore(sc);
8648 delay(5000);
8649 }
8650 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
8651 device_xname(sc->sc_dev), mask, swfw_sync);
8652 return 1;
8653 }
8654
8655 static void
8656 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8657 {
8658 uint32_t swfw_sync;
8659
8660 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8661 while (wm_get_swsm_semaphore(sc) != 0)
8662 continue;
8663 }
8664 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8665 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
8666 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8667 if (sc->sc_flags & WM_F_LOCK_SWSM)
8668 wm_put_swsm_semaphore(sc);
8669 }
8670
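/*
 * wm_get_swfwhw_semaphore:
 *
 *	Acquire the software flag (SWFLAG) in the EXTCNFCTR register.
 *	Returns 0 on success, 1 on timeout.
 */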
8671 static int
8672 wm_get_swfwhw_semaphore(struct wm_softc *sc)
8673 {
8674 uint32_t ext_ctrl;
8675 	int timeout;
8676
8677 for (timeout = 0; timeout < 200; timeout++) {
8678 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8679 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
8680 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8681
8682 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8683 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
8684 return 0;
8685 delay(5000);
8686 }
8687 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
8688 device_xname(sc->sc_dev), ext_ctrl);
8689 return 1;
8690 }
8691
8692 static void
8693 wm_put_swfwhw_semaphore(struct wm_softc *sc)
8694 {
8695 uint32_t ext_ctrl;
8696 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8697 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
8698 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8699 }
8700
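/*
 * wm_get_hw_semaphore_82573:
 *
 *	Acquire MDIO ownership through EXTCNFCTR_MDIO_SW_OWNERSHIP.
 *	Returns 0 on success, -1 on timeout.
 */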
8701 static int
8702 wm_get_hw_semaphore_82573(struct wm_softc *sc)
8703 {
8704 int i = 0;
8705 uint32_t reg;
8706
8707 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8708 do {
8709 CSR_WRITE(sc, WMREG_EXTCNFCTR,
8710 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
8711 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8712 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
8713 break;
8714 delay(2*1000);
8715 i++;
8716 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
8717
8718 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
8719 wm_put_hw_semaphore_82573(sc);
8720 log(LOG_ERR, "%s: Driver can't access the PHY\n",
8721 device_xname(sc->sc_dev));
8722 return -1;
8723 }
8724
8725 return 0;
8726 }
8727
8728 static void
8729 wm_put_hw_semaphore_82573(struct wm_softc *sc)
8730 {
8731 uint32_t reg;
8732
8733 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8734 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
8735 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8736 }
8737
8738 /*
8739 * Management mode and power management related subroutines.
8740 * BMC, AMT, suspend/resume and EEE.
8741 */
8742
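/*
 * wm_check_mng_mode:
 *
 *	Return non-zero if management firmware is active, using the
 *	chip-specific check.
 */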
8743 static int
8744 wm_check_mng_mode(struct wm_softc *sc)
8745 {
8746 int rv;
8747
8748 switch (sc->sc_type) {
8749 case WM_T_ICH8:
8750 case WM_T_ICH9:
8751 case WM_T_ICH10:
8752 case WM_T_PCH:
8753 case WM_T_PCH2:
8754 case WM_T_PCH_LPT:
8755 rv = wm_check_mng_mode_ich8lan(sc);
8756 break;
8757 case WM_T_82574:
8758 case WM_T_82583:
8759 rv = wm_check_mng_mode_82574(sc);
8760 break;
8761 case WM_T_82571:
8762 case WM_T_82572:
8763 case WM_T_82573:
8764 case WM_T_80003:
8765 rv = wm_check_mng_mode_generic(sc);
8766 break;
8767 default:
8768 		/* nothing to do */
8769 rv = 0;
8770 break;
8771 }
8772
8773 return rv;
8774 }
8775
8776 static int
8777 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
8778 {
8779 uint32_t fwsm;
8780
8781 fwsm = CSR_READ(sc, WMREG_FWSM);
8782
8783 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
8784 return 1;
8785
8786 return 0;
8787 }
8788
8789 static int
8790 wm_check_mng_mode_82574(struct wm_softc *sc)
8791 {
8792 uint16_t data;
8793
8794 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
8795
8796 if ((data & NVM_CFG2_MNGM_MASK) != 0)
8797 return 1;
8798
8799 return 0;
8800 }
8801
8802 static int
8803 wm_check_mng_mode_generic(struct wm_softc *sc)
8804 {
8805 uint32_t fwsm;
8806
8807 fwsm = CSR_READ(sc, WMREG_FWSM);
8808
8809 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
8810 return 1;
8811
8812 return 0;
8813 }
8814
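/*
 * wm_enable_mng_pass_thru:
 *
 *	Return 1 if manageability pass-through to the host should be
 *	enabled: firmware must be present, TCO reception must be on and
 *	the firmware mode checks must pass.
 */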
8815 static int
8816 wm_enable_mng_pass_thru(struct wm_softc *sc)
8817 {
8818 uint32_t manc, fwsm, factps;
8819
8820 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
8821 return 0;
8822
8823 manc = CSR_READ(sc, WMREG_MANC);
8824
8825 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
8826 device_xname(sc->sc_dev), manc));
8827 if ((manc & MANC_RECV_TCO_EN) == 0)
8828 return 0;
8829
8830 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
8831 fwsm = CSR_READ(sc, WMREG_FWSM);
8832 factps = CSR_READ(sc, WMREG_FACTPS);
8833 if (((factps & FACTPS_MNGCG) == 0)
8834 && ((fwsm & FWSM_MODE_MASK)
8835 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
8836 return 1;
8837 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
8838 uint16_t data;
8839
8840 factps = CSR_READ(sc, WMREG_FACTPS);
8841 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
8842 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
8843 device_xname(sc->sc_dev), factps, data));
8844 if (((factps & FACTPS_MNGCG) == 0)
8845 && ((data & NVM_CFG2_MNGM_MASK)
8846 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
8847 return 1;
8848 } else if (((manc & MANC_SMBUS_EN) != 0)
8849 && ((manc & MANC_ASF_EN) == 0))
8850 return 1;
8851
8852 return 0;
8853 }
8854
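/*
 * wm_check_reset_block:
 *
 *	Return 0 if a PHY reset is currently allowed, -1 if the firmware
 *	blocks it.
 */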
8855 static int
8856 wm_check_reset_block(struct wm_softc *sc)
8857 {
8858 uint32_t reg;
8859
8860 switch (sc->sc_type) {
8861 case WM_T_ICH8:
8862 case WM_T_ICH9:
8863 case WM_T_ICH10:
8864 case WM_T_PCH:
8865 case WM_T_PCH2:
8866 case WM_T_PCH_LPT:
8867 reg = CSR_READ(sc, WMREG_FWSM);
8868 if ((reg & FWSM_RSPCIPHY) != 0)
8869 return 0;
8870 else
8871 return -1;
8873 case WM_T_82571:
8874 case WM_T_82572:
8875 case WM_T_82573:
8876 case WM_T_82574:
8877 case WM_T_82583:
8878 case WM_T_80003:
8879 reg = CSR_READ(sc, WMREG_MANC);
8880 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
8881 return -1;
8882 else
8883 return 0;
8885 default:
8886 /* no problem */
8887 break;
8888 }
8889
8890 return 0;
8891 }
8892
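/*
 * wm_get_hw_control:
 *
 *	Tell the firmware that the driver has taken over the device by
 *	setting the DRV_LOAD bit (in SWSM or CTRL_EXT, by chip type).
 */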
8893 static void
8894 wm_get_hw_control(struct wm_softc *sc)
8895 {
8896 uint32_t reg;
8897
8898 switch (sc->sc_type) {
8899 case WM_T_82573:
8900 reg = CSR_READ(sc, WMREG_SWSM);
8901 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
8902 break;
8903 case WM_T_82571:
8904 case WM_T_82572:
8905 case WM_T_82574:
8906 case WM_T_82583:
8907 case WM_T_80003:
8908 case WM_T_ICH8:
8909 case WM_T_ICH9:
8910 case WM_T_ICH10:
8911 case WM_T_PCH:
8912 case WM_T_PCH2:
8913 case WM_T_PCH_LPT:
8914 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8915 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
8916 break;
8917 default:
8918 break;
8919 }
8920 }
8921
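/*
 * wm_release_hw_control:
 *
 *	Return control of the device to the firmware by clearing the
 *	DRV_LOAD bit.
 */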
8922 static void
8923 wm_release_hw_control(struct wm_softc *sc)
8924 {
8925 uint32_t reg;
8926
8927 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
8928 return;
8929
8930 if (sc->sc_type == WM_T_82573) {
8931 reg = CSR_READ(sc, WMREG_SWSM);
8932 reg &= ~SWSM_DRV_LOAD;
8933 		CSR_WRITE(sc, WMREG_SWSM, reg);
8934 } else {
8935 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8936 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
8937 }
8938 }
8939
8940 static void
8941 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
8942 {
8943 uint32_t reg;
8944
8945 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8946
8947 if (on != 0)
8948 reg |= EXTCNFCTR_GATE_PHY_CFG;
8949 else
8950 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
8951
8952 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8953 }
8954
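/*
 * wm_smbustopci:
 *
 *	Switch the PHY from SMBus to PCIe mode by toggling the LANPHYPC
 *	override bits when no valid firmware is present.
 */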
8955 static void
8956 wm_smbustopci(struct wm_softc *sc)
8957 {
8958 uint32_t fwsm;
8959
8960 fwsm = CSR_READ(sc, WMREG_FWSM);
8961 if (((fwsm & FWSM_FW_VALID) == 0)
8962 	    && (wm_check_reset_block(sc) == 0)) {
8963 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8964 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8965 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8966 CSR_WRITE_FLUSH(sc);
8967 delay(10);
8968 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8969 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8970 CSR_WRITE_FLUSH(sc);
8971 delay(50*1000);
8972
8973 /*
8974 * Gate automatic PHY configuration by hardware on non-managed
8975 * 82579
8976 */
8977 if (sc->sc_type == WM_T_PCH2)
8978 wm_gate_hw_phy_config_ich8lan(sc, 1);
8979 }
8980 }
8981
8982 static void
8983 wm_init_manageability(struct wm_softc *sc)
8984 {
8985
8986 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8987 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8988 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8989
8990 /* Disable hardware interception of ARP */
8991 manc &= ~MANC_ARP_EN;
8992
8993 /* Enable receiving management packets to the host */
8994 if (sc->sc_type >= WM_T_82571) {
8995 manc |= MANC_EN_MNG2HOST;
8996 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
8997 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
8999 }
9000
9001 CSR_WRITE(sc, WMREG_MANC, manc);
9002 }
9003 }
9004
9005 static void
9006 wm_release_manageability(struct wm_softc *sc)
9007 {
9008
9009 if (sc->sc_flags & WM_F_HAS_MANAGE) {
9010 uint32_t manc = CSR_READ(sc, WMREG_MANC);
9011
9012 manc |= MANC_ARP_EN;
9013 if (sc->sc_type >= WM_T_82571)
9014 manc &= ~MANC_EN_MNG2HOST;
9015
9016 CSR_WRITE(sc, WMREG_MANC, manc);
9017 }
9018 }
9019
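/*
 * wm_get_wakeup:
 *
 *	Set the WM_F_HAS_AMT, WM_F_ARC_SUBSYS_VALID,
 *	WM_F_ASF_FIRMWARE_PRES and WM_F_HAS_MANAGE flags from the chip
 *	type and the firmware state.
 */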
9020 static void
9021 wm_get_wakeup(struct wm_softc *sc)
9022 {
9023
9024 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
9025 switch (sc->sc_type) {
9026 case WM_T_82573:
9027 case WM_T_82583:
9028 sc->sc_flags |= WM_F_HAS_AMT;
9029 /* FALLTHROUGH */
9030 case WM_T_80003:
9031 case WM_T_82541:
9032 case WM_T_82547:
9033 case WM_T_82571:
9034 case WM_T_82572:
9035 case WM_T_82574:
9036 case WM_T_82575:
9037 case WM_T_82576:
9038 case WM_T_82580:
9039 case WM_T_I350:
9040 case WM_T_I354:
9041 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
9042 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
9043 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9044 break;
9045 case WM_T_ICH8:
9046 case WM_T_ICH9:
9047 case WM_T_ICH10:
9048 case WM_T_PCH:
9049 case WM_T_PCH2:
9050 case WM_T_PCH_LPT:
9051 sc->sc_flags |= WM_F_HAS_AMT;
9052 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9053 break;
9054 default:
9055 break;
9056 }
9057
9058 /* 1: HAS_MANAGE */
9059 if (wm_enable_mng_pass_thru(sc) != 0)
9060 sc->sc_flags |= WM_F_HAS_MANAGE;
9061
9062 #ifdef WM_DEBUG
9063 printf("\n");
9064 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
9065 printf("HAS_AMT,");
9066 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
9067 printf("ARC_SUBSYS_VALID,");
9068 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
9069 printf("ASF_FIRMWARE_PRES,");
9070 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
9071 printf("HAS_MANAGE,");
9072 printf("\n");
9073 #endif
9074 /*
9075 	 * Note that the WOL flag is set after the EEPROM-related reset
9076 	 * code has run.
9077 */
9078 }
9079
9080 #ifdef WM_WOL
9081 /* WOL in the newer chipset interfaces (pchlan) */
9082 static void
9083 wm_enable_phy_wakeup(struct wm_softc *sc)
9084 {
9085 #if 0
9086 uint16_t preg;
9087
9088 /* Copy MAC RARs to PHY RARs */
9089
9090 /* Copy MAC MTA to PHY MTA */
9091
9092 /* Configure PHY Rx Control register */
9093
9094 /* Enable PHY wakeup in MAC register */
9095
9096 /* Configure and enable PHY wakeup in PHY registers */
9097
9098 /* Activate PHY wakeup */
9099
9100 /* XXX */
9101 #endif
9102 }
9103
9104 /* Power down workaround on D3 */
9105 static void
9106 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
9107 {
9108 uint32_t reg;
9109 int i;
9110
9111 for (i = 0; i < 2; i++) {
9112 /* Disable link */
9113 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9114 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9115 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9116
9117 /*
9118 * Call gig speed drop workaround on Gig disable before
9119 * accessing any PHY registers
9120 */
9121 if (sc->sc_type == WM_T_ICH8)
9122 wm_gig_downshift_workaround_ich8lan(sc);
9123
9124 /* Write VR power-down enable */
9125 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9126 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9127 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
9128 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
9129
9130 /* Read it back and test */
9131 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9132 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9133 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
9134 break;
9135
9136 /* Issue PHY reset and repeat at most one more time */
9137 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9138 }
9139 }
9140
9141 static void
9142 wm_enable_wakeup(struct wm_softc *sc)
9143 {
9144 uint32_t reg, pmreg;
9145 pcireg_t pmode;
9146
9147 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
9148 &pmreg, NULL) == 0)
9149 return;
9150
9151 /* Advertise the wakeup capability */
9152 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
9153 | CTRL_SWDPIN(3));
9154 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
9155
9156 /* ICH workaround */
9157 switch (sc->sc_type) {
9158 case WM_T_ICH8:
9159 case WM_T_ICH9:
9160 case WM_T_ICH10:
9161 case WM_T_PCH:
9162 case WM_T_PCH2:
9163 case WM_T_PCH_LPT:
9164 /* Disable gig during WOL */
9165 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9166 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
9167 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9168 if (sc->sc_type == WM_T_PCH)
9169 wm_gmii_reset(sc);
9170
9171 /* Power down workaround */
9172 if (sc->sc_phytype == WMPHY_82577) {
9173 struct mii_softc *child;
9174
9175 /* Assume that the PHY is copper */
9176 child = LIST_FIRST(&sc->sc_mii.mii_phys);
9177 if (child->mii_mpd_rev <= 2)
9178 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
9179 (768 << 5) | 25, 0x0444); /* magic num */
9180 }
9181 break;
9182 default:
9183 break;
9184 }
9185
9186 /* Keep the laser running on fiber adapters */
9187 if (((sc->sc_mediatype & WMP_F_FIBER) != 0)
9188 || (sc->sc_mediatype & WMP_F_SERDES) != 0) {
9189 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9190 reg |= CTRL_EXT_SWDPIN(3);
9191 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9192 }
9193
9194 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
9195 #if 0 /* for the multicast packet */
9196 reg |= WUFC_MC;
9197 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
9198 #endif
9199
9200 if (sc->sc_type == WM_T_PCH) {
9201 wm_enable_phy_wakeup(sc);
9202 } else {
9203 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
9204 CSR_WRITE(sc, WMREG_WUFC, reg);
9205 }
9206
9207 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9208 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9209 || (sc->sc_type == WM_T_PCH2))
9210 && (sc->sc_phytype == WMPHY_IGP_3))
9211 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
9212
9213 /* Request PME */
9214 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
9215 #if 0
9216 /* Disable WOL */
9217 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
9218 #else
9219 /* For WOL */
9220 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
9221 #endif
9222 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
9223 }
9224 #endif /* WM_WOL */
9225
9226 /* EEE */
9227
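/*
 * wm_set_eee_i350:
 *
 *	Enable or disable Energy Efficient Ethernet on I350 class chips
 *	according to the WM_F_EEE flag.
 */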
9228 static void
9229 wm_set_eee_i350(struct wm_softc *sc)
9230 {
9231 uint32_t ipcnfg, eeer;
9232
9233 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
9234 eeer = CSR_READ(sc, WMREG_EEER);
9235
9236 if ((sc->sc_flags & WM_F_EEE) != 0) {
9237 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9238 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
9239 | EEER_LPI_FC);
9240 } else {
9241 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9242 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
9243 | EEER_LPI_FC);
9244 }
9245
9246 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
9247 CSR_WRITE(sc, WMREG_EEER, eeer);
9248 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
9249 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
9250 }
9251
9252 /*
9253 * Workarounds (mainly PHY related).
9254 * Basically, PHY's workarounds are in the PHY drivers.
9255 */
9256
9257 /* Work-around for 82566 Kumeran PCS lock loss */
9258 static void
9259 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
9260 {
9261 int miistatus, active, i;
9262 int reg;
9263
9264 miistatus = sc->sc_mii.mii_media_status;
9265
9266 /* If the link is not up, do nothing */
9267 	if ((miistatus & IFM_ACTIVE) == 0)
9268 return;
9269
9270 active = sc->sc_mii.mii_media_active;
9271
9272 /* Nothing to do if the link is other than 1Gbps */
9273 if (IFM_SUBTYPE(active) != IFM_1000_T)
9274 return;
9275
9276 for (i = 0; i < 10; i++) {
9277 /* read twice */
9278 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9279 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9280 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
9281 goto out; /* GOOD! */
9282
9283 /* Reset the PHY */
9284 wm_gmii_reset(sc);
9285 delay(5*1000);
9286 }
9287
9288 /* Disable GigE link negotiation */
9289 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9290 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9291 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9292
9293 /*
9294 * Call gig speed drop workaround on Gig disable before accessing
9295 * any PHY registers.
9296 */
9297 wm_gig_downshift_workaround_ich8lan(sc);
9298
9299 out:
9300 return;
9301 }
9302
9303 /* WOL from S5 stops working */
9304 static void
9305 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
9306 {
9307 uint16_t kmrn_reg;
9308
9309 /* Only for igp3 */
9310 if (sc->sc_phytype == WMPHY_IGP_3) {
9311 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
9312 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
9313 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9314 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
9315 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9316 }
9317 }
9318
9319 /*
9320 * Workaround for pch's PHYs
9321 * XXX should be moved to new PHY driver?
9322 */
9323 static void
9324 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
9325 {
9326 if (sc->sc_phytype == WMPHY_82577)
9327 wm_set_mdio_slow_mode_hv(sc);
9328
9329 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
9330
9331 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
9332
9333 /* 82578 */
9334 if (sc->sc_phytype == WMPHY_82578) {
9335 /* PCH rev. < 3 */
9336 if (sc->sc_rev < 3) {
9337 /* XXX 6 bit shift? Why? Is it page2? */
9338 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
9339 0x66c0);
9340 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
9341 0xffff);
9342 }
9343
9344 /* XXX phy rev. < 2 */
9345 }
9346
9347 /* Select page 0 */
9348
9349 /* XXX acquire semaphore */
9350 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
9351 /* XXX release semaphore */
9352
9353 /*
9354 	 * Configure the K1 Si workaround during PHY reset, assuming there
9355 	 * is a link, so that K1 is disabled if the link runs at 1Gbps.
9356 */
9357 wm_k1_gig_workaround_hv(sc, 1);
9358 }
9359
9360 static void
9361 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
9362 {
9363
9364 wm_set_mdio_slow_mode_hv(sc);
9365 }
9366
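/*
 * wm_k1_gig_workaround_hv:
 *
 *	Link stall fix: when a link is assumed ("link" != 0), disable K1
 *	and apply the link-up fix; otherwise apply the link-down fix and
 *	keep the K1 setting from the NVM.
 */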
9367 static void
9368 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
9369 {
9370 int k1_enable = sc->sc_nvm_k1_enabled;
9371
9372 /* XXX acquire semaphore */
9373
9374 if (link) {
9375 k1_enable = 0;
9376
9377 /* Link stall fix for link up */
9378 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
9379 } else {
9380 /* Link stall fix for link down */
9381 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
9382 }
9383
9384 wm_configure_k1_ich8lan(sc, k1_enable);
9385
9386 /* XXX release semaphore */
9387 }
9388
9389 static void
9390 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
9391 {
9392 uint32_t reg;
9393
9394 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
9395 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
9396 reg | HV_KMRN_MDIO_SLOW);
9397 }
9398
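/*
 * wm_configure_k1_ich8lan:
 *
 *	Enable or disable the K1 power state over the Kumeran interface,
 *	briefly forcing the MAC speed so that the change takes effect.
 */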
9399 static void
9400 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
9401 {
9402 uint32_t ctrl, ctrl_ext, tmp;
9403 uint16_t kmrn_reg;
9404
9405 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
9406
9407 if (k1_enable)
9408 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
9409 else
9410 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
9411
9412 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
9413
9414 delay(20);
9415
9416 ctrl = CSR_READ(sc, WMREG_CTRL);
9417 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9418
9419 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
9420 tmp |= CTRL_FRCSPD;
9421
9422 CSR_WRITE(sc, WMREG_CTRL, tmp);
9423 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
9424 CSR_WRITE_FLUSH(sc);
9425 delay(20);
9426
9427 CSR_WRITE(sc, WMREG_CTRL, ctrl);
9428 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9429 CSR_WRITE_FLUSH(sc);
9430 delay(20);
9431 }
9432
9433 /* Special case: the 82575 needs to do manual init ... */
9434 static void
9435 wm_reset_init_script_82575(struct wm_softc *sc)
9436 {
9437 /*
9438 	 * Remark: this is untested code; we have no board without EEPROM.
9439 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
9440 */
9441
9442 /* SerDes configuration via SERDESCTRL */
9443 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
9444 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
9445 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
9446 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
9447
9448 /* CCM configuration via CCMCTL register */
9449 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
9450 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
9451
9452 /* PCIe lanes configuration */
9453 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
9454 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
9455 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
9456 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
9457
9458 /* PCIe PLL Configuration */
9459 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
9460 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
9461 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
9462 }
9463