/*	$NetBSD: if_wm.c,v 1.304 2014/10/10 11:04:21 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- EEE (Energy-Efficient Ethernet)
 *	- MSI/MSI-X
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.304 2014/10/10 11:04:21 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define	WM_MPSAFE	1
#endif
/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */
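
/*
 * Illustrative sketch (not driver code): because both ring sizes are
 * powers of two, the index macros above wrap with a simple mask
 * instead of a modulo.  Assuming WM_NTXDESC(sc) == 4096:
 *
 *	WM_NEXTTX(sc, 100)  == 101
 *	WM_NEXTTX(sc, 4095) == (4096 & 4095) == 0
 */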

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
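
/*
 * Illustrative sketch: the WM_CD*OFF() macros turn a descriptor index
 * into a byte offset within the control-data clump, e.g.
 *
 *	WM_CDRXOFF(3) == offsetof(struct wm_control_data_82544,
 *	    wcd_rxdescs[3])
 *
 * Added to the clump's single DMA base address (see WM_CDRXADDR()
 * below), this yields the address handed to the chip for that
 * descriptor.
 */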

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define	WM_TX_LOCK(_sc)		if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define	WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define	WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define	WM_RX_LOCK(_sc)		if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define	WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define	WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define	WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define	WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define	WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
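
/*
 * Typical usage sketch (illustrative):
 *
 *	WM_TX_LOCK(sc);
 *	if (!sc->sc_stopping)
 *		wm_start_locked(ifp);
 *	WM_TX_UNLOCK(sc);
 *
 * The NULL checks let the same code run in the non-MPSAFE
 * configuration, where no mutexes are allocated.
 */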

#ifdef WM_MPSAFE
#define	CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define	CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
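
/*
 * Illustrative sketch: sc_rxtailp always points at the m_next slot of
 * the last mbuf in the partially assembled chain (or at sc_rxhead when
 * the chain is empty), so each link is O(1):
 *
 *	WM_RXCHAIN_RESET(sc);	   head = NULL, tailp = &head
 *	WM_RXCHAIN_LINK(sc, m1);   head = m1, tailp = &m1->m_next
 *	WM_RXCHAIN_LINK(sc, m2);   m1->m_next = m2, tailp = &m2->m_next
 */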

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
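
/*
 * Illustrative sketch: on a platform with a 64-bit bus_addr_t, a
 * descriptor address such as 0x0000000123456000 is split for the
 * chip's 32-bit register pairs as
 *
 *	WM_CDTXADDR_LO() == 0x23456000	(low 32 bits)
 *	WM_CDTXADDR_HI() == 0x00000001	(high 32 bits)
 *
 * On 32-bit platforms the macros yield 0 for the high word; the
 * (uint64_t) cast keeps the 32-bit shift well defined.
 */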

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
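
/*
 * Worked example (illustrative): with WM_NTXDESC(sc) == 4096,
 * WM_CDTXSYNC(sc, 4094, 4, ops) wraps around the end of the ring,
 * so it is performed as two bus_dmamap_sync() calls:
 *
 *	descriptors [4094, 4096)	tail of the ring
 *	descriptors [0, 2)		remainder at the head
 */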

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
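
/*
 * Illustrative sketch of the 2-byte "scoot": the Ethernet header is
 * 14 bytes, so starting the frame at offset 2 places the IP header on
 * a 4-byte boundary:
 *
 *	buffer + 0:	2 bytes of padding (sc_align_tweak)
 *	buffer + 2:	14-byte Ethernet header
 *	buffer + 16:	IP header, 4-byte aligned
 */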

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_mediainit(struct wm_softc *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* Lock, detect NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Most PHY-specific workarounds live in the PHY drivers themselves.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		0x00
#define	WMP_F_FIBER		0x01
#define	WMP_F_COPPER		0x02
#define	WMP_F_SERDES		0x03 /* Internal SERDES */
#define	WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Connection",
	  WM_T_I354,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
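
/*
 * Usage sketch (illustrative, assuming a caller in this file): the
 * I/O BAR exposes an address/data register pair at offsets 0 and 4,
 * so an indirect register write is simply
 *
 *	wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
 *
 * which has the same effect as CSR_WRITE() on that register but goes
 * through I/O space; it is only used for chip-bug workarounds.
 */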

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
			 * that is not a problem, because those newer chips
			 * don't have the bugs this mapping works around.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
#ifdef WM_MPSAFE
	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
#endif
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)
		    && (sc->sc_type != WM_T_PCH2)
		    && (sc->sc_type != WM_T_PCH_LPT)) {
			/* ICH* and PCH* have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
			    NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
1569 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1570 sc->sc_pcixe_capoff + PCIX_CMD);
1571 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1572 sc->sc_pcixe_capoff + PCIX_STATUS);
1573
1574 bytecnt =
1575 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1576 PCIX_CMD_BYTECNT_SHIFT;
1577 maxb =
1578 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1579 PCIX_STATUS_MAXB_SHIFT;
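				/*
				 * MMRBC is encoded as a power of two,
				 * 512 << n bytes.  E.g. if the BIOS left
				 * bytecnt at 3 (4096 bytes) but the bus
				 * only supports maxb == 1 (1024 bytes),
				 * the command register is clamped down
				 * to 1024 below.
				 */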
1580 if (bytecnt > maxb) {
1581 aprint_verbose_dev(sc->sc_dev,
1582 "resetting PCI-X MMRBC: %d -> %d\n",
1583 512 << bytecnt, 512 << maxb);
1584 pcix_cmd = (pcix_cmd &
1585 ~PCIX_CMD_BYTECNT_MASK) |
1586 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1587 pci_conf_write(pa->pa_pc, pa->pa_tag,
1588 sc->sc_pcixe_capoff + PCIX_CMD,
1589 pcix_cmd);
1590 }
1591 }
1592 }
1593 /*
1594		 * The quad port adapter is special; it has a PCI-X to PCI-X
1595 * bridge on the board, and can run the secondary bus at
1596 * a higher speed.
1597 */
1598 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1599 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1600 : 66;
1601 } else if (sc->sc_flags & WM_F_PCIX) {
1602 switch (reg & STATUS_PCIXSPD_MASK) {
1603 case STATUS_PCIXSPD_50_66:
1604 sc->sc_bus_speed = 66;
1605 break;
1606 case STATUS_PCIXSPD_66_100:
1607 sc->sc_bus_speed = 100;
1608 break;
1609 case STATUS_PCIXSPD_100_133:
1610 sc->sc_bus_speed = 133;
1611 break;
1612 default:
1613 aprint_error_dev(sc->sc_dev,
1614 "unknown PCIXSPD %d; assuming 66MHz\n",
1615 reg & STATUS_PCIXSPD_MASK);
1616 sc->sc_bus_speed = 66;
1617 break;
1618 }
1619 } else
1620 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1621 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1622 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1623 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1624 }
1625
1626 /*
1627 * Allocate the control data structures, and create and load the
1628 * DMA map for it.
1629 *
1630 * NOTE: All Tx descriptors must be in the same 4G segment of
1631 * memory. So must Rx descriptors. We simplify by allocating
1632 * both sets within the same 4G segment.
1633 */
1634 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1635 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1636 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1637 sizeof(struct wm_control_data_82542) :
1638 sizeof(struct wm_control_data_82544);
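	/*
	 * The 4GB boundary argument to bus_dmamem_alloc() below enforces
	 * the same-4G-segment requirement described above: no allocated
	 * segment may cross a 4GB address boundary.
	 */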
1639 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1640 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1641 &sc->sc_cd_rseg, 0)) != 0) {
1642 aprint_error_dev(sc->sc_dev,
1643 "unable to allocate control data, error = %d\n",
1644 error);
1645 goto fail_0;
1646 }
1647
1648 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1649 sc->sc_cd_rseg, sc->sc_cd_size,
1650 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1651 aprint_error_dev(sc->sc_dev,
1652 "unable to map control data, error = %d\n", error);
1653 goto fail_1;
1654 }
1655
1656 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1657 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1658 aprint_error_dev(sc->sc_dev,
1659 "unable to create control data DMA map, error = %d\n",
1660 error);
1661 goto fail_2;
1662 }
1663
1664 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1665 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1666 aprint_error_dev(sc->sc_dev,
1667 "unable to load control data DMA map, error = %d\n",
1668 error);
1669 goto fail_3;
1670 }
1671
1672 /* Create the transmit buffer DMA maps. */
1673 WM_TXQUEUELEN(sc) =
1674 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1675 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1676 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1677 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1678 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1679 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1680 aprint_error_dev(sc->sc_dev,
1681 "unable to create Tx DMA map %d, error = %d\n",
1682 i, error);
1683 goto fail_4;
1684 }
1685 }
1686
1687 /* Create the receive buffer DMA maps. */
1688 for (i = 0; i < WM_NRXDESC; i++) {
1689 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1690 MCLBYTES, 0, 0,
1691 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1692 aprint_error_dev(sc->sc_dev,
1693 "unable to create Rx DMA map %d error = %d\n",
1694 i, error);
1695 goto fail_5;
1696 }
1697 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1698 }
1699
1700 /* clear interesting stat counters */
1701 CSR_READ(sc, WMREG_COLC);
1702 CSR_READ(sc, WMREG_RXERRC);
1703
1704 /* get PHY control from SMBus to PCIe */
1705 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1706 || (sc->sc_type == WM_T_PCH_LPT))
1707 wm_smbustopci(sc);
1708
1709 /* Reset the chip to a known state. */
1710 wm_reset(sc);
1711
1712 /* Get some information about the EEPROM. */
1713 switch (sc->sc_type) {
1714 case WM_T_82542_2_0:
1715 case WM_T_82542_2_1:
1716 case WM_T_82543:
1717 case WM_T_82544:
1718 /* Microwire */
1719 sc->sc_nvm_wordsize = 64;
1720 sc->sc_nvm_addrbits = 6;
1721 break;
1722 case WM_T_82540:
1723 case WM_T_82545:
1724 case WM_T_82545_3:
1725 case WM_T_82546:
1726 case WM_T_82546_3:
1727 /* Microwire */
1728 reg = CSR_READ(sc, WMREG_EECD);
1729 if (reg & EECD_EE_SIZE) {
1730 sc->sc_nvm_wordsize = 256;
1731 sc->sc_nvm_addrbits = 8;
1732 } else {
1733 sc->sc_nvm_wordsize = 64;
1734 sc->sc_nvm_addrbits = 6;
1735 }
1736 sc->sc_flags |= WM_F_LOCK_EECD;
1737 break;
1738 case WM_T_82541:
1739 case WM_T_82541_2:
1740 case WM_T_82547:
1741 case WM_T_82547_2:
1742 reg = CSR_READ(sc, WMREG_EECD);
1743 if (reg & EECD_EE_TYPE) {
1744 /* SPI */
1745 sc->sc_flags |= WM_F_EEPROM_SPI;
1746 wm_nvm_set_addrbits_size_eecd(sc);
1747 } else {
1748 /* Microwire */
1749 if ((reg & EECD_EE_ABITS) != 0) {
1750 sc->sc_nvm_wordsize = 256;
1751 sc->sc_nvm_addrbits = 8;
1752 } else {
1753 sc->sc_nvm_wordsize = 64;
1754 sc->sc_nvm_addrbits = 6;
1755 }
1756 }
1757 sc->sc_flags |= WM_F_LOCK_EECD;
1758 break;
1759 case WM_T_82571:
1760 case WM_T_82572:
1761 /* SPI */
1762 sc->sc_flags |= WM_F_EEPROM_SPI;
1763 wm_nvm_set_addrbits_size_eecd(sc);
1764 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1765 break;
1766 case WM_T_82573:
1767 sc->sc_flags |= WM_F_LOCK_SWSM;
1768 /* FALLTHROUGH */
1769 case WM_T_82574:
1770 case WM_T_82583:
1771 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1772 sc->sc_flags |= WM_F_EEPROM_FLASH;
1773 sc->sc_nvm_wordsize = 2048;
1774 } else {
1775 /* SPI */
1776 sc->sc_flags |= WM_F_EEPROM_SPI;
1777 wm_nvm_set_addrbits_size_eecd(sc);
1778 }
1779 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1780 break;
1781 case WM_T_82575:
1782 case WM_T_82576:
1783 case WM_T_82580:
1784 case WM_T_I350:
1785 case WM_T_I354:
1786 case WM_T_80003:
1787 /* SPI */
1788 sc->sc_flags |= WM_F_EEPROM_SPI;
1789 wm_nvm_set_addrbits_size_eecd(sc);
1790 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1791 | WM_F_LOCK_SWSM;
1792 break;
1793 case WM_T_ICH8:
1794 case WM_T_ICH9:
1795 case WM_T_ICH10:
1796 case WM_T_PCH:
1797 case WM_T_PCH2:
1798 case WM_T_PCH_LPT:
1799 /* FLASH */
1800 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1801 sc->sc_nvm_wordsize = 2048;
1802 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1803 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1804 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1805 aprint_error_dev(sc->sc_dev,
1806 "can't map FLASH registers\n");
1807 goto fail_5;
1808 }
1809 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1810 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1811 ICH_FLASH_SECTOR_SIZE;
1812 sc->sc_ich8_flash_bank_size =
1813 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1814 sc->sc_ich8_flash_bank_size -=
1815 (reg & ICH_GFPREG_BASE_MASK);
1816 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1817 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
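		/*
		 * GFPREG gives the flash region's base and limit in
		 * sector units.  The divide by two above apparently
		 * reflects that the region holds two flash banks, and
		 * the sizeof() converts bytes to 16-bit words, yielding
		 * the per-bank word count.
		 */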
1818 break;
1819 case WM_T_I210:
1820 case WM_T_I211:
1821 wm_nvm_set_addrbits_size_eecd(sc);
1822 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1823 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1824 break;
1825 default:
1826 break;
1827 }
1828
1829 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1830 switch (sc->sc_type) {
1831 case WM_T_82571:
1832 case WM_T_82572:
1833 reg = CSR_READ(sc, WMREG_SWSM2);
1834 if ((reg & SWSM2_LOCK) != 0) {
1835 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1836 force_clear_smbi = true;
1837 } else
1838 force_clear_smbi = false;
1839 break;
1840 case WM_T_82573:
1841 case WM_T_82574:
1842 case WM_T_82583:
1843 force_clear_smbi = true;
1844 break;
1845 default:
1846 force_clear_smbi = false;
1847 break;
1848 }
1849 if (force_clear_smbi) {
1850 reg = CSR_READ(sc, WMREG_SWSM);
1851 if ((reg & SWSM_SMBI) != 0)
1852 aprint_error_dev(sc->sc_dev,
1853 "Please update the Bootagent\n");
1854 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1855 }
1856
1857 /*
1858	 * Defer printing the EEPROM type until after verifying the checksum.
1859 * This allows the EEPROM type to be printed correctly in the case
1860 * that no EEPROM is attached.
1861 */
1862 /*
1863 * Validate the EEPROM checksum. If the checksum fails, flag
1864 * this for later, so we can fail future reads from the EEPROM.
1865 */
1866 if (wm_nvm_validate_checksum(sc)) {
1867 /*
1868		 * Check a second time, because some PCI-e parts fail the
1869		 * first check due to the link being in a sleep state.
1870 */
1871 if (wm_nvm_validate_checksum(sc))
1872 sc->sc_flags |= WM_F_EEPROM_INVALID;
1873 }
1874
1875 /* Set device properties (macflags) */
1876 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1877
1878 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1879 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1880 else {
1881 aprint_verbose_dev(sc->sc_dev, "%u words ",
1882 sc->sc_nvm_wordsize);
1883 if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1884 aprint_verbose("FLASH(HW)\n");
1885 } else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1886 aprint_verbose("FLASH\n");
1887 } else {
1888 if (sc->sc_flags & WM_F_EEPROM_SPI)
1889 eetype = "SPI";
1890 else
1891 eetype = "MicroWire";
1892 aprint_verbose("(%d address bits) %s EEPROM\n",
1893 sc->sc_nvm_addrbits, eetype);
1894 }
1895 }
1896
1897 switch (sc->sc_type) {
1898 case WM_T_82571:
1899 case WM_T_82572:
1900 case WM_T_82573:
1901 case WM_T_82574:
1902 case WM_T_82583:
1903 case WM_T_80003:
1904 case WM_T_ICH8:
1905 case WM_T_ICH9:
1906 case WM_T_ICH10:
1907 case WM_T_PCH:
1908 case WM_T_PCH2:
1909 case WM_T_PCH_LPT:
1910 if (wm_check_mng_mode(sc) != 0)
1911 wm_get_hw_control(sc);
1912 break;
1913 default:
1914 break;
1915 }
1916 wm_get_wakeup(sc);
1917 /*
1918 * Read the Ethernet address from the EEPROM, if not first found
1919 * in device properties.
1920 */
1921 ea = prop_dictionary_get(dict, "mac-address");
1922 if (ea != NULL) {
1923 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1924 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1925 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1926 } else {
1927 if (wm_read_mac_addr(sc, enaddr) != 0) {
1928 aprint_error_dev(sc->sc_dev,
1929 "unable to read Ethernet address\n");
1930 goto fail_5;
1931 }
1932 }
1933
1934 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1935 ether_sprintf(enaddr));
1936
1937 /*
1938 * Read the config info from the EEPROM, and set up various
1939 * bits in the control registers based on their contents.
1940 */
1941 pn = prop_dictionary_get(dict, "i82543-cfg1");
1942 if (pn != NULL) {
1943 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1944 cfg1 = (uint16_t) prop_number_integer_value(pn);
1945 } else {
1946 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
1947 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1948 goto fail_5;
1949 }
1950 }
1951
1952 pn = prop_dictionary_get(dict, "i82543-cfg2");
1953 if (pn != NULL) {
1954 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1955 cfg2 = (uint16_t) prop_number_integer_value(pn);
1956 } else {
1957 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
1958 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1959 goto fail_5;
1960 }
1961 }
1962
1963 /* check for WM_F_WOL */
1964 switch (sc->sc_type) {
1965 case WM_T_82542_2_0:
1966 case WM_T_82542_2_1:
1967 case WM_T_82543:
1968 /* dummy? */
1969 eeprom_data = 0;
1970 apme_mask = NVM_CFG3_APME;
1971 break;
1972 case WM_T_82544:
1973 apme_mask = NVM_CFG2_82544_APM_EN;
1974 eeprom_data = cfg2;
1975 break;
1976 case WM_T_82546:
1977 case WM_T_82546_3:
1978 case WM_T_82571:
1979 case WM_T_82572:
1980 case WM_T_82573:
1981 case WM_T_82574:
1982 case WM_T_82583:
1983 case WM_T_80003:
1984 default:
1985 apme_mask = NVM_CFG3_APME;
1986 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
1987 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
1988 break;
1989 case WM_T_82575:
1990 case WM_T_82576:
1991 case WM_T_82580:
1992 case WM_T_I350:
1993 case WM_T_I354: /* XXX ok? */
1994 case WM_T_ICH8:
1995 case WM_T_ICH9:
1996 case WM_T_ICH10:
1997 case WM_T_PCH:
1998 case WM_T_PCH2:
1999 case WM_T_PCH_LPT:
2000 /* XXX The funcid should be checked on some devices */
2001 apme_mask = WUC_APME;
2002 eeprom_data = CSR_READ(sc, WMREG_WUC);
2003 break;
2004 }
2005
2006	/* Check for the WM_F_WOL flag after the EEPROM settings are read */
2007 if ((eeprom_data & apme_mask) != 0)
2008 sc->sc_flags |= WM_F_WOL;
2009 #ifdef WM_DEBUG
2010 if ((sc->sc_flags & WM_F_WOL) != 0)
2011 printf("WOL\n");
2012 #endif
2013
2014 /*
2015 * XXX need special handling for some multiple port cards
2016	 * to disable a particular port.
2017 */
2018
2019 if (sc->sc_type >= WM_T_82544) {
2020 pn = prop_dictionary_get(dict, "i82543-swdpin");
2021 if (pn != NULL) {
2022 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2023 swdpin = (uint16_t) prop_number_integer_value(pn);
2024 } else {
2025 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2026 aprint_error_dev(sc->sc_dev,
2027 "unable to read SWDPIN\n");
2028 goto fail_5;
2029 }
2030 }
2031 }
2032
2033 if (cfg1 & NVM_CFG1_ILOS)
2034 sc->sc_ctrl |= CTRL_ILOS;
2035 if (sc->sc_type >= WM_T_82544) {
2036 sc->sc_ctrl |=
2037 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2038 CTRL_SWDPIO_SHIFT;
2039 sc->sc_ctrl |=
2040 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2041 CTRL_SWDPINS_SHIFT;
2042 } else {
2043 sc->sc_ctrl |=
2044 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2045 CTRL_SWDPIO_SHIFT;
2046 }
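	/*
	 * Both SWDPIO (pin direction) and SWDPINS (pin value) are 4-bit
	 * fields; the code above simply copies each nibble from the NVM
	 * word into the corresponding field of the CTRL register.
	 */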
2047
2048 #if 0
2049 if (sc->sc_type >= WM_T_82544) {
2050 if (cfg1 & NVM_CFG1_IPS0)
2051 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2052 if (cfg1 & NVM_CFG1_IPS1)
2053 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2054 sc->sc_ctrl_ext |=
2055 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2056 CTRL_EXT_SWDPIO_SHIFT;
2057 sc->sc_ctrl_ext |=
2058 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2059 CTRL_EXT_SWDPINS_SHIFT;
2060 } else {
2061 sc->sc_ctrl_ext |=
2062 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2063 CTRL_EXT_SWDPIO_SHIFT;
2064 }
2065 #endif
2066
2067 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2068 #if 0
2069 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2070 #endif
2071
2072 /*
2073 * Set up some register offsets that are different between
2074 * the i82542 and the i82543 and later chips.
2075 */
2076 if (sc->sc_type < WM_T_82543) {
2077 sc->sc_rdt_reg = WMREG_OLD_RDT0;
2078 sc->sc_tdt_reg = WMREG_OLD_TDT;
2079 } else {
2080 sc->sc_rdt_reg = WMREG_RDT;
2081 sc->sc_tdt_reg = WMREG_TDT;
2082 }
2083
2084 if (sc->sc_type == WM_T_PCH) {
2085 uint16_t val;
2086
2087 /* Save the NVM K1 bit setting */
2088 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2089
2090 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2091 sc->sc_nvm_k1_enabled = 1;
2092 else
2093 sc->sc_nvm_k1_enabled = 0;
2094 }
2095
2096 /*
2097 * Determine if we're TBI,GMII or SGMII mode, and initialize the
2098 * media structures accordingly.
2099 */
2100 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2101 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2102 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2103 || sc->sc_type == WM_T_82573
2104 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2105 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2106 wm_gmii_mediainit(sc, wmp->wmp_product);
2107 } else if (sc->sc_type < WM_T_82543 ||
2108 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2109 if (sc->sc_mediatype & WMP_F_COPPER) {
2110 aprint_error_dev(sc->sc_dev,
2111 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2112 sc->sc_mediatype = WMP_F_FIBER;
2113 }
2114 wm_tbi_mediainit(sc);
2115 } else {
2116 switch (sc->sc_type) {
2117 case WM_T_82575:
2118 case WM_T_82576:
2119 case WM_T_82580:
2120 case WM_T_I350:
2121 case WM_T_I354:
2122 case WM_T_I210:
2123 case WM_T_I211:
2124 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2125 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2126 switch (link_mode) {
2127 case CTRL_EXT_LINK_MODE_1000KX:
2128 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2129 sc->sc_mediatype = WMP_F_SERDES;
2130 break;
2131 case CTRL_EXT_LINK_MODE_SGMII:
2132 if (wm_sgmii_uses_mdio(sc)) {
2133 aprint_verbose_dev(sc->sc_dev,
2134 "SGMII(MDIO)\n");
2135 sc->sc_flags |= WM_F_SGMII;
2136 sc->sc_mediatype = WMP_F_COPPER;
2137 break;
2138 }
2139 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2140 /*FALLTHROUGH*/
2141 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2142 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2143 if (sc->sc_mediatype == WMP_F_UNKNOWN) {
2144 if (link_mode
2145 == CTRL_EXT_LINK_MODE_SGMII) {
2146 sc->sc_mediatype
2147 = WMP_F_COPPER;
2148 sc->sc_flags |= WM_F_SGMII;
2149 } else {
2150 sc->sc_mediatype
2151 = WMP_F_SERDES;
2152 aprint_verbose_dev(sc->sc_dev,
2153 "SERDES\n");
2154 }
2155 break;
2156 }
2157 if (sc->sc_mediatype == WMP_F_SERDES)
2158 aprint_verbose_dev(sc->sc_dev,
2159 "SERDES\n");
2160
2161 /* Change current link mode setting */
2162 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2163 switch (sc->sc_mediatype) {
2164 case WMP_F_COPPER:
2165 reg |= CTRL_EXT_LINK_MODE_SGMII;
2166 break;
2167 case WMP_F_SERDES:
2168 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2169 break;
2170 default:
2171 break;
2172 }
2173 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2174 break;
2175 case CTRL_EXT_LINK_MODE_GMII:
2176 default:
2177 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2178 sc->sc_mediatype = WMP_F_COPPER;
2179 break;
2180 }
2181
2183			if ((sc->sc_flags & WM_F_SGMII) != 0)
2184				reg |= CTRL_EXT_I2C_ENA;
2185			else
2186				reg &= ~CTRL_EXT_I2C_ENA;
2187 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2188
2189 if (sc->sc_mediatype == WMP_F_COPPER)
2190 wm_gmii_mediainit(sc, wmp->wmp_product);
2191 else
2192 wm_tbi_mediainit(sc);
2193 break;
2194 default:
2195 if (sc->sc_mediatype & WMP_F_FIBER)
2196 aprint_error_dev(sc->sc_dev,
2197 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2198 sc->sc_mediatype = WMP_F_COPPER;
2199 wm_gmii_mediainit(sc, wmp->wmp_product);
2200 }
2201 }
2202
2203 ifp = &sc->sc_ethercom.ec_if;
2204 xname = device_xname(sc->sc_dev);
2205 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2206 ifp->if_softc = sc;
2207 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2208 ifp->if_ioctl = wm_ioctl;
2209 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2210 ifp->if_start = wm_nq_start;
2211 else
2212 ifp->if_start = wm_start;
2213 ifp->if_watchdog = wm_watchdog;
2214 ifp->if_init = wm_init;
2215 ifp->if_stop = wm_stop;
2216 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2217 IFQ_SET_READY(&ifp->if_snd);
2218
2219 /* Check for jumbo frame */
2220 switch (sc->sc_type) {
2221 case WM_T_82573:
2222 /* XXX limited to 9234 if ASPM is disabled */
2223 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &io3);
2224 if ((io3 & NVM_3GIO_3_ASPM_MASK) != 0)
2225 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2226 break;
2227 case WM_T_82571:
2228 case WM_T_82572:
2229 case WM_T_82574:
2230 case WM_T_82575:
2231 case WM_T_82576:
2232 case WM_T_82580:
2233 case WM_T_I350:
2234 case WM_T_I354: /* XXXX ok? */
2235 case WM_T_I210:
2236 case WM_T_I211:
2237 case WM_T_80003:
2238 case WM_T_ICH9:
2239 case WM_T_ICH10:
2240 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2241 case WM_T_PCH_LPT:
2242 /* XXX limited to 9234 */
2243 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2244 break;
2245 case WM_T_PCH:
2246 /* XXX limited to 4096 */
2247 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2248 break;
2249 case WM_T_82542_2_0:
2250 case WM_T_82542_2_1:
2251 case WM_T_82583:
2252 case WM_T_ICH8:
2253 /* No support for jumbo frame */
2254 break;
2255 default:
2256 /* ETHER_MAX_LEN_JUMBO */
2257 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2258 break;
2259 }
2260
2261 /* If we're a i82543 or greater, we can support VLANs. */
2262 if (sc->sc_type >= WM_T_82543)
2263 sc->sc_ethercom.ec_capabilities |=
2264 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2265
2266 /*
2267	 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
2268 * on i82543 and later.
2269 */
2270 if (sc->sc_type >= WM_T_82543) {
2271 ifp->if_capabilities |=
2272 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2273 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2274 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2275 IFCAP_CSUM_TCPv6_Tx |
2276 IFCAP_CSUM_UDPv6_Tx;
2277 }
2278
2279 /*
2280 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2281 *
2282 * 82541GI (8086:1076) ... no
2283 * 82572EI (8086:10b9) ... yes
2284 */
2285 if (sc->sc_type >= WM_T_82571) {
2286 ifp->if_capabilities |=
2287 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2288 }
2289
2290 /*
2291 * If we're a i82544 or greater (except i82547), we can do
2292 * TCP segmentation offload.
2293 */
2294 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2295 ifp->if_capabilities |= IFCAP_TSOv4;
2296 }
2297
2298 if (sc->sc_type >= WM_T_82571) {
2299 ifp->if_capabilities |= IFCAP_TSOv6;
2300 }
2301
2302 #ifdef WM_MPSAFE
2303 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2304 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2305 #else
2306 sc->sc_tx_lock = NULL;
2307 sc->sc_rx_lock = NULL;
2308 #endif
2309
2310 /* Attach the interface. */
2311 if_attach(ifp);
2312 ether_ifattach(ifp, enaddr);
2313 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2314 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2315 RND_FLAG_DEFAULT);
2316
2317 #ifdef WM_EVENT_COUNTERS
2318 /* Attach event counters. */
2319 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2320 NULL, xname, "txsstall");
2321 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2322 NULL, xname, "txdstall");
2323 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2324 NULL, xname, "txfifo_stall");
2325 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2326 NULL, xname, "txdw");
2327 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2328 NULL, xname, "txqe");
2329 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2330 NULL, xname, "rxintr");
2331 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2332 NULL, xname, "linkintr");
2333
2334 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2335 NULL, xname, "rxipsum");
2336 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2337 NULL, xname, "rxtusum");
2338 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2339 NULL, xname, "txipsum");
2340 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2341 NULL, xname, "txtusum");
2342 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2343 NULL, xname, "txtusum6");
2344
2345 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2346 NULL, xname, "txtso");
2347 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2348 NULL, xname, "txtso6");
2349 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2350 NULL, xname, "txtsopain");
2351
2352 for (i = 0; i < WM_NTXSEGS; i++) {
2353 snprintf(wm_txseg_evcnt_names[i],
2354 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2355 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2356 NULL, xname, wm_txseg_evcnt_names[i]);
2357 }
2358
2359 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2360 NULL, xname, "txdrop");
2361
2362 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2363 NULL, xname, "tu");
2364
2365 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2366 NULL, xname, "tx_xoff");
2367 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2368 NULL, xname, "tx_xon");
2369 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2370 NULL, xname, "rx_xoff");
2371 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2372 NULL, xname, "rx_xon");
2373 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2374 NULL, xname, "rx_macctl");
2375 #endif /* WM_EVENT_COUNTERS */
2376
2377 if (pmf_device_register(self, wm_suspend, wm_resume))
2378 pmf_class_network_register(self, ifp);
2379 else
2380 aprint_error_dev(self, "couldn't establish power handler\n");
2381
2382 sc->sc_flags |= WM_F_ATTACHED;
2383 return;
2384
2385 /*
2386 * Free any resources we've allocated during the failed attach
2387 * attempt. Do this in reverse order and fall through.
2388 */
2389 fail_5:
2390 for (i = 0; i < WM_NRXDESC; i++) {
2391 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2392 bus_dmamap_destroy(sc->sc_dmat,
2393 sc->sc_rxsoft[i].rxs_dmamap);
2394 }
2395 fail_4:
2396 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2397 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2398 bus_dmamap_destroy(sc->sc_dmat,
2399 sc->sc_txsoft[i].txs_dmamap);
2400 }
2401 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2402 fail_3:
2403 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2404 fail_2:
2405 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2406 sc->sc_cd_size);
2407 fail_1:
2408 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2409 fail_0:
2410 return;
2411 }
2412
2413 /* The detach function (ca_detach) */
2414 static int
2415 wm_detach(device_t self, int flags __unused)
2416 {
2417 struct wm_softc *sc = device_private(self);
2418 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2419 int i;
2420 #ifndef WM_MPSAFE
2421 int s;
2422 #endif
2423
2424 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2425 return 0;
2426
2427 #ifndef WM_MPSAFE
2428 s = splnet();
2429 #endif
2430	/* Stop the interface. The callouts are stopped in wm_stop(). */
2431 wm_stop(ifp, 1);
2432
2433 #ifndef WM_MPSAFE
2434 splx(s);
2435 #endif
2436
2437 pmf_device_deregister(self);
2438
2439 /* Tell the firmware about the release */
2440 WM_BOTH_LOCK(sc);
2441 wm_release_manageability(sc);
2442 wm_release_hw_control(sc);
2443 WM_BOTH_UNLOCK(sc);
2444
2445 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2446
2447 /* Delete all remaining media. */
2448 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2449
2450 ether_ifdetach(ifp);
2451 if_detach(ifp);
2452
2453
2454 /* Unload RX dmamaps and free mbufs */
2455 WM_RX_LOCK(sc);
2456 wm_rxdrain(sc);
2457 WM_RX_UNLOCK(sc);
2458 /* Must unlock here */
2459
2460	/* Free the DMA maps, mirroring the cleanup at the end of wm_attach() */
2461 for (i = 0; i < WM_NRXDESC; i++) {
2462 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2463 bus_dmamap_destroy(sc->sc_dmat,
2464 sc->sc_rxsoft[i].rxs_dmamap);
2465 }
2466 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2467 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2468 bus_dmamap_destroy(sc->sc_dmat,
2469 sc->sc_txsoft[i].txs_dmamap);
2470 }
2471 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2472 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2473 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2474 sc->sc_cd_size);
2475 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2476
2477 /* Disestablish the interrupt handler */
2478 if (sc->sc_ih != NULL) {
2479 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2480 sc->sc_ih = NULL;
2481 }
2482
2483 /* Unmap the registers */
2484 if (sc->sc_ss) {
2485 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2486 sc->sc_ss = 0;
2487 }
2488
2489 if (sc->sc_ios) {
2490 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2491 sc->sc_ios = 0;
2492 }
2493
2494 if (sc->sc_tx_lock)
2495 mutex_obj_free(sc->sc_tx_lock);
2496 if (sc->sc_rx_lock)
2497 mutex_obj_free(sc->sc_rx_lock);
2498
2499 return 0;
2500 }
2501
2502 static bool
2503 wm_suspend(device_t self, const pmf_qual_t *qual)
2504 {
2505 struct wm_softc *sc = device_private(self);
2506
2507 wm_release_manageability(sc);
2508 wm_release_hw_control(sc);
2509 #ifdef WM_WOL
2510 wm_enable_wakeup(sc);
2511 #endif
2512
2513 return true;
2514 }
2515
2516 static bool
2517 wm_resume(device_t self, const pmf_qual_t *qual)
2518 {
2519 struct wm_softc *sc = device_private(self);
2520
2521 wm_init_manageability(sc);
2522
2523 return true;
2524 }
2525
2526 /*
2527 * wm_watchdog: [ifnet interface function]
2528 *
2529 * Watchdog timer handler.
2530 */
2531 static void
2532 wm_watchdog(struct ifnet *ifp)
2533 {
2534 struct wm_softc *sc = ifp->if_softc;
2535
2536 /*
2537 * Since we're using delayed interrupts, sweep up
2538 * before we report an error.
2539 */
2540 WM_TX_LOCK(sc);
2541 wm_txintr(sc);
2542 WM_TX_UNLOCK(sc);
2543
2544 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2545 #ifdef WM_DEBUG
2546 int i, j;
2547 struct wm_txsoft *txs;
2548 #endif
2549 log(LOG_ERR,
2550 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2551 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2552 sc->sc_txnext);
2553 ifp->if_oerrors++;
2554 #ifdef WM_DEBUG
2555 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2556 i = WM_NEXTTXS(sc, i)) {
2557 txs = &sc->sc_txsoft[i];
2558 printf("txs %d tx %d -> %d\n",
2559 i, txs->txs_firstdesc, txs->txs_lastdesc);
2560 for (j = txs->txs_firstdesc; ;
2561 j = WM_NEXTTX(sc, j)) {
2562 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2563 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2564 printf("\t %#08x%08x\n",
2565 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2566 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2567 if (j == txs->txs_lastdesc)
2568 break;
2569 }
2570 }
2571 #endif
2572 /* Reset the interface. */
2573 (void) wm_init(ifp);
2574 }
2575
2576 /* Try to get more packets going. */
2577 ifp->if_start(ifp);
2578 }
2579
2580 /*
2581 * wm_tick:
2582 *
2583 * One second timer, used to check link status, sweep up
2584 * completed transmit jobs, etc.
2585 */
2586 static void
2587 wm_tick(void *arg)
2588 {
2589 struct wm_softc *sc = arg;
2590 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2591 #ifndef WM_MPSAFE
2592 int s;
2593
2594 s = splnet();
2595 #endif
2596
2597 WM_TX_LOCK(sc);
2598
2599 if (sc->sc_stopping)
2600 goto out;
2601
2602 if (sc->sc_type >= WM_T_82542_2_1) {
2603 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2604 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2605 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2606 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2607 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2608 }
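	/*
	 * The statistics registers read above and below are clear-on-read,
	 * so each read both accumulates the count and resets the hardware
	 * counter.
	 */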
2609
2610 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2611 ifp->if_ierrors += 0ULL + /* ensure quad_t */
2612 + CSR_READ(sc, WMREG_CRCERRS)
2613 + CSR_READ(sc, WMREG_ALGNERRC)
2614 + CSR_READ(sc, WMREG_SYMERRC)
2615 + CSR_READ(sc, WMREG_RXERRC)
2616 + CSR_READ(sc, WMREG_SEC)
2617 + CSR_READ(sc, WMREG_CEXTERR)
2618 + CSR_READ(sc, WMREG_RLEC);
2619 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2620
2621 if (sc->sc_flags & WM_F_HAS_MII)
2622 mii_tick(&sc->sc_mii);
2623 else
2624 wm_tbi_check_link(sc);
2625
2626 out:
2627 WM_TX_UNLOCK(sc);
2628 #ifndef WM_MPSAFE
2629 splx(s);
2630 #endif
2631
2632 if (!sc->sc_stopping)
2633 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2634 }
2635
2636 static int
2637 wm_ifflags_cb(struct ethercom *ec)
2638 {
2639 struct ifnet *ifp = &ec->ec_if;
2640 struct wm_softc *sc = ifp->if_softc;
2641 int change = ifp->if_flags ^ sc->sc_if_flags;
2642 int rc = 0;
2643
2644 WM_BOTH_LOCK(sc);
2645
2646 if (change != 0)
2647 sc->sc_if_flags = ifp->if_flags;
2648
2649 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2650 rc = ENETRESET;
2651 goto out;
2652 }
2653
2654 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2655 wm_set_filter(sc);
2656
2657 wm_set_vlan(sc);
2658
2659 out:
2660 WM_BOTH_UNLOCK(sc);
2661
2662 return rc;
2663 }
2664
2665 /*
2666 * wm_ioctl: [ifnet interface function]
2667 *
2668 * Handle control requests from the operator.
2669 */
2670 static int
2671 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2672 {
2673 struct wm_softc *sc = ifp->if_softc;
2674 struct ifreq *ifr = (struct ifreq *) data;
2675 struct ifaddr *ifa = (struct ifaddr *)data;
2676 struct sockaddr_dl *sdl;
2677 int s, error;
2678
2679 #ifndef WM_MPSAFE
2680 s = splnet();
2681 #endif
2682 switch (cmd) {
2683 case SIOCSIFMEDIA:
2684 case SIOCGIFMEDIA:
2685 WM_BOTH_LOCK(sc);
2686 /* Flow control requires full-duplex mode. */
2687 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2688 (ifr->ifr_media & IFM_FDX) == 0)
2689 ifr->ifr_media &= ~IFM_ETH_FMASK;
2690 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2691 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2692 /* We can do both TXPAUSE and RXPAUSE. */
2693 ifr->ifr_media |=
2694 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2695 }
2696 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2697 }
2698 WM_BOTH_UNLOCK(sc);
2699 #ifdef WM_MPSAFE
2700 s = splnet();
2701 #endif
2702 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2703 #ifdef WM_MPSAFE
2704 splx(s);
2705 #endif
2706 break;
2707 case SIOCINITIFADDR:
2708 WM_BOTH_LOCK(sc);
2709 if (ifa->ifa_addr->sa_family == AF_LINK) {
2710 sdl = satosdl(ifp->if_dl->ifa_addr);
2711 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2712 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2713 /* unicast address is first multicast entry */
2714 wm_set_filter(sc);
2715 error = 0;
2716 WM_BOTH_UNLOCK(sc);
2717 break;
2718 }
2719 WM_BOTH_UNLOCK(sc);
2720 /*FALLTHROUGH*/
2721 default:
2722 #ifdef WM_MPSAFE
2723 s = splnet();
2724 #endif
2725 /* It may call wm_start, so unlock here */
2726 error = ether_ioctl(ifp, cmd, data);
2727 #ifdef WM_MPSAFE
2728 splx(s);
2729 #endif
2730 if (error != ENETRESET)
2731 break;
2732
2733 error = 0;
2734
2735 if (cmd == SIOCSIFCAP) {
2736 error = (*ifp->if_init)(ifp);
2737 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2738 ;
2739 else if (ifp->if_flags & IFF_RUNNING) {
2740 /*
2741 * Multicast list has changed; set the hardware filter
2742 * accordingly.
2743 */
2744 WM_BOTH_LOCK(sc);
2745 wm_set_filter(sc);
2746 WM_BOTH_UNLOCK(sc);
2747 }
2748 break;
2749 }
2750
2751 /* Try to get more packets going. */
2752 ifp->if_start(ifp);
2753
2754 #ifndef WM_MPSAFE
2755 splx(s);
2756 #endif
2757 return error;
2758 }
2759
2760 /* MAC address related */
2761
2762 static int
2763 wm_check_alt_mac_addr(struct wm_softc *sc)
2764 {
2765 uint16_t myea[ETHER_ADDR_LEN / 2];
2766 uint16_t offset = NVM_OFF_MACADDR;
2767
2768 /* Try to read alternative MAC address pointer */
2769 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2770 return -1;
2771
2772 /* Check pointer */
2773 if (offset == 0xffff)
2774 return -1;
2775
2776 /*
2777	 * Check whether the alternative MAC address is valid or not.
2778	 * Some cards have a non-0xffff pointer but don't actually use
2779	 * an alternative MAC address.
2780 *
2781	 * Check that the multicast (group) bit is not set.
2782 */
2783 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2784 if (((myea[0] & 0xff) & 0x01) == 0)
2785 return 0; /* found! */
2786
2787 /* not found */
2788 return -1;
2789 }
2790
2791 static int
2792 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2793 {
2794 uint16_t myea[ETHER_ADDR_LEN / 2];
2795 uint16_t offset = NVM_OFF_MACADDR;
2796 int do_invert = 0;
2797
2798 switch (sc->sc_type) {
2799 case WM_T_82580:
2800 case WM_T_I350:
2801 case WM_T_I354:
2802 switch (sc->sc_funcid) {
2803 case 0:
2804 /* default value (== NVM_OFF_MACADDR) */
2805 break;
2806 case 1:
2807 offset = NVM_OFF_LAN1;
2808 break;
2809 case 2:
2810 offset = NVM_OFF_LAN2;
2811 break;
2812 case 3:
2813 offset = NVM_OFF_LAN3;
2814 break;
2815 default:
2816 goto bad;
2817 /* NOTREACHED */
2818 break;
2819 }
2820 break;
2821 case WM_T_82571:
2822 case WM_T_82575:
2823 case WM_T_82576:
2824 case WM_T_80003:
2825 case WM_T_I210:
2826 case WM_T_I211:
2827 if (wm_check_alt_mac_addr(sc) != 0) {
2828 /* reset the offset to LAN0 */
2829 offset = NVM_OFF_MACADDR;
2830 if ((sc->sc_funcid & 0x01) == 1)
2831 do_invert = 1;
2832 goto do_read;
2833 }
2834 switch (sc->sc_funcid) {
2835 case 0:
2836 /*
2837 * The offset is the value in NVM_OFF_ALT_MAC_ADDR_PTR
2838 * itself.
2839 */
2840 break;
2841 case 1:
2842 offset += NVM_OFF_MACADDR_LAN1;
2843 break;
2844 case 2:
2845 offset += NVM_OFF_MACADDR_LAN2;
2846 break;
2847 case 3:
2848 offset += NVM_OFF_MACADDR_LAN3;
2849 break;
2850 default:
2851 goto bad;
2852 /* NOTREACHED */
2853 break;
2854 }
2855 break;
2856 default:
2857 if ((sc->sc_funcid & 0x01) == 1)
2858 do_invert = 1;
2859 break;
2860 }
2861
2862 do_read:
2863 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2864 myea) != 0) {
2865 goto bad;
2866 }
2867
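	/*
	 * The NVM words hold the address in little-endian byte order;
	 * e.g. for 00:11:22:33:44:55, myea[0] is 0x1100, myea[1] is
	 * 0x3322 and myea[2] is 0x5544, unpacked below.
	 */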
2868 enaddr[0] = myea[0] & 0xff;
2869 enaddr[1] = myea[0] >> 8;
2870 enaddr[2] = myea[1] & 0xff;
2871 enaddr[3] = myea[1] >> 8;
2872 enaddr[4] = myea[2] & 0xff;
2873 enaddr[5] = myea[2] >> 8;
2874
2875 /*
2876 * Toggle the LSB of the MAC address on the second port
2877 * of some dual port cards.
2878 */
2879 if (do_invert != 0)
2880 enaddr[5] ^= 1;
2881
2882 return 0;
2883
2884 bad:
2885 return -1;
2886 }
2887
2888 /*
2889 * wm_set_ral:
2890 *
2891 *	Set an entry in the receive address list.
2892 */
2893 static void
2894 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2895 {
2896 uint32_t ral_lo, ral_hi;
2897
2898 if (enaddr != NULL) {
2899 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2900 (enaddr[3] << 24);
2901 ral_hi = enaddr[4] | (enaddr[5] << 8);
2902 ral_hi |= RAL_AV;
2903 } else {
2904 ral_lo = 0;
2905 ral_hi = 0;
2906 }
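	/*
	 * Worked example: for 00:11:22:33:44:55, ral_lo is 0x33221100
	 * and ral_hi is RAL_AV | 0x5544, where RAL_AV marks the entry
	 * as valid.
	 */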
2907
2908 if (sc->sc_type >= WM_T_82544) {
2909 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2910 ral_lo);
2911 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2912 ral_hi);
2913 } else {
2914 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2915 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2916 }
2917 }
2918
2919 /*
2920 * wm_mchash:
2921 *
2922 *	Compute the hash of the multicast address for the 4096-bit
2923 *	multicast filter (1024-bit on the ICH and PCH variants).
2924 */
2925 static uint32_t
2926 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2927 {
2928 static const int lo_shift[4] = { 4, 3, 2, 0 };
2929 static const int hi_shift[4] = { 4, 5, 6, 8 };
2930 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2931 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
2932 uint32_t hash;
2933
2934 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2935 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2936 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
2937 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
2938 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
2939 return (hash & 0x3ff);
2940 }
2941 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2942 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2943
2944 return (hash & 0xfff);
2945 }
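
#if 0
/*
 * Standalone sketch (not compiled in): how a multicast address maps to
 * a bit in the multicast table array, for the non-ICH case with
 * sc_mchash_type 0.  "mta" and "mta_set_example" are hypothetical
 * stand-ins for the 128-word MTA register array and are not part of
 * the driver.
 */
static void
mta_set_example(uint32_t *mta, const uint8_t *enaddr)
{
	uint32_t hash, reg, bit;

	/* Hash built from address bytes 4 and 5, as in wm_mchash() */
	hash = ((enaddr[4] >> 4) | ((uint16_t)enaddr[5] << 4)) & 0xfff;
	reg = (hash >> 5) & 0x7f;	/* which 32-bit MTA word */
	bit = hash & 0x1f;		/* which bit within that word */
	mta[reg] |= 1U << bit;
}
#endif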
2946
2947 /*
2948 * wm_set_filter:
2949 *
2950 * Set up the receive filter.
2951 */
2952 static void
2953 wm_set_filter(struct wm_softc *sc)
2954 {
2955 struct ethercom *ec = &sc->sc_ethercom;
2956 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2957 struct ether_multi *enm;
2958 struct ether_multistep step;
2959 bus_addr_t mta_reg;
2960 uint32_t hash, reg, bit;
2961 int i, size;
2962
2963 if (sc->sc_type >= WM_T_82544)
2964 mta_reg = WMREG_CORDOVA_MTA;
2965 else
2966 mta_reg = WMREG_MTA;
2967
2968 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2969
2970 if (ifp->if_flags & IFF_BROADCAST)
2971 sc->sc_rctl |= RCTL_BAM;
2972 if (ifp->if_flags & IFF_PROMISC) {
2973 sc->sc_rctl |= RCTL_UPE;
2974 goto allmulti;
2975 }
2976
2977 /*
2978 * Set the station address in the first RAL slot, and
2979 * clear the remaining slots.
2980 */
2981 if (sc->sc_type == WM_T_ICH8)
2982		size = WM_RAL_TABSIZE_ICH8 - 1;
2983 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
2984 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
2985 || (sc->sc_type == WM_T_PCH_LPT))
2986 size = WM_RAL_TABSIZE_ICH8;
2987 else if (sc->sc_type == WM_T_82575)
2988 size = WM_RAL_TABSIZE_82575;
2989 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
2990 size = WM_RAL_TABSIZE_82576;
2991 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2992 size = WM_RAL_TABSIZE_I350;
2993 else
2994 size = WM_RAL_TABSIZE;
2995 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
2996 for (i = 1; i < size; i++)
2997 wm_set_ral(sc, NULL, i);
2998
2999 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3000 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3001 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
3002 size = WM_ICH8_MC_TABSIZE;
3003 else
3004 size = WM_MC_TABSIZE;
3005 /* Clear out the multicast table. */
3006 for (i = 0; i < size; i++)
3007 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3008
3009 ETHER_FIRST_MULTI(step, ec, enm);
3010 while (enm != NULL) {
3011 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3012 /*
3013 * We must listen to a range of multicast addresses.
3014 * For now, just accept all multicasts, rather than
3015 * trying to set only those filter bits needed to match
3016 * the range. (At this time, the only use of address
3017 * ranges is for IP multicast routing, for which the
3018 * range is big enough to require all bits set.)
3019 */
3020 goto allmulti;
3021 }
3022
3023 hash = wm_mchash(sc, enm->enm_addrlo);
3024
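		/*
		 * The hash selects a single bit in the multicast table
		 * array: the upper hash bits pick a 32-bit MTA word and
		 * the low 5 bits pick the bit within it.  ICH/PCH parts
		 * have only 32 MTA words (1024 bits), hence the narrower
		 * mask below; the others have 128 words (4096 bits).
		 */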
3025 reg = (hash >> 5);
3026 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3027 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3028 || (sc->sc_type == WM_T_PCH2)
3029 || (sc->sc_type == WM_T_PCH_LPT))
3030 reg &= 0x1f;
3031 else
3032 reg &= 0x7f;
3033 bit = hash & 0x1f;
3034
3035 hash = CSR_READ(sc, mta_reg + (reg << 2));
3036 hash |= 1U << bit;
3037
3038		/* 82544 errata: also rewrite the preceding (even) MTA register */
3039		if (sc->sc_type == WM_T_82544 && (reg & 1) == 1) {
3040 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3041 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3042 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3043 } else
3044 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3045
3046 ETHER_NEXT_MULTI(step, enm);
3047 }
3048
3049 ifp->if_flags &= ~IFF_ALLMULTI;
3050 goto setit;
3051
3052 allmulti:
3053 ifp->if_flags |= IFF_ALLMULTI;
3054 sc->sc_rctl |= RCTL_MPE;
3055
3056 setit:
3057 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3058 }
3059
3060 /* Reset and init related */
3061
3062 static void
3063 wm_set_vlan(struct wm_softc *sc)
3064 {
3065 /* Deal with VLAN enables. */
3066 if (VLAN_ATTACHED(&sc->sc_ethercom))
3067 sc->sc_ctrl |= CTRL_VME;
3068 else
3069 sc->sc_ctrl &= ~CTRL_VME;
3070
3071 /* Write the control registers. */
3072 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3073 }
3074
3075 static void
3076 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3077 {
3078 uint32_t gcr;
3079 pcireg_t ctrl2;
3080
3081 gcr = CSR_READ(sc, WMREG_GCR);
3082
3083 /* Only take action if timeout value is defaulted to 0 */
3084 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3085 goto out;
3086
3087 if ((gcr & GCR_CAP_VER2) == 0) {
3088 gcr |= GCR_CMPL_TMOUT_10MS;
3089 goto out;
3090 }
3091
3092 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3093 sc->sc_pcixe_capoff + PCIE_DCSR2);
3094 ctrl2 |= WM_PCIE_DCSR2_16MS;
3095 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3096 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3097
3098 out:
3099 /* Disable completion timeout resend */
3100 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3101
3102 CSR_WRITE(sc, WMREG_GCR, gcr);
3103 }
3104
3105 void
3106 wm_get_auto_rd_done(struct wm_softc *sc)
3107 {
3108 int i;
3109
3110 /* wait for eeprom to reload */
3111 switch (sc->sc_type) {
3112 case WM_T_82571:
3113 case WM_T_82572:
3114 case WM_T_82573:
3115 case WM_T_82574:
3116 case WM_T_82583:
3117 case WM_T_82575:
3118 case WM_T_82576:
3119 case WM_T_82580:
3120 case WM_T_I350:
3121 case WM_T_I354:
3122 case WM_T_I210:
3123 case WM_T_I211:
3124 case WM_T_80003:
3125 case WM_T_ICH8:
3126 case WM_T_ICH9:
3127 for (i = 0; i < 10; i++) {
3128 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3129 break;
3130 delay(1000);
3131 }
3132 if (i == 10) {
3133 log(LOG_ERR, "%s: auto read from eeprom failed to "
3134 "complete\n", device_xname(sc->sc_dev));
3135 }
3136 break;
3137 default:
3138 break;
3139 }
3140 }
3141
3142 void
3143 wm_lan_init_done(struct wm_softc *sc)
3144 {
3145 uint32_t reg = 0;
3146 int i;
3147
3148 /* wait for eeprom to reload */
3149 switch (sc->sc_type) {
3150 case WM_T_ICH10:
3151 case WM_T_PCH:
3152 case WM_T_PCH2:
3153 case WM_T_PCH_LPT:
3154 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3155 reg = CSR_READ(sc, WMREG_STATUS);
3156 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3157 break;
3158 delay(100);
3159 }
3160 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3161 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3162 "complete\n", device_xname(sc->sc_dev), __func__);
3163 }
3164 break;
3165 default:
3166 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3167 __func__);
3168 break;
3169 }
3170
3171 reg &= ~STATUS_LAN_INIT_DONE;
3172 CSR_WRITE(sc, WMREG_STATUS, reg);
3173 }
3174
3175 void
3176 wm_get_cfg_done(struct wm_softc *sc)
3177 {
3178 int mask;
3179 uint32_t reg;
3180 int i;
3181
3182 /* wait for eeprom to reload */
3183 switch (sc->sc_type) {
3184 case WM_T_82542_2_0:
3185 case WM_T_82542_2_1:
3186 /* null */
3187 break;
3188 case WM_T_82543:
3189 case WM_T_82544:
3190 case WM_T_82540:
3191 case WM_T_82545:
3192 case WM_T_82545_3:
3193 case WM_T_82546:
3194 case WM_T_82546_3:
3195 case WM_T_82541:
3196 case WM_T_82541_2:
3197 case WM_T_82547:
3198 case WM_T_82547_2:
3199 case WM_T_82573:
3200 case WM_T_82574:
3201 case WM_T_82583:
3202 /* generic */
3203 delay(10*1000);
3204 break;
3205 case WM_T_80003:
3206 case WM_T_82571:
3207 case WM_T_82572:
3208 case WM_T_82575:
3209 case WM_T_82576:
3210 case WM_T_82580:
3211 case WM_T_I350:
3212 case WM_T_I354:
3213 case WM_T_I210:
3214 case WM_T_I211:
3215 if (sc->sc_type == WM_T_82571) {
3216			/* Only the 82571 shares the port 0 CFGDONE bit */
3217 mask = EEMNGCTL_CFGDONE_0;
3218 } else
3219 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3220 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3221 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3222 break;
3223 delay(1000);
3224 }
3225 if (i >= WM_PHY_CFG_TIMEOUT) {
3226 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3227 device_xname(sc->sc_dev), __func__));
3228 }
3229 break;
3230 case WM_T_ICH8:
3231 case WM_T_ICH9:
3232 case WM_T_ICH10:
3233 case WM_T_PCH:
3234 case WM_T_PCH2:
3235 case WM_T_PCH_LPT:
3236 delay(10*1000);
3237 if (sc->sc_type >= WM_T_ICH10)
3238 wm_lan_init_done(sc);
3239 else
3240 wm_get_auto_rd_done(sc);
3241
3242 reg = CSR_READ(sc, WMREG_STATUS);
3243 if ((reg & STATUS_PHYRA) != 0)
3244 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3245 break;
3246 default:
3247 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3248 __func__);
3249 break;
3250 }
3251 }
3252
3253 /*
3254 * wm_reset:
3255 *
3256 * Reset the i82542 chip.
3257 */
3258 static void
3259 wm_reset(struct wm_softc *sc)
3260 {
3261 int phy_reset = 0;
3262 int error = 0;
3263 uint32_t reg, mask;
3264
3265 /*
3266 * Allocate on-chip memory according to the MTU size.
3267 * The Packet Buffer Allocation register must be written
3268 * before the chip is reset.
3269 */
3270 switch (sc->sc_type) {
3271 case WM_T_82547:
3272 case WM_T_82547_2:
3273 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3274 PBA_22K : PBA_30K;
3275 sc->sc_txfifo_head = 0;
3276 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3277 sc->sc_txfifo_size =
3278 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3279 sc->sc_txfifo_stall = 0;
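		/*
		 * I.e. receive gets 30K (or 22K with a jumbo MTU) of the
		 * 40K packet buffer, and the Tx FIFO gets whatever
		 * remains, as computed above.
		 */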
3280 break;
3281 case WM_T_82571:
3282 case WM_T_82572:
3283 case WM_T_82575: /* XXX need special handing for jumbo frames */
3284 case WM_T_I350:
3285 case WM_T_I354:
3286 case WM_T_80003:
3287 sc->sc_pba = PBA_32K;
3288 break;
3289 case WM_T_82580:
3290 sc->sc_pba = PBA_35K;
3291 break;
3292 case WM_T_I210:
3293 case WM_T_I211:
3294 sc->sc_pba = PBA_34K;
3295 break;
3296 case WM_T_82576:
3297 sc->sc_pba = PBA_64K;
3298 break;
3299 case WM_T_82573:
3300 sc->sc_pba = PBA_12K;
3301 break;
3302 case WM_T_82574:
3303 case WM_T_82583:
3304 sc->sc_pba = PBA_20K;
3305 break;
3306 case WM_T_ICH8:
3307 sc->sc_pba = PBA_8K;
3308 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3309 break;
3310 case WM_T_ICH9:
3311 case WM_T_ICH10:
3312 sc->sc_pba = PBA_10K;
3313 break;
3314 case WM_T_PCH:
3315 case WM_T_PCH2:
3316 case WM_T_PCH_LPT:
3317 sc->sc_pba = PBA_26K;
3318 break;
3319 default:
3320 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3321 PBA_40K : PBA_48K;
3322 break;
3323 }
3324 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3325
3326 /* Prevent the PCI-E bus from sticking */
3327 if (sc->sc_flags & WM_F_PCIE) {
3328 int timeout = 800;
3329
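		/*
		 * Poll for the master-disable handshake to complete:
		 * up to 800 iterations of delay(100), i.e. about 80ms.
		 */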
3330 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3331 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3332
3333 while (timeout--) {
3334 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3335 == 0)
3336 break;
3337 delay(100);
3338 }
3339 }
3340
3341 /* Set the completion timeout for interface */
3342 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3343 || (sc->sc_type == WM_T_82580)
3344 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3345 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3346 wm_set_pcie_completion_timeout(sc);
3347
3348 /* Clear interrupt */
3349 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3350
3351 /* Stop the transmit and receive processes. */
3352 CSR_WRITE(sc, WMREG_RCTL, 0);
3353 sc->sc_rctl &= ~RCTL_EN;
3354 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3355 CSR_WRITE_FLUSH(sc);
3356
3357 /* XXX set_tbi_sbp_82543() */
3358
3359 delay(10*1000);
3360
3361 /* Must acquire the MDIO ownership before MAC reset */
3362 switch (sc->sc_type) {
3363 case WM_T_82573:
3364 case WM_T_82574:
3365 case WM_T_82583:
3366 error = wm_get_hw_semaphore_82573(sc);
3367 break;
3368 default:
3369 break;
3370 }
3371
3372 /*
3373 * 82541 Errata 29? & 82547 Errata 28?
3374 * See also the description about PHY_RST bit in CTRL register
3375 * in 8254x_GBe_SDM.pdf.
3376 */
3377 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3378 CSR_WRITE(sc, WMREG_CTRL,
3379 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3380 CSR_WRITE_FLUSH(sc);
3381 delay(5000);
3382 }
3383
3384 switch (sc->sc_type) {
3385 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3386 case WM_T_82541:
3387 case WM_T_82541_2:
3388 case WM_T_82547:
3389 case WM_T_82547_2:
3390 /*
3391 * On some chipsets, a reset through a memory-mapped write
3392 * cycle can cause the chip to reset before completing the
3393		 * write cycle. This causes major headaches that can be
3394 * avoided by issuing the reset via indirect register writes
3395 * through I/O space.
3396 *
3397 * So, if we successfully mapped the I/O BAR at attach time,
3398 * use that. Otherwise, try our luck with a memory-mapped
3399 * reset.
3400 */
3401 if (sc->sc_flags & WM_F_IOH_VALID)
3402 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3403 else
3404 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3405 break;
3406 case WM_T_82545_3:
3407 case WM_T_82546_3:
3408 /* Use the shadow control register on these chips. */
3409 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3410 break;
3411 case WM_T_80003:
3412 mask = swfwphysem[sc->sc_funcid];
3413 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3414 wm_get_swfw_semaphore(sc, mask);
3415 CSR_WRITE(sc, WMREG_CTRL, reg);
3416 wm_put_swfw_semaphore(sc, mask);
3417 break;
3418 case WM_T_ICH8:
3419 case WM_T_ICH9:
3420 case WM_T_ICH10:
3421 case WM_T_PCH:
3422 case WM_T_PCH2:
3423 case WM_T_PCH_LPT:
3424 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3425 if (wm_check_reset_block(sc) == 0) {
3426 /*
3427 * Gate automatic PHY configuration by hardware on
3428 * non-managed 82579
3429 */
3430 if ((sc->sc_type == WM_T_PCH2)
3431 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3432 != 0))
3433 wm_gate_hw_phy_config_ich8lan(sc, 1);
3434
3436 reg |= CTRL_PHY_RESET;
3437 phy_reset = 1;
3438 }
3439 wm_get_swfwhw_semaphore(sc);
3440 CSR_WRITE(sc, WMREG_CTRL, reg);
3441		/* Don't insert a completion barrier while resetting */
3442 delay(20*1000);
3443 wm_put_swfwhw_semaphore(sc);
3444 break;
3445 case WM_T_82580:
3446 case WM_T_I350:
3447 case WM_T_I354:
3448 case WM_T_I210:
3449 case WM_T_I211:
3450 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3451 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3452 CSR_WRITE_FLUSH(sc);
3453 delay(5000);
3454 break;
3455 case WM_T_82542_2_0:
3456 case WM_T_82542_2_1:
3457 case WM_T_82543:
3458 case WM_T_82540:
3459 case WM_T_82545:
3460 case WM_T_82546:
3461 case WM_T_82571:
3462 case WM_T_82572:
3463 case WM_T_82573:
3464 case WM_T_82574:
3465 case WM_T_82575:
3466 case WM_T_82576:
3467 case WM_T_82583:
3468 default:
3469 /* Everything else can safely use the documented method. */
3470 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3471 break;
3472 }
3473
3474 /* Must release the MDIO ownership after MAC reset */
3475 switch (sc->sc_type) {
3476 case WM_T_82573:
3477 case WM_T_82574:
3478 case WM_T_82583:
3479 if (error == 0)
3480 wm_put_hw_semaphore_82573(sc);
3481 break;
3482 default:
3483 break;
3484 }
3485
3486 if (phy_reset != 0)
3487 wm_get_cfg_done(sc);
3488
3489 /* reload EEPROM */
3490 switch (sc->sc_type) {
3491 case WM_T_82542_2_0:
3492 case WM_T_82542_2_1:
3493 case WM_T_82543:
3494 case WM_T_82544:
3495 delay(10);
3496 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3497 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3498 CSR_WRITE_FLUSH(sc);
3499 delay(2000);
3500 break;
3501 case WM_T_82540:
3502 case WM_T_82545:
3503 case WM_T_82545_3:
3504 case WM_T_82546:
3505 case WM_T_82546_3:
3506 delay(5*1000);
3507 /* XXX Disable HW ARPs on ASF enabled adapters */
3508 break;
3509 case WM_T_82541:
3510 case WM_T_82541_2:
3511 case WM_T_82547:
3512 case WM_T_82547_2:
3513 delay(20000);
3514 /* XXX Disable HW ARPs on ASF enabled adapters */
3515 break;
3516 case WM_T_82571:
3517 case WM_T_82572:
3518 case WM_T_82573:
3519 case WM_T_82574:
3520 case WM_T_82583:
3521 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3522 delay(10);
3523 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3524 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3525 CSR_WRITE_FLUSH(sc);
3526 }
3527 /* check EECD_EE_AUTORD */
3528 wm_get_auto_rd_done(sc);
3529 /*
3530	 * PHY configuration from the NVM starts only after EECD_AUTO_RD
3531	 * is set.
3532 */
3533 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3534 || (sc->sc_type == WM_T_82583))
3535 delay(25*1000);
3536 break;
3537 case WM_T_82575:
3538 case WM_T_82576:
3539 case WM_T_82580:
3540 case WM_T_I350:
3541 case WM_T_I354:
3542 case WM_T_I210:
3543 case WM_T_I211:
3544 case WM_T_80003:
3545 /* check EECD_EE_AUTORD */
3546 wm_get_auto_rd_done(sc);
3547 break;
3548 case WM_T_ICH8:
3549 case WM_T_ICH9:
3550 case WM_T_ICH10:
3551 case WM_T_PCH:
3552 case WM_T_PCH2:
3553 case WM_T_PCH_LPT:
3554 break;
3555 default:
3556 panic("%s: unknown type\n", __func__);
3557 }
3558
3559 /* Check whether EEPROM is present or not */
3560 switch (sc->sc_type) {
3561 case WM_T_82575:
3562 case WM_T_82576:
3563 #if 0 /* XXX */
3564 case WM_T_82580:
3565 #endif
3566 case WM_T_I350:
3567 case WM_T_I354:
3568 case WM_T_ICH8:
3569 case WM_T_ICH9:
3570 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3571 /* Not found */
3572 sc->sc_flags |= WM_F_EEPROM_INVALID;
3573 if ((sc->sc_type == WM_T_82575)
3574 || (sc->sc_type == WM_T_82576)
3575 || (sc->sc_type == WM_T_82580)
3576 || (sc->sc_type == WM_T_I350)
3577 || (sc->sc_type == WM_T_I354))
3578 wm_reset_init_script_82575(sc);
3579 }
3580 break;
3581 default:
3582 break;
3583 }
3584
3585 if ((sc->sc_type == WM_T_82580)
3586 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3587 /* clear global device reset status bit */
3588 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3589 }
3590
3591 /* Clear any pending interrupt events. */
3592 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3593 reg = CSR_READ(sc, WMREG_ICR);
3594
3595 /* reload sc_ctrl */
3596 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3597
3598 if (sc->sc_type == WM_T_I350)
3599 wm_set_eee_i350(sc);
3600
3601 /* dummy read from WUC */
3602 if (sc->sc_type == WM_T_PCH)
3603 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3604 /*
3605 * For PCH, this write will make sure that any noise will be detected
3606 * as a CRC error and be dropped rather than show up as a bad packet
3607 * to the DMA engine
3608 */
3609 if (sc->sc_type == WM_T_PCH)
3610 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3611
3612 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3613 CSR_WRITE(sc, WMREG_WUC, 0);
3614
3615 /* XXX need special handling for 82580 */
3616 }
3617
3618 /*
3619 * wm_add_rxbuf:
3620 *
3621  *	Add a receive buffer to the indicated descriptor.
3622 */
3623 static int
3624 wm_add_rxbuf(struct wm_softc *sc, int idx)
3625 {
3626 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3627 struct mbuf *m;
3628 int error;
3629
3630 KASSERT(WM_RX_LOCKED(sc));
3631
3632 MGETHDR(m, M_DONTWAIT, MT_DATA);
3633 if (m == NULL)
3634 return ENOBUFS;
3635
3636 MCLGET(m, M_DONTWAIT);
3637 if ((m->m_flags & M_EXT) == 0) {
3638 m_freem(m);
3639 return ENOBUFS;
3640 }
3641
3642 if (rxs->rxs_mbuf != NULL)
3643 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3644
3645 rxs->rxs_mbuf = m;
3646
3647 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3648 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3649 BUS_DMA_READ|BUS_DMA_NOWAIT);
3650 if (error) {
3651 /* XXX XXX XXX */
3652 aprint_error_dev(sc->sc_dev,
3653 "unable to load rx DMA map %d, error = %d\n",
3654 idx, error);
3655 panic("wm_add_rxbuf");
3656 }
3657
3658 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3659 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3660
3661 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3662 if ((sc->sc_rctl & RCTL_EN) != 0)
3663 WM_INIT_RXDESC(sc, idx);
3664 } else
3665 WM_INIT_RXDESC(sc, idx);
3666
3667 return 0;
3668 }
3669
3670 /*
3671 * wm_rxdrain:
3672 *
3673 * Drain the receive queue.
3674 */
3675 static void
3676 wm_rxdrain(struct wm_softc *sc)
3677 {
3678 struct wm_rxsoft *rxs;
3679 int i;
3680
3681 KASSERT(WM_RX_LOCKED(sc));
3682
3683 for (i = 0; i < WM_NRXDESC; i++) {
3684 rxs = &sc->sc_rxsoft[i];
3685 if (rxs->rxs_mbuf != NULL) {
3686 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3687 m_freem(rxs->rxs_mbuf);
3688 rxs->rxs_mbuf = NULL;
3689 }
3690 }
3691 }
3692
3693 /*
3694 * wm_init: [ifnet interface function]
3695 *
3696 * Initialize the interface.
3697 */
3698 static int
3699 wm_init(struct ifnet *ifp)
3700 {
3701 struct wm_softc *sc = ifp->if_softc;
3702 int ret;
3703
3704 WM_BOTH_LOCK(sc);
3705 ret = wm_init_locked(ifp);
3706 WM_BOTH_UNLOCK(sc);
3707
3708 return ret;
3709 }
3710
3711 static int
3712 wm_init_locked(struct ifnet *ifp)
3713 {
3714 struct wm_softc *sc = ifp->if_softc;
3715 struct wm_rxsoft *rxs;
3716 int i, j, trynum, error = 0;
3717 uint32_t reg;
3718
3719 KASSERT(WM_BOTH_LOCKED(sc));
3720 /*
3721 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3722 	 * There is a small but measurable benefit to avoiding the adjustment
3723 * of the descriptor so that the headers are aligned, for normal mtu,
3724 * on such platforms. One possibility is that the DMA itself is
3725 * slightly more efficient if the front of the entire packet (instead
3726 * of the front of the headers) is aligned.
3727 *
3728 * Note we must always set align_tweak to 0 if we are using
3729 * jumbo frames.
3730 */
3731 #ifdef __NO_STRICT_ALIGNMENT
3732 sc->sc_align_tweak = 0;
3733 #else
3734 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3735 sc->sc_align_tweak = 0;
3736 else
3737 sc->sc_align_tweak = 2;
3738 #endif /* __NO_STRICT_ALIGNMENT */
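	/*
	 * Illustrative example (added note, not in the original): with
	 * MCLBYTES = 2048 and the standard 1500-byte MTU, the frame is
	 * 1500 + 14 + 4 = 1518 bytes <= MCLBYTES - 2, so align_tweak = 2
	 * is used; shifting the 14-byte Ethernet header by 2 makes the
	 * IP header start on a 4-byte boundary.
	 */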
3739
3740 /* Cancel any pending I/O. */
3741 wm_stop_locked(ifp, 0);
3742
3743 /* update statistics before reset */
3744 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3745 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3746
3747 /* Reset the chip to a known state. */
3748 wm_reset(sc);
3749
3750 switch (sc->sc_type) {
3751 case WM_T_82571:
3752 case WM_T_82572:
3753 case WM_T_82573:
3754 case WM_T_82574:
3755 case WM_T_82583:
3756 case WM_T_80003:
3757 case WM_T_ICH8:
3758 case WM_T_ICH9:
3759 case WM_T_ICH10:
3760 case WM_T_PCH:
3761 case WM_T_PCH2:
3762 case WM_T_PCH_LPT:
3763 if (wm_check_mng_mode(sc) != 0)
3764 wm_get_hw_control(sc);
3765 break;
3766 default:
3767 break;
3768 }
3769
3770 /* Reset the PHY. */
3771 if (sc->sc_flags & WM_F_HAS_MII)
3772 wm_gmii_reset(sc);
3773
3774 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3775 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3776 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3777 || (sc->sc_type == WM_T_PCH_LPT))
3778 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3779
3780 /* Initialize the transmit descriptor ring. */
3781 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3782 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3783 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3784 sc->sc_txfree = WM_NTXDESC(sc);
3785 sc->sc_txnext = 0;
3786
3787 if (sc->sc_type < WM_T_82543) {
3788 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
3789 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
3790 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3791 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3792 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3793 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3794 } else {
3795 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
3796 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
3797 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3798 CSR_WRITE(sc, WMREG_TDH, 0);
3799 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3800 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3801
3802 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3803 /*
3804 * Don't write TDT before TCTL.EN is set.
3805 			 * See the documentation.
3806 */
3807 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3808 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3809 | TXDCTL_WTHRESH(0));
3810 else {
3811 CSR_WRITE(sc, WMREG_TDT, 0);
3812 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3813 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3814 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3815 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3816 }
3817 }
3818 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3819 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3820
3821 /* Initialize the transmit job descriptors. */
3822 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3823 sc->sc_txsoft[i].txs_mbuf = NULL;
3824 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3825 sc->sc_txsnext = 0;
3826 sc->sc_txsdirty = 0;
3827
3828 /*
3829 * Initialize the receive descriptor and receive job
3830 * descriptor rings.
3831 */
3832 if (sc->sc_type < WM_T_82543) {
3833 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3834 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3835 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3836 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3837 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3838 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3839
3840 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3841 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3842 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3843 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3844 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3845 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3846 } else {
3847 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3848 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3849 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3850 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3851 CSR_WRITE(sc, WMREG_EITR(0), 450);
3852 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
3853 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
3854 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3855 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
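			/*
			 * Note (assumption, not from the original source):
			 * SRRCTL's BSIZEPKT field is in 1KB units, so
			 * MCLBYTES = 2048 programs a 2KB per-descriptor
			 * receive buffer here.
			 */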
3856 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3857 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3858 | RXDCTL_WTHRESH(1));
3859 } else {
3860 CSR_WRITE(sc, WMREG_RDH, 0);
3861 CSR_WRITE(sc, WMREG_RDT, 0);
3862 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3863 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3864 }
3865 }
3866 for (i = 0; i < WM_NRXDESC; i++) {
3867 rxs = &sc->sc_rxsoft[i];
3868 if (rxs->rxs_mbuf == NULL) {
3869 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3870 log(LOG_ERR, "%s: unable to allocate or map "
3871 "rx buffer %d, error = %d\n",
3872 device_xname(sc->sc_dev), i, error);
3873 /*
3874 * XXX Should attempt to run with fewer receive
3875 * XXX buffers instead of just failing.
3876 */
3877 wm_rxdrain(sc);
3878 goto out;
3879 }
3880 } else {
3881 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3882 WM_INIT_RXDESC(sc, i);
3883 			/*
3884 			 * For 82575 and newer devices, the RX descriptors
3885 			 * must be initialized after RCTL.EN is set in
3886 			 * wm_set_filter().
3887 			 */
3888 }
3889 }
3890 sc->sc_rxptr = 0;
3891 sc->sc_rxdiscard = 0;
3892 WM_RXCHAIN_RESET(sc);
3893
3894 /*
3895 * Clear out the VLAN table -- we don't use it (yet).
3896 */
3897 CSR_WRITE(sc, WMREG_VET, 0);
3898 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3899 trynum = 10; /* Due to hw errata */
3900 else
3901 trynum = 1;
3902 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3903 for (j = 0; j < trynum; j++)
3904 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3905
3906 /*
3907 * Set up flow-control parameters.
3908 *
3909 * XXX Values could probably stand some tuning.
3910 */
3911 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3912 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
3913 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
3914 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3915 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3916 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3917 }
3918
3919 sc->sc_fcrtl = FCRTL_DFLT;
3920 if (sc->sc_type < WM_T_82543) {
3921 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3922 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3923 } else {
3924 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3925 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3926 }
3927
3928 if (sc->sc_type == WM_T_80003)
3929 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3930 else
3931 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3932
3933 /* Writes the control register. */
3934 wm_set_vlan(sc);
3935
3936 if (sc->sc_flags & WM_F_HAS_MII) {
3937 int val;
3938
3939 switch (sc->sc_type) {
3940 case WM_T_80003:
3941 case WM_T_ICH8:
3942 case WM_T_ICH9:
3943 case WM_T_ICH10:
3944 case WM_T_PCH:
3945 case WM_T_PCH2:
3946 case WM_T_PCH_LPT:
3947 /*
3948 			 * Set the MAC to wait the maximum time between each
3949 * iteration and increase the max iterations when
3950 * polling the phy; this fixes erroneous timeouts at
3951 * 10Mbps.
3952 */
3953 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3954 0xFFFF);
3955 val = wm_kmrn_readreg(sc,
3956 KUMCTRLSTA_OFFSET_INB_PARAM);
3957 val |= 0x3F;
3958 wm_kmrn_writereg(sc,
3959 KUMCTRLSTA_OFFSET_INB_PARAM, val);
3960 break;
3961 default:
3962 break;
3963 }
3964
3965 if (sc->sc_type == WM_T_80003) {
3966 val = CSR_READ(sc, WMREG_CTRL_EXT);
3967 val &= ~CTRL_EXT_LINK_MODE_MASK;
3968 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3969
3970 /* Bypass RX and TX FIFO's */
3971 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3972 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
3973 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3974 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3975 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3976 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3977 }
3978 }
3979 #if 0
3980 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3981 #endif
3982
3983 /* Set up checksum offload parameters. */
3984 reg = CSR_READ(sc, WMREG_RXCSUM);
3985 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3986 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3987 reg |= RXCSUM_IPOFL;
3988 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3989 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3990 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3991 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3992 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3993
3994 /* Set up the interrupt registers. */
3995 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3996 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3997 ICR_RXO | ICR_RXT0;
3998 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3999
4000 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4001 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4002 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4003 reg = CSR_READ(sc, WMREG_KABGTXD);
4004 reg |= KABGTXD_BGSQLBIAS;
4005 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4006 }
4007
4008 /* Set up the inter-packet gap. */
4009 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4010
4011 if (sc->sc_type >= WM_T_82543) {
4012 /*
4013 * Set up the interrupt throttling register (units of 256ns)
4014 * Note that a footnote in Intel's documentation says this
4015 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4016 * or 10Mbit mode. Empirically, it appears to be the case
4017 		 * that this is also true for the 1024ns units of the other
4018 * interrupt-related timer registers -- so, really, we ought
4019 * to divide this value by 4 when the link speed is low.
4020 *
4021 * XXX implement this division at link speed change!
4022 */
4023
4024 /*
4025 * For N interrupts/sec, set this value to:
4026 * 1000000000 / (N * 256). Note that we set the
4027 * absolute and packet timer values to this value
4028 * divided by 4 to get "simple timer" behavior.
4029 */
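		/*
		 * Worked example: sc_itr = 1500 below gives
		 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec.
		 */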
4030
4031 sc->sc_itr = 1500; /* 2604 ints/sec */
4032 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4033 }
4034
4035 /* Set the VLAN ethernetype. */
4036 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4037
4038 /*
4039 * Set up the transmit control register; we start out with
4040 	 * a collision distance suitable for FDX, but update it when
4041 * we resolve the media type.
4042 */
4043 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4044 | TCTL_CT(TX_COLLISION_THRESHOLD)
4045 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4046 if (sc->sc_type >= WM_T_82571)
4047 sc->sc_tctl |= TCTL_MULR;
4048 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4049
4050 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4051 		/* Write TDT after TCTL.EN is set. See the documentation. */
4052 CSR_WRITE(sc, WMREG_TDT, 0);
4053 }
4054
4055 if (sc->sc_type == WM_T_80003) {
4056 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4057 reg &= ~TCTL_EXT_GCEX_MASK;
4058 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4059 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4060 }
4061
4062 /* Set the media. */
4063 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4064 goto out;
4065
4066 /* Configure for OS presence */
4067 wm_init_manageability(sc);
4068
4069 /*
4070 * Set up the receive control register; we actually program
4071 * the register when we set the receive filter. Use multicast
4072 * address offset type 0.
4073 *
4074 * Only the i82544 has the ability to strip the incoming
4075 * CRC, so we don't enable that feature.
4076 */
4077 sc->sc_mchash_type = 0;
4078 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4079 | RCTL_MO(sc->sc_mchash_type);
4080
4081 /*
4082 	 * The I350 has a bug where it always strips the CRC whether
4083 	 * asked to or not, so ask for stripped CRC here and cope with
4084 	 * it in the receive path. */
4085 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4086 || (sc->sc_type == WM_T_I210))
4087 sc->sc_rctl |= RCTL_SECRC;
4088
4089 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4090 && (ifp->if_mtu > ETHERMTU)) {
4091 sc->sc_rctl |= RCTL_LPE;
4092 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4093 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4094 }
4095
4096 if (MCLBYTES == 2048) {
4097 sc->sc_rctl |= RCTL_2k;
4098 } else {
4099 if (sc->sc_type >= WM_T_82543) {
4100 switch (MCLBYTES) {
4101 case 4096:
4102 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4103 break;
4104 case 8192:
4105 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4106 break;
4107 case 16384:
4108 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4109 break;
4110 default:
4111 panic("wm_init: MCLBYTES %d unsupported",
4112 MCLBYTES);
4113 break;
4114 }
4115 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4116 }
4117
4118 /* Set the receive filter. */
4119 wm_set_filter(sc);
4120
4121 /* Enable ECC */
4122 switch (sc->sc_type) {
4123 case WM_T_82571:
4124 reg = CSR_READ(sc, WMREG_PBA_ECC);
4125 reg |= PBA_ECC_CORR_EN;
4126 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4127 break;
4128 case WM_T_PCH_LPT:
4129 reg = CSR_READ(sc, WMREG_PBECCSTS);
4130 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4131 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4132
4133 reg = CSR_READ(sc, WMREG_CTRL);
4134 reg |= CTRL_MEHE;
4135 CSR_WRITE(sc, WMREG_CTRL, reg);
4136 break;
4137 default:
4138 break;
4139 }
4140
4141 	/* On 82575 and later, set RDT only if RX is enabled */
4142 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4143 for (i = 0; i < WM_NRXDESC; i++)
4144 WM_INIT_RXDESC(sc, i);
4145
4146 sc->sc_stopping = false;
4147
4148 /* Start the one second link check clock. */
4149 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4150
4151 /* ...all done! */
4152 ifp->if_flags |= IFF_RUNNING;
4153 ifp->if_flags &= ~IFF_OACTIVE;
4154
4155 out:
4156 sc->sc_if_flags = ifp->if_flags;
4157 if (error)
4158 log(LOG_ERR, "%s: interface not running\n",
4159 device_xname(sc->sc_dev));
4160 return error;
4161 }
4162
4163 /*
4164 * wm_stop: [ifnet interface function]
4165 *
4166 * Stop transmission on the interface.
4167 */
4168 static void
4169 wm_stop(struct ifnet *ifp, int disable)
4170 {
4171 struct wm_softc *sc = ifp->if_softc;
4172
4173 WM_BOTH_LOCK(sc);
4174 wm_stop_locked(ifp, disable);
4175 WM_BOTH_UNLOCK(sc);
4176 }
4177
4178 static void
4179 wm_stop_locked(struct ifnet *ifp, int disable)
4180 {
4181 struct wm_softc *sc = ifp->if_softc;
4182 struct wm_txsoft *txs;
4183 int i;
4184
4185 KASSERT(WM_BOTH_LOCKED(sc));
4186
4187 sc->sc_stopping = true;
4188
4189 /* Stop the one second clock. */
4190 callout_stop(&sc->sc_tick_ch);
4191
4192 /* Stop the 82547 Tx FIFO stall check timer. */
4193 if (sc->sc_type == WM_T_82547)
4194 callout_stop(&sc->sc_txfifo_ch);
4195
4196 if (sc->sc_flags & WM_F_HAS_MII) {
4197 /* Down the MII. */
4198 mii_down(&sc->sc_mii);
4199 } else {
4200 #if 0
4201 /* Should we clear PHY's status properly? */
4202 wm_reset(sc);
4203 #endif
4204 }
4205
4206 /* Stop the transmit and receive processes. */
4207 CSR_WRITE(sc, WMREG_TCTL, 0);
4208 CSR_WRITE(sc, WMREG_RCTL, 0);
4209 sc->sc_rctl &= ~RCTL_EN;
4210
4211 /*
4212 * Clear the interrupt mask to ensure the device cannot assert its
4213 * interrupt line.
4214 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4215 * any currently pending or shared interrupt.
4216 */
4217 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4218 sc->sc_icr = 0;
4219
4220 /* Release any queued transmit buffers. */
4221 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4222 txs = &sc->sc_txsoft[i];
4223 if (txs->txs_mbuf != NULL) {
4224 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4225 m_freem(txs->txs_mbuf);
4226 txs->txs_mbuf = NULL;
4227 }
4228 }
4229
4230 /* Mark the interface as down and cancel the watchdog timer. */
4231 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4232 ifp->if_timer = 0;
4233
4234 if (disable)
4235 wm_rxdrain(sc);
4236
4237 #if 0 /* notyet */
4238 if (sc->sc_type >= WM_T_82544)
4239 CSR_WRITE(sc, WMREG_WUC, 0);
4240 #endif
4241 }
4242
4243 /*
4244 * wm_tx_offload:
4245 *
4246 * Set up TCP/IP checksumming parameters for the
4247 * specified packet.
4248 */
4249 static int
4250 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4251 uint8_t *fieldsp)
4252 {
4253 struct mbuf *m0 = txs->txs_mbuf;
4254 struct livengood_tcpip_ctxdesc *t;
4255 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4256 uint32_t ipcse;
4257 struct ether_header *eh;
4258 int offset, iphl;
4259 uint8_t fields;
4260
4261 /*
4262 * XXX It would be nice if the mbuf pkthdr had offset
4263 * fields for the protocol headers.
4264 */
4265
4266 eh = mtod(m0, struct ether_header *);
4267 switch (htons(eh->ether_type)) {
4268 case ETHERTYPE_IP:
4269 case ETHERTYPE_IPV6:
4270 offset = ETHER_HDR_LEN;
4271 break;
4272
4273 case ETHERTYPE_VLAN:
4274 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4275 break;
4276
4277 default:
4278 /*
4279 * Don't support this protocol or encapsulation.
4280 */
4281 *fieldsp = 0;
4282 *cmdp = 0;
4283 return 0;
4284 }
4285
4286 if ((m0->m_pkthdr.csum_flags &
4287 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4288 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4289 } else {
4290 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4291 }
4292 ipcse = offset + iphl - 1;
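	/*
	 * For example, an untagged IPv4 packet with no IP options has
	 * ipcse = 14 + 20 - 1 = 33: the last byte covered by the IP
	 * header checksum.
	 */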
4293
4294 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4295 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4296 seg = 0;
4297 fields = 0;
4298
4299 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4300 int hlen = offset + iphl;
4301 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4302
4303 if (__predict_false(m0->m_len <
4304 (hlen + sizeof(struct tcphdr)))) {
4305 /*
4306 * TCP/IP headers are not in the first mbuf; we need
4307 * to do this the slow and painful way. Let's just
4308 * hope this doesn't happen very often.
4309 */
4310 struct tcphdr th;
4311
4312 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4313
4314 m_copydata(m0, hlen, sizeof(th), &th);
4315 if (v4) {
4316 struct ip ip;
4317
4318 m_copydata(m0, offset, sizeof(ip), &ip);
4319 ip.ip_len = 0;
4320 m_copyback(m0,
4321 offset + offsetof(struct ip, ip_len),
4322 sizeof(ip.ip_len), &ip.ip_len);
4323 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4324 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4325 } else {
4326 struct ip6_hdr ip6;
4327
4328 m_copydata(m0, offset, sizeof(ip6), &ip6);
4329 ip6.ip6_plen = 0;
4330 m_copyback(m0,
4331 offset + offsetof(struct ip6_hdr, ip6_plen),
4332 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4333 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4334 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4335 }
4336 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4337 sizeof(th.th_sum), &th.th_sum);
4338
4339 hlen += th.th_off << 2;
4340 } else {
4341 /*
4342 * TCP/IP headers are in the first mbuf; we can do
4343 * this the easy way.
4344 */
4345 struct tcphdr *th;
4346
4347 if (v4) {
4348 struct ip *ip =
4349 (void *)(mtod(m0, char *) + offset);
4350 th = (void *)(mtod(m0, char *) + hlen);
4351
4352 ip->ip_len = 0;
4353 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4354 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4355 } else {
4356 struct ip6_hdr *ip6 =
4357 (void *)(mtod(m0, char *) + offset);
4358 th = (void *)(mtod(m0, char *) + hlen);
4359
4360 ip6->ip6_plen = 0;
4361 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4362 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4363 }
4364 hlen += th->th_off << 2;
4365 }
4366
4367 if (v4) {
4368 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4369 cmdlen |= WTX_TCPIP_CMD_IP;
4370 } else {
4371 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4372 ipcse = 0;
4373 }
4374 cmd |= WTX_TCPIP_CMD_TSE;
4375 cmdlen |= WTX_TCPIP_CMD_TSE |
4376 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4377 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4378 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4379 }
4380
4381 /*
4382 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4383 * offload feature, if we load the context descriptor, we
4384 * MUST provide valid values for IPCSS and TUCSS fields.
4385 */
4386
4387 ipcs = WTX_TCPIP_IPCSS(offset) |
4388 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4389 WTX_TCPIP_IPCSE(ipcse);
4390 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
4391 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
4392 fields |= WTX_IXSM;
4393 }
4394
4395 offset += iphl;
4396
4397 if (m0->m_pkthdr.csum_flags &
4398 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
4399 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4400 fields |= WTX_TXSM;
4401 tucs = WTX_TCPIP_TUCSS(offset) |
4402 WTX_TCPIP_TUCSO(offset +
4403 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
4404 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4405 } else if ((m0->m_pkthdr.csum_flags &
4406 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
4407 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4408 fields |= WTX_TXSM;
4409 tucs = WTX_TCPIP_TUCSS(offset) |
4410 WTX_TCPIP_TUCSO(offset +
4411 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
4412 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4413 } else {
4414 /* Just initialize it to a valid TCP context. */
4415 tucs = WTX_TCPIP_TUCSS(offset) |
4416 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
4417 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4418 }
4419
4420 /* Fill in the context descriptor. */
4421 t = (struct livengood_tcpip_ctxdesc *)
4422 &sc->sc_txdescs[sc->sc_txnext];
4423 t->tcpip_ipcs = htole32(ipcs);
4424 t->tcpip_tucs = htole32(tucs);
4425 t->tcpip_cmdlen = htole32(cmdlen);
4426 t->tcpip_seg = htole32(seg);
4427 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4428
4429 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4430 txs->txs_ndesc++;
4431
4432 *cmdp = cmd;
4433 *fieldsp = fields;
4434
4435 return 0;
4436 }
4437
4438 static void
4439 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
4440 {
4441 struct mbuf *m;
4442 int i;
4443
4444 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
4445 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
4446 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
4447 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
4448 m->m_data, m->m_len, m->m_flags);
4449 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
4450 i, i == 1 ? "" : "s");
4451 }
4452
4453 /*
4454 * wm_82547_txfifo_stall:
4455 *
4456 * Callout used to wait for the 82547 Tx FIFO to drain,
4457 * reset the FIFO pointers, and restart packet transmission.
4458 */
4459 static void
4460 wm_82547_txfifo_stall(void *arg)
4461 {
4462 struct wm_softc *sc = arg;
4463 #ifndef WM_MPSAFE
4464 int s;
4465
4466 s = splnet();
4467 #endif
4468 WM_TX_LOCK(sc);
4469
4470 if (sc->sc_stopping)
4471 goto out;
4472
4473 if (sc->sc_txfifo_stall) {
4474 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
4475 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
4476 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
4477 /*
4478 * Packets have drained. Stop transmitter, reset
4479 * FIFO pointers, restart transmitter, and kick
4480 * the packet queue.
4481 */
4482 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
4483 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
4484 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
4485 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
4486 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
4487 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
4488 CSR_WRITE(sc, WMREG_TCTL, tctl);
4489 CSR_WRITE_FLUSH(sc);
4490
4491 sc->sc_txfifo_head = 0;
4492 sc->sc_txfifo_stall = 0;
4493 wm_start_locked(&sc->sc_ethercom.ec_if);
4494 } else {
4495 /*
4496 * Still waiting for packets to drain; try again in
4497 * another tick.
4498 */
4499 callout_schedule(&sc->sc_txfifo_ch, 1);
4500 }
4501 }
4502
4503 out:
4504 WM_TX_UNLOCK(sc);
4505 #ifndef WM_MPSAFE
4506 splx(s);
4507 #endif
4508 }
4509
4510 /*
4511 * wm_82547_txfifo_bugchk:
4512 *
4513 * Check for bug condition in the 82547 Tx FIFO. We need to
4514 * prevent enqueueing a packet that would wrap around the end
4515  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
4516 *
4517 * We do this by checking the amount of space before the end
4518 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
4519 * the Tx FIFO, wait for all remaining packets to drain, reset
4520 * the internal FIFO pointers to the beginning, and restart
4521 * transmission on the interface.
4522 */
4523 #define WM_FIFO_HDR 0x10
4524 #define WM_82547_PAD_LEN 0x3e0
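/*
 * Note (hedged, values as used by other drivers for this erratum):
 * WM_FIFO_HDR is the 16-byte granularity to which each packet is
 * rounded in the Tx FIFO, and WM_82547_PAD_LEN is the pad length
 * used in the wrap check below.
 */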
4525 static int
4526 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
4527 {
4528 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
4529 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
4530
4531 /* Just return if already stalled. */
4532 if (sc->sc_txfifo_stall)
4533 return 1;
4534
4535 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4536 /* Stall only occurs in half-duplex mode. */
4537 goto send_packet;
4538 }
4539
4540 if (len >= WM_82547_PAD_LEN + space) {
4541 sc->sc_txfifo_stall = 1;
4542 callout_schedule(&sc->sc_txfifo_ch, 1);
4543 return 1;
4544 }
4545
4546 send_packet:
4547 sc->sc_txfifo_head += len;
4548 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
4549 sc->sc_txfifo_head -= sc->sc_txfifo_size;
4550
4551 return 0;
4552 }
4553
4554 /*
4555 * wm_start: [ifnet interface function]
4556 *
4557 * Start packet transmission on the interface.
4558 */
4559 static void
4560 wm_start(struct ifnet *ifp)
4561 {
4562 struct wm_softc *sc = ifp->if_softc;
4563
4564 WM_TX_LOCK(sc);
4565 if (!sc->sc_stopping)
4566 wm_start_locked(ifp);
4567 WM_TX_UNLOCK(sc);
4568 }
4569
4570 static void
4571 wm_start_locked(struct ifnet *ifp)
4572 {
4573 struct wm_softc *sc = ifp->if_softc;
4574 struct mbuf *m0;
4575 struct m_tag *mtag;
4576 struct wm_txsoft *txs;
4577 bus_dmamap_t dmamap;
4578 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
4579 bus_addr_t curaddr;
4580 bus_size_t seglen, curlen;
4581 uint32_t cksumcmd;
4582 uint8_t cksumfields;
4583
4584 KASSERT(WM_TX_LOCKED(sc));
4585
4586 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4587 return;
4588
4589 /* Remember the previous number of free descriptors. */
4590 ofree = sc->sc_txfree;
4591
4592 /*
4593 * Loop through the send queue, setting up transmit descriptors
4594 * until we drain the queue, or use up all available transmit
4595 * descriptors.
4596 */
4597 for (;;) {
4598 m0 = NULL;
4599
4600 /* Get a work queue entry. */
4601 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
4602 wm_txintr(sc);
4603 if (sc->sc_txsfree == 0) {
4604 DPRINTF(WM_DEBUG_TX,
4605 ("%s: TX: no free job descriptors\n",
4606 device_xname(sc->sc_dev)));
4607 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
4608 break;
4609 }
4610 }
4611
4612 /* Grab a packet off the queue. */
4613 IFQ_DEQUEUE(&ifp->if_snd, m0);
4614 if (m0 == NULL)
4615 break;
4616
4617 DPRINTF(WM_DEBUG_TX,
4618 ("%s: TX: have packet to transmit: %p\n",
4619 device_xname(sc->sc_dev), m0));
4620
4621 txs = &sc->sc_txsoft[sc->sc_txsnext];
4622 dmamap = txs->txs_dmamap;
4623
4624 use_tso = (m0->m_pkthdr.csum_flags &
4625 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
4626
4627 /*
4628 * So says the Linux driver:
4629 * The controller does a simple calculation to make sure
4630 * there is enough room in the FIFO before initiating the
4631 * DMA for each buffer. The calc is:
4632 * 4 = ceil(buffer len / MSS)
4633 * To make sure we don't overrun the FIFO, adjust the max
4634 * buffer len if the MSS drops.
4635 */
4636 dmamap->dm_maxsegsz =
4637 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
4638 ? m0->m_pkthdr.segsz << 2
4639 : WTX_MAX_LEN;
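		/*
		 * Illustrative example (assuming WTX_MAX_LEN lies between
		 * 4 * 536 and 4 * 1460): an MSS of 536 caps each DMA
		 * segment at 2144 bytes, while a typical MSS of 1460
		 * would give 5840, so WTX_MAX_LEN is used instead.
		 */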
4640
4641 /*
4642 * Load the DMA map. If this fails, the packet either
4643 * didn't fit in the allotted number of segments, or we
4644 * were short on resources. For the too-many-segments
4645 * case, we simply report an error and drop the packet,
4646 * since we can't sanely copy a jumbo packet to a single
4647 * buffer.
4648 */
4649 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
4650 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
4651 if (error) {
4652 if (error == EFBIG) {
4653 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4654 log(LOG_ERR, "%s: Tx packet consumes too many "
4655 "DMA segments, dropping...\n",
4656 device_xname(sc->sc_dev));
4657 wm_dump_mbuf_chain(sc, m0);
4658 m_freem(m0);
4659 continue;
4660 }
4661 /* Short on resources, just stop for now. */
4662 DPRINTF(WM_DEBUG_TX,
4663 ("%s: TX: dmamap load failed: %d\n",
4664 device_xname(sc->sc_dev), error));
4665 break;
4666 }
4667
4668 segs_needed = dmamap->dm_nsegs;
4669 if (use_tso) {
4670 /* For sentinel descriptor; see below. */
4671 segs_needed++;
4672 }
4673
4674 /*
4675 * Ensure we have enough descriptors free to describe
4676 * the packet. Note, we always reserve one descriptor
4677 * at the end of the ring due to the semantics of the
4678 * TDT register, plus one more in the event we need
4679 * to load offload context.
4680 */
4681 if (segs_needed > sc->sc_txfree - 2) {
4682 /*
4683 * Not enough free descriptors to transmit this
4684 * packet. We haven't committed anything yet,
4685 * so just unload the DMA map, put the packet
4686 			 * back on the queue, and punt. Notify the upper
4687 * layer that there are no more slots left.
4688 */
4689 DPRINTF(WM_DEBUG_TX,
4690 ("%s: TX: need %d (%d) descriptors, have %d\n",
4691 device_xname(sc->sc_dev), dmamap->dm_nsegs,
4692 segs_needed, sc->sc_txfree - 1));
4693 ifp->if_flags |= IFF_OACTIVE;
4694 bus_dmamap_unload(sc->sc_dmat, dmamap);
4695 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
4696 break;
4697 }
4698
4699 /*
4700 * Check for 82547 Tx FIFO bug. We need to do this
4701 * once we know we can transmit the packet, since we
4702 * do some internal FIFO space accounting here.
4703 */
4704 if (sc->sc_type == WM_T_82547 &&
4705 wm_82547_txfifo_bugchk(sc, m0)) {
4706 DPRINTF(WM_DEBUG_TX,
4707 ("%s: TX: 82547 Tx FIFO bug detected\n",
4708 device_xname(sc->sc_dev)));
4709 ifp->if_flags |= IFF_OACTIVE;
4710 bus_dmamap_unload(sc->sc_dmat, dmamap);
4711 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
4712 break;
4713 }
4714
4715 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
4716
4717 DPRINTF(WM_DEBUG_TX,
4718 ("%s: TX: packet has %d (%d) DMA segments\n",
4719 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
4720
4721 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
4722
4723 /*
4724 * Store a pointer to the packet so that we can free it
4725 * later.
4726 *
4727 * Initially, we consider the number of descriptors the
4728 		 * packet uses to be the number of DMA segments. This may be
4729 * incremented by 1 if we do checksum offload (a descriptor
4730 * is used to set the checksum context).
4731 */
4732 txs->txs_mbuf = m0;
4733 txs->txs_firstdesc = sc->sc_txnext;
4734 txs->txs_ndesc = segs_needed;
4735
4736 /* Set up offload parameters for this packet. */
4737 if (m0->m_pkthdr.csum_flags &
4738 (M_CSUM_TSOv4|M_CSUM_TSOv6|
4739 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
4740 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
4741 if (wm_tx_offload(sc, txs, &cksumcmd,
4742 &cksumfields) != 0) {
4743 /* Error message already displayed. */
4744 bus_dmamap_unload(sc->sc_dmat, dmamap);
4745 continue;
4746 }
4747 } else {
4748 cksumcmd = 0;
4749 cksumfields = 0;
4750 }
4751
4752 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
4753
4754 /* Sync the DMA map. */
4755 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
4756 BUS_DMASYNC_PREWRITE);
4757
4758 /* Initialize the transmit descriptor. */
4759 for (nexttx = sc->sc_txnext, seg = 0;
4760 seg < dmamap->dm_nsegs; seg++) {
4761 for (seglen = dmamap->dm_segs[seg].ds_len,
4762 curaddr = dmamap->dm_segs[seg].ds_addr;
4763 seglen != 0;
4764 curaddr += curlen, seglen -= curlen,
4765 nexttx = WM_NEXTTX(sc, nexttx)) {
4766 curlen = seglen;
4767
4768 /*
4769 * So says the Linux driver:
4770 * Work around for premature descriptor
4771 * write-backs in TSO mode. Append a
4772 * 4-byte sentinel descriptor.
4773 */
4774 if (use_tso &&
4775 seg == dmamap->dm_nsegs - 1 &&
4776 curlen > 8)
4777 curlen -= 4;
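				/*
				 * The 4 trimmed bytes are emitted as their
				 * own descriptor on the next pass of this
				 * loop; that is the sentinel descriptor
				 * reserved via segs_needed++ above.
				 */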
4778
4779 wm_set_dma_addr(
4780 &sc->sc_txdescs[nexttx].wtx_addr,
4781 curaddr);
4782 sc->sc_txdescs[nexttx].wtx_cmdlen =
4783 htole32(cksumcmd | curlen);
4784 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
4785 0;
4786 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
4787 cksumfields;
4788 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
4789 lasttx = nexttx;
4790
4791 DPRINTF(WM_DEBUG_TX,
4792 ("%s: TX: desc %d: low %#" PRIx64 ", "
4793 "len %#04zx\n",
4794 device_xname(sc->sc_dev), nexttx,
4795 (uint64_t)curaddr, curlen));
4796 }
4797 }
4798
4799 KASSERT(lasttx != -1);
4800
4801 /*
4802 * Set up the command byte on the last descriptor of
4803 * the packet. If we're in the interrupt delay window,
4804 * delay the interrupt.
4805 */
4806 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4807 htole32(WTX_CMD_EOP | WTX_CMD_RS);
4808
4809 /*
4810 * If VLANs are enabled and the packet has a VLAN tag, set
4811 * up the descriptor to encapsulate the packet for us.
4812 *
4813 * This is only valid on the last descriptor of the packet.
4814 */
4815 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4816 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4817 htole32(WTX_CMD_VLE);
4818 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
4819 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
4820 }
4821
4822 txs->txs_lastdesc = lasttx;
4823
4824 DPRINTF(WM_DEBUG_TX,
4825 ("%s: TX: desc %d: cmdlen 0x%08x\n",
4826 device_xname(sc->sc_dev),
4827 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
4828
4829 /* Sync the descriptors we're using. */
4830 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
4831 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4832
4833 /* Give the packet to the chip. */
4834 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
4835
4836 DPRINTF(WM_DEBUG_TX,
4837 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
4838
4839 DPRINTF(WM_DEBUG_TX,
4840 ("%s: TX: finished transmitting packet, job %d\n",
4841 device_xname(sc->sc_dev), sc->sc_txsnext));
4842
4843 /* Advance the tx pointer. */
4844 sc->sc_txfree -= txs->txs_ndesc;
4845 sc->sc_txnext = nexttx;
4846
4847 sc->sc_txsfree--;
4848 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
4849
4850 /* Pass the packet to any BPF listeners. */
4851 bpf_mtap(ifp, m0);
4852 }
4853
4854 if (m0 != NULL) {
4855 ifp->if_flags |= IFF_OACTIVE;
4856 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4857 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
4858 m_freem(m0);
4859 }
4860
4861 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
4862 /* No more slots; notify upper layer. */
4863 ifp->if_flags |= IFF_OACTIVE;
4864 }
4865
4866 if (sc->sc_txfree != ofree) {
4867 /* Set a watchdog timer in case the chip flakes out. */
4868 ifp->if_timer = 5;
4869 }
4870 }
4871
4872 /*
4873 * wm_nq_tx_offload:
4874 *
4875 * Set up TCP/IP checksumming parameters for the
4876 * specified packet, for NEWQUEUE devices
4877 */
4878 static int
4879 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
4880 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
4881 {
4882 struct mbuf *m0 = txs->txs_mbuf;
4883 struct m_tag *mtag;
4884 uint32_t vl_len, mssidx, cmdc;
4885 struct ether_header *eh;
4886 int offset, iphl;
4887
4888 /*
4889 * XXX It would be nice if the mbuf pkthdr had offset
4890 * fields for the protocol headers.
4891 */
4892 *cmdlenp = 0;
4893 *fieldsp = 0;
4894
4895 eh = mtod(m0, struct ether_header *);
4896 switch (htons(eh->ether_type)) {
4897 case ETHERTYPE_IP:
4898 case ETHERTYPE_IPV6:
4899 offset = ETHER_HDR_LEN;
4900 break;
4901
4902 case ETHERTYPE_VLAN:
4903 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4904 break;
4905
4906 default:
4907 /* Don't support this protocol or encapsulation. */
4908 *do_csum = false;
4909 return 0;
4910 }
4911 *do_csum = true;
4912 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
4913 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
4914
4915 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
4916 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
4917
4918 if ((m0->m_pkthdr.csum_flags &
4919 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
4920 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4921 } else {
4922 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4923 }
4924 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
4925 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
4926
4927 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4928 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
4929 << NQTXC_VLLEN_VLAN_SHIFT);
4930 *cmdlenp |= NQTX_CMD_VLE;
4931 }
4932
4933 mssidx = 0;
4934
4935 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4936 int hlen = offset + iphl;
4937 int tcp_hlen;
4938 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4939
4940 if (__predict_false(m0->m_len <
4941 (hlen + sizeof(struct tcphdr)))) {
4942 /*
4943 * TCP/IP headers are not in the first mbuf; we need
4944 * to do this the slow and painful way. Let's just
4945 * hope this doesn't happen very often.
4946 */
4947 struct tcphdr th;
4948
4949 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4950
4951 m_copydata(m0, hlen, sizeof(th), &th);
4952 if (v4) {
4953 struct ip ip;
4954
4955 m_copydata(m0, offset, sizeof(ip), &ip);
4956 ip.ip_len = 0;
4957 m_copyback(m0,
4958 offset + offsetof(struct ip, ip_len),
4959 sizeof(ip.ip_len), &ip.ip_len);
4960 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4961 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4962 } else {
4963 struct ip6_hdr ip6;
4964
4965 m_copydata(m0, offset, sizeof(ip6), &ip6);
4966 ip6.ip6_plen = 0;
4967 m_copyback(m0,
4968 offset + offsetof(struct ip6_hdr, ip6_plen),
4969 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4970 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4971 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4972 }
4973 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4974 sizeof(th.th_sum), &th.th_sum);
4975
4976 tcp_hlen = th.th_off << 2;
4977 } else {
4978 /*
4979 * TCP/IP headers are in the first mbuf; we can do
4980 * this the easy way.
4981 */
4982 struct tcphdr *th;
4983
4984 if (v4) {
4985 struct ip *ip =
4986 (void *)(mtod(m0, char *) + offset);
4987 th = (void *)(mtod(m0, char *) + hlen);
4988
4989 ip->ip_len = 0;
4990 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4991 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4992 } else {
4993 struct ip6_hdr *ip6 =
4994 (void *)(mtod(m0, char *) + offset);
4995 th = (void *)(mtod(m0, char *) + hlen);
4996
4997 ip6->ip6_plen = 0;
4998 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4999 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5000 }
5001 tcp_hlen = th->th_off << 2;
5002 }
5003 hlen += tcp_hlen;
5004 *cmdlenp |= NQTX_CMD_TSE;
5005
5006 if (v4) {
5007 WM_EVCNT_INCR(&sc->sc_ev_txtso);
5008 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
5009 } else {
5010 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5011 *fieldsp |= NQTXD_FIELDS_TUXSM;
5012 }
5013 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
5014 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5015 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
5016 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
5017 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
5018 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
5019 } else {
5020 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
5021 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5022 }
5023
5024 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
5025 *fieldsp |= NQTXD_FIELDS_IXSM;
5026 cmdc |= NQTXC_CMD_IP4;
5027 }
5028
5029 if (m0->m_pkthdr.csum_flags &
5030 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5031 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5032 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5033 cmdc |= NQTXC_CMD_TCP;
5034 } else {
5035 cmdc |= NQTXC_CMD_UDP;
5036 }
5037 cmdc |= NQTXC_CMD_IP4;
5038 *fieldsp |= NQTXD_FIELDS_TUXSM;
5039 }
5040 if (m0->m_pkthdr.csum_flags &
5041 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5042 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5043 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5044 cmdc |= NQTXC_CMD_TCP;
5045 } else {
5046 cmdc |= NQTXC_CMD_UDP;
5047 }
5048 cmdc |= NQTXC_CMD_IP6;
5049 *fieldsp |= NQTXD_FIELDS_TUXSM;
5050 }
5051
5052 /* Fill in the context descriptor. */
5053 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5054 htole32(vl_len);
5055 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5056 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5057 htole32(cmdc);
5058 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5059 htole32(mssidx);
5060 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5061 DPRINTF(WM_DEBUG_TX,
5062 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5063 sc->sc_txnext, 0, vl_len));
5064 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5065 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5066 txs->txs_ndesc++;
5067 return 0;
5068 }
5069
5070 /*
5071 * wm_nq_start: [ifnet interface function]
5072 *
5073 * Start packet transmission on the interface for NEWQUEUE devices
5074 */
5075 static void
5076 wm_nq_start(struct ifnet *ifp)
5077 {
5078 struct wm_softc *sc = ifp->if_softc;
5079
5080 WM_TX_LOCK(sc);
5081 if (!sc->sc_stopping)
5082 wm_nq_start_locked(ifp);
5083 WM_TX_UNLOCK(sc);
5084 }
5085
5086 static void
5087 wm_nq_start_locked(struct ifnet *ifp)
5088 {
5089 struct wm_softc *sc = ifp->if_softc;
5090 struct mbuf *m0;
5091 struct m_tag *mtag;
5092 struct wm_txsoft *txs;
5093 bus_dmamap_t dmamap;
5094 int error, nexttx, lasttx = -1, seg, segs_needed;
5095 bool do_csum, sent;
5096
5097 KASSERT(WM_TX_LOCKED(sc));
5098
5099 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5100 return;
5101
5102 sent = false;
5103
5104 /*
5105 * Loop through the send queue, setting up transmit descriptors
5106 * until we drain the queue, or use up all available transmit
5107 * descriptors.
5108 */
5109 for (;;) {
5110 m0 = NULL;
5111
5112 /* Get a work queue entry. */
5113 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5114 wm_txintr(sc);
5115 if (sc->sc_txsfree == 0) {
5116 DPRINTF(WM_DEBUG_TX,
5117 ("%s: TX: no free job descriptors\n",
5118 device_xname(sc->sc_dev)));
5119 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5120 break;
5121 }
5122 }
5123
5124 /* Grab a packet off the queue. */
5125 IFQ_DEQUEUE(&ifp->if_snd, m0);
5126 if (m0 == NULL)
5127 break;
5128
5129 DPRINTF(WM_DEBUG_TX,
5130 ("%s: TX: have packet to transmit: %p\n",
5131 device_xname(sc->sc_dev), m0));
5132
5133 txs = &sc->sc_txsoft[sc->sc_txsnext];
5134 dmamap = txs->txs_dmamap;
5135
5136 /*
5137 * Load the DMA map. If this fails, the packet either
5138 * didn't fit in the allotted number of segments, or we
5139 * were short on resources. For the too-many-segments
5140 * case, we simply report an error and drop the packet,
5141 * since we can't sanely copy a jumbo packet to a single
5142 * buffer.
5143 */
5144 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5145 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5146 if (error) {
5147 if (error == EFBIG) {
5148 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5149 log(LOG_ERR, "%s: Tx packet consumes too many "
5150 "DMA segments, dropping...\n",
5151 device_xname(sc->sc_dev));
5152 wm_dump_mbuf_chain(sc, m0);
5153 m_freem(m0);
5154 continue;
5155 }
5156 /* Short on resources, just stop for now. */
5157 DPRINTF(WM_DEBUG_TX,
5158 ("%s: TX: dmamap load failed: %d\n",
5159 device_xname(sc->sc_dev), error));
5160 break;
5161 }
5162
5163 segs_needed = dmamap->dm_nsegs;
5164
5165 /*
5166 * Ensure we have enough descriptors free to describe
5167 * the packet. Note, we always reserve one descriptor
5168 * at the end of the ring due to the semantics of the
5169 * TDT register, plus one more in the event we need
5170 * to load offload context.
5171 */
5172 if (segs_needed > sc->sc_txfree - 2) {
5173 /*
5174 * Not enough free descriptors to transmit this
5175 * packet. We haven't committed anything yet,
5176 * so just unload the DMA map, put the packet
5177 			 * back on the queue, and punt. Notify the upper
5178 * layer that there are no more slots left.
5179 */
5180 DPRINTF(WM_DEBUG_TX,
5181 ("%s: TX: need %d (%d) descriptors, have %d\n",
5182 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5183 segs_needed, sc->sc_txfree - 1));
5184 ifp->if_flags |= IFF_OACTIVE;
5185 bus_dmamap_unload(sc->sc_dmat, dmamap);
5186 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5187 break;
5188 }
5189
5190 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5191
5192 DPRINTF(WM_DEBUG_TX,
5193 ("%s: TX: packet has %d (%d) DMA segments\n",
5194 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5195
5196 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5197
5198 /*
5199 * Store a pointer to the packet so that we can free it
5200 * later.
5201 *
5202 * Initially, we consider the number of descriptors the
5203 		 * packet uses to be the number of DMA segments. This may be
5204 * incremented by 1 if we do checksum offload (a descriptor
5205 * is used to set the checksum context).
5206 */
5207 txs->txs_mbuf = m0;
5208 txs->txs_firstdesc = sc->sc_txnext;
5209 txs->txs_ndesc = segs_needed;
5210
5211 /* Set up offload parameters for this packet. */
5212 uint32_t cmdlen, fields, dcmdlen;
5213 if (m0->m_pkthdr.csum_flags &
5214 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5215 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5216 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5217 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5218 &do_csum) != 0) {
5219 /* Error message already displayed. */
5220 bus_dmamap_unload(sc->sc_dmat, dmamap);
5221 continue;
5222 }
5223 } else {
5224 do_csum = false;
5225 cmdlen = 0;
5226 fields = 0;
5227 }
5228
5229 /* Sync the DMA map. */
5230 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5231 BUS_DMASYNC_PREWRITE);
5232
5233 /* Initialize the first transmit descriptor. */
5234 nexttx = sc->sc_txnext;
5235 if (!do_csum) {
5236 /* setup a legacy descriptor */
5237 wm_set_dma_addr(
5238 &sc->sc_txdescs[nexttx].wtx_addr,
5239 dmamap->dm_segs[0].ds_addr);
5240 sc->sc_txdescs[nexttx].wtx_cmdlen =
5241 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5242 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5243 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5244 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5245 NULL) {
5246 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5247 htole32(WTX_CMD_VLE);
5248 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5249 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5250 } else {
5251 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5252 }
5253 dcmdlen = 0;
5254 } else {
5255 /* setup an advanced data descriptor */
5256 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5257 htole64(dmamap->dm_segs[0].ds_addr);
5258 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5259 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5260 htole32(dmamap->dm_segs[0].ds_len | cmdlen );
5261 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5262 htole32(fields);
5263 DPRINTF(WM_DEBUG_TX,
5264 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5265 device_xname(sc->sc_dev), nexttx,
5266 (uint64_t)dmamap->dm_segs[0].ds_addr));
5267 DPRINTF(WM_DEBUG_TX,
5268 ("\t 0x%08x%08x\n", fields,
5269 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5270 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5271 }
5272
5273 lasttx = nexttx;
5274 nexttx = WM_NEXTTX(sc, nexttx);
5275 		/*
5276 		 * Fill in the next descriptors. The legacy and advanced
5277 		 * formats are the same here.
5278 		 */
5279 for (seg = 1; seg < dmamap->dm_nsegs;
5280 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5281 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5282 htole64(dmamap->dm_segs[seg].ds_addr);
5283 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5284 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5285 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5286 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5287 lasttx = nexttx;
5288
5289 DPRINTF(WM_DEBUG_TX,
5290 ("%s: TX: desc %d: %#" PRIx64 ", "
5291 "len %#04zx\n",
5292 device_xname(sc->sc_dev), nexttx,
5293 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5294 dmamap->dm_segs[seg].ds_len));
5295 }
5296
5297 KASSERT(lasttx != -1);
5298
5299 /*
5300 * Set up the command byte on the last descriptor of
5301 * the packet. If we're in the interrupt delay window,
5302 * delay the interrupt.
5303 */
5304 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5305 (NQTX_CMD_EOP | NQTX_CMD_RS));
5306 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5307 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5308
5309 txs->txs_lastdesc = lasttx;
5310
5311 DPRINTF(WM_DEBUG_TX,
5312 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5313 device_xname(sc->sc_dev),
5314 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5315
5316 /* Sync the descriptors we're using. */
5317 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5318 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5319
5320 /* Give the packet to the chip. */
5321 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5322 sent = true;
5323
5324 DPRINTF(WM_DEBUG_TX,
5325 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5326
5327 DPRINTF(WM_DEBUG_TX,
5328 ("%s: TX: finished transmitting packet, job %d\n",
5329 device_xname(sc->sc_dev), sc->sc_txsnext));
5330
5331 /* Advance the tx pointer. */
5332 sc->sc_txfree -= txs->txs_ndesc;
5333 sc->sc_txnext = nexttx;
5334
5335 sc->sc_txsfree--;
5336 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5337
5338 /* Pass the packet to any BPF listeners. */
5339 bpf_mtap(ifp, m0);
5340 }
5341
5342 if (m0 != NULL) {
5343 ifp->if_flags |= IFF_OACTIVE;
5344 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5345 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5346 m_freem(m0);
5347 }
5348
5349 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5350 /* No more slots; notify upper layer. */
5351 ifp->if_flags |= IFF_OACTIVE;
5352 }
5353
5354 if (sent) {
5355 /* Set a watchdog timer in case the chip flakes out. */
5356 ifp->if_timer = 5;
5357 }
5358 }
5359
5360 /* Interrupt */
5361
5362 /*
5363 * wm_txintr:
5364 *
5365 * Helper; handle transmit interrupts.
5366 */
5367 static void
5368 wm_txintr(struct wm_softc *sc)
5369 {
5370 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5371 struct wm_txsoft *txs;
5372 uint8_t status;
5373 int i;
5374
5375 if (sc->sc_stopping)
5376 return;
5377
5378 ifp->if_flags &= ~IFF_OACTIVE;
5379
5380 /*
5381 * Go through the Tx list and free mbufs for those
5382 * frames which have been transmitted.
5383 */
5384 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5385 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
5386 txs = &sc->sc_txsoft[i];
5387
5388 DPRINTF(WM_DEBUG_TX,
5389 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
5390
5391 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
5392 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5393
5394 status =
5395 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
5396 if ((status & WTX_ST_DD) == 0) {
5397 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
5398 BUS_DMASYNC_PREREAD);
5399 break;
5400 }
5401
5402 DPRINTF(WM_DEBUG_TX,
5403 ("%s: TX: job %d done: descs %d..%d\n",
5404 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
5405 txs->txs_lastdesc));
5406
5407 /*
5408 * XXX We should probably be using the statistics
5409 * XXX registers, but I don't know if they exist
5410 * XXX on chips before the i82544.
5411 */
5412
5413 #ifdef WM_EVENT_COUNTERS
5414 if (status & WTX_ST_TU)
5415 WM_EVCNT_INCR(&sc->sc_ev_tu);
5416 #endif /* WM_EVENT_COUNTERS */
5417
5418 if (status & (WTX_ST_EC|WTX_ST_LC)) {
5419 ifp->if_oerrors++;
5420 if (status & WTX_ST_LC)
5421 log(LOG_WARNING, "%s: late collision\n",
5422 device_xname(sc->sc_dev));
5423 else if (status & WTX_ST_EC) {
5424 ifp->if_collisions += 16;
5425 log(LOG_WARNING, "%s: excessive collisions\n",
5426 device_xname(sc->sc_dev));
5427 }
5428 } else
5429 ifp->if_opackets++;
5430
5431 sc->sc_txfree += txs->txs_ndesc;
5432 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
5433 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5434 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5435 m_freem(txs->txs_mbuf);
5436 txs->txs_mbuf = NULL;
5437 }
5438
5439 /* Update the dirty transmit buffer pointer. */
5440 sc->sc_txsdirty = i;
5441 DPRINTF(WM_DEBUG_TX,
5442 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
5443
5444 /*
5445 * If there are no more pending transmissions, cancel the watchdog
5446 * timer.
5447 */
5448 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
5449 ifp->if_timer = 0;
5450 }
5451
5452 /*
5453 * wm_rxintr:
5454 *
5455 * Helper; handle receive interrupts.
5456 */
5457 static void
5458 wm_rxintr(struct wm_softc *sc)
5459 {
5460 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5461 struct wm_rxsoft *rxs;
5462 struct mbuf *m;
5463 int i, len;
5464 uint8_t status, errors;
5465 uint16_t vlantag;
5466
5467 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
5468 rxs = &sc->sc_rxsoft[i];
5469
5470 DPRINTF(WM_DEBUG_RX,
5471 ("%s: RX: checking descriptor %d\n",
5472 device_xname(sc->sc_dev), i));
5473
5474 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5475
5476 status = sc->sc_rxdescs[i].wrx_status;
5477 errors = sc->sc_rxdescs[i].wrx_errors;
5478 len = le16toh(sc->sc_rxdescs[i].wrx_len);
5479 vlantag = sc->sc_rxdescs[i].wrx_special;
5480
5481 if ((status & WRX_ST_DD) == 0) {
5482 /* We have processed all of the receive descriptors. */
5483 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
5484 break;
5485 }
5486
5487 if (__predict_false(sc->sc_rxdiscard)) {
5488 DPRINTF(WM_DEBUG_RX,
5489 ("%s: RX: discarding contents of descriptor %d\n",
5490 device_xname(sc->sc_dev), i));
5491 WM_INIT_RXDESC(sc, i);
5492 if (status & WRX_ST_EOP) {
5493 /* Reset our state. */
5494 DPRINTF(WM_DEBUG_RX,
5495 ("%s: RX: resetting rxdiscard -> 0\n",
5496 device_xname(sc->sc_dev)));
5497 sc->sc_rxdiscard = 0;
5498 }
5499 continue;
5500 }
5501
5502 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5503 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
5504
5505 m = rxs->rxs_mbuf;
5506
5507 /*
5508 * Add a new receive buffer to the ring, unless of
5509 * course the length is zero. Treat the latter as a
5510 * failed mapping.
5511 */
5512 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
5513 /*
5514 * Failed, throw away what we've done so
5515 * far, and discard the rest of the packet.
5516 */
5517 ifp->if_ierrors++;
5518 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5519 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5520 WM_INIT_RXDESC(sc, i);
5521 if ((status & WRX_ST_EOP) == 0)
5522 sc->sc_rxdiscard = 1;
5523 if (sc->sc_rxhead != NULL)
5524 m_freem(sc->sc_rxhead);
5525 WM_RXCHAIN_RESET(sc);
5526 DPRINTF(WM_DEBUG_RX,
5527 ("%s: RX: Rx buffer allocation failed, "
5528 "dropping packet%s\n", device_xname(sc->sc_dev),
5529 sc->sc_rxdiscard ? " (discard)" : ""));
5530 continue;
5531 }
5532
5533 m->m_len = len;
5534 sc->sc_rxlen += len;
5535 DPRINTF(WM_DEBUG_RX,
5536 ("%s: RX: buffer at %p len %d\n",
5537 device_xname(sc->sc_dev), m->m_data, len));
5538
5539 /* If this is not the end of the packet, keep looking. */
5540 if ((status & WRX_ST_EOP) == 0) {
5541 WM_RXCHAIN_LINK(sc, m);
5542 DPRINTF(WM_DEBUG_RX,
5543 ("%s: RX: not yet EOP, rxlen -> %d\n",
5544 device_xname(sc->sc_dev), sc->sc_rxlen));
5545 continue;
5546 }
5547
		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS except on the I350,
		 * I354 and I21[01] (not all chips can be configured
		 * to strip it), so we need to trim it.  We may also
		 * need to adjust the length of the previous mbuf in
		 * the chain if the current mbuf is too short.
		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
		 * register is always set on the I350, so the FCS is
		 * already stripped there and we must not trim it.
		 */
5558 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
5559 && (sc->sc_type != WM_T_I210)
5560 && (sc->sc_type != WM_T_I211)) {
5561 if (m->m_len < ETHER_CRC_LEN) {
5562 sc->sc_rxtail->m_len
5563 -= (ETHER_CRC_LEN - m->m_len);
5564 m->m_len = 0;
5565 } else
5566 m->m_len -= ETHER_CRC_LEN;
5567 len = sc->sc_rxlen - ETHER_CRC_LEN;
5568 } else
5569 len = sc->sc_rxlen;
5570
5571 WM_RXCHAIN_LINK(sc, m);
5572
5573 *sc->sc_rxtailp = NULL;
5574 m = sc->sc_rxhead;
5575
5576 WM_RXCHAIN_RESET(sc);
5577
5578 DPRINTF(WM_DEBUG_RX,
5579 ("%s: RX: have entire packet, len -> %d\n",
5580 device_xname(sc->sc_dev), len));
5581
5582 /* If an error occurred, update stats and drop the packet. */
5583 if (errors &
5584 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
5585 if (errors & WRX_ER_SE)
5586 log(LOG_WARNING, "%s: symbol error\n",
5587 device_xname(sc->sc_dev));
5588 else if (errors & WRX_ER_SEQ)
5589 log(LOG_WARNING, "%s: receive sequence error\n",
5590 device_xname(sc->sc_dev));
5591 else if (errors & WRX_ER_CE)
5592 log(LOG_WARNING, "%s: CRC error\n",
5593 device_xname(sc->sc_dev));
5594 m_freem(m);
5595 continue;
5596 }
5597
5598 /* No errors. Receive the packet. */
5599 m->m_pkthdr.rcvif = ifp;
5600 m->m_pkthdr.len = len;
5601
5602 /*
5603 * If VLANs are enabled, VLAN packets have been unwrapped
5604 * for us. Associate the tag with the packet.
5605 */
		/* XXX should check for i350 and i354 */
5607 if ((status & WRX_ST_VP) != 0) {
5608 VLAN_INPUT_TAG(ifp, m,
5609 le16toh(vlantag),
5610 continue);
5611 }
5612
5613 /* Set up checksum info for this packet. */
5614 if ((status & WRX_ST_IXSM) == 0) {
5615 if (status & WRX_ST_IPCS) {
5616 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
5617 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
5618 if (errors & WRX_ER_IPE)
5619 m->m_pkthdr.csum_flags |=
5620 M_CSUM_IPv4_BAD;
5621 }
5622 if (status & WRX_ST_TCPCS) {
5623 /*
5624 * Note: we don't know if this was TCP or UDP,
5625 * so we just set both bits, and expect the
5626 * upper layers to deal.
5627 */
5628 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
5629 m->m_pkthdr.csum_flags |=
5630 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
5631 M_CSUM_TCPv6 | M_CSUM_UDPv6;
5632 if (errors & WRX_ER_TCPE)
5633 m->m_pkthdr.csum_flags |=
5634 M_CSUM_TCP_UDP_BAD;
5635 }
5636 }
5637
5638 ifp->if_ipackets++;
5639
5640 WM_RX_UNLOCK(sc);
5641
5642 /* Pass this up to any BPF listeners. */
5643 bpf_mtap(ifp, m);
5644
5645 /* Pass it on. */
5646 (*ifp->if_input)(ifp, m);
5647
5648 WM_RX_LOCK(sc);
5649
5650 if (sc->sc_stopping)
5651 break;
5652 }
5653
5654 /* Update the receive pointer. */
5655 sc->sc_rxptr = i;
5656
5657 DPRINTF(WM_DEBUG_RX,
5658 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
5659 }
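
/*
 * Editor's note (illustrative, not compiled): a worked example of the
 * FCS trimming in wm_rxintr() above.  Suppose a 1518-byte frame (1514
 * bytes of frame data + 4-byte FCS) arrives split across two buffers
 * as 1516 + 2.  The final mbuf holds only 2 bytes, all of them CRC, so
 * it is zeroed and the previous mbuf gives up the remaining 2 CRC
 * bytes.  The names m_prev, m_last, rxlen and pktlen are hypothetical.
 */
#if 0
	/* m_last->m_len == 2, which is < ETHER_CRC_LEN (4) */
	m_prev->m_len -= ETHER_CRC_LEN - m_last->m_len;	/* 1516 - 2 = 1514 */
	m_last->m_len = 0;
	pktlen = rxlen - ETHER_CRC_LEN;			/* 1518 - 4 = 1514 */
#endif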
5660
5661 /*
5662 * wm_linkintr_gmii:
5663 *
5664 * Helper; handle link interrupts for GMII.
5665 */
5666 static void
5667 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
5668 {
5669
5670 KASSERT(WM_TX_LOCKED(sc));
5671
5672 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5673 __func__));
5674
5675 if (icr & ICR_LSC) {
5676 DPRINTF(WM_DEBUG_LINK,
5677 ("%s: LINK: LSC -> mii_pollstat\n",
5678 device_xname(sc->sc_dev)));
5679 mii_pollstat(&sc->sc_mii);
5680 if (sc->sc_type == WM_T_82543) {
5681 int miistatus, active;
5682
5683 /*
5684 * With 82543, we need to force speed and
5685 * duplex on the MAC equal to what the PHY
5686 * speed and duplex configuration is.
5687 */
5688 miistatus = sc->sc_mii.mii_media_status;
5689
5690 if (miistatus & IFM_ACTIVE) {
5691 active = sc->sc_mii.mii_media_active;
5692 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5693 switch (IFM_SUBTYPE(active)) {
5694 case IFM_10_T:
5695 sc->sc_ctrl |= CTRL_SPEED_10;
5696 break;
5697 case IFM_100_TX:
5698 sc->sc_ctrl |= CTRL_SPEED_100;
5699 break;
5700 case IFM_1000_T:
5701 sc->sc_ctrl |= CTRL_SPEED_1000;
5702 break;
5703 default:
					/*
					 * Fiber?
					 * Should not enter here.
					 */
5708 printf("unknown media (%x)\n",
5709 active);
5710 break;
5711 }
5712 if (active & IFM_FDX)
5713 sc->sc_ctrl |= CTRL_FD;
5714 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5715 }
5716 } else if ((sc->sc_type == WM_T_ICH8)
5717 && (sc->sc_phytype == WMPHY_IGP_3)) {
5718 wm_kmrn_lock_loss_workaround_ich8lan(sc);
5719 } else if (sc->sc_type == WM_T_PCH) {
5720 wm_k1_gig_workaround_hv(sc,
5721 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
5722 }
5723
5724 if ((sc->sc_phytype == WMPHY_82578)
5725 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
5726 == IFM_1000_T)) {
5727
5728 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
5729 delay(200*1000); /* XXX too big */
5730
5731 /* Link stall fix for link up */
5732 wm_gmii_hv_writereg(sc->sc_dev, 1,
5733 HV_MUX_DATA_CTRL,
5734 HV_MUX_DATA_CTRL_GEN_TO_MAC
5735 | HV_MUX_DATA_CTRL_FORCE_SPEED);
5736 wm_gmii_hv_writereg(sc->sc_dev, 1,
5737 HV_MUX_DATA_CTRL,
5738 HV_MUX_DATA_CTRL_GEN_TO_MAC);
5739 }
5740 }
5741 } else if (icr & ICR_RXSEQ) {
5742 DPRINTF(WM_DEBUG_LINK,
5743 ("%s: LINK Receive sequence error\n",
5744 device_xname(sc->sc_dev)));
5745 }
5746 }
5747
5748 /*
5749 * wm_linkintr_tbi:
5750 *
5751 * Helper; handle link interrupts for TBI mode.
5752 */
5753 static void
5754 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
5755 {
5756 uint32_t status;
5757
5758 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5759 __func__));
5760
5761 status = CSR_READ(sc, WMREG_STATUS);
5762 if (icr & ICR_LSC) {
5763 if (status & STATUS_LU) {
5764 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
5765 device_xname(sc->sc_dev),
5766 (status & STATUS_FD) ? "FDX" : "HDX"));
5767 /*
5768 * NOTE: CTRL will update TFCE and RFCE automatically,
5769 * so we should update sc->sc_ctrl
5770 */
5771
5772 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5773 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5774 sc->sc_fcrtl &= ~FCRTL_XONE;
5775 if (status & STATUS_FD)
5776 sc->sc_tctl |=
5777 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5778 else
5779 sc->sc_tctl |=
5780 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5781 if (sc->sc_ctrl & CTRL_TFCE)
5782 sc->sc_fcrtl |= FCRTL_XONE;
5783 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5784 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5785 WMREG_OLD_FCRTL : WMREG_FCRTL,
5786 sc->sc_fcrtl);
5787 sc->sc_tbi_linkup = 1;
5788 } else {
5789 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
5790 device_xname(sc->sc_dev)));
5791 sc->sc_tbi_linkup = 0;
5792 }
5793 wm_tbi_set_linkled(sc);
5794 } else if (icr & ICR_RXSEQ) {
5795 DPRINTF(WM_DEBUG_LINK,
5796 ("%s: LINK: Receive sequence error\n",
5797 device_xname(sc->sc_dev)));
5798 }
5799 }
5800
5801 /*
5802 * wm_linkintr:
5803 *
5804 * Helper; handle link interrupts.
5805 */
5806 static void
5807 wm_linkintr(struct wm_softc *sc, uint32_t icr)
5808 {
5809
5810 if (sc->sc_flags & WM_F_HAS_MII)
5811 wm_linkintr_gmii(sc, icr);
5812 else
5813 wm_linkintr_tbi(sc, icr);
5814 }
5815
5816 /*
5817 * wm_intr:
5818 *
5819 * Interrupt service routine.
5820 */
5821 static int
5822 wm_intr(void *arg)
5823 {
5824 struct wm_softc *sc = arg;
5825 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5826 uint32_t icr;
5827 int handled = 0;
5828
5829 while (1 /* CONSTCOND */) {
5830 icr = CSR_READ(sc, WMREG_ICR);
5831 if ((icr & sc->sc_icr) == 0)
5832 break;
5833 rnd_add_uint32(&sc->rnd_source, icr);
5834
5835 WM_RX_LOCK(sc);
5836
5837 if (sc->sc_stopping) {
5838 WM_RX_UNLOCK(sc);
5839 break;
5840 }
5841
5842 handled = 1;
5843
5844 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
5845 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
5846 DPRINTF(WM_DEBUG_RX,
5847 ("%s: RX: got Rx intr 0x%08x\n",
5848 device_xname(sc->sc_dev),
5849 icr & (ICR_RXDMT0|ICR_RXT0)));
5850 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
5851 }
5852 #endif
5853 wm_rxintr(sc);
5854
5855 WM_RX_UNLOCK(sc);
5856 WM_TX_LOCK(sc);
5857
5858 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
5859 if (icr & ICR_TXDW) {
5860 DPRINTF(WM_DEBUG_TX,
5861 ("%s: TX: got TXDW interrupt\n",
5862 device_xname(sc->sc_dev)));
5863 WM_EVCNT_INCR(&sc->sc_ev_txdw);
5864 }
5865 #endif
5866 wm_txintr(sc);
5867
5868 if (icr & (ICR_LSC|ICR_RXSEQ)) {
5869 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
5870 wm_linkintr(sc, icr);
5871 }
5872
5873 WM_TX_UNLOCK(sc);
5874
5875 if (icr & ICR_RXO) {
5876 #if defined(WM_DEBUG)
5877 log(LOG_WARNING, "%s: Receive overrun\n",
5878 device_xname(sc->sc_dev));
5879 #endif /* defined(WM_DEBUG) */
5880 }
5881 }
5882
5883 if (handled) {
5884 /* Try to get more packets going. */
5885 ifp->if_start(ifp);
5886 }
5887
5888 return handled;
5889 }
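
/*
 * Editor's note on the ISR loop above: on this MAC family, reading ICR
 * returns the pending interrupt causes and clears them in the same
 * access, so the handler simply re-reads ICR until no cause it cares
 * about remains.  A minimal sketch of the pattern (hypothetical names,
 * not compiled):
 */
#if 0
	for (;;) {
		cause = read_reg(ICR);		/* read-to-clear */
		if ((cause & enabled_causes) == 0)
			break;			/* nothing left for us */
		service(cause);			/* Rx, Tx, link, ... */
	}
#endif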
5890
5891 /*
5892 * Media related.
5893 * GMII, SGMII, TBI (and SERDES)
5894 */
5895
5896 /* GMII related */
5897
5898 /*
5899 * wm_gmii_reset:
5900 *
5901 * Reset the PHY.
5902 */
5903 static void
5904 wm_gmii_reset(struct wm_softc *sc)
5905 {
5906 uint32_t reg;
5907 int rv;
5908
5909 /* get phy semaphore */
5910 switch (sc->sc_type) {
5911 case WM_T_82571:
5912 case WM_T_82572:
5913 case WM_T_82573:
5914 case WM_T_82574:
5915 case WM_T_82583:
5916 /* XXX should get sw semaphore, too */
5917 rv = wm_get_swsm_semaphore(sc);
5918 break;
5919 case WM_T_82575:
5920 case WM_T_82576:
5921 case WM_T_82580:
5922 case WM_T_I350:
5923 case WM_T_I354:
5924 case WM_T_I210:
5925 case WM_T_I211:
5926 case WM_T_80003:
5927 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5928 break;
5929 case WM_T_ICH8:
5930 case WM_T_ICH9:
5931 case WM_T_ICH10:
5932 case WM_T_PCH:
5933 case WM_T_PCH2:
5934 case WM_T_PCH_LPT:
5935 rv = wm_get_swfwhw_semaphore(sc);
5936 break;
5937 default:
		/* nothing to do */
5939 rv = 0;
5940 break;
5941 }
5942 if (rv != 0) {
5943 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5944 __func__);
5945 return;
5946 }
5947
5948 switch (sc->sc_type) {
5949 case WM_T_82542_2_0:
5950 case WM_T_82542_2_1:
5951 /* null */
5952 break;
5953 case WM_T_82543:
5954 /*
5955 * With 82543, we need to force speed and duplex on the MAC
5956 * equal to what the PHY speed and duplex configuration is.
5957 * In addition, we need to perform a hardware reset on the PHY
5958 * to take it out of reset.
5959 */
5960 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5961 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5962
5963 /* The PHY reset pin is active-low. */
5964 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5965 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
5966 CTRL_EXT_SWDPIN(4));
5967 reg |= CTRL_EXT_SWDPIO(4);
5968
5969 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5970 CSR_WRITE_FLUSH(sc);
5971 delay(10*1000);
5972
5973 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
5974 CSR_WRITE_FLUSH(sc);
5975 delay(150);
5976 #if 0
5977 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
5978 #endif
5979 delay(20*1000); /* XXX extra delay to get PHY ID? */
5980 break;
5981 case WM_T_82544: /* reset 10000us */
5982 case WM_T_82540:
5983 case WM_T_82545:
5984 case WM_T_82545_3:
5985 case WM_T_82546:
5986 case WM_T_82546_3:
5987 case WM_T_82541:
5988 case WM_T_82541_2:
5989 case WM_T_82547:
5990 case WM_T_82547_2:
5991 case WM_T_82571: /* reset 100us */
5992 case WM_T_82572:
5993 case WM_T_82573:
5994 case WM_T_82574:
5995 case WM_T_82575:
5996 case WM_T_82576:
5997 case WM_T_82580:
5998 case WM_T_I350:
5999 case WM_T_I354:
6000 case WM_T_I210:
6001 case WM_T_I211:
6002 case WM_T_82583:
6003 case WM_T_80003:
6004 /* generic reset */
6005 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6006 CSR_WRITE_FLUSH(sc);
6007 delay(20000);
6008 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6009 CSR_WRITE_FLUSH(sc);
6010 delay(20000);
6011
6012 if ((sc->sc_type == WM_T_82541)
6013 || (sc->sc_type == WM_T_82541_2)
6014 || (sc->sc_type == WM_T_82547)
6015 || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for igp are done in igp_reset() */
6017 /* XXX add code to set LED after phy reset */
6018 }
6019 break;
6020 case WM_T_ICH8:
6021 case WM_T_ICH9:
6022 case WM_T_ICH10:
6023 case WM_T_PCH:
6024 case WM_T_PCH2:
6025 case WM_T_PCH_LPT:
6026 /* generic reset */
6027 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6028 CSR_WRITE_FLUSH(sc);
6029 delay(100);
6030 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6031 CSR_WRITE_FLUSH(sc);
6032 delay(150);
6033 break;
6034 default:
6035 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6036 __func__);
6037 break;
6038 }
6039
6040 /* release PHY semaphore */
6041 switch (sc->sc_type) {
6042 case WM_T_82571:
6043 case WM_T_82572:
6044 case WM_T_82573:
6045 case WM_T_82574:
6046 case WM_T_82583:
6047 /* XXX should put sw semaphore, too */
6048 wm_put_swsm_semaphore(sc);
6049 break;
6050 case WM_T_82575:
6051 case WM_T_82576:
6052 case WM_T_82580:
6053 case WM_T_I350:
6054 case WM_T_I354:
6055 case WM_T_I210:
6056 case WM_T_I211:
6057 case WM_T_80003:
6058 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6059 break;
6060 case WM_T_ICH8:
6061 case WM_T_ICH9:
6062 case WM_T_ICH10:
6063 case WM_T_PCH:
6064 case WM_T_PCH2:
6065 case WM_T_PCH_LPT:
6066 wm_put_swfwhw_semaphore(sc);
6067 break;
	default:
		/* nothing to do */
		break;
6072 }
6073
6074 /* get_cfg_done */
6075 wm_get_cfg_done(sc);
6076
6077 /* extra setup */
6078 switch (sc->sc_type) {
6079 case WM_T_82542_2_0:
6080 case WM_T_82542_2_1:
6081 case WM_T_82543:
6082 case WM_T_82544:
6083 case WM_T_82540:
6084 case WM_T_82545:
6085 case WM_T_82545_3:
6086 case WM_T_82546:
6087 case WM_T_82546_3:
6088 case WM_T_82541_2:
6089 case WM_T_82547_2:
6090 case WM_T_82571:
6091 case WM_T_82572:
6092 case WM_T_82573:
6093 case WM_T_82574:
6094 case WM_T_82575:
6095 case WM_T_82576:
6096 case WM_T_82580:
6097 case WM_T_I350:
6098 case WM_T_I354:
6099 case WM_T_I210:
6100 case WM_T_I211:
6101 case WM_T_82583:
6102 case WM_T_80003:
6103 /* null */
6104 break;
6105 case WM_T_82541:
6106 case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
6108 break;
6109 case WM_T_ICH8:
6110 case WM_T_ICH9:
6111 case WM_T_ICH10:
6112 case WM_T_PCH:
6113 case WM_T_PCH2:
6114 case WM_T_PCH_LPT:
		/* Allow time for h/w to get to a quiescent state after reset */
6116 delay(10*1000);
6117
6118 if (sc->sc_type == WM_T_PCH)
6119 wm_hv_phy_workaround_ich8lan(sc);
6120
6121 if (sc->sc_type == WM_T_PCH2)
6122 wm_lv_phy_workaround_ich8lan(sc);
6123
6124 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
			/*
			 * Dummy read to clear the PHY wakeup bit after
			 * LCD reset
			 */
6129 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6130 }
6131
		/*
		 * XXX Configure the LCD with the extended configuration
		 * region in NVM
		 */
6136
6137 /* Configure the LCD with the OEM bits in NVM */
6138 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6139 || (sc->sc_type == WM_T_PCH_LPT)) {
6140 /*
6141 * Disable LPLU.
6142 * XXX It seems that 82567 has LPLU, too.
6143 */
6144 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6146 reg |= HV_OEM_BITS_ANEGNOW;
6147 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6148 }
6149 break;
6150 default:
6151 panic("%s: unknown type\n", __func__);
6152 break;
6153 }
6154 }
6155
6156 /*
6157 * wm_get_phy_id_82575:
6158 *
6159 * Return PHY ID. Return -1 if it failed.
6160 */
6161 static int
6162 wm_get_phy_id_82575(struct wm_softc *sc)
6163 {
6164 uint32_t reg;
6165 int phyid = -1;
6166
6167 /* XXX */
6168 if ((sc->sc_flags & WM_F_SGMII) == 0)
6169 return -1;
6170
6171 if (wm_sgmii_uses_mdio(sc)) {
6172 switch (sc->sc_type) {
6173 case WM_T_82575:
6174 case WM_T_82576:
6175 reg = CSR_READ(sc, WMREG_MDIC);
6176 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6177 break;
6178 case WM_T_82580:
6179 case WM_T_I350:
6180 case WM_T_I354:
6181 case WM_T_I210:
6182 case WM_T_I211:
6183 reg = CSR_READ(sc, WMREG_MDICNFG);
6184 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6185 break;
6186 default:
6187 return -1;
6188 }
6189 }
6190
6191 return phyid;
6192 }
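
/*
 * Editor's note (illustrative, not compiled): the extraction above is a
 * plain mask-and-shift of the 5-bit MDIO PHY address out of MDIC or
 * MDICNFG.  The field layout below is hypothetical, for exposition:
 */
#if 0
	/* suppose the PHY-address field occupies bits 25:21 */
	mask = 0x03e00000;
	shift = 21;
	reg = 0x00400000;			/* example register value */
	phyid = (reg & mask) >> shift;		/* == 2 */
#endif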
6193
6194
6195 /*
6196 * wm_gmii_mediainit:
6197 *
6198 * Initialize media for use on 1000BASE-T devices.
6199 */
6200 static void
6201 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6202 {
6203 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6204 struct mii_data *mii = &sc->sc_mii;
6205 uint32_t reg;
6206
6207 /* We have GMII. */
6208 sc->sc_flags |= WM_F_HAS_MII;
6209
6210 if (sc->sc_type == WM_T_80003)
6211 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6212 else
6213 sc->sc_tipg = TIPG_1000T_DFLT;
6214
6215 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
6216 if ((sc->sc_type == WM_T_82580)
6217 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6218 || (sc->sc_type == WM_T_I211)) {
6219 reg = CSR_READ(sc, WMREG_PHPM);
6220 reg &= ~PHPM_GO_LINK_D;
6221 CSR_WRITE(sc, WMREG_PHPM, reg);
6222 }
6223
6224 /*
6225 * Let the chip set speed/duplex on its own based on
6226 * signals from the PHY.
6227 * XXXbouyer - I'm not sure this is right for the 80003,
6228 * the em driver only sets CTRL_SLU here - but it seems to work.
6229 */
6230 sc->sc_ctrl |= CTRL_SLU;
6231 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6232
6233 /* Initialize our media structures and probe the GMII. */
6234 mii->mii_ifp = ifp;
6235
6236 /*
6237 * Determine the PHY access method.
6238 *
6239 * For SGMII, use SGMII specific method.
6240 *
6241 * For some devices, we can determine the PHY access method
6242 * from sc_type.
6243 *
	 * For ICH8 variants, it's difficult to determine the PHY access
	 * method from sc_type, so use the PCI product ID for some devices.
	 * For other ICH8 variants, try igp's method first; if the PHY
	 * can't be detected that way, fall back to bm's method.
6248 */
6249 switch (prodid) {
6250 case PCI_PRODUCT_INTEL_PCH_M_LM:
6251 case PCI_PRODUCT_INTEL_PCH_M_LC:
6252 /* 82577 */
6253 sc->sc_phytype = WMPHY_82577;
6254 mii->mii_readreg = wm_gmii_hv_readreg;
6255 mii->mii_writereg = wm_gmii_hv_writereg;
6256 break;
6257 case PCI_PRODUCT_INTEL_PCH_D_DM:
6258 case PCI_PRODUCT_INTEL_PCH_D_DC:
6259 /* 82578 */
6260 sc->sc_phytype = WMPHY_82578;
6261 mii->mii_readreg = wm_gmii_hv_readreg;
6262 mii->mii_writereg = wm_gmii_hv_writereg;
6263 break;
6264 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6265 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6266 /* 82579 */
6267 sc->sc_phytype = WMPHY_82579;
6268 mii->mii_readreg = wm_gmii_hv_readreg;
6269 mii->mii_writereg = wm_gmii_hv_writereg;
6270 break;
6271 case PCI_PRODUCT_INTEL_I217_LM:
6272 case PCI_PRODUCT_INTEL_I217_V:
6273 case PCI_PRODUCT_INTEL_I218_LM:
6274 case PCI_PRODUCT_INTEL_I218_V:
6275 /* I21[78] */
6276 mii->mii_readreg = wm_gmii_hv_readreg;
6277 mii->mii_writereg = wm_gmii_hv_writereg;
6278 break;
6279 case PCI_PRODUCT_INTEL_82801I_BM:
6280 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6281 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6282 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6283 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6284 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6285 /* 82567 */
6286 sc->sc_phytype = WMPHY_BM;
6287 mii->mii_readreg = wm_gmii_bm_readreg;
6288 mii->mii_writereg = wm_gmii_bm_writereg;
6289 break;
6290 default:
6291 if (((sc->sc_flags & WM_F_SGMII) != 0)
		    && !wm_sgmii_uses_mdio(sc)) {
6293 mii->mii_readreg = wm_sgmii_readreg;
6294 mii->mii_writereg = wm_sgmii_writereg;
6295 } else if (sc->sc_type >= WM_T_80003) {
6296 mii->mii_readreg = wm_gmii_i80003_readreg;
6297 mii->mii_writereg = wm_gmii_i80003_writereg;
6298 } else if (sc->sc_type >= WM_T_I210) {
6299 mii->mii_readreg = wm_gmii_i82544_readreg;
6300 mii->mii_writereg = wm_gmii_i82544_writereg;
6301 } else if (sc->sc_type >= WM_T_82580) {
6302 sc->sc_phytype = WMPHY_82580;
6303 mii->mii_readreg = wm_gmii_82580_readreg;
6304 mii->mii_writereg = wm_gmii_82580_writereg;
6305 } else if (sc->sc_type >= WM_T_82544) {
6306 mii->mii_readreg = wm_gmii_i82544_readreg;
6307 mii->mii_writereg = wm_gmii_i82544_writereg;
6308 } else {
6309 mii->mii_readreg = wm_gmii_i82543_readreg;
6310 mii->mii_writereg = wm_gmii_i82543_writereg;
6311 }
6312 break;
6313 }
6314 mii->mii_statchg = wm_gmii_statchg;
6315
6316 wm_gmii_reset(sc);
6317
6318 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6319 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6320 wm_gmii_mediastatus);
6321
6322 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6323 || (sc->sc_type == WM_T_82580)
6324 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6325 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6326 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6327 /* Attach only one port */
6328 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6329 MII_OFFSET_ANY, MIIF_DOPAUSE);
6330 } else {
6331 int i, id;
6332 uint32_t ctrl_ext;
6333
6334 id = wm_get_phy_id_82575(sc);
6335 if (id != -1) {
6336 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6337 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6338 }
6339 if ((id == -1)
6340 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6341 /* Power on sgmii phy if it is disabled */
6342 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6343 CSR_WRITE(sc, WMREG_CTRL_EXT,
6344 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6345 CSR_WRITE_FLUSH(sc);
6346 delay(300*1000); /* XXX too long */
6347
				/* Try PHY addresses 1 through 7 */
				for (i = 1; i < 8; i++)
					mii_attach(sc->sc_dev, &sc->sc_mii,
					    0xffffffff, i, MII_OFFSET_ANY,
					    MIIF_DOPAUSE);
6353
				/* Restore the previous SFP cage power state */
6355 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6356 }
6357 }
6358 } else {
6359 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6360 MII_OFFSET_ANY, MIIF_DOPAUSE);
6361 }
6362
6363 /*
6364 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6365 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6366 */
6367 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6368 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6369 wm_set_mdio_slow_mode_hv(sc);
6370 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6371 MII_OFFSET_ANY, MIIF_DOPAUSE);
6372 }
6373
6374 /*
6375 * (For ICH8 variants)
6376 * If PHY detection failed, use BM's r/w function and retry.
6377 */
6378 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6379 /* if failed, retry with *_bm_* */
6380 mii->mii_readreg = wm_gmii_bm_readreg;
6381 mii->mii_writereg = wm_gmii_bm_writereg;
6382
6383 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6384 MII_OFFSET_ANY, MIIF_DOPAUSE);
6385 }
6386
6387 if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
6389 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6390 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6391 sc->sc_phytype = WMPHY_NONE;
6392 } else {
		/*
		 * PHY found!  Check the PHY type.
		 */
6397 uint32_t model;
6398 struct mii_softc *child;
6399
6400 child = LIST_FIRST(&mii->mii_phys);
6401 if (device_is_a(child->mii_dev, "igphy")) {
6402 struct igphy_softc *isc = (struct igphy_softc *)child;
6403
6404 model = isc->sc_mii.mii_mpd_model;
6405 if (model == MII_MODEL_yyINTEL_I82566)
6406 sc->sc_phytype = WMPHY_IGP_3;
6407 }
6408
6409 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6410 }
6411 }
6412
6413 /*
6414 * wm_gmii_mediastatus: [ifmedia interface function]
6415 *
6416 * Get the current interface media status on a 1000BASE-T device.
6417 */
6418 static void
6419 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6420 {
6421 struct wm_softc *sc = ifp->if_softc;
6422
6423 ether_mediastatus(ifp, ifmr);
6424 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6425 | sc->sc_flowflags;
6426 }
6427
6428 /*
6429 * wm_gmii_mediachange: [ifmedia interface function]
6430 *
6431 * Set hardware to newly-selected media on a 1000BASE-T device.
6432 */
6433 static int
6434 wm_gmii_mediachange(struct ifnet *ifp)
6435 {
6436 struct wm_softc *sc = ifp->if_softc;
6437 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6438 int rc;
6439
6440 if ((ifp->if_flags & IFF_UP) == 0)
6441 return 0;
6442
6443 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6444 sc->sc_ctrl |= CTRL_SLU;
6445 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6446 || (sc->sc_type > WM_T_82543)) {
6447 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6448 } else {
6449 sc->sc_ctrl &= ~CTRL_ASDE;
6450 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6451 if (ife->ifm_media & IFM_FDX)
6452 sc->sc_ctrl |= CTRL_FD;
6453 switch (IFM_SUBTYPE(ife->ifm_media)) {
6454 case IFM_10_T:
6455 sc->sc_ctrl |= CTRL_SPEED_10;
6456 break;
6457 case IFM_100_TX:
6458 sc->sc_ctrl |= CTRL_SPEED_100;
6459 break;
6460 case IFM_1000_T:
6461 sc->sc_ctrl |= CTRL_SPEED_1000;
6462 break;
6463 default:
6464 panic("wm_gmii_mediachange: bad media 0x%x",
6465 ife->ifm_media);
6466 }
6467 }
6468 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6469 if (sc->sc_type <= WM_T_82543)
6470 wm_gmii_reset(sc);
6471
6472 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6473 return 0;
6474 return rc;
6475 }
6476
6477 #define MDI_IO CTRL_SWDPIN(2)
6478 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6479 #define MDI_CLK CTRL_SWDPIN(3)
6480
6481 static void
6482 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6483 {
6484 uint32_t i, v;
6485
6486 v = CSR_READ(sc, WMREG_CTRL);
6487 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6488 v |= MDI_DIR | CTRL_SWDPIO(3);
6489
6490 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6491 if (data & i)
6492 v |= MDI_IO;
6493 else
6494 v &= ~MDI_IO;
6495 CSR_WRITE(sc, WMREG_CTRL, v);
6496 CSR_WRITE_FLUSH(sc);
6497 delay(10);
6498 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6499 CSR_WRITE_FLUSH(sc);
6500 delay(10);
6501 CSR_WRITE(sc, WMREG_CTRL, v);
6502 CSR_WRITE_FLUSH(sc);
6503 delay(10);
6504 }
6505 }
6506
6507 static uint32_t
6508 wm_i82543_mii_recvbits(struct wm_softc *sc)
6509 {
6510 uint32_t v, i, data = 0;
6511
6512 v = CSR_READ(sc, WMREG_CTRL);
6513 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6514 v |= CTRL_SWDPIO(3);
6515
6516 CSR_WRITE(sc, WMREG_CTRL, v);
6517 CSR_WRITE_FLUSH(sc);
6518 delay(10);
6519 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6520 CSR_WRITE_FLUSH(sc);
6521 delay(10);
6522 CSR_WRITE(sc, WMREG_CTRL, v);
6523 CSR_WRITE_FLUSH(sc);
6524 delay(10);
6525
6526 for (i = 0; i < 16; i++) {
6527 data <<= 1;
6528 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6529 CSR_WRITE_FLUSH(sc);
6530 delay(10);
6531 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6532 data |= 1;
6533 CSR_WRITE(sc, WMREG_CTRL, v);
6534 CSR_WRITE_FLUSH(sc);
6535 delay(10);
6536 }
6537
6538 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6539 CSR_WRITE_FLUSH(sc);
6540 delay(10);
6541 CSR_WRITE(sc, WMREG_CTRL, v);
6542 CSR_WRITE_FLUSH(sc);
6543 delay(10);
6544
6545 return data;
6546 }
6547
6548 #undef MDI_IO
6549 #undef MDI_DIR
6550 #undef MDI_CLK
6551
6552 /*
6553 * wm_gmii_i82543_readreg: [mii interface function]
6554 *
6555 * Read a PHY register on the GMII (i82543 version).
6556 */
6557 static int
6558 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6559 {
6560 struct wm_softc *sc = device_private(self);
6561 int rv;
6562
6563 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6564 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
6565 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6566 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
6567
6568 DPRINTF(WM_DEBUG_GMII,
6569 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6570 device_xname(sc->sc_dev), phy, reg, rv));
6571
6572 return rv;
6573 }
6574
6575 /*
6576 * wm_gmii_i82543_writereg: [mii interface function]
6577 *
6578 * Write a PHY register on the GMII (i82543 version).
6579 */
6580 static void
6581 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6582 {
6583 struct wm_softc *sc = device_private(self);
6584
6585 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6586 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6587 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6588 (MII_COMMAND_START << 30), 32);
6589 }
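
/*
 * Editor's note: the shift constants in the two routines above encode
 * an IEEE 802.3 clause-22 management frame, clocked out MSB first by
 * wm_i82543_mii_sendbits() after a 32-bit preamble of all ones.  Layout
 * of the 32-bit write frame assembled above (a reading aid, not
 * compiled):
 */
#if 0
	frame = (MII_COMMAND_START << 30)	/* start bits */
	      | (MII_COMMAND_WRITE << 28)	/* write opcode */
	      | (phy << 23)			/* 5-bit PHY address */
	      | (reg << 18)			/* 5-bit register address */
	      | (MII_COMMAND_ACK << 16)		/* turnaround bits */
	      | val;				/* 16 data bits */
#endif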
6590
6591 /*
6592 * wm_gmii_i82544_readreg: [mii interface function]
6593 *
6594 * Read a PHY register on the GMII.
6595 */
6596 static int
6597 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6598 {
6599 struct wm_softc *sc = device_private(self);
6600 uint32_t mdic = 0;
6601 int i, rv;
6602
6603 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6604 MDIC_REGADD(reg));
6605
6606 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6607 mdic = CSR_READ(sc, WMREG_MDIC);
6608 if (mdic & MDIC_READY)
6609 break;
6610 delay(50);
6611 }
6612
6613 if ((mdic & MDIC_READY) == 0) {
6614 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6615 device_xname(sc->sc_dev), phy, reg);
6616 rv = 0;
6617 } else if (mdic & MDIC_E) {
6618 #if 0 /* This is normal if no PHY is present. */
6619 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6620 device_xname(sc->sc_dev), phy, reg);
6621 #endif
6622 rv = 0;
6623 } else {
6624 rv = MDIC_DATA(mdic);
6625 if (rv == 0xffff)
6626 rv = 0;
6627 }
6628
6629 return rv;
6630 }
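
/*
 * Editor's sketch (not compiled) of the MDIC handshake used above and
 * in the write path below: software posts one command word, then polls
 * the same register until hardware raises MDIC_READY or the bounded
 * poll gives up.  The helpers read_reg/write_reg and max_tries are
 * hypothetical stand-ins for CSR_READ/CSR_WRITE and the timeout:
 */
#if 0
	write_reg(MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) | MDIC_REGADD(reg));
	for (tries = 0; tries < max_tries; tries++) {
		v = read_reg(MDIC);
		if (v & MDIC_READY)
			break;
		delay(50);			/* ~50us per poll */
	}
	if ((v & MDIC_READY) != 0 && (v & MDIC_E) == 0)
		data = MDIC_DATA(v);		/* low 16 bits are the value */
	else
		data = 0;			/* timeout or PHY error */
#endif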
6631
6632 /*
6633 * wm_gmii_i82544_writereg: [mii interface function]
6634 *
6635 * Write a PHY register on the GMII.
6636 */
6637 static void
6638 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6639 {
6640 struct wm_softc *sc = device_private(self);
6641 uint32_t mdic = 0;
6642 int i;
6643
6644 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6645 MDIC_REGADD(reg) | MDIC_DATA(val));
6646
6647 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6648 mdic = CSR_READ(sc, WMREG_MDIC);
6649 if (mdic & MDIC_READY)
6650 break;
6651 delay(50);
6652 }
6653
6654 if ((mdic & MDIC_READY) == 0)
6655 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6656 device_xname(sc->sc_dev), phy, reg);
6657 else if (mdic & MDIC_E)
6658 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6659 device_xname(sc->sc_dev), phy, reg);
6660 }
6661
6662 /*
6663 * wm_gmii_i80003_readreg: [mii interface function]
6664 *
 * Read a PHY register on the kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6668 */
6669 static int
6670 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6671 {
6672 struct wm_softc *sc = device_private(self);
6673 int sem;
6674 int rv;
6675
6676 if (phy != 1) /* only one PHY on kumeran bus */
6677 return 0;
6678
6679 sem = swfwphysem[sc->sc_funcid];
6680 if (wm_get_swfw_semaphore(sc, sem)) {
6681 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6682 __func__);
6683 return 0;
6684 }
6685
6686 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6687 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6688 reg >> GG82563_PAGE_SHIFT);
6689 } else {
6690 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6691 reg >> GG82563_PAGE_SHIFT);
6692 }
	/* Wait another 200us; works around a bug in the MDIC ready bit */
6694 delay(200);
6695 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6696 delay(200);
6697
6698 wm_put_swfw_semaphore(sc, sem);
6699 return rv;
6700 }
6701
6702 /*
6703 * wm_gmii_i80003_writereg: [mii interface function]
6704 *
 * Write a PHY register on the kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6708 */
6709 static void
6710 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6711 {
6712 struct wm_softc *sc = device_private(self);
6713 int sem;
6714
6715 if (phy != 1) /* only one PHY on kumeran bus */
6716 return;
6717
6718 sem = swfwphysem[sc->sc_funcid];
6719 if (wm_get_swfw_semaphore(sc, sem)) {
6720 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6721 __func__);
6722 return;
6723 }
6724
6725 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6726 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6727 reg >> GG82563_PAGE_SHIFT);
6728 } else {
6729 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6730 reg >> GG82563_PAGE_SHIFT);
6731 }
	/* Wait another 200us; works around a bug in the MDIC ready bit */
6733 delay(200);
6734 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6735 delay(200);
6736
6737 wm_put_swfw_semaphore(sc, sem);
6738 }
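
/*
 * Editor's note: a worked example of the GG82563 page/offset split used
 * in the two routines above, assuming GG82563_PAGE_SHIFT is 5 and
 * GG82563_MAX_REG_ADDRESS is 0x1f (i.e. registers are addressed as
 * page * 32 + in-page offset):
 */
#if 0
	reg = 0x1a5;			/* combined register number */
	page = reg >> 5;		/* 0x1a5 >> 5 == 13 */
	offset = reg & 0x1f;		/* 0x1a5 & 0x1f == 5 */
	/*
	 * The page is written to the page-select register first, then
	 * the in-page offset is accessed through MDIC.
	 */
#endif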
6739
6740 /*
6741 * wm_gmii_bm_readreg: [mii interface function]
6742 *
 * Read a PHY register on the BM PHY (82567).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6746 */
6747 static int
6748 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6749 {
6750 struct wm_softc *sc = device_private(self);
6751 int sem;
6752 int rv;
6753
6754 sem = swfwphysem[sc->sc_funcid];
6755 if (wm_get_swfw_semaphore(sc, sem)) {
6756 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6757 __func__);
6758 return 0;
6759 }
6760
6761 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6762 if (phy == 1)
6763 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6764 reg);
6765 else
6766 wm_gmii_i82544_writereg(self, phy,
6767 GG82563_PHY_PAGE_SELECT,
6768 reg >> GG82563_PAGE_SHIFT);
6769 }
6770
6771 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6772 wm_put_swfw_semaphore(sc, sem);
6773 return rv;
6774 }
6775
6776 /*
6777 * wm_gmii_bm_writereg: [mii interface function]
6778 *
 * Write a PHY register on the BM PHY (82567).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6782 */
6783 static void
6784 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6785 {
6786 struct wm_softc *sc = device_private(self);
6787 int sem;
6788
6789 sem = swfwphysem[sc->sc_funcid];
6790 if (wm_get_swfw_semaphore(sc, sem)) {
6791 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6792 __func__);
6793 return;
6794 }
6795
6796 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6797 if (phy == 1)
6798 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6799 reg);
6800 else
6801 wm_gmii_i82544_writereg(self, phy,
6802 GG82563_PHY_PAGE_SELECT,
6803 reg >> GG82563_PAGE_SHIFT);
6804 }
6805
6806 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6807 wm_put_swfw_semaphore(sc, sem);
6808 }
6809
6810 static void
6811 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6812 {
6813 struct wm_softc *sc = device_private(self);
6814 uint16_t regnum = BM_PHY_REG_NUM(offset);
6815 uint16_t wuce;
6816
6817 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6818 if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
6820 }
6821
6822 /* Set page 769 */
6823 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6824 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6825
6826 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6827
6828 wuce &= ~BM_WUC_HOST_WU_BIT;
6829 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6830 wuce | BM_WUC_ENABLE_BIT);
6831
6832 /* Select page 800 */
6833 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6834 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6835
6836 /* Write page 800 */
6837 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6838
6839 if (rd)
6840 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6841 else
6842 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6843
6844 /* Set page 769 */
6845 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6846 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6847
6848 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6849 }
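
/*
 * Editor's summary of the access sequence above: (1) select page 769
 * and set the WUC-enable bit so that page 800 becomes reachable,
 * (2) select page 800, (3) write the target register number to the
 * address opcode register, (4) read or write the data opcode register,
 * (5) re-select page 769 and write back the enable-register value
 * saved in step 1, dropping the temporary enable bit.
 */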
6850
6851 /*
6852 * wm_gmii_hv_readreg: [mii interface function]
6853 *
 * Read a PHY register on the HV PHY family (8257[789], I21[78]).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6857 */
6858 static int
6859 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6860 {
6861 struct wm_softc *sc = device_private(self);
6862 uint16_t page = BM_PHY_REG_PAGE(reg);
6863 uint16_t regnum = BM_PHY_REG_NUM(reg);
6864 uint16_t val;
6865 int rv;
6866
6867 if (wm_get_swfwhw_semaphore(sc)) {
6868 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6869 __func__);
6870 return 0;
6871 }
6872
6873 /* XXX Workaround failure in MDIO access while cable is disconnected */
6874 if (sc->sc_phytype == WMPHY_82577) {
6875 /* XXX must write */
6876 }
6877
	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);
		return val;
	}

	/*
	 * Pages lower than 768 work differently than the rest, so they
	 * would need their own function (not implemented yet).
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);
		return 0;
	}
6892
6893 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6894 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6895 page << BME1000_PAGE_SHIFT);
6896 }
6897
6898 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6899 wm_put_swfwhw_semaphore(sc);
6900 return rv;
6901 }
6902
6903 /*
6904 * wm_gmii_hv_writereg: [mii interface function]
6905 *
 * Write a PHY register on the HV PHY family (8257[789], I21[78]).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6909 */
6910 static void
6911 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6912 {
6913 struct wm_softc *sc = device_private(self);
6914 uint16_t page = BM_PHY_REG_PAGE(reg);
6915 uint16_t regnum = BM_PHY_REG_NUM(reg);
6916
6917 if (wm_get_swfwhw_semaphore(sc)) {
6918 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6919 __func__);
6920 return;
6921 }
6922
6923 /* XXX Workaround failure in MDIO access while cable is disconnected */
6924
	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		uint16_t tmp;

		tmp = val;
		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);
		return;
	}

	/*
	 * Pages lower than 768 work differently than the rest, so they
	 * would need their own function (not implemented yet).
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);
		return;
	}
6942
6943 /*
6944 * XXX Workaround MDIO accesses being disabled after entering IEEE
6945 * Power Down (whenever bit 11 of the PHY control register is set)
6946 */
6947
6948 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6949 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6950 page << BME1000_PAGE_SHIFT);
6951 }
6952
6953 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6954 wm_put_swfwhw_semaphore(sc);
6955 }
6956
6957 /*
6958 * wm_gmii_82580_readreg: [mii interface function]
6959 *
6960 * Read a PHY register on the 82580 and I350.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6963 */
6964 static int
6965 wm_gmii_82580_readreg(device_t self, int phy, int reg)
6966 {
6967 struct wm_softc *sc = device_private(self);
6968 int sem;
6969 int rv;
6970
6971 sem = swfwphysem[sc->sc_funcid];
6972 if (wm_get_swfw_semaphore(sc, sem)) {
6973 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6974 __func__);
6975 return 0;
6976 }
6977
6978 rv = wm_gmii_i82544_readreg(self, phy, reg);
6979
6980 wm_put_swfw_semaphore(sc, sem);
6981 return rv;
6982 }
6983
6984 /*
6985 * wm_gmii_82580_writereg: [mii interface function]
6986 *
6987 * Write a PHY register on the 82580 and I350.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6990 */
6991 static void
6992 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
6993 {
6994 struct wm_softc *sc = device_private(self);
6995 int sem;
6996
6997 sem = swfwphysem[sc->sc_funcid];
6998 if (wm_get_swfw_semaphore(sc, sem)) {
6999 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7000 __func__);
7001 return;
7002 }
7003
7004 wm_gmii_i82544_writereg(self, phy, reg, val);
7005
7006 wm_put_swfw_semaphore(sc, sem);
7007 }
7008
7009 /*
7010 * wm_gmii_statchg: [mii interface function]
7011 *
7012 * Callback from MII layer when media changes.
7013 */
7014 static void
7015 wm_gmii_statchg(struct ifnet *ifp)
7016 {
7017 struct wm_softc *sc = ifp->if_softc;
7018 struct mii_data *mii = &sc->sc_mii;
7019
7020 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7021 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7022 sc->sc_fcrtl &= ~FCRTL_XONE;
7023
7024 /*
7025 * Get flow control negotiation result.
7026 */
7027 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7028 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7029 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7030 mii->mii_media_active &= ~IFM_ETH_FMASK;
7031 }
7032
7033 if (sc->sc_flowflags & IFM_FLOW) {
7034 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7035 sc->sc_ctrl |= CTRL_TFCE;
7036 sc->sc_fcrtl |= FCRTL_XONE;
7037 }
7038 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7039 sc->sc_ctrl |= CTRL_RFCE;
7040 }
7041
7042 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7043 DPRINTF(WM_DEBUG_LINK,
7044 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7045 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7046 } else {
7047 DPRINTF(WM_DEBUG_LINK,
7048 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7049 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7050 }
7051
7052 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7053 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7054 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7055 : WMREG_FCRTL, sc->sc_fcrtl);
7056 if (sc->sc_type == WM_T_80003) {
7057 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7058 case IFM_1000_T:
7059 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7060 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7061 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7062 break;
7063 default:
7064 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7065 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7066 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7067 break;
7068 }
7069 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7070 }
7071 }
7072
7073 /*
7074 * wm_kmrn_readreg:
7075 *
7076 * Read a kumeran register
7077 */
7078 static int
7079 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7080 {
7081 int rv;
7082
	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
7084 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7085 aprint_error_dev(sc->sc_dev,
7086 "%s: failed to get semaphore\n", __func__);
7087 return 0;
7088 }
	} else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
7090 if (wm_get_swfwhw_semaphore(sc)) {
7091 aprint_error_dev(sc->sc_dev,
7092 "%s: failed to get semaphore\n", __func__);
7093 return 0;
7094 }
7095 }
7096
7097 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7098 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7099 KUMCTRLSTA_REN);
7100 CSR_WRITE_FLUSH(sc);
7101 delay(2);
7102
7103 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7104
	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
7108 wm_put_swfwhw_semaphore(sc);
7109
7110 return rv;
7111 }
7112
7113 /*
7114 * wm_kmrn_writereg:
7115 *
7116 * Write a kumeran register
7117 */
7118 static void
7119 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7120 {
7121
	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
7123 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7124 aprint_error_dev(sc->sc_dev,
7125 "%s: failed to get semaphore\n", __func__);
7126 return;
7127 }
	} else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
7129 if (wm_get_swfwhw_semaphore(sc)) {
7130 aprint_error_dev(sc->sc_dev,
7131 "%s: failed to get semaphore\n", __func__);
7132 return;
7133 }
7134 }
7135
7136 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7137 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7138 (val & KUMCTRLSTA_MASK));
7139
	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
7143 wm_put_swfwhw_semaphore(sc);
7144 }
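
/*
 * Editor's sketch (not compiled) of the KUMCTRLSTA access cycle used by
 * the two routines above: the register offset sits in a high bit field
 * and the 16 data bits in the low half.  A write posts both at once; a
 * read first posts the offset with the REN bit set, then reads the data
 * back from the same register.  read_reg/write_reg are hypothetical
 * stand-ins for CSR_READ/CSR_WRITE:
 */
#if 0
	/* write cycle */
	write_reg(KUMCTRLSTA,
	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
	    (val & KUMCTRLSTA_MASK));

	/* read cycle */
	write_reg(KUMCTRLSTA,
	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
	    KUMCTRLSTA_REN);
	delay(2);				/* let hardware respond */
	data = read_reg(KUMCTRLSTA) & KUMCTRLSTA_MASK;
#endif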
7145
7146 /* SGMII related */
7147
7148 /*
7149 * wm_sgmii_uses_mdio
7150 *
7151 * Check whether the transaction is to the internal PHY or the external
7152 * MDIO interface. Return true if it's MDIO.
7153 */
7154 static bool
7155 wm_sgmii_uses_mdio(struct wm_softc *sc)
7156 {
7157 uint32_t reg;
7158 bool ismdio = false;
7159
7160 switch (sc->sc_type) {
7161 case WM_T_82575:
7162 case WM_T_82576:
7163 reg = CSR_READ(sc, WMREG_MDIC);
7164 ismdio = ((reg & MDIC_DEST) != 0);
7165 break;
7166 case WM_T_82580:
7167 case WM_T_I350:
7168 case WM_T_I354:
7169 case WM_T_I210:
7170 case WM_T_I211:
7171 reg = CSR_READ(sc, WMREG_MDICNFG);
7172 ismdio = ((reg & MDICNFG_DEST) != 0);
7173 break;
7174 default:
7175 break;
7176 }
7177
7178 return ismdio;
7179 }
7180
7181 /*
7182 * wm_sgmii_readreg: [mii interface function]
7183 *
 * Read a PHY register on the SGMII.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7187 */
7188 static int
7189 wm_sgmii_readreg(device_t self, int phy, int reg)
7190 {
7191 struct wm_softc *sc = device_private(self);
7192 uint32_t i2ccmd;
7193 int i, rv;
7194
7195 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7196 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7197 __func__);
7198 return 0;
7199 }
7200
7201 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7202 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7203 | I2CCMD_OPCODE_READ;
7204 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7205
7206 /* Poll the ready bit */
7207 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7208 delay(50);
7209 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7210 if (i2ccmd & I2CCMD_READY)
7211 break;
7212 }
7213 if ((i2ccmd & I2CCMD_READY) == 0)
7214 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7215 if ((i2ccmd & I2CCMD_ERROR) != 0)
7216 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7217
7218 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7219
7220 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7221 return rv;
7222 }
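
/*
 * Editor's note on the byte swap above: the I2CCMD data field carries
 * the two bytes read over I2C in big-endian (wire) order, so the 16-bit
 * PHY register value is recovered by exchanging the halves.  E.g. a raw
 * field of 0x1234 yields 0x3412.
 */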
7223
7224 /*
7225 * wm_sgmii_writereg: [mii interface function]
7226 *
7227 * Write a PHY register on the SGMII.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7230 */
7231 static void
7232 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7233 {
7234 struct wm_softc *sc = device_private(self);
7235 uint32_t i2ccmd;
7236 int i;
7237
7238 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7239 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7240 __func__);
7241 return;
7242 }
7243
7244 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7245 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7246 | I2CCMD_OPCODE_WRITE;
7247 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7248
7249 /* Poll the ready bit */
7250 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7251 delay(50);
7252 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7253 if (i2ccmd & I2CCMD_READY)
7254 break;
7255 }
7256 if ((i2ccmd & I2CCMD_READY) == 0)
7257 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7258 if ((i2ccmd & I2CCMD_ERROR) != 0)
7259 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7260
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7262 }
7263
7264 /* TBI related */
7265
7266 /* XXX Currently TBI only */
7267 static int
7268 wm_check_for_link(struct wm_softc *sc)
7269 {
7270 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7271 uint32_t rxcw;
7272 uint32_t ctrl;
7273 uint32_t status;
7274 uint32_t sig;
7275
7276 if (sc->sc_mediatype & WMP_F_SERDES) {
7277 sc->sc_tbi_linkup = 1;
7278 return 0;
7279 }
7280
7281 rxcw = CSR_READ(sc, WMREG_RXCW);
7282 ctrl = CSR_READ(sc, WMREG_CTRL);
7283 status = CSR_READ(sc, WMREG_STATUS);
7284
7285 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7286
7287 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7288 device_xname(sc->sc_dev), __func__,
7289 ((ctrl & CTRL_SWDPIN(1)) == sig),
7290 ((status & STATUS_LU) != 0),
7291 ((rxcw & RXCW_C) != 0)
7292 ));
7293
7294 /*
7295 * SWDPIN LU RXCW
7296 * 0 0 0
7297 * 0 0 1 (should not happen)
7298 * 0 1 0 (should not happen)
7299 * 0 1 1 (should not happen)
7300 * 1 0 0 Disable autonego and force linkup
7301 * 1 0 1 got /C/ but not linkup yet
7302 * 1 1 0 (linkup)
7303 * 1 1 1 If IFM_AUTO, back to autonego
7304 *
7305 */
7306 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7307 && ((status & STATUS_LU) == 0)
7308 && ((rxcw & RXCW_C) == 0)) {
7309 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7310 __func__));
7311 sc->sc_tbi_linkup = 0;
7312 /* Disable auto-negotiation in the TXCW register */
7313 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7314
7315 /*
7316 * Force link-up and also force full-duplex.
7317 *
		 * NOTE: the TFCE and RFCE bits in CTRL were updated
		 * automatically, so we should refresh sc->sc_ctrl.
7320 */
7321 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7322 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7323 } else if (((status & STATUS_LU) != 0)
7324 && ((rxcw & RXCW_C) != 0)
7325 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7326 sc->sc_tbi_linkup = 1;
7327 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7328 __func__));
7329 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7330 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7331 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7332 && ((rxcw & RXCW_C) != 0)) {
7333 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7334 } else {
7335 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7336 status));
7337 }
7338
7339 return 0;
7340 }
7341
7342 /*
7343 * wm_tbi_mediainit:
7344 *
7345 * Initialize media for use on 1000BASE-X devices.
7346 */
7347 static void
7348 wm_tbi_mediainit(struct wm_softc *sc)
7349 {
7350 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7351 const char *sep = "";
7352
7353 if (sc->sc_type < WM_T_82543)
7354 sc->sc_tipg = TIPG_WM_DFLT;
7355 else
7356 sc->sc_tipg = TIPG_LG_DFLT;
7357
7358 sc->sc_tbi_anegticks = 5;
7359
7360 /* Initialize our media structures */
7361 sc->sc_mii.mii_ifp = ifp;
7362
7363 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7364 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
7365 wm_tbi_mediastatus);
7366
7367 /*
7368 * SWD Pins:
7369 *
7370 * 0 = Link LED (output)
7371 * 1 = Loss Of Signal (input)
7372 */
7373 sc->sc_ctrl |= CTRL_SWDPIO(0);
7374 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
7375 if (sc->sc_mediatype & WMP_F_SERDES)
7376 sc->sc_ctrl &= ~CTRL_LRST;
7377
7378 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7379
7380 #define ADD(ss, mm, dd) \
7381 do { \
7382 aprint_normal("%s%s", sep, ss); \
7383 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
7384 sep = ", "; \
7385 } while (/*CONSTCOND*/0)
7386
7387 aprint_normal_dev(sc->sc_dev, "");
7388
7389 /* Only 82545 is LX */
7390 if (sc->sc_type == WM_T_82545) {
7391 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
7392 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
7393 } else {
7394 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
7395 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
7396 }
7397 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
7398 aprint_normal("\n");
7399
7400 #undef ADD
7401
7402 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
7403 }
7404
7405 /*
7406 * wm_tbi_mediastatus: [ifmedia interface function]
7407 *
7408 * Get the current interface media status on a 1000BASE-X device.
7409 */
7410 static void
7411 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7412 {
7413 struct wm_softc *sc = ifp->if_softc;
7414 uint32_t ctrl, status;
7415
7416 ifmr->ifm_status = IFM_AVALID;
7417 ifmr->ifm_active = IFM_ETHER;
7418
7419 status = CSR_READ(sc, WMREG_STATUS);
7420 if ((status & STATUS_LU) == 0) {
7421 ifmr->ifm_active |= IFM_NONE;
7422 return;
7423 }
7424
7425 ifmr->ifm_status |= IFM_ACTIVE;
7426 /* Only 82545 is LX */
7427 if (sc->sc_type == WM_T_82545)
7428 ifmr->ifm_active |= IFM_1000_LX;
7429 else
7430 ifmr->ifm_active |= IFM_1000_SX;
7431 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
7432 ifmr->ifm_active |= IFM_FDX;
7433 else
7434 ifmr->ifm_active |= IFM_HDX;
7435 ctrl = CSR_READ(sc, WMREG_CTRL);
7436 if (ctrl & CTRL_RFCE)
7437 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
7438 if (ctrl & CTRL_TFCE)
7439 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
7440 }
7441
7442 /*
7443 * wm_tbi_mediachange: [ifmedia interface function]
7444 *
7445 * Set hardware to newly-selected media on a 1000BASE-X device.
7446 */
7447 static int
7448 wm_tbi_mediachange(struct ifnet *ifp)
7449 {
7450 struct wm_softc *sc = ifp->if_softc;
7451 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7452 uint32_t status;
7453 int i;
7454
7455 if (sc->sc_mediatype & WMP_F_SERDES)
7456 return 0;
7457
7458 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
7459 || (sc->sc_type >= WM_T_82575))
7460 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
7461
7462 /* XXX power_up_serdes_link_82575() */
7463
7464 sc->sc_ctrl &= ~CTRL_LRST;
7465 sc->sc_txcw = TXCW_ANE;
7466 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7467 sc->sc_txcw |= TXCW_FD | TXCW_HD;
7468 else if (ife->ifm_media & IFM_FDX)
7469 sc->sc_txcw |= TXCW_FD;
7470 else
7471 sc->sc_txcw |= TXCW_HD;
7472
7473 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
7474 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
7475
7476 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
7477 device_xname(sc->sc_dev), sc->sc_txcw));
7478 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7479 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7480 CSR_WRITE_FLUSH(sc);
7481 delay(1000);
7482
7483 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
7484 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
7485
7486 /*
7487 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
7488 * optics detect a signal, 0 if they don't.
7489 */
7490 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
7491 /* Have signal; wait for the link to come up. */
7492 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
7493 delay(10000);
7494 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
7495 break;
7496 }
7497
7498 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
7499 device_xname(sc->sc_dev),i));
7500
7501 status = CSR_READ(sc, WMREG_STATUS);
7502 DPRINTF(WM_DEBUG_LINK,
7503 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
7504 device_xname(sc->sc_dev),status, STATUS_LU));
7505 if (status & STATUS_LU) {
7506 /* Link is up. */
7507 DPRINTF(WM_DEBUG_LINK,
7508 ("%s: LINK: set media -> link up %s\n",
7509 device_xname(sc->sc_dev),
7510 (status & STATUS_FD) ? "FDX" : "HDX"));
7511
7512 /*
7513 * NOTE: CTRL will update TFCE and RFCE automatically,
7514 * so we should update sc->sc_ctrl
7515 */
7516 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7517 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7518 sc->sc_fcrtl &= ~FCRTL_XONE;
7519 if (status & STATUS_FD)
7520 sc->sc_tctl |=
7521 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7522 else
7523 sc->sc_tctl |=
7524 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7525 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
7526 sc->sc_fcrtl |= FCRTL_XONE;
7527 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7528 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7529 WMREG_OLD_FCRTL : WMREG_FCRTL,
7530 sc->sc_fcrtl);
7531 sc->sc_tbi_linkup = 1;
7532 } else {
7533 if (i == WM_LINKUP_TIMEOUT)
7534 wm_check_for_link(sc);
7535 /* Link is down. */
7536 DPRINTF(WM_DEBUG_LINK,
7537 ("%s: LINK: set media -> link down\n",
7538 device_xname(sc->sc_dev)));
7539 sc->sc_tbi_linkup = 0;
7540 }
7541 } else {
7542 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
7543 device_xname(sc->sc_dev)));
7544 sc->sc_tbi_linkup = 0;
7545 }
7546
7547 wm_tbi_set_linkled(sc);
7548
7549 return 0;
7550 }
7551
7552 /*
7553 * wm_tbi_set_linkled:
7554 *
7555 * Update the link LED on 1000BASE-X devices.
7556 */
7557 static void
7558 wm_tbi_set_linkled(struct wm_softc *sc)
7559 {
7560
7561 if (sc->sc_tbi_linkup)
7562 sc->sc_ctrl |= CTRL_SWDPIN(0);
7563 else
7564 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7565
7566 /* 82540 or newer devices are active low */
7567 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7568
7569 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7570 }
7571
7572 /*
7573 * wm_tbi_check_link:
7574 *
7575 * Check the link on 1000BASE-X devices.
7576 */
7577 static void
7578 wm_tbi_check_link(struct wm_softc *sc)
7579 {
7580 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7581 uint32_t status;
7582
7583 KASSERT(WM_TX_LOCKED(sc));
7584
7585 if (sc->sc_mediatype & WMP_F_SERDES) {
7586 sc->sc_tbi_linkup = 1;
7587 return;
7588 }
7589
7590 status = CSR_READ(sc, WMREG_STATUS);
7591
7592 /* XXX is this needed? */
7593 (void)CSR_READ(sc, WMREG_RXCW);
7594 (void)CSR_READ(sc, WMREG_CTRL);
7595
7596 /* set link status */
7597 if ((status & STATUS_LU) == 0) {
7598 DPRINTF(WM_DEBUG_LINK,
7599 ("%s: LINK: checklink -> down\n",
7600 device_xname(sc->sc_dev)));
7601 sc->sc_tbi_linkup = 0;
7602 } else if (sc->sc_tbi_linkup == 0) {
7603 DPRINTF(WM_DEBUG_LINK,
7604 ("%s: LINK: checklink -> up %s\n",
7605 device_xname(sc->sc_dev),
7606 (status & STATUS_FD) ? "FDX" : "HDX"));
7607 sc->sc_tbi_linkup = 1;
7608 }
7609
7610 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
7611 && ((status & STATUS_LU) == 0)) {
7612 sc->sc_tbi_linkup = 0;
7613 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7614 /* If the timer expired, retry autonegotiation */
7615 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
7616 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
7617 sc->sc_tbi_ticks = 0;
7618 /*
7619 * Reset the link, and let autonegotiation do
7620 * its thing
7621 */
7622 sc->sc_ctrl |= CTRL_LRST;
7623 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7624 CSR_WRITE_FLUSH(sc);
7625 delay(1000);
7626 sc->sc_ctrl &= ~CTRL_LRST;
7627 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7628 CSR_WRITE_FLUSH(sc);
7629 delay(1000);
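/*
 * Rewriting TXCW with ANE cleared and then set
 * restarts 802.3z autonegotiation.
 */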
7630 CSR_WRITE(sc, WMREG_TXCW,
7631 sc->sc_txcw & ~TXCW_ANE);
7632 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7633 }
7634 }
7635 }
7636
7637 wm_tbi_set_linkled(sc);
7638 }
7639
7640 /* SFP related */
7641
7642 static int
7643 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
7644 {
7645 uint32_t i2ccmd;
7646 int i;
7647
7648 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
7649 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7650
7651 /* Poll the ready bit */
7652 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7653 delay(50);
7654 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7655 if (i2ccmd & I2CCMD_READY)
7656 break;
7657 }
7658 if ((i2ccmd & I2CCMD_READY) == 0)
7659 return -1;
7660 if ((i2ccmd & I2CCMD_ERROR) != 0)
7661 return -1;
7662
7663 *data = i2ccmd & 0x00ff;
7664
7665 return 0;
7666 }
7667
7668 static uint32_t
7669 wm_sfp_get_media_type(struct wm_softc *sc)
7670 {
7671 uint32_t ctrl_ext;
7672 uint8_t val = 0;
7673 int timeout = 3;
7674 uint32_t mediatype = WMP_F_UNKNOWN;
7675 int rv = -1;
7676
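/*
 * Enable the I2C interface (and clear SDP3) so the SFP module's
 * ID PROM can be read; the saved CTRL_EXT value is restored at
 * "out" below.
 */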
7677 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7678 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
7679 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
7680 CSR_WRITE_FLUSH(sc);
7681
7682 /* Read SFP module data */
7683 while (timeout) {
7684 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
7685 if (rv == 0)
7686 break;
7687 delay(100*1000); /* XXX too big */
7688 timeout--;
7689 }
7690 if (rv != 0)
7691 goto out;
7692 switch (val) {
7693 case SFF_SFP_ID_SFF:
7694 aprint_normal_dev(sc->sc_dev,
7695 "Module/Connector soldered to board\n");
7696 break;
7697 case SFF_SFP_ID_SFP:
7698 aprint_normal_dev(sc->sc_dev, "SFP\n");
7699 break;
7700 case SFF_SFP_ID_UNKNOWN:
7701 goto out;
7702 default:
7703 break;
7704 }
7705
7706 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
7707 if (rv != 0) {
7708 goto out;
7709 }
7710
7711 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
7712 mediatype = WMP_F_SERDES;
7713 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
7714 sc->sc_flags |= WM_F_SGMII;
7715 mediatype = WMP_F_COPPER;
7716 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
7717 sc->sc_flags |= WM_F_SGMII;
7718 mediatype = WMP_F_SERDES;
7719 }
7720
7721 out:
7722 /* Restore I2C interface setting */
7723 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7724
7725 return mediatype;
7726 }

7727 /*
7728 * NVM related.
7729 * Microwire, SPI (with or without EERD) and Flash.
7730 */
7731
7732 /* Both spi and uwire */
7733
7734 /*
7735 * wm_eeprom_sendbits:
7736 *
7737 * Send a series of bits to the EEPROM.
7738 */
7739 static void
7740 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
7741 {
7742 uint32_t reg;
7743 int x;
7744
7745 reg = CSR_READ(sc, WMREG_EECD);
7746
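/*
 * Shift the bits out MSB first: present each bit on DI, then
 * pulse SK high and back low, with ~2us settling around each
 * edge.
 */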
7747 for (x = nbits; x > 0; x--) {
7748 if (bits & (1U << (x - 1)))
7749 reg |= EECD_DI;
7750 else
7751 reg &= ~EECD_DI;
7752 CSR_WRITE(sc, WMREG_EECD, reg);
7753 CSR_WRITE_FLUSH(sc);
7754 delay(2);
7755 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7756 CSR_WRITE_FLUSH(sc);
7757 delay(2);
7758 CSR_WRITE(sc, WMREG_EECD, reg);
7759 CSR_WRITE_FLUSH(sc);
7760 delay(2);
7761 }
7762 }
7763
7764 /*
7765 * wm_eeprom_recvbits:
7766 *
7767 * Receive a series of bits from the EEPROM.
7768 */
7769 static void
7770 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
7771 {
7772 uint32_t reg, val;
7773 int x;
7774
7775 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
7776
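/* Shift the bits in MSB first: raise SK, sample DO, drop SK. */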
7777 val = 0;
7778 for (x = nbits; x > 0; x--) {
7779 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7780 CSR_WRITE_FLUSH(sc);
7781 delay(2);
7782 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
7783 val |= (1U << (x - 1));
7784 CSR_WRITE(sc, WMREG_EECD, reg);
7785 CSR_WRITE_FLUSH(sc);
7786 delay(2);
7787 }
7788 *valp = val;
7789 }
7790
7791 /* Microwire */
7792
7793 /*
7794 * wm_nvm_read_uwire:
7795 *
7796 * Read word(s) from the EEPROM using the MicroWire protocol.
7797 */
7798 static int
7799 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7800 {
7801 uint32_t reg, val;
7802 int i;
7803
7804 for (i = 0; i < wordcnt; i++) {
7805 /* Clear SK and DI. */
7806 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
7807 CSR_WRITE(sc, WMREG_EECD, reg);
7808
7809 /*
7810 * XXX: workaround for a bug in qemu-0.12.x and prior,
7811 * and in Xen.
7812 *
7813 * We apply this workaround only to the 82540 because
7814 * qemu's e1000 emulates an 82540.
7815 */
7816 if (sc->sc_type == WM_T_82540) {
7817 reg |= EECD_SK;
7818 CSR_WRITE(sc, WMREG_EECD, reg);
7819 reg &= ~EECD_SK;
7820 CSR_WRITE(sc, WMREG_EECD, reg);
7821 CSR_WRITE_FLUSH(sc);
7822 delay(2);
7823 }
7824 /* XXX: end of workaround */
7825
7826 /* Set CHIP SELECT. */
7827 reg |= EECD_CS;
7828 CSR_WRITE(sc, WMREG_EECD, reg);
7829 CSR_WRITE_FLUSH(sc);
7830 delay(2);
7831
7832 /* Shift in the READ command. */
7833 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
7834
7835 /* Shift in address. */
7836 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
7837
7838 /* Shift out the data. */
7839 wm_eeprom_recvbits(sc, &val, 16);
7840 data[i] = val & 0xffff;
7841
7842 /* Clear CHIP SELECT. */
7843 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
7844 CSR_WRITE(sc, WMREG_EECD, reg);
7845 CSR_WRITE_FLUSH(sc);
7846 delay(2);
7847 }
7848
7849 return 0;
7850 }
7851
7852 /* SPI */
7853
7854 /*
7855 * Set SPI and FLASH related information from the EECD register.
7856 * For 82541 and 82547, the word size is taken from EEPROM.
7857 */
7858 static int
7859 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
7860 {
7861 int size;
7862 uint32_t reg;
7863 uint16_t data;
7864
7865 reg = CSR_READ(sc, WMREG_EECD);
7866 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
7867
7868 /* Read the size of NVM from EECD by default */
7869 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
7870 switch (sc->sc_type) {
7871 case WM_T_82541:
7872 case WM_T_82541_2:
7873 case WM_T_82547:
7874 case WM_T_82547_2:
7875 /* Set dummy value to access EEPROM */
7876 sc->sc_nvm_wordsize = 64;
7877 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
7878 reg = data;
7879 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
7880 if (size == 0)
7881 size = 6; /* 64 word size */
7882 else
7883 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
7884 break;
7885 case WM_T_80003:
7886 case WM_T_82571:
7887 case WM_T_82572:
7888 case WM_T_82573: /* SPI case */
7889 case WM_T_82574: /* SPI case */
7890 case WM_T_82583: /* SPI case */
7891 size += NVM_WORD_SIZE_BASE_SHIFT;
7892 if (size > 14)
7893 size = 14;
7894 break;
7895 case WM_T_82575:
7896 case WM_T_82576:
7897 case WM_T_82580:
7898 case WM_T_I350:
7899 case WM_T_I354:
7900 case WM_T_I210:
7901 case WM_T_I211:
7902 size += NVM_WORD_SIZE_BASE_SHIFT;
7903 if (size > 15)
7904 size = 15;
7905 break;
7906 default:
7907 aprint_error_dev(sc->sc_dev,
7908 "%s: unknown device (%d)?\n", __func__, sc->sc_type);
7909 return -1;
7911 }
7912
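/* At this point "size" is log2 of the NVM size in words. */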
7913 sc->sc_nvm_wordsize = 1 << size;
7914
7915 return 0;
7916 }
7917
7918 /*
7919 * wm_nvm_ready_spi:
7920 *
7921 * Wait for a SPI EEPROM to be ready for commands.
7922 */
7923 static int
7924 wm_nvm_ready_spi(struct wm_softc *sc)
7925 {
7926 uint32_t val;
7927 int usec;
7928
7929 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
7930 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
7931 wm_eeprom_recvbits(sc, &val, 8);
7932 if ((val & SPI_SR_RDY) == 0)
7933 break;
7934 }
7935 if (usec >= SPI_MAX_RETRIES) {
7936 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
7937 return 1;
7938 }
7939 return 0;
7940 }
7941
7942 /*
7943 * wm_nvm_read_spi:
7944 *
7945 * Read word(s) from the EEPROM using the SPI protocol.
7946 */
7947 static int
7948 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7949 {
7950 uint32_t reg, val;
7951 int i;
7952 uint8_t opc;
7953
7954 /* Clear SK and CS. */
7955 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
7956 CSR_WRITE(sc, WMREG_EECD, reg);
7957 CSR_WRITE_FLUSH(sc);
7958 delay(2);
7959
7960 if (wm_nvm_ready_spi(sc))
7961 return 1;
7962
7963 /* Toggle CS to flush commands. */
7964 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
7965 CSR_WRITE_FLUSH(sc);
7966 delay(2);
7967 CSR_WRITE(sc, WMREG_EECD, reg);
7968 CSR_WRITE_FLUSH(sc);
7969 delay(2);
7970
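/*
 * Small SPI EEPROMs with 8 address bits carry byte-address bit 8
 * in the opcode (SPI_OPC_A8); the part expects a byte address,
 * hence word << 1 below.
 */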
7971 opc = SPI_OPC_READ;
7972 if (sc->sc_nvm_addrbits == 8 && word >= 128)
7973 opc |= SPI_OPC_A8;
7974
7975 wm_eeprom_sendbits(sc, opc, 8);
7976 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
7977
7978 for (i = 0; i < wordcnt; i++) {
7979 wm_eeprom_recvbits(sc, &val, 16);
7980 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
7981 }
7982
7983 /* Raise CS and clear SK. */
7984 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
7985 CSR_WRITE(sc, WMREG_EECD, reg);
7986 CSR_WRITE_FLUSH(sc);
7987 delay(2);
7988
7989 return 0;
7990 }
7991
7992 /* Reading via the EERD register */
7993
7994 static int
7995 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
7996 {
7997 uint32_t attempts = 100000;
7998 uint32_t i, reg = 0;
7999 int32_t done = -1;
8000
8001 for (i = 0; i < attempts; i++) {
8002 reg = CSR_READ(sc, rw);
8003
8004 if (reg & EERD_DONE) {
8005 done = 0;
8006 break;
8007 }
8008 delay(5);
8009 }
8010
8011 return done;
8012 }
8013
8014 static int
8015 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
8016 uint16_t *data)
8017 {
8018 int i, eerd = 0;
8019 int error = 0;
8020
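/*
 * For each word: start a read by writing the address with
 * EERD_START set, poll for EERD_DONE, then take the data from
 * the upper bits of the same register.
 */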
8021 for (i = 0; i < wordcnt; i++) {
8022 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
8023
8024 CSR_WRITE(sc, WMREG_EERD, eerd);
8025 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
8026 if (error != 0)
8027 break;
8028
8029 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
8030 }
8031
8032 return error;
8033 }
8034
8035 /* Flash */
8036
8037 static int
8038 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
8039 {
8040 uint32_t eecd;
8041 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
8042 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
8043 uint8_t sig_byte = 0;
8044
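/*
 * Each flash bank carries a signature in the high byte of its
 * copy of word ICH_NVM_SIG_WORD; the bank whose signature
 * matches ICH_NVM_SIG_VALUE is the valid one.
 */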
8045 switch (sc->sc_type) {
8046 case WM_T_ICH8:
8047 case WM_T_ICH9:
8048 eecd = CSR_READ(sc, WMREG_EECD);
8049 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
8050 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
8051 return 0;
8052 }
8053 /* FALLTHROUGH */
8054 default:
8055 /* Default to 0 */
8056 *bank = 0;
8057
8058 /* Check bank 0 */
8059 wm_read_ich8_byte(sc, act_offset, &sig_byte);
8060 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8061 *bank = 0;
8062 return 0;
8063 }
8064
8065 /* Check bank 1 */
8066 wm_read_ich8_byte(sc, act_offset + bank1_offset,
8067 &sig_byte);
8068 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8069 *bank = 1;
8070 return 0;
8071 }
8072 }
8073
8074 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
8075 device_xname(sc->sc_dev)));
8076 return -1;
8077 }
8078
8079 /******************************************************************************
8080 * This function does initial flash setup so that a new read/write/erase cycle
8081 * can be started.
8082 *
8083 * sc - The pointer to the wm_softc structure
8084 ****************************************************************************/
8085 static int32_t
8086 wm_ich8_cycle_init(struct wm_softc *sc)
8087 {
8088 uint16_t hsfsts;
8089 int32_t error = 1;
8090 int32_t i = 0;
8091
8092 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8093
8094 /* Check the Flash Descriptor Valid bit in HW status */
8095 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
8096 return error;
8097 }
8098
8099 /* Clear FCERR and DAEL in HW status by writing 1s */
8101 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
8102
8103 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8104
8105 /*
8106 * Ideally the hardware would provide either a cycle-in-progress
8107 * bit to check before starting a new cycle, or an FDONE bit that
8108 * reads as 1 after a hardware reset, as an indication of whether
8109 * a cycle is in progress or has completed.  There should also be
8110 * a software semaphore guarding FDONE and the in-progress bit, so
8111 * that accesses by two threads are serialized and two threads
8112 * cannot start a cycle at the same time.
8113 */
8115
8116 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8117 /*
8118 * There is no cycle running at present, so we can start a
8119 * cycle
8120 */
8121
8122 /* Begin by setting Flash Cycle Done. */
8123 hsfsts |= HSFSTS_DONE;
8124 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8125 error = 0;
8126 } else {
8127 /*
8128 * Otherwise poll for some time so the current cycle has a
8129 * chance to end before giving up.
8130 */
8131 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
8132 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8133 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8134 error = 0;
8135 break;
8136 }
8137 delay(1);
8138 }
8139 if (error == 0) {
8140 /*
8141 * The previous cycle completed within the timeout;
8142 * now set the Flash Cycle Done bit.
8143 */
8144 hsfsts |= HSFSTS_DONE;
8145 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8146 }
8147 }
8148 return error;
8149 }
8150
8151 /******************************************************************************
8152 * This function starts a flash cycle and waits for its completion
8153 *
8154 * sc - The pointer to the wm_softc structure
8155 ****************************************************************************/
8156 static int32_t
8157 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
8158 {
8159 uint16_t hsflctl;
8160 uint16_t hsfsts;
8161 int32_t error = 1;
8162 uint32_t i = 0;
8163
8164 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
8165 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8166 hsflctl |= HSFCTL_GO;
8167 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8168
8169 /* Wait till FDONE bit is set to 1 */
8170 do {
8171 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8172 if (hsfsts & HSFSTS_DONE)
8173 break;
8174 delay(1);
8175 i++;
8176 } while (i < timeout);
8177 if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
8178 error = 0;
8179
8180 return error;
8181 }
8182
8183 /******************************************************************************
8184 * Reads a byte or word from the NVM using the ICH8 flash access registers.
8185 *
8186 * sc - The pointer to the wm_softc structure
8187 * index - The index of the byte or word to read.
8188 * size - Size of data to read, 1=byte 2=word
8189 * data - Pointer to the word to store the value read.
8190 *****************************************************************************/
8191 static int32_t
8192 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
8193 uint32_t size, uint16_t *data)
8194 {
8195 uint16_t hsfsts;
8196 uint16_t hsflctl;
8197 uint32_t flash_linear_address;
8198 uint32_t flash_data = 0;
8199 int32_t error = 1;
8200 int32_t count = 0;
8201
8202 if (size < 1 || size > 2 || data == NULL ||
8203 index > ICH_FLASH_LINEAR_ADDR_MASK)
8204 return error;
8205
8206 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
8207 sc->sc_ich8_flash_base;
8208
8209 do {
8210 delay(1);
8211 /* Steps */
8212 error = wm_ich8_cycle_init(sc);
8213 if (error)
8214 break;
8215
8216 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8217 /* A byte count field value of 0/1 selects a 1- or 2-byte transfer. */
8218 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
8219 & HSFCTL_BCOUNT_MASK;
8220 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
8221 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8222
8223 /*
8224 * Write the last 24 bits of index into Flash Linear address
8225 * field in Flash Address
8226 */
8227 /* TODO: TBD maybe check the index against the size of flash */
8228
8229 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
8230
8231 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
8232
8233 /*
8234 * If FCERR is set, clear it and retry the whole sequence a few
8235 * more times; otherwise read the data out of Flash Data0, least
8236 * significant byte first.
8237 */
8239 if (error == 0) {
8240 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
8241 if (size == 1)
8242 *data = (uint8_t)(flash_data & 0x000000FF);
8243 else if (size == 2)
8244 *data = (uint16_t)(flash_data & 0x0000FFFF);
8245 break;
8246 } else {
8247 /*
8248 * If we've gotten here, then things are probably
8249 * completely hosed, but if the error condition is
8250 * detected, it won't hurt to give it another try...
8251 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
8252 */
8253 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8254 if (hsfsts & HSFSTS_ERR) {
8255 /* Repeat for some time before giving up. */
8256 continue;
8257 } else if ((hsfsts & HSFSTS_DONE) == 0)
8258 break;
8259 }
8260 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8261
8262 return error;
8263 }
8264
8265 /******************************************************************************
8266 * Reads a single byte from the NVM using the ICH8 flash access registers.
8267 *
8268 * sc - pointer to the wm_softc structure
8269 * index - The index of the byte to read.
8270 * data - Pointer to a byte to store the value read.
8271 *****************************************************************************/
8272 static int32_t
8273 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
8274 {
8275 int32_t status;
8276 uint16_t word = 0;
8277
8278 status = wm_read_ich8_data(sc, index, 1, &word);
8279 if (status == 0)
8280 *data = (uint8_t)word;
8281 else
8282 *data = 0;
8283
8284 return status;
8285 }
8286
8287 /******************************************************************************
8288 * Reads a word from the NVM using the ICH8 flash access registers.
8289 *
8290 * sc - pointer to the wm_softc structure
8291 * index - The starting byte index of the word to read.
8292 * data - Pointer to a word to store the value read.
8293 *****************************************************************************/
8294 static int32_t
8295 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8296 {
8297 int32_t status;
8298
8299 status = wm_read_ich8_data(sc, index, 2, data);
8300 return status;
8301 }
8302
8303 /******************************************************************************
8304 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
8305 * register.
8306 *
8307 * sc - Struct containing variables accessed by shared code
8308 * offset - offset of word in the EEPROM to read
8309 * words - number of words to read
8310 * data - word(s) read from the EEPROM
8311 *****************************************************************************/
8312 static int
8313 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
8314 {
8315 int32_t error = 0;
8316 uint32_t flash_bank = 0;
8317 uint32_t act_offset = 0;
8318 uint32_t bank_offset = 0;
8319 uint16_t word = 0;
8320 uint16_t i = 0;
8321
8322 /*
8323 * We need to know which is the valid flash bank. In the event
8324 * that we didn't allocate eeprom_shadow_ram, we may not be
8325 * managing flash_bank. So it cannot be trusted and needs
8326 * to be updated with each read.
8327 */
8328 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
8329 if (error) {
8330 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
8331 device_xname(sc->sc_dev)));
8332 flash_bank = 0;
8333 }
8334
8335 /*
8336 * If we're on bank 1, advance the offset by the bank size,
8337 * converted from words to bytes.
8338 */
8339 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
8340
8341 error = wm_get_swfwhw_semaphore(sc);
8342 if (error) {
8343 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8344 __func__);
8345 return error;
8346 }
8347
8348 for (i = 0; i < words; i++) {
8349 /* The NVM part needs a byte offset, hence * 2 */
8350 act_offset = bank_offset + ((offset + i) * 2);
8351 error = wm_read_ich8_word(sc, act_offset, &word);
8352 if (error) {
8353 aprint_error_dev(sc->sc_dev,
8354 "%s: failed to read NVM\n", __func__);
8355 break;
8356 }
8357 data[i] = word;
8358 }
8359
8360 wm_put_swfwhw_semaphore(sc);
8361 return error;
8362 }
8363
8364 /* Locking, NVM type detection, checksum validation and reading */
8365
8366 /*
8367 * wm_nvm_acquire:
8368 *
8369 * Perform the EEPROM handshake required on some chips.
8370 */
8371 static int
8372 wm_nvm_acquire(struct wm_softc *sc)
8373 {
8374 uint32_t reg;
8375 int x;
8376 int ret = 0;
8377
8378 /* Flash-type NVM needs no handshake; always succeeds */
8379 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8380 return 0;
8381
8382 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8383 ret = wm_get_swfwhw_semaphore(sc);
8384 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
8385 /* This will also do wm_get_swsm_semaphore() if needed */
8386 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
8387 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
8388 ret = wm_get_swsm_semaphore(sc);
8389 }
8390
8391 if (ret) {
8392 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8393 __func__);
8394 return 1;
8395 }
8396
8397 if (sc->sc_flags & WM_F_LOCK_EECD) {
8398 reg = CSR_READ(sc, WMREG_EECD);
8399
8400 /* Request EEPROM access. */
8401 reg |= EECD_EE_REQ;
8402 CSR_WRITE(sc, WMREG_EECD, reg);
8403
8404 /* ..and wait for it to be granted. */
8405 for (x = 0; x < 1000; x++) {
8406 reg = CSR_READ(sc, WMREG_EECD);
8407 if (reg & EECD_EE_GNT)
8408 break;
8409 delay(5);
8410 }
8411 if ((reg & EECD_EE_GNT) == 0) {
8412 aprint_error_dev(sc->sc_dev,
8413 "could not acquire EEPROM GNT\n");
8414 reg &= ~EECD_EE_REQ;
8415 CSR_WRITE(sc, WMREG_EECD, reg);
8416 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8417 wm_put_swfwhw_semaphore(sc);
8418 if (sc->sc_flags & WM_F_LOCK_SWFW)
8419 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8420 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8421 wm_put_swsm_semaphore(sc);
8422 return 1;
8423 }
8424 }
8425
8426 return 0;
8427 }
8428
8429 /*
8430 * wm_nvm_release:
8431 *
8432 * Release the EEPROM mutex.
8433 */
8434 static void
8435 wm_nvm_release(struct wm_softc *sc)
8436 {
8437 uint32_t reg;
8438
8439 /* Flash-type NVM needs no handshake; nothing to release */
8440 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8441 return;
8442
8443 if (sc->sc_flags & WM_F_LOCK_EECD) {
8444 reg = CSR_READ(sc, WMREG_EECD);
8445 reg &= ~EECD_EE_REQ;
8446 CSR_WRITE(sc, WMREG_EECD, reg);
8447 }
8448
8449 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8450 wm_put_swfwhw_semaphore(sc);
8451 if (sc->sc_flags & WM_F_LOCK_SWFW)
8452 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8453 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8454 wm_put_swsm_semaphore(sc);
8455 }
8456
8457 static int
8458 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
8459 {
8460 uint32_t eecd = 0;
8461
8462 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
8463 || sc->sc_type == WM_T_82583) {
8464 eecd = CSR_READ(sc, WMREG_EECD);
8465
8466 /* Isolate bits 15 & 16 */
8467 eecd = ((eecd >> 15) & 0x03);
8468
8469 /* If both bits are set, device is Flash type */
8470 if (eecd == 0x03)
8471 return 0;
8472 }
8473 return 1;
8474 }
8475
8476 /*
8477 * wm_nvm_validate_checksum
8478 *
8479 * The checksum is defined as the sum of the first 64 (16 bit) words.
8480 */
8481 static int
8482 wm_nvm_validate_checksum(struct wm_softc *sc)
8483 {
8484 uint16_t checksum;
8485 uint16_t eeprom_data;
8486 #ifdef WM_DEBUG
8487 uint16_t csum_wordaddr, valid_checksum;
8488 #endif
8489 int i;
8490
8491 checksum = 0;
8492
8493 /* Don't check for I211 */
8494 if (sc->sc_type == WM_T_I211)
8495 return 0;
8496
8497 #ifdef WM_DEBUG
8498 if (sc->sc_type == WM_T_PCH_LPT) {
8499 csum_wordaddr = NVM_OFF_COMPAT;
8500 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
8501 } else {
8502 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
8503 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
8504 }
8505
8506 /* Dump EEPROM image for debug */
8507 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8508 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8509 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
8510 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
8511 if ((eeprom_data & valid_checksum) == 0) {
8512 DPRINTF(WM_DEBUG_NVM,
8513 ("%s: NVM need to be updated (%04x != %04x)\n",
8514 device_xname(sc->sc_dev), eeprom_data,
8515 valid_checksum));
8516 }
8517 }
8518
8519 if ((wm_debug & WM_DEBUG_NVM) != 0) {
8520 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
8521 for (i = 0; i < NVM_SIZE; i++) {
8522 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8523 printf("XXXX ");
8524 else
8525 printf("%04hx ", eeprom_data);
8526 if (i % 8 == 7)
8527 printf("\n");
8528 }
8529 }
8530
8531 #endif /* WM_DEBUG */
8532
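/* The NVM_SIZE words must sum to NVM_CHECKSUM (0xbaba) mod 2^16. */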
8533 for (i = 0; i < NVM_SIZE; i++) {
8534 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8535 return 1;
8536 checksum += eeprom_data;
8537 }
8538
8539 if (checksum != (uint16_t) NVM_CHECKSUM) {
8540 #ifdef WM_DEBUG
8541 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
8542 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
8543 #endif
8544 }
8545
8546 return 0;
8547 }
8548
8549 /*
8550 * wm_nvm_read:
8551 *
8552 * Read data from the serial EEPROM.
8553 */
8554 static int
8555 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8556 {
8557 int rv;
8558
8559 if (sc->sc_flags & WM_F_EEPROM_INVALID)
8560 return 1;
8561
8562 if (wm_nvm_acquire(sc))
8563 return 1;
8564
8565 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8566 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8567 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
8568 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
8569 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
8570 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
8571 else if (sc->sc_flags & WM_F_EEPROM_SPI)
8572 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
8573 else
8574 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
8575
8576 wm_nvm_release(sc);
8577 return rv;
8578 }
8579
8580 /*
8581 * Hardware semaphores.
8582 * Very complex...
8583 */
8584
8585 static int
8586 wm_get_swsm_semaphore(struct wm_softc *sc)
8587 {
8588 int32_t timeout;
8589 uint32_t swsm;
8590
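/*
 * SWSM implements a two-stage handshake: SMBI serializes host
 * software, and SWESMBI then arbitrates between software and
 * firmware.  SWESMBI is acquired by writing it and reading it
 * back; the write only sticks if firmware is not holding it.
 */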
8591 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8592 /* Get the SW semaphore. */
8593 timeout = sc->sc_nvm_wordsize + 1;
8594 while (timeout) {
8595 swsm = CSR_READ(sc, WMREG_SWSM);
8596
8597 if ((swsm & SWSM_SMBI) == 0)
8598 break;
8599
8600 delay(50);
8601 timeout--;
8602 }
8603
8604 if (timeout == 0) {
8605 aprint_error_dev(sc->sc_dev,
8606 "could not acquire SWSM SMBI\n");
8607 return 1;
8608 }
8609 }
8610
8611 /* Get the FW semaphore. */
8612 timeout = sc->sc_nvm_wordsize + 1;
8613 while (timeout) {
8614 swsm = CSR_READ(sc, WMREG_SWSM);
8615 swsm |= SWSM_SWESMBI;
8616 CSR_WRITE(sc, WMREG_SWSM, swsm);
8617 /* If we managed to set the bit we got the semaphore. */
8618 swsm = CSR_READ(sc, WMREG_SWSM);
8619 if (swsm & SWSM_SWESMBI)
8620 break;
8621
8622 delay(50);
8623 timeout--;
8624 }
8625
8626 if (timeout == 0) {
8627 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
8628 /* Release semaphores */
8629 wm_put_swsm_semaphore(sc);
8630 return 1;
8631 }
8632 return 0;
8633 }
8634
8635 static void
8636 wm_put_swsm_semaphore(struct wm_softc *sc)
8637 {
8638 uint32_t swsm;
8639
8640 swsm = CSR_READ(sc, WMREG_SWSM);
8641 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
8642 CSR_WRITE(sc, WMREG_SWSM, swsm);
8643 }
8644
8645 static int
8646 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8647 {
8648 uint32_t swfw_sync;
8649 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
8650 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
8651 int timeout = 200;
8652
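/*
 * SW_FW_SYNC carries one software bit and one firmware bit per
 * resource; the resource is free only when both are clear.  The
 * register itself is protected by the SWSM semaphore.
 */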
8653 for (timeout = 0; timeout < 200; timeout++) {
8654 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8655 if (wm_get_swsm_semaphore(sc)) {
8656 aprint_error_dev(sc->sc_dev,
8657 "%s: failed to get semaphore\n",
8658 __func__);
8659 return 1;
8660 }
8661 }
8662 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8663 if ((swfw_sync & (swmask | fwmask)) == 0) {
8664 swfw_sync |= swmask;
8665 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8666 if (sc->sc_flags & WM_F_LOCK_SWSM)
8667 wm_put_swsm_semaphore(sc);
8668 return 0;
8669 }
8670 if (sc->sc_flags & WM_F_LOCK_SWSM)
8671 wm_put_swsm_semaphore(sc);
8672 delay(5000);
8673 }
8674 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
8675 device_xname(sc->sc_dev), mask, swfw_sync);
8676 return 1;
8677 }
8678
8679 static void
8680 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8681 {
8682 uint32_t swfw_sync;
8683
8684 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8685 while (wm_get_swsm_semaphore(sc) != 0)
8686 continue;
8687 }
8688 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8689 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
8690 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8691 if (sc->sc_flags & WM_F_LOCK_SWSM)
8692 wm_put_swsm_semaphore(sc);
8693 }
8694
8695 static int
8696 wm_get_swfwhw_semaphore(struct wm_softc *sc)
8697 {
8698 uint32_t ext_ctrl;
8699 int timeout = 200;
8700
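/*
 * The SWFLAG bit in EXTCNFCTR is acquired by setting it and
 * reading it back; it reads back set only once firmware has
 * released it.
 */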
8701 for (timeout = 0; timeout < 200; timeout++) {
8702 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8703 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
8704 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8705
8706 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8707 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
8708 return 0;
8709 delay(5000);
8710 }
8711 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
8712 device_xname(sc->sc_dev), ext_ctrl);
8713 return 1;
8714 }
8715
8716 static void
8717 wm_put_swfwhw_semaphore(struct wm_softc *sc)
8718 {
8719 uint32_t ext_ctrl;
8720 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8721 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
8722 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8723 }
8724
8725 static int
8726 wm_get_hw_semaphore_82573(struct wm_softc *sc)
8727 {
8728 int i = 0;
8729 uint32_t reg;
8730
8731 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8732 do {
8733 CSR_WRITE(sc, WMREG_EXTCNFCTR,
8734 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
8735 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8736 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
8737 break;
8738 delay(2*1000);
8739 i++;
8740 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
8741
8742 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
8743 wm_put_hw_semaphore_82573(sc);
8744 log(LOG_ERR, "%s: Driver can't access the PHY\n",
8745 device_xname(sc->sc_dev));
8746 return -1;
8747 }
8748
8749 return 0;
8750 }
8751
8752 static void
8753 wm_put_hw_semaphore_82573(struct wm_softc *sc)
8754 {
8755 uint32_t reg;
8756
8757 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8758 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
8759 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8760 }
8761
8762 /*
8763 * Management mode and power management related subroutines.
8764 * BMC, AMT, suspend/resume and EEE.
8765 */
8766
8767 static int
8768 wm_check_mng_mode(struct wm_softc *sc)
8769 {
8770 int rv;
8771
8772 switch (sc->sc_type) {
8773 case WM_T_ICH8:
8774 case WM_T_ICH9:
8775 case WM_T_ICH10:
8776 case WM_T_PCH:
8777 case WM_T_PCH2:
8778 case WM_T_PCH_LPT:
8779 rv = wm_check_mng_mode_ich8lan(sc);
8780 break;
8781 case WM_T_82574:
8782 case WM_T_82583:
8783 rv = wm_check_mng_mode_82574(sc);
8784 break;
8785 case WM_T_82571:
8786 case WM_T_82572:
8787 case WM_T_82573:
8788 case WM_T_80003:
8789 rv = wm_check_mng_mode_generic(sc);
8790 break;
8791 default:
8792 /* nothing to do */
8793 rv = 0;
8794 break;
8795 }
8796
8797 return rv;
8798 }
8799
8800 static int
8801 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
8802 {
8803 uint32_t fwsm;
8804
8805 fwsm = CSR_READ(sc, WMREG_FWSM);
8806
8807 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
8808 return 1;
8809
8810 return 0;
8811 }
8812
8813 static int
8814 wm_check_mng_mode_82574(struct wm_softc *sc)
8815 {
8816 uint16_t data;
8817
8818 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
8819
8820 if ((data & NVM_CFG2_MNGM_MASK) != 0)
8821 return 1;
8822
8823 return 0;
8824 }
8825
8826 static int
8827 wm_check_mng_mode_generic(struct wm_softc *sc)
8828 {
8829 uint32_t fwsm;
8830
8831 fwsm = CSR_READ(sc, WMREG_FWSM);
8832
8833 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
8834 return 1;
8835
8836 return 0;
8837 }
8838
8839 static int
8840 wm_enable_mng_pass_thru(struct wm_softc *sc)
8841 {
8842 uint32_t manc, fwsm, factps;
8843
8844 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
8845 return 0;
8846
8847 manc = CSR_READ(sc, WMREG_MANC);
8848
8849 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
8850 device_xname(sc->sc_dev), manc));
8851 if ((manc & MANC_RECV_TCO_EN) == 0)
8852 return 0;
8853
8854 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
8855 fwsm = CSR_READ(sc, WMREG_FWSM);
8856 factps = CSR_READ(sc, WMREG_FACTPS);
8857 if (((factps & FACTPS_MNGCG) == 0)
8858 && ((fwsm & FWSM_MODE_MASK)
8859 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
8860 return 1;
8861 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
8862 uint16_t data;
8863
8864 factps = CSR_READ(sc, WMREG_FACTPS);
8865 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
8866 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
8867 device_xname(sc->sc_dev), factps, data));
8868 if (((factps & FACTPS_MNGCG) == 0)
8869 && ((data & NVM_CFG2_MNGM_MASK)
8870 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
8871 return 1;
8872 } else if (((manc & MANC_SMBUS_EN) != 0)
8873 && ((manc & MANC_ASF_EN) == 0))
8874 return 1;
8875
8876 return 0;
8877 }
8878
8879 static int
8880 wm_check_reset_block(struct wm_softc *sc)
8881 {
8882 uint32_t reg;
8883
8884 switch (sc->sc_type) {
8885 case WM_T_ICH8:
8886 case WM_T_ICH9:
8887 case WM_T_ICH10:
8888 case WM_T_PCH:
8889 case WM_T_PCH2:
8890 case WM_T_PCH_LPT:
8891 reg = CSR_READ(sc, WMREG_FWSM);
8892 if ((reg & FWSM_RSPCIPHY) != 0)
8893 return 0;
8894 else
8895 return -1;
8897 case WM_T_82571:
8898 case WM_T_82572:
8899 case WM_T_82573:
8900 case WM_T_82574:
8901 case WM_T_82583:
8902 case WM_T_80003:
8903 reg = CSR_READ(sc, WMREG_MANC);
8904 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
8905 return -1;
8906 else
8907 return 0;
8909 default:
8910 /* no problem */
8911 break;
8912 }
8913
8914 return 0;
8915 }
8916
8917 static void
8918 wm_get_hw_control(struct wm_softc *sc)
8919 {
8920 uint32_t reg;
8921
8922 switch (sc->sc_type) {
8923 case WM_T_82573:
8924 reg = CSR_READ(sc, WMREG_SWSM);
8925 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
8926 break;
8927 case WM_T_82571:
8928 case WM_T_82572:
8929 case WM_T_82574:
8930 case WM_T_82583:
8931 case WM_T_80003:
8932 case WM_T_ICH8:
8933 case WM_T_ICH9:
8934 case WM_T_ICH10:
8935 case WM_T_PCH:
8936 case WM_T_PCH2:
8937 case WM_T_PCH_LPT:
8938 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8939 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
8940 break;
8941 default:
8942 break;
8943 }
8944 }
8945
8946 static void
8947 wm_release_hw_control(struct wm_softc *sc)
8948 {
8949 uint32_t reg;
8950
8951 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
8952 return;
8953
8954 if (sc->sc_type == WM_T_82573) {
8955 reg = CSR_READ(sc, WMREG_SWSM);
8956 reg &= ~SWSM_DRV_LOAD;
8957 CSR_WRITE(sc, WMREG_SWSM, reg);
8958 } else {
8959 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8960 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
8961 }
8962 }
8963
8964 static void
8965 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
8966 {
8967 uint32_t reg;
8968
8969 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8970
8971 if (on != 0)
8972 reg |= EXTCNFCTR_GATE_PHY_CFG;
8973 else
8974 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
8975
8976 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8977 }
8978
8979 static void
8980 wm_smbustopci(struct wm_softc *sc)
8981 {
8982 uint32_t fwsm;
8983
8984 fwsm = CSR_READ(sc, WMREG_FWSM);
8985 if (((fwsm & FWSM_FW_VALID) == 0)
8986 && ((wm_check_reset_block(sc) == 0))) {
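/*
 * Toggle the LANPHYPC value pin to force the PHY from SMBus
 * to PCIe mode; the long delay gives the PHY time to come up.
 */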
8987 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8988 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8989 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8990 CSR_WRITE_FLUSH(sc);
8991 delay(10);
8992 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8993 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8994 CSR_WRITE_FLUSH(sc);
8995 delay(50*1000);
8996
8997 /*
8998 * Gate automatic PHY configuration by hardware on non-managed
8999 * 82579
9000 */
9001 if (sc->sc_type == WM_T_PCH2)
9002 wm_gate_hw_phy_config_ich8lan(sc, 1);
9003 }
9004 }
9005
9006 static void
9007 wm_init_manageability(struct wm_softc *sc)
9008 {
9009
9010 if (sc->sc_flags & WM_F_HAS_MANAGE) {
9011 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
9012 uint32_t manc = CSR_READ(sc, WMREG_MANC);
9013
9014 /* Disable hardware interception of ARP */
9015 manc &= ~MANC_ARP_EN;
9016
9017 /* Enable receiving management packets to the host */
9018 if (sc->sc_type >= WM_T_82571) {
9019 manc |= MANC_EN_MNG2HOST;
9020 manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
9021 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
9023 }
9024
9025 CSR_WRITE(sc, WMREG_MANC, manc);
9026 }
9027 }
9028
9029 static void
9030 wm_release_manageability(struct wm_softc *sc)
9031 {
9032
9033 if (sc->sc_flags & WM_F_HAS_MANAGE) {
9034 uint32_t manc = CSR_READ(sc, WMREG_MANC);
9035
9036 manc |= MANC_ARP_EN;
9037 if (sc->sc_type >= WM_T_82571)
9038 manc &= ~MANC_EN_MNG2HOST;
9039
9040 CSR_WRITE(sc, WMREG_MANC, manc);
9041 }
9042 }
9043
9044 static void
9045 wm_get_wakeup(struct wm_softc *sc)
9046 {
9047
9048 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
9049 switch (sc->sc_type) {
9050 case WM_T_82573:
9051 case WM_T_82583:
9052 sc->sc_flags |= WM_F_HAS_AMT;
9053 /* FALLTHROUGH */
9054 case WM_T_80003:
9055 case WM_T_82541:
9056 case WM_T_82547:
9057 case WM_T_82571:
9058 case WM_T_82572:
9059 case WM_T_82574:
9060 case WM_T_82575:
9061 case WM_T_82576:
9062 case WM_T_82580:
9063 case WM_T_I350:
9064 case WM_T_I354:
9065 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
9066 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
9067 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9068 break;
9069 case WM_T_ICH8:
9070 case WM_T_ICH9:
9071 case WM_T_ICH10:
9072 case WM_T_PCH:
9073 case WM_T_PCH2:
9074 case WM_T_PCH_LPT:
9075 sc->sc_flags |= WM_F_HAS_AMT;
9076 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9077 break;
9078 default:
9079 break;
9080 }
9081
9082 /* 1: HAS_MANAGE */
9083 if (wm_enable_mng_pass_thru(sc) != 0)
9084 sc->sc_flags |= WM_F_HAS_MANAGE;
9085
9086 #ifdef WM_DEBUG
9087 printf("\n");
9088 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
9089 printf("HAS_AMT,");
9090 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
9091 printf("ARC_SUBSYS_VALID,");
9092 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
9093 printf("ASF_FIRMWARE_PRES,");
9094 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
9095 printf("HAS_MANAGE,");
9096 printf("\n");
9097 #endif
9098 /*
9099 * Note that the WOL flags are set after the EEPROM-related
9100 * state has been reset.
9101 */
9102 }
9103
9104 #ifdef WM_WOL
9105 /* WOL in the newer chipset interfaces (pchlan) */
9106 static void
9107 wm_enable_phy_wakeup(struct wm_softc *sc)
9108 {
9109 #if 0
9110 uint16_t preg;
9111
9112 /* Copy MAC RARs to PHY RARs */
9113
9114 /* Copy MAC MTA to PHY MTA */
9115
9116 /* Configure PHY Rx Control register */
9117
9118 /* Enable PHY wakeup in MAC register */
9119
9120 /* Configure and enable PHY wakeup in PHY registers */
9121
9122 /* Activate PHY wakeup */
9123
9124 /* XXX */
9125 #endif
9126 }
9127
9128 /* Power down workaround on D3 */
9129 static void
9130 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
9131 {
9132 uint32_t reg;
9133 int i;
9134
9135 for (i = 0; i < 2; i++) {
9136 /* Disable link */
9137 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9138 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9139 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9140
9141 /*
9142 * Call gig speed drop workaround on Gig disable before
9143 * accessing any PHY registers
9144 */
9145 if (sc->sc_type == WM_T_ICH8)
9146 wm_gig_downshift_workaround_ich8lan(sc);
9147
9148 /* Write VR power-down enable */
9149 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9150 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9151 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
9152 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
9153
9154 /* Read it back and test */
9155 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9156 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9157 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
9158 break;
9159
9160 /* Issue PHY reset and repeat at most one more time */
9161 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9162 }
9163 }
9164
9165 static void
9166 wm_enable_wakeup(struct wm_softc *sc)
9167 {
9168 uint32_t reg, pmreg;
9169 pcireg_t pmode;
9170
9171 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
9172 &pmreg, NULL) == 0)
9173 return;
9174
9175 /* Advertise the wakeup capability */
9176 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
9177 | CTRL_SWDPIN(3));
9178 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
9179
9180 /* ICH workaround */
9181 switch (sc->sc_type) {
9182 case WM_T_ICH8:
9183 case WM_T_ICH9:
9184 case WM_T_ICH10:
9185 case WM_T_PCH:
9186 case WM_T_PCH2:
9187 case WM_T_PCH_LPT:
9188 /* Disable gig during WOL */
9189 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9190 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
9191 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9192 if (sc->sc_type == WM_T_PCH)
9193 wm_gmii_reset(sc);
9194
9195 /* Power down workaround */
9196 if (sc->sc_phytype == WMPHY_82577) {
9197 struct mii_softc *child;
9198
9199 /* Assume that the PHY is copper */
9200 child = LIST_FIRST(&sc->sc_mii.mii_phys);
9201 if (child->mii_mpd_rev <= 2)
9202 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
9203 (768 << 5) | 25, 0x0444); /* magic num */
9204 }
9205 break;
9206 default:
9207 break;
9208 }
9209
9210 /* Keep the laser running on fiber adapters */
9211 if (((sc->sc_mediatype & WMP_F_FIBER) != 0)
9212 || (sc->sc_mediatype & WMP_F_SERDES) != 0) {
9213 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9214 reg |= CTRL_EXT_SWDPIN(3);
9215 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9216 }
9217
9218 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
9219 #if 0 /* for the multicast packet */
9220 reg |= WUFC_MC;
9221 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
9222 #endif
9223
9224 if (sc->sc_type == WM_T_PCH) {
9225 wm_enable_phy_wakeup(sc);
9226 } else {
9227 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
9228 CSR_WRITE(sc, WMREG_WUFC, reg);
9229 }
9230
9231 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9232 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9233 || (sc->sc_type == WM_T_PCH2))
9234 && (sc->sc_phytype == WMPHY_IGP_3))
9235 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
9236
9237 /* Request PME */
9238 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
9239 #if 0
9240 /* Disable WOL */
9241 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
9242 #else
9243 /* For WOL */
9244 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
9245 #endif
9246 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
9247 }
9248 #endif /* WM_WOL */
9249
9250 /* EEE */
9251
9252 static void
9253 wm_set_eee_i350(struct wm_softc *sc)
9254 {
9255 uint32_t ipcnfg, eeer;
9256
9257 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
9258 eeer = CSR_READ(sc, WMREG_EEER);
9259
9260 if ((sc->sc_flags & WM_F_EEE) != 0) {
9261 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9262 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
9263 | EEER_LPI_FC);
9264 } else {
9265 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9266 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
9267 | EEER_LPI_FC);
9268 }
9269
9270 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
9271 CSR_WRITE(sc, WMREG_EEER, eeer);
9272 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
9273 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
9274 }
9275
9276 /*
9277 * Workarounds (mainly PHY related).
9278 * Basically, PHY's workarounds are in the PHY drivers.
9279 */
9280
9281 /* Work-around for 82566 Kumeran PCS lock loss */
9282 static void
9283 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
9284 {
9285 int miistatus, active, i;
9286 int reg;
9287
9288 miistatus = sc->sc_mii.mii_media_status;
9289
9290 /* If the link is not up, do nothing */
9291 if ((miistatus & IFM_ACTIVE) == 0)
9292 return;
9293
9294 active = sc->sc_mii.mii_media_active;
9295
9296 /* Nothing to do if the link is other than 1Gbps */
9297 if (IFM_SUBTYPE(active) != IFM_1000_T)
9298 return;
9299
9300 for (i = 0; i < 10; i++) {
9301 /* read twice */
9302 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9303 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9304 if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
9305 goto out; /* GOOD! */
9306
9307 /* Reset the PHY */
9308 wm_gmii_reset(sc);
9309 delay(5*1000);
9310 }
9311
9312 /* Disable GigE link negotiation */
9313 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9314 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9315 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9316
9317 /*
9318 * Call gig speed drop workaround on Gig disable before accessing
9319 * any PHY registers.
9320 */
9321 wm_gig_downshift_workaround_ich8lan(sc);
9322
9323 out:
9324 return;
9325 }
9326
9327 /* WOL from S5 stops working */
9328 static void
9329 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
9330 {
9331 uint16_t kmrn_reg;
9332
9333 /* Only for igp3 */
9334 if (sc->sc_phytype == WMPHY_IGP_3) {
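/*
 * Set and then clear the Kumeran near-end loopback bit
 * (KUMCTRLSTA_DIAG_NELPBK); this pulse is the workaround
 * sequence for IGP3 PHYs.
 */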
9335 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
9336 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
9337 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9338 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
9339 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9340 }
9341 }
9342
9343 /*
9344 * Workaround for pch's PHYs
9345 * XXX should be moved to new PHY driver?
9346 */
9347 static void
9348 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
9349 {
9350 if (sc->sc_phytype == WMPHY_82577)
9351 wm_set_mdio_slow_mode_hv(sc);
9352
9353 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
9354
9355 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
9356
9357 /* 82578 */
9358 if (sc->sc_phytype == WMPHY_82578) {
9359 /* PCH rev. < 3 */
9360 if (sc->sc_rev < 3) {
9361 /* XXX 6 bit shift? Why? Is it page2? */
9362 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
9363 0x66c0);
9364 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
9365 0xffff);
9366 }
9367
9368 /* XXX phy rev. < 2 */
9369 }
9370
9371 /* Select page 0 */
9372
9373 /* XXX acquire semaphore */
9374 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
9375 /* XXX release semaphore */
9376
9377 /*
9378 * Configure the K1 Si workaround during phy reset assuming there is
9379 * link so that it disables K1 if link is in 1Gbps.
9380 */
9381 wm_k1_gig_workaround_hv(sc, 1);
9382 }
9383
9384 static void
9385 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
9386 {
9387
9388 wm_set_mdio_slow_mode_hv(sc);
9389 }
9390
9391 static void
9392 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
9393 {
9394 int k1_enable = sc->sc_nvm_k1_enabled;
9395
9396 /* XXX acquire semaphore */
9397
9398 if (link) {
9399 k1_enable = 0;
9400
9401 /* Link stall fix for link up */
9402 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
9403 } else {
9404 /* Link stall fix for link down */
9405 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
9406 }
9407
9408 wm_configure_k1_ich8lan(sc, k1_enable);
9409
9410 /* XXX release semaphore */
9411 }
9412
9413 static void
9414 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
9415 {
9416 uint32_t reg;
9417
9418 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
9419 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
9420 reg | HV_KMRN_MDIO_SLOW);
9421 }
9422
9423 static void
9424 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
9425 {
9426 uint32_t ctrl, ctrl_ext, tmp;
9427 uint16_t kmrn_reg;
9428
9429 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
9430
9431 if (k1_enable)
9432 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
9433 else
9434 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
9435
9436 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
9437
9438 delay(20);
9439
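/*
 * Briefly force the MAC speed with CTRL_FRCSPD plus the
 * CTRL_EXT_SPD_BYPS bypass, then restore CTRL and CTRL_EXT;
 * this appears to be needed for the K1 change to take effect.
 */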
9440 ctrl = CSR_READ(sc, WMREG_CTRL);
9441 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9442
9443 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
9444 tmp |= CTRL_FRCSPD;
9445
9446 CSR_WRITE(sc, WMREG_CTRL, tmp);
9447 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
9448 CSR_WRITE_FLUSH(sc);
9449 delay(20);
9450
9451 CSR_WRITE(sc, WMREG_CTRL, ctrl);
9452 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9453 CSR_WRITE_FLUSH(sc);
9454 delay(20);
9455 }
9456
9457 /* Special case - the 82575 needs manual init ... */
9458 static void
9459 wm_reset_init_script_82575(struct wm_softc *sc)
9460 {
9461 /*
9462 * Remark: this is untested code - we have no board without EEPROM.
9463 * Same setup as mentioned in the FreeBSD driver for the i82575.
9464 */
9465
9466 /* SerDes configuration via SERDESCTRL */
9467 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
9468 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
9469 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
9470 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
9471
9472 /* CCM configuration via CCMCTL register */
9473 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
9474 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
9475
9476 /* PCIe lanes configuration */
9477 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
9478 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
9479 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
9480 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
9481
9482 /* PCIe PLL Configuration */
9483 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
9484 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
9485 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
9486 }
9487