/*	$NetBSD: if_wm.c,v 1.302 2014/10/07 07:04:35 ozaki-r Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- EEE (Energy Efficient Ethernet)
 *	- MSI/MSI-X
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */
82
83 #include <sys/cdefs.h>
84 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.302 2014/10/07 07:04:35 ozaki-r Exp $");
85
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/callout.h>
89 #include <sys/mbuf.h>
90 #include <sys/malloc.h>
91 #include <sys/kernel.h>
92 #include <sys/socket.h>
93 #include <sys/ioctl.h>
94 #include <sys/errno.h>
95 #include <sys/device.h>
96 #include <sys/queue.h>
97 #include <sys/syslog.h>
98
99 #include <sys/rnd.h>
100
101 #include <net/if.h>
102 #include <net/if_dl.h>
103 #include <net/if_media.h>
104 #include <net/if_ether.h>
105
106 #include <net/bpf.h>
107
108 #include <netinet/in.h> /* XXX for struct ip */
109 #include <netinet/in_systm.h> /* XXX for struct ip */
110 #include <netinet/ip.h> /* XXX for struct ip */
111 #include <netinet/ip6.h> /* XXX for struct ip6_hdr */
112 #include <netinet/tcp.h> /* XXX for struct tcphdr */
113
114 #include <sys/bus.h>
115 #include <sys/intr.h>
116 #include <machine/endian.h>
117
118 #include <dev/mii/mii.h>
119 #include <dev/mii/miivar.h>
120 #include <dev/mii/miidevs.h>
121 #include <dev/mii/mii_bitbang.h>
122 #include <dev/mii/ikphyreg.h>
123 #include <dev/mii/igphyreg.h>
124 #include <dev/mii/igphyvar.h>
125 #include <dev/mii/inbmphyreg.h>
126
127 #include <dev/pci/pcireg.h>
128 #include <dev/pci/pcivar.h>
129 #include <dev/pci/pcidevs.h>
130
131 #include <dev/pci/if_wmreg.h>
132 #include <dev/pci/if_wmvar.h>
133
134 #ifdef WM_DEBUG
135 #define WM_DEBUG_LINK 0x01
136 #define WM_DEBUG_TX 0x02
137 #define WM_DEBUG_RX 0x04
138 #define WM_DEBUG_GMII 0x08
139 #define WM_DEBUG_MANAGE 0x10
140 #define WM_DEBUG_NVM 0x20
141 int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
142 | WM_DEBUG_MANAGE | WM_DEBUG_NVM;
143
144 #define DPRINTF(x, y) if (wm_debug & (x)) printf y
145 #else
146 #define DPRINTF(x, y) /* nothing */
147 #endif /* WM_DEBUG */
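
/*
 * Usage sketch for DPRINTF() (illustrative only): since the macro pastes
 * its second argument directly after printf, the printf-style arguments
 * need their own parentheses, e.g.
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n",
 *	    device_xname(sc->sc_dev)));
 *
 * which prints only when the WM_DEBUG_LINK bit is set in wm_debug.
 */
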
148
149 #ifdef NET_MPSAFE
150 #define WM_MPSAFE 1
151 #endif
152
153 /*
154 * Transmit descriptor list size. Due to errata, we can only have
155 * 256 hardware descriptors in the ring on < 82544, but we use 4096
156 * on >= 82544. We tell the upper layers that they can queue a lot
157 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
158 * of them at a time.
159 *
160 * We allow up to 256 (!) DMA segments per packet. Pathological packet
161 * chains containing many small mbufs have been observed in zero-copy
162 * situations with jumbo frames.
163 */
164 #define WM_NTXSEGS 256
165 #define WM_IFQUEUELEN 256
166 #define WM_TXQUEUELEN_MAX 64
167 #define WM_TXQUEUELEN_MAX_82547 16
168 #define WM_TXQUEUELEN(sc) ((sc)->sc_txnum)
169 #define WM_TXQUEUELEN_MASK(sc) (WM_TXQUEUELEN(sc) - 1)
170 #define WM_TXQUEUE_GC(sc) (WM_TXQUEUELEN(sc) / 8)
171 #define WM_NTXDESC_82542 256
172 #define WM_NTXDESC_82544 4096
173 #define WM_NTXDESC(sc) ((sc)->sc_ntxdesc)
174 #define WM_NTXDESC_MASK(sc) (WM_NTXDESC(sc) - 1)
175 #define WM_TXDESCSIZE(sc) (WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
176 #define WM_NEXTTX(sc, x) (((x) + 1) & WM_NTXDESC_MASK(sc))
177 #define WM_NEXTTXS(sc, x) (((x) + 1) & WM_TXQUEUELEN_MASK(sc))
178
179 #define WM_MAXTXDMA (2 * round_page(IP_MAXPACKET)) /* for TSO */
180
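/*
 * Worked example (illustrative): because the ring sizes are powers of
 * two, the index macros above wrap with a mask instead of a branch.
 * With WM_NTXDESC(sc) == 4096 the mask is 0xfff, so
 *
 *	WM_NEXTTX(sc, 4095) == (4095 + 1) & 0xfff == 0
 *
 * This is why sc_ntxdesc and sc_txnum below must be powers of two.
 */
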
/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
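
/*
 * Worked example (illustrative): WM_CDTXOFF(5) expands to
 * offsetof(struct wm_control_data_82544, wdc_u.wcdu_txdescs[5]),
 * the byte offset of Tx descriptor 5 within the control-data clump;
 * WM_CDTXADDR() below adds that offset to the clump's bus address.
 */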

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segments */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define WM_TX_LOCK(_sc)		if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define WM_RX_LOCK(_sc)		if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
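
/*
 * Usage sketch (illustrative, with a hypothetical caller): the locked
 * entry points below are invoked under these macros, e.g.
 *
 *	WM_TX_LOCK(sc);
 *	if (!sc->sc_stopping)
 *		wm_start_locked(ifp);
 *	WM_TX_UNLOCK(sc);
 *
 * When a lock pointer is NULL the macros degrade to no-ops, so the same
 * code serves both the MP-safe and the traditional configuration.
 */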

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
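
/*
 * Behavior sketch (illustrative): after WM_RXCHAIN_RESET(sc), linking
 * mbufs m1 and then m2 leaves sc_rxhead == m1, m1->m_next == m2, and
 * sc_rxtailp pointing at m2->m_next; that is, sc_rxtailp always
 * addresses the m_next slot that the next Rx fragment will fill.
 */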

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
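
/*
 * Worked example (illustrative): for a 64-bit bus_addr_t of
 * 0x0000000123456789, WM_CDTXADDR_LO() yields 0x23456789 and
 * WM_CDTXADDR_HI() yields 0x1, matching the chip's split low/high
 * descriptor base address register pairs.
 */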

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
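
/*
 * Worked example (illustrative): syncing 8 descriptors starting at
 * index 4094 of a 4096-entry ring issues two bus_dmamap_sync() calls,
 * one covering descriptors 4094-4095 and one covering 0-5, since a
 * single call cannot wrap past the end of the ring.
 */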

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_mediainit(struct wm_softc *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* For use with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		0x00
#define	WMP_F_FIBER		0x01
#define	WMP_F_COPPER		0x02
#define	WMP_F_SERDES		0x03 /* Internal SERDES */
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Connection",
	  WM_T_I354,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
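
/*
 * Note (descriptive): wm_io_read()/wm_io_write() go through the chip's
 * indirect I/O window: a write to offset 0 of the I/O BAR selects a CSR
 * address, and offset 4 then reads or writes its value.  As the attach
 * code explains below, this path exists only to work around bugs in
 * some chip versions; normal operation uses the memory-mapped
 * CSR_READ()/CSR_WRITE() macros above.
 */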

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
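
/*
 * Usage sketch (illustrative): WM_INIT_RXDESC() above calls
 *
 *	wm_set_dma_addr(&__rxd->wrx_addr,
 *	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak);
 *
 * so one source path handles both 32- and 64-bit bus_addr_t; on 32-bit
 * platforms the high word is simply forced to zero.
 */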

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found an I/O BAR.  Note that the 82580 (and
			 * possibly newer) chips have no I/O BAR; that is
			 * fine, because those chips don't have the bugs
			 * that require the I/O-mapped workaround.
			 *
			 * The i8254x doesn't apparently respond when the
			 * I/O BAR is 0, which looks somewhat like it's not
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}
1445
1446 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1447 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1448 preg |= PCI_COMMAND_MASTER_ENABLE;
1449 if (sc->sc_type < WM_T_82542_2_1)
1450 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1451 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1452
1453 /* power up chip */
1454 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1455 NULL)) && error != EOPNOTSUPP) {
1456 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1457 return;
1458 }
1459
1460 /*
1461 * Map and establish our interrupt.
1462 */
1463 if (pci_intr_map(pa, &ih)) {
1464 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1465 return;
1466 }
1467 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
1468 #ifdef WM_MPSAFE
1469 pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
1470 #endif
1471 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
1472 if (sc->sc_ih == NULL) {
1473 aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1474 if (intrstr != NULL)
1475 aprint_error(" at %s", intrstr);
1476 aprint_error("\n");
1477 return;
1478 }
1479 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1480
1481 /*
1482 * Check the function ID (unit number of the chip).
1483 */
1484 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1485 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1486 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1487 || (sc->sc_type == WM_T_82580)
1488 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1489 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1490 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1491 else
1492 sc->sc_funcid = 0;
1493
1494 /*
1495 * Determine a few things about the bus we're connected to.
1496 */
1497 if (sc->sc_type < WM_T_82543) {
1498 /* We don't really know the bus characteristics here. */
1499 sc->sc_bus_speed = 33;
1500 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1501 /*
1502 * CSA (Communication Streaming Architecture) is about as fast
1503 * a 32-bit 66MHz PCI Bus.
1504 */
1505 sc->sc_flags |= WM_F_CSA;
1506 sc->sc_bus_speed = 66;
1507 aprint_verbose_dev(sc->sc_dev,
1508 "Communication Streaming Architecture\n");
1509 if (sc->sc_type == WM_T_82547) {
1510 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1511 callout_setfunc(&sc->sc_txfifo_ch,
1512 wm_82547_txfifo_stall, sc);
1513 aprint_verbose_dev(sc->sc_dev,
1514 "using 82547 Tx FIFO stall work-around\n");
1515 }
1516 } else if (sc->sc_type >= WM_T_82571) {
1517 sc->sc_flags |= WM_F_PCIE;
1518 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1519 && (sc->sc_type != WM_T_ICH10)
1520 && (sc->sc_type != WM_T_PCH)
1521 && (sc->sc_type != WM_T_PCH2)
1522 && (sc->sc_type != WM_T_PCH_LPT)) {
1523 /* ICH* and PCH* have no PCIe capability registers */
1524 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1525 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1526 NULL) == 0)
1527 aprint_error_dev(sc->sc_dev,
1528 "unable to find PCIe capability\n");
1529 }
1530 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1531 } else {
1532 reg = CSR_READ(sc, WMREG_STATUS);
1533 if (reg & STATUS_BUS64)
1534 sc->sc_flags |= WM_F_BUS64;
1535 if ((reg & STATUS_PCIX_MODE) != 0) {
1536 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1537
1538 sc->sc_flags |= WM_F_PCIX;
1539 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1540 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1541 aprint_error_dev(sc->sc_dev,
1542 "unable to find PCIX capability\n");
1543 else if (sc->sc_type != WM_T_82545_3 &&
1544 sc->sc_type != WM_T_82546_3) {
1545 /*
1546 * Work around a problem caused by the BIOS
1547 * setting the max memory read byte count
1548 * incorrectly.
1549 */
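				/*
				 * The MMRBC field encodes a byte count of
				 * 512 << field (e.g. field 2 -> 2048 bytes),
				 * so clamp the command register's field to
				 * the maximum advertised by the status
				 * register.
				 */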
1550 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1551 sc->sc_pcixe_capoff + PCIX_CMD);
1552 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1553 sc->sc_pcixe_capoff + PCIX_STATUS);
1554
1555 bytecnt =
1556 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1557 PCIX_CMD_BYTECNT_SHIFT;
1558 maxb =
1559 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1560 PCIX_STATUS_MAXB_SHIFT;
1561 if (bytecnt > maxb) {
1562 aprint_verbose_dev(sc->sc_dev,
1563 "resetting PCI-X MMRBC: %d -> %d\n",
1564 512 << bytecnt, 512 << maxb);
1565 pcix_cmd = (pcix_cmd &
1566 ~PCIX_CMD_BYTECNT_MASK) |
1567 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1568 pci_conf_write(pa->pa_pc, pa->pa_tag,
1569 sc->sc_pcixe_capoff + PCIX_CMD,
1570 pcix_cmd);
1571 }
1572 }
1573 }
1574 /*
1575 * The quad port adapter is special; it has a PCIX-PCIX
1576 * bridge on the board, and can run the secondary bus at
1577 * a higher speed.
1578 */
1579 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1580 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1581 : 66;
1582 } else if (sc->sc_flags & WM_F_PCIX) {
1583 switch (reg & STATUS_PCIXSPD_MASK) {
1584 case STATUS_PCIXSPD_50_66:
1585 sc->sc_bus_speed = 66;
1586 break;
1587 case STATUS_PCIXSPD_66_100:
1588 sc->sc_bus_speed = 100;
1589 break;
1590 case STATUS_PCIXSPD_100_133:
1591 sc->sc_bus_speed = 133;
1592 break;
1593 default:
1594 aprint_error_dev(sc->sc_dev,
1595 "unknown PCIXSPD %d; assuming 66MHz\n",
1596 reg & STATUS_PCIXSPD_MASK);
1597 sc->sc_bus_speed = 66;
1598 break;
1599 }
1600 } else
1601 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1602 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1603 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1604 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1605 }
1606
1607 /*
1608 * Allocate the control data structures, and create and load the
1609 * DMA map for it.
1610 *
1611 * NOTE: All Tx descriptors must be in the same 4G segment of
1612 * memory. So must Rx descriptors. We simplify by allocating
1613 * both sets within the same 4G segment.
1614 */
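	/*
	 * The 0x100000000 boundary argument to bus_dmamem_alloc() below
	 * requests memory that never crosses a 4GB line, which is how
	 * the constraint above is enforced.
	 */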
1615 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1616 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1617 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1618 sizeof(struct wm_control_data_82542) :
1619 sizeof(struct wm_control_data_82544);
1620 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1621 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1622 &sc->sc_cd_rseg, 0)) != 0) {
1623 aprint_error_dev(sc->sc_dev,
1624 "unable to allocate control data, error = %d\n",
1625 error);
1626 goto fail_0;
1627 }
1628
1629 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1630 sc->sc_cd_rseg, sc->sc_cd_size,
1631 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1632 aprint_error_dev(sc->sc_dev,
1633 "unable to map control data, error = %d\n", error);
1634 goto fail_1;
1635 }
1636
1637 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1638 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1639 aprint_error_dev(sc->sc_dev,
1640 "unable to create control data DMA map, error = %d\n",
1641 error);
1642 goto fail_2;
1643 }
1644
1645 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1646 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1647 aprint_error_dev(sc->sc_dev,
1648 "unable to load control data DMA map, error = %d\n",
1649 error);
1650 goto fail_3;
1651 }
1652
1653 /* Create the transmit buffer DMA maps. */
1654 WM_TXQUEUELEN(sc) =
1655 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1656 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1657 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1658 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1659 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1660 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1661 aprint_error_dev(sc->sc_dev,
1662 "unable to create Tx DMA map %d, error = %d\n",
1663 i, error);
1664 goto fail_4;
1665 }
1666 }
1667
1668 /* Create the receive buffer DMA maps. */
1669 for (i = 0; i < WM_NRXDESC; i++) {
1670 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1671 MCLBYTES, 0, 0,
1672 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1673 aprint_error_dev(sc->sc_dev,
1674 "unable to create Rx DMA map %d error = %d\n",
1675 i, error);
1676 goto fail_5;
1677 }
1678 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1679 }
1680
1681 /* clear interesting stat counters */
1682 CSR_READ(sc, WMREG_COLC);
1683 CSR_READ(sc, WMREG_RXERRC);
1684
	/* Switch PHY control from SMBus to PCIe */
1686 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1687 || (sc->sc_type == WM_T_PCH_LPT))
1688 wm_smbustopci(sc);
1689
1690 /* Reset the chip to a known state. */
1691 wm_reset(sc);
1692
1693 /* Get some information about the EEPROM. */
1694 switch (sc->sc_type) {
1695 case WM_T_82542_2_0:
1696 case WM_T_82542_2_1:
1697 case WM_T_82543:
1698 case WM_T_82544:
1699 /* Microwire */
1700 sc->sc_nvm_wordsize = 64;
1701 sc->sc_nvm_addrbits = 6;
1702 break;
1703 case WM_T_82540:
1704 case WM_T_82545:
1705 case WM_T_82545_3:
1706 case WM_T_82546:
1707 case WM_T_82546_3:
1708 /* Microwire */
1709 reg = CSR_READ(sc, WMREG_EECD);
1710 if (reg & EECD_EE_SIZE) {
1711 sc->sc_nvm_wordsize = 256;
1712 sc->sc_nvm_addrbits = 8;
1713 } else {
1714 sc->sc_nvm_wordsize = 64;
1715 sc->sc_nvm_addrbits = 6;
1716 }
1717 sc->sc_flags |= WM_F_LOCK_EECD;
1718 break;
1719 case WM_T_82541:
1720 case WM_T_82541_2:
1721 case WM_T_82547:
1722 case WM_T_82547_2:
1723 reg = CSR_READ(sc, WMREG_EECD);
1724 if (reg & EECD_EE_TYPE) {
1725 /* SPI */
1726 sc->sc_flags |= WM_F_EEPROM_SPI;
1727 wm_nvm_set_addrbits_size_eecd(sc);
1728 } else {
1729 /* Microwire */
1730 if ((reg & EECD_EE_ABITS) != 0) {
1731 sc->sc_nvm_wordsize = 256;
1732 sc->sc_nvm_addrbits = 8;
1733 } else {
1734 sc->sc_nvm_wordsize = 64;
1735 sc->sc_nvm_addrbits = 6;
1736 }
1737 }
1738 sc->sc_flags |= WM_F_LOCK_EECD;
1739 break;
1740 case WM_T_82571:
1741 case WM_T_82572:
1742 /* SPI */
1743 sc->sc_flags |= WM_F_EEPROM_SPI;
1744 wm_nvm_set_addrbits_size_eecd(sc);
1745 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1746 break;
1747 case WM_T_82573:
1748 sc->sc_flags |= WM_F_LOCK_SWSM;
1749 /* FALLTHROUGH */
1750 case WM_T_82574:
1751 case WM_T_82583:
1752 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1753 sc->sc_flags |= WM_F_EEPROM_FLASH;
1754 sc->sc_nvm_wordsize = 2048;
1755 } else {
1756 /* SPI */
1757 sc->sc_flags |= WM_F_EEPROM_SPI;
1758 wm_nvm_set_addrbits_size_eecd(sc);
1759 }
1760 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1761 break;
1762 case WM_T_82575:
1763 case WM_T_82576:
1764 case WM_T_82580:
1765 case WM_T_I350:
1766 case WM_T_I354:
1767 case WM_T_80003:
1768 /* SPI */
1769 sc->sc_flags |= WM_F_EEPROM_SPI;
1770 wm_nvm_set_addrbits_size_eecd(sc);
1771 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1772 | WM_F_LOCK_SWSM;
1773 break;
1774 case WM_T_ICH8:
1775 case WM_T_ICH9:
1776 case WM_T_ICH10:
1777 case WM_T_PCH:
1778 case WM_T_PCH2:
1779 case WM_T_PCH_LPT:
1780 /* FLASH */
1781 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1782 sc->sc_nvm_wordsize = 2048;
1783 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1784 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1785 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1786 aprint_error_dev(sc->sc_dev,
1787 "can't map FLASH registers\n");
1788 goto fail_5;
1789 }
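		/*
		 * GFPREG gives the base and limit of the gigabit flash
		 * region in flash sectors (base in the low half, limit
		 * above bit 16).  The region is split into two NVM
		 * banks, hence the halving below; the remaining
		 * division by sizeof(uint16_t) converts the bank size
		 * from bytes into 16-bit NVM words.
		 */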
1790 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1791 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1792 ICH_FLASH_SECTOR_SIZE;
1793 sc->sc_ich8_flash_bank_size =
1794 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1795 sc->sc_ich8_flash_bank_size -=
1796 (reg & ICH_GFPREG_BASE_MASK);
1797 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1798 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1799 break;
1800 case WM_T_I210:
1801 case WM_T_I211:
1802 wm_nvm_set_addrbits_size_eecd(sc);
1803 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1804 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1805 break;
1806 default:
1807 break;
1808 }
1809
1810 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1811 switch (sc->sc_type) {
1812 case WM_T_82571:
1813 case WM_T_82572:
1814 reg = CSR_READ(sc, WMREG_SWSM2);
1815 if ((reg & SWSM2_LOCK) != 0) {
1816 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1817 force_clear_smbi = true;
1818 } else
1819 force_clear_smbi = false;
1820 break;
1821 case WM_T_82573:
1822 case WM_T_82574:
1823 case WM_T_82583:
1824 force_clear_smbi = true;
1825 break;
1826 default:
1827 force_clear_smbi = false;
1828 break;
1829 }
1830 if (force_clear_smbi) {
1831 reg = CSR_READ(sc, WMREG_SWSM);
1832 if ((reg & SWSM_SMBI) != 0)
1833 aprint_error_dev(sc->sc_dev,
1834 "Please update the Bootagent\n");
1835 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1836 }
1837
	/*
	 * Defer printing the EEPROM type until after verifying the
	 * checksum.  This allows the EEPROM type to be printed correctly
	 * in the case that no EEPROM is attached.
	 */
1843 /*
1844 * Validate the EEPROM checksum. If the checksum fails, flag
1845 * this for later, so we can fail future reads from the EEPROM.
1846 */
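	/*
	 * (Conventionally the validation sums all NVM words and compares
	 * the result with Intel's 0xBABA signature; see
	 * wm_nvm_validate_checksum().)
	 */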
1847 if (wm_nvm_validate_checksum(sc)) {
		/*
		 * Check a second time: some PCI-e parts fail the first
		 * validation because the link is in a sleep state.
		 */
1852 if (wm_nvm_validate_checksum(sc))
1853 sc->sc_flags |= WM_F_EEPROM_INVALID;
1854 }
1855
1856 /* Set device properties (macflags) */
1857 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1858
1859 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1860 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1861 else {
1862 aprint_verbose_dev(sc->sc_dev, "%u words ",
1863 sc->sc_nvm_wordsize);
1864 if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1865 aprint_verbose("FLASH(HW)\n");
1866 } else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1867 aprint_verbose("FLASH\n");
1868 } else {
1869 if (sc->sc_flags & WM_F_EEPROM_SPI)
1870 eetype = "SPI";
1871 else
1872 eetype = "MicroWire";
1873 aprint_verbose("(%d address bits) %s EEPROM\n",
1874 sc->sc_nvm_addrbits, eetype);
1875 }
1876 }
1877
1878 switch (sc->sc_type) {
1879 case WM_T_82571:
1880 case WM_T_82572:
1881 case WM_T_82573:
1882 case WM_T_82574:
1883 case WM_T_82583:
1884 case WM_T_80003:
1885 case WM_T_ICH8:
1886 case WM_T_ICH9:
1887 case WM_T_ICH10:
1888 case WM_T_PCH:
1889 case WM_T_PCH2:
1890 case WM_T_PCH_LPT:
1891 if (wm_check_mng_mode(sc) != 0)
1892 wm_get_hw_control(sc);
1893 break;
1894 default:
1895 break;
1896 }
1897 wm_get_wakeup(sc);
	/*
	 * Read the Ethernet address from the EEPROM, unless it was
	 * already found in the device properties.
	 */
1902 ea = prop_dictionary_get(dict, "mac-address");
1903 if (ea != NULL) {
1904 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1905 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1906 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1907 } else {
1908 if (wm_read_mac_addr(sc, enaddr) != 0) {
1909 aprint_error_dev(sc->sc_dev,
1910 "unable to read Ethernet address\n");
1911 goto fail_5;
1912 }
1913 }
1914
1915 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1916 ether_sprintf(enaddr));
1917
1918 /*
1919 * Read the config info from the EEPROM, and set up various
1920 * bits in the control registers based on their contents.
1921 */
1922 pn = prop_dictionary_get(dict, "i82543-cfg1");
1923 if (pn != NULL) {
1924 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1925 cfg1 = (uint16_t) prop_number_integer_value(pn);
1926 } else {
1927 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
1928 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1929 goto fail_5;
1930 }
1931 }
1932
1933 pn = prop_dictionary_get(dict, "i82543-cfg2");
1934 if (pn != NULL) {
1935 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1936 cfg2 = (uint16_t) prop_number_integer_value(pn);
1937 } else {
1938 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
1939 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1940 goto fail_5;
1941 }
1942 }
1943
1944 /* check for WM_F_WOL */
1945 switch (sc->sc_type) {
1946 case WM_T_82542_2_0:
1947 case WM_T_82542_2_1:
1948 case WM_T_82543:
1949 /* dummy? */
1950 eeprom_data = 0;
1951 apme_mask = NVM_CFG3_APME;
1952 break;
1953 case WM_T_82544:
1954 apme_mask = NVM_CFG2_82544_APM_EN;
1955 eeprom_data = cfg2;
1956 break;
1957 case WM_T_82546:
1958 case WM_T_82546_3:
1959 case WM_T_82571:
1960 case WM_T_82572:
1961 case WM_T_82573:
1962 case WM_T_82574:
1963 case WM_T_82583:
1964 case WM_T_80003:
1965 default:
1966 apme_mask = NVM_CFG3_APME;
1967 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
1968 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
1969 break;
1970 case WM_T_82575:
1971 case WM_T_82576:
1972 case WM_T_82580:
1973 case WM_T_I350:
1974 case WM_T_I354: /* XXX ok? */
1975 case WM_T_ICH8:
1976 case WM_T_ICH9:
1977 case WM_T_ICH10:
1978 case WM_T_PCH:
1979 case WM_T_PCH2:
1980 case WM_T_PCH_LPT:
1981 /* XXX The funcid should be checked on some devices */
1982 apme_mask = WUC_APME;
1983 eeprom_data = CSR_READ(sc, WMREG_WUC);
1984 break;
1985 }
1986
	/* Check for the WM_F_WOL flag now that the EEPROM values are set up */
1988 if ((eeprom_data & apme_mask) != 0)
1989 sc->sc_flags |= WM_F_WOL;
1990 #ifdef WM_DEBUG
1991 if ((sc->sc_flags & WM_F_WOL) != 0)
1992 printf("WOL\n");
1993 #endif
1994
	/*
	 * XXX need special handling for some multiple-port cards
	 * to disable a particular port.
	 */
1999
2000 if (sc->sc_type >= WM_T_82544) {
2001 pn = prop_dictionary_get(dict, "i82543-swdpin");
2002 if (pn != NULL) {
2003 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2004 swdpin = (uint16_t) prop_number_integer_value(pn);
2005 } else {
2006 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2007 aprint_error_dev(sc->sc_dev,
2008 "unable to read SWDPIN\n");
2009 goto fail_5;
2010 }
2011 }
2012 }
2013
2014 if (cfg1 & NVM_CFG1_ILOS)
2015 sc->sc_ctrl |= CTRL_ILOS;
2016 if (sc->sc_type >= WM_T_82544) {
2017 sc->sc_ctrl |=
2018 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2019 CTRL_SWDPIO_SHIFT;
2020 sc->sc_ctrl |=
2021 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2022 CTRL_SWDPINS_SHIFT;
2023 } else {
2024 sc->sc_ctrl |=
2025 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2026 CTRL_SWDPIO_SHIFT;
2027 }
2028
2029 #if 0
2030 if (sc->sc_type >= WM_T_82544) {
2031 if (cfg1 & NVM_CFG1_IPS0)
2032 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2033 if (cfg1 & NVM_CFG1_IPS1)
2034 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2035 sc->sc_ctrl_ext |=
2036 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2037 CTRL_EXT_SWDPIO_SHIFT;
2038 sc->sc_ctrl_ext |=
2039 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2040 CTRL_EXT_SWDPINS_SHIFT;
2041 } else {
2042 sc->sc_ctrl_ext |=
2043 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2044 CTRL_EXT_SWDPIO_SHIFT;
2045 }
2046 #endif
2047
2048 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2049 #if 0
2050 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2051 #endif
2052
2053 /*
2054 * Set up some register offsets that are different between
2055 * the i82542 and the i82543 and later chips.
2056 */
2057 if (sc->sc_type < WM_T_82543) {
2058 sc->sc_rdt_reg = WMREG_OLD_RDT0;
2059 sc->sc_tdt_reg = WMREG_OLD_TDT;
2060 } else {
2061 sc->sc_rdt_reg = WMREG_RDT;
2062 sc->sc_tdt_reg = WMREG_TDT;
2063 }
2064
2065 if (sc->sc_type == WM_T_PCH) {
2066 uint16_t val;
2067
2068 /* Save the NVM K1 bit setting */
2069 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2070
2071 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2072 sc->sc_nvm_k1_enabled = 1;
2073 else
2074 sc->sc_nvm_k1_enabled = 0;
2075 }
2076
	/*
	 * Determine whether we're in TBI, GMII or SGMII mode, and
	 * initialize the media structures accordingly.
	 */
2081 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2082 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2083 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2084 || sc->sc_type == WM_T_82573
2085 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2086 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2087 wm_gmii_mediainit(sc, wmp->wmp_product);
2088 } else if (sc->sc_type < WM_T_82543 ||
2089 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2090 if (sc->sc_mediatype & WMP_F_COPPER) {
2091 aprint_error_dev(sc->sc_dev,
2092 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2093 sc->sc_mediatype = WMP_F_FIBER;
2094 }
2095 wm_tbi_mediainit(sc);
2096 } else {
2097 switch (sc->sc_type) {
2098 case WM_T_82575:
2099 case WM_T_82576:
2100 case WM_T_82580:
2101 case WM_T_I350:
2102 case WM_T_I354:
2103 case WM_T_I210:
2104 case WM_T_I211:
2105 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2106 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2107 switch (link_mode) {
2108 case CTRL_EXT_LINK_MODE_1000KX:
2109 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2110 sc->sc_mediatype = WMP_F_SERDES;
2111 break;
2112 case CTRL_EXT_LINK_MODE_SGMII:
2113 if (wm_sgmii_uses_mdio(sc)) {
2114 aprint_verbose_dev(sc->sc_dev,
2115 "SGMII(MDIO)\n");
2116 sc->sc_flags |= WM_F_SGMII;
2117 sc->sc_mediatype = WMP_F_COPPER;
2118 break;
2119 }
2120 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2121 /*FALLTHROUGH*/
2122 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2123 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2124 if (sc->sc_mediatype == WMP_F_UNKNOWN) {
2125 if (link_mode
2126 == CTRL_EXT_LINK_MODE_SGMII) {
2127 sc->sc_mediatype
2128 = WMP_F_COPPER;
2129 sc->sc_flags |= WM_F_SGMII;
2130 } else {
2131 sc->sc_mediatype
2132 = WMP_F_SERDES;
2133 aprint_verbose_dev(sc->sc_dev,
2134 "SERDES\n");
2135 }
2136 break;
2137 }
2138 if (sc->sc_mediatype == WMP_F_SERDES)
2139 aprint_verbose_dev(sc->sc_dev,
2140 "SERDES\n");
2141
2142 /* Change current link mode setting */
2143 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2144 switch (sc->sc_mediatype) {
2145 case WMP_F_COPPER:
2146 reg |= CTRL_EXT_LINK_MODE_SGMII;
2147 break;
2148 case WMP_F_SERDES:
2149 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2150 break;
2151 default:
2152 break;
2153 }
2154 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2155 break;
2156 case CTRL_EXT_LINK_MODE_GMII:
2157 default:
2158 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2159 sc->sc_mediatype = WMP_F_COPPER;
2160 break;
2161 }
2162
			if ((sc->sc_flags & WM_F_SGMII) != 0)
				reg |= CTRL_EXT_I2C_ENA;
			else
				reg &= ~CTRL_EXT_I2C_ENA;
2168 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2169
2170 if (sc->sc_mediatype == WMP_F_COPPER)
2171 wm_gmii_mediainit(sc, wmp->wmp_product);
2172 else
2173 wm_tbi_mediainit(sc);
2174 break;
2175 default:
2176 if (sc->sc_mediatype & WMP_F_FIBER)
2177 aprint_error_dev(sc->sc_dev,
2178 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2179 sc->sc_mediatype = WMP_F_COPPER;
2180 wm_gmii_mediainit(sc, wmp->wmp_product);
2181 }
2182 }
2183
2184 ifp = &sc->sc_ethercom.ec_if;
2185 xname = device_xname(sc->sc_dev);
2186 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2187 ifp->if_softc = sc;
2188 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2189 ifp->if_ioctl = wm_ioctl;
2190 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2191 ifp->if_start = wm_nq_start;
2192 else
2193 ifp->if_start = wm_start;
2194 ifp->if_watchdog = wm_watchdog;
2195 ifp->if_init = wm_init;
2196 ifp->if_stop = wm_stop;
2197 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2198 IFQ_SET_READY(&ifp->if_snd);
2199
2200 /* Check for jumbo frame */
2201 switch (sc->sc_type) {
2202 case WM_T_82573:
2203 /* XXX limited to 9234 if ASPM is disabled */
2204 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &io3);
2205 if ((io3 & NVM_3GIO_3_ASPM_MASK) != 0)
2206 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2207 break;
2208 case WM_T_82571:
2209 case WM_T_82572:
2210 case WM_T_82574:
2211 case WM_T_82575:
2212 case WM_T_82576:
2213 case WM_T_82580:
2214 case WM_T_I350:
2215 case WM_T_I354: /* XXXX ok? */
2216 case WM_T_I210:
2217 case WM_T_I211:
2218 case WM_T_80003:
2219 case WM_T_ICH9:
2220 case WM_T_ICH10:
2221 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2222 case WM_T_PCH_LPT:
2223 /* XXX limited to 9234 */
2224 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2225 break;
2226 case WM_T_PCH:
2227 /* XXX limited to 4096 */
2228 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2229 break;
2230 case WM_T_82542_2_0:
2231 case WM_T_82542_2_1:
2232 case WM_T_82583:
2233 case WM_T_ICH8:
2234 /* No support for jumbo frame */
2235 break;
2236 default:
2237 /* ETHER_MAX_LEN_JUMBO */
2238 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2239 break;
2240 }
2241
	/* If we're an i82543 or greater, we can support VLANs. */
2243 if (sc->sc_type >= WM_T_82543)
2244 sc->sc_ethercom.ec_capabilities |=
2245 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2246
	/*
	 * We can perform TCPv4 and UDPv4 checksums in hardware.
	 * Only on i82543 and later.
	 */
2251 if (sc->sc_type >= WM_T_82543) {
2252 ifp->if_capabilities |=
2253 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2254 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2255 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2256 IFCAP_CSUM_TCPv6_Tx |
2257 IFCAP_CSUM_UDPv6_Tx;
2258 }
2259
2260 /*
2261 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2262 *
2263 * 82541GI (8086:1076) ... no
2264 * 82572EI (8086:10b9) ... yes
2265 */
2266 if (sc->sc_type >= WM_T_82571) {
2267 ifp->if_capabilities |=
2268 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2269 }
2270
	/*
	 * If we're an i82544 or greater (except i82547), we can do
	 * TCP segmentation offload.
	 */
2275 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2276 ifp->if_capabilities |= IFCAP_TSOv4;
2277 }
2278
2279 if (sc->sc_type >= WM_T_82571) {
2280 ifp->if_capabilities |= IFCAP_TSOv6;
2281 }
2282
2283 #ifdef WM_MPSAFE
2284 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2285 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2286 #else
2287 sc->sc_tx_lock = NULL;
2288 sc->sc_rx_lock = NULL;
2289 #endif
2290
2291 /* Attach the interface. */
2292 if_attach(ifp);
2293 ether_ifattach(ifp, enaddr);
2294 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2295 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2296 RND_FLAG_DEFAULT);
2297
2298 #ifdef WM_EVENT_COUNTERS
2299 /* Attach event counters. */
2300 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2301 NULL, xname, "txsstall");
2302 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2303 NULL, xname, "txdstall");
2304 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2305 NULL, xname, "txfifo_stall");
2306 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2307 NULL, xname, "txdw");
2308 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2309 NULL, xname, "txqe");
2310 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2311 NULL, xname, "rxintr");
2312 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2313 NULL, xname, "linkintr");
2314
2315 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2316 NULL, xname, "rxipsum");
2317 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2318 NULL, xname, "rxtusum");
2319 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2320 NULL, xname, "txipsum");
2321 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2322 NULL, xname, "txtusum");
2323 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2324 NULL, xname, "txtusum6");
2325
2326 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2327 NULL, xname, "txtso");
2328 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2329 NULL, xname, "txtso6");
2330 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2331 NULL, xname, "txtsopain");
2332
2333 for (i = 0; i < WM_NTXSEGS; i++) {
2334 snprintf(wm_txseg_evcnt_names[i],
2335 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2336 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2337 NULL, xname, wm_txseg_evcnt_names[i]);
2338 }
2339
2340 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2341 NULL, xname, "txdrop");
2342
2343 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2344 NULL, xname, "tu");
2345
2346 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2347 NULL, xname, "tx_xoff");
2348 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2349 NULL, xname, "tx_xon");
2350 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2351 NULL, xname, "rx_xoff");
2352 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2353 NULL, xname, "rx_xon");
2354 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2355 NULL, xname, "rx_macctl");
2356 #endif /* WM_EVENT_COUNTERS */
2357
2358 if (pmf_device_register(self, wm_suspend, wm_resume))
2359 pmf_class_network_register(self, ifp);
2360 else
2361 aprint_error_dev(self, "couldn't establish power handler\n");
2362
2363 sc->sc_flags |= WM_F_ATTACHED;
2364 return;
2365
2366 /*
2367 * Free any resources we've allocated during the failed attach
2368 * attempt. Do this in reverse order and fall through.
2369 */
2370 fail_5:
2371 for (i = 0; i < WM_NRXDESC; i++) {
2372 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2373 bus_dmamap_destroy(sc->sc_dmat,
2374 sc->sc_rxsoft[i].rxs_dmamap);
2375 }
2376 fail_4:
2377 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2378 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2379 bus_dmamap_destroy(sc->sc_dmat,
2380 sc->sc_txsoft[i].txs_dmamap);
2381 }
2382 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2383 fail_3:
2384 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2385 fail_2:
2386 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2387 sc->sc_cd_size);
2388 fail_1:
2389 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2390 fail_0:
2391 return;
2392 }
2393
2394 /* The detach function (ca_detach) */
2395 static int
2396 wm_detach(device_t self, int flags __unused)
2397 {
2398 struct wm_softc *sc = device_private(self);
2399 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2400 int i;
2401 #ifndef WM_MPSAFE
2402 int s;
2403 #endif
2404
2405 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2406 return 0;
2407
2408 #ifndef WM_MPSAFE
2409 s = splnet();
2410 #endif
2411 /* Stop the interface. Callouts are stopped in it. */
2412 wm_stop(ifp, 1);
2413
2414 #ifndef WM_MPSAFE
2415 splx(s);
2416 #endif
2417
2418 pmf_device_deregister(self);
2419
2420 /* Tell the firmware about the release */
2421 WM_BOTH_LOCK(sc);
2422 wm_release_manageability(sc);
2423 wm_release_hw_control(sc);
2424 WM_BOTH_UNLOCK(sc);
2425
2426 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2427
2428 /* Delete all remaining media. */
2429 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2430
2431 ether_ifdetach(ifp);
2432 if_detach(ifp);
2433
2435 /* Unload RX dmamaps and free mbufs */
2436 WM_RX_LOCK(sc);
2437 wm_rxdrain(sc);
2438 WM_RX_UNLOCK(sc);
2439 /* Must unlock here */
2440
	/* Free DMA maps; this mirrors the failure path at the end of wm_attach() */
2442 for (i = 0; i < WM_NRXDESC; i++) {
2443 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2444 bus_dmamap_destroy(sc->sc_dmat,
2445 sc->sc_rxsoft[i].rxs_dmamap);
2446 }
2447 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2448 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2449 bus_dmamap_destroy(sc->sc_dmat,
2450 sc->sc_txsoft[i].txs_dmamap);
2451 }
2452 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2453 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2454 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2455 sc->sc_cd_size);
2456 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2457
2458 /* Disestablish the interrupt handler */
2459 if (sc->sc_ih != NULL) {
2460 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2461 sc->sc_ih = NULL;
2462 }
2463
2464 /* Unmap the registers */
2465 if (sc->sc_ss) {
2466 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2467 sc->sc_ss = 0;
2468 }
2469
2470 if (sc->sc_ios) {
2471 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2472 sc->sc_ios = 0;
2473 }
2474
2475 if (sc->sc_tx_lock)
2476 mutex_obj_free(sc->sc_tx_lock);
2477 if (sc->sc_rx_lock)
2478 mutex_obj_free(sc->sc_rx_lock);
2479
2480 return 0;
2481 }
2482
2483 static bool
2484 wm_suspend(device_t self, const pmf_qual_t *qual)
2485 {
2486 struct wm_softc *sc = device_private(self);
2487
2488 wm_release_manageability(sc);
2489 wm_release_hw_control(sc);
2490 #ifdef WM_WOL
2491 wm_enable_wakeup(sc);
2492 #endif
2493
2494 return true;
2495 }
2496
2497 static bool
2498 wm_resume(device_t self, const pmf_qual_t *qual)
2499 {
2500 struct wm_softc *sc = device_private(self);
2501
2502 wm_init_manageability(sc);
2503
2504 return true;
2505 }
2506
2507 /*
2508 * wm_watchdog: [ifnet interface function]
2509 *
2510 * Watchdog timer handler.
2511 */
2512 static void
2513 wm_watchdog(struct ifnet *ifp)
2514 {
2515 struct wm_softc *sc = ifp->if_softc;
2516
2517 /*
2518 * Since we're using delayed interrupts, sweep up
2519 * before we report an error.
2520 */
2521 WM_TX_LOCK(sc);
2522 wm_txintr(sc);
2523 WM_TX_UNLOCK(sc);
2524
2525 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2526 #ifdef WM_DEBUG
2527 int i, j;
2528 struct wm_txsoft *txs;
2529 #endif
2530 log(LOG_ERR,
2531 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2532 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2533 sc->sc_txnext);
2534 ifp->if_oerrors++;
2535 #ifdef WM_DEBUG
2536 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2537 i = WM_NEXTTXS(sc, i)) {
2538 txs = &sc->sc_txsoft[i];
2539 printf("txs %d tx %d -> %d\n",
2540 i, txs->txs_firstdesc, txs->txs_lastdesc);
2541 for (j = txs->txs_firstdesc; ;
2542 j = WM_NEXTTX(sc, j)) {
2543 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2544 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2545 printf("\t %#08x%08x\n",
2546 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2547 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2548 if (j == txs->txs_lastdesc)
2549 break;
2550 }
2551 }
2552 #endif
2553 /* Reset the interface. */
2554 (void) wm_init(ifp);
2555 }
2556
2557 /* Try to get more packets going. */
2558 ifp->if_start(ifp);
2559 }
2560
2561 /*
2562 * wm_tick:
2563 *
2564 * One second timer, used to check link status, sweep up
2565 * completed transmit jobs, etc.
2566 */
2567 static void
2568 wm_tick(void *arg)
2569 {
2570 struct wm_softc *sc = arg;
2571 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2572 #ifndef WM_MPSAFE
2573 int s;
2574
2575 s = splnet();
2576 #endif
2577
2578 WM_TX_LOCK(sc);
2579
2580 if (sc->sc_stopping)
2581 goto out;
2582
2583 if (sc->sc_type >= WM_T_82542_2_1) {
2584 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2585 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2586 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2587 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2588 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2589 }
2590
2591 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += 0ULL /* ensure quad_t */
	    + CSR_READ(sc, WMREG_CRCERRS)
2594 + CSR_READ(sc, WMREG_ALGNERRC)
2595 + CSR_READ(sc, WMREG_SYMERRC)
2596 + CSR_READ(sc, WMREG_RXERRC)
2597 + CSR_READ(sc, WMREG_SEC)
2598 + CSR_READ(sc, WMREG_CEXTERR)
2599 + CSR_READ(sc, WMREG_RLEC);
2600 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2601
2602 if (sc->sc_flags & WM_F_HAS_MII)
2603 mii_tick(&sc->sc_mii);
2604 else
2605 wm_tbi_check_link(sc);
2606
2607 out:
2608 WM_TX_UNLOCK(sc);
2609 #ifndef WM_MPSAFE
2610 splx(s);
2611 #endif
2612
2613 if (!sc->sc_stopping)
2614 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2615 }
2616
2617 static int
2618 wm_ifflags_cb(struct ethercom *ec)
2619 {
2620 struct ifnet *ifp = &ec->ec_if;
2621 struct wm_softc *sc = ifp->if_softc;
2622 int change = ifp->if_flags ^ sc->sc_if_flags;
2623 int rc = 0;
2624
2625 WM_BOTH_LOCK(sc);
2626
2627 if (change != 0)
2628 sc->sc_if_flags = ifp->if_flags;
2629
2630 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2631 rc = ENETRESET;
2632 goto out;
2633 }
2634
2635 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2636 wm_set_filter(sc);
2637
2638 wm_set_vlan(sc);
2639
2640 out:
2641 WM_BOTH_UNLOCK(sc);
2642
2643 return rc;
2644 }
2645
2646 /*
2647 * wm_ioctl: [ifnet interface function]
2648 *
2649 * Handle control requests from the operator.
2650 */
2651 static int
2652 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2653 {
2654 struct wm_softc *sc = ifp->if_softc;
2655 struct ifreq *ifr = (struct ifreq *) data;
2656 struct ifaddr *ifa = (struct ifaddr *)data;
2657 struct sockaddr_dl *sdl;
2658 int s, error;
2659
2660 #ifndef WM_MPSAFE
2661 s = splnet();
2662 #endif
2663 WM_BOTH_LOCK(sc);
2664
2665 switch (cmd) {
2666 case SIOCSIFMEDIA:
2667 case SIOCGIFMEDIA:
2668 /* Flow control requires full-duplex mode. */
2669 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2670 (ifr->ifr_media & IFM_FDX) == 0)
2671 ifr->ifr_media &= ~IFM_ETH_FMASK;
2672 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2673 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2674 /* We can do both TXPAUSE and RXPAUSE. */
2675 ifr->ifr_media |=
2676 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2677 }
2678 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2679 }
2680 WM_BOTH_UNLOCK(sc);
2681 #ifdef WM_MPSAFE
2682 s = splnet();
2683 #endif
2684 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2685 #ifdef WM_MPSAFE
2686 splx(s);
2687 #endif
2688 WM_BOTH_LOCK(sc);
2689 break;
2690 case SIOCINITIFADDR:
2691 if (ifa->ifa_addr->sa_family == AF_LINK) {
2692 sdl = satosdl(ifp->if_dl->ifa_addr);
2693 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2694 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2695 /* unicast address is first multicast entry */
2696 wm_set_filter(sc);
2697 error = 0;
2698 break;
2699 }
2700 /*FALLTHROUGH*/
2701 default:
2702 WM_BOTH_UNLOCK(sc);
2703 #ifdef WM_MPSAFE
2704 s = splnet();
2705 #endif
2706 /* It may call wm_start, so unlock here */
2707 error = ether_ioctl(ifp, cmd, data);
2708 #ifdef WM_MPSAFE
2709 splx(s);
2710 #endif
2711 WM_BOTH_LOCK(sc);
2712
2713 if (error != ENETRESET)
2714 break;
2715
2716 error = 0;
2717
2718 if (cmd == SIOCSIFCAP) {
2719 WM_BOTH_UNLOCK(sc);
2720 error = (*ifp->if_init)(ifp);
2721 WM_BOTH_LOCK(sc);
2722 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2723 ;
2724 else if (ifp->if_flags & IFF_RUNNING) {
2725 /*
2726 * Multicast list has changed; set the hardware filter
2727 * accordingly.
2728 */
2729 wm_set_filter(sc);
2730 }
2731 break;
2732 }
2733
2734 WM_BOTH_UNLOCK(sc);
2735
2736 /* Try to get more packets going. */
2737 ifp->if_start(ifp);
2738
2739 #ifndef WM_MPSAFE
2740 splx(s);
2741 #endif
2742 return error;
2743 }
2744
2745 /* MAC address related */
2746
2747 static int
2748 wm_check_alt_mac_addr(struct wm_softc *sc)
2749 {
2750 uint16_t myea[ETHER_ADDR_LEN / 2];
2751 uint16_t offset = NVM_OFF_MACADDR;
2752
2753 /* Try to read alternative MAC address pointer */
2754 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2755 return -1;
2756
2757 /* Check pointer */
2758 if (offset == 0xffff)
2759 return -1;
2760
	/*
	 * Check whether the alternative MAC address is valid.
	 * Some cards have a non-0xffff pointer but don't actually
	 * use an alternative MAC address.
	 *
	 * A valid station address must have the multicast (group)
	 * bit of its first octet clear, so check that.
	 */
2768 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2769 if (((myea[0] & 0xff) & 0x01) == 0)
2770 return 0; /* found! */
2771
2772 /* not found */
2773 return -1;
2774 }
2775
2776 static int
2777 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2778 {
2779 uint16_t myea[ETHER_ADDR_LEN / 2];
2780 uint16_t offset = NVM_OFF_MACADDR;
2781 int do_invert = 0;
2782
2783 switch (sc->sc_type) {
2784 case WM_T_82580:
2785 case WM_T_I350:
2786 case WM_T_I354:
2787 switch (sc->sc_funcid) {
2788 case 0:
2789 /* default value (== NVM_OFF_MACADDR) */
2790 break;
2791 case 1:
2792 offset = NVM_OFF_LAN1;
2793 break;
2794 case 2:
2795 offset = NVM_OFF_LAN2;
2796 break;
2797 case 3:
2798 offset = NVM_OFF_LAN3;
2799 break;
2800 default:
2801 goto bad;
2802 /* NOTREACHED */
2803 break;
2804 }
2805 break;
2806 case WM_T_82571:
2807 case WM_T_82575:
2808 case WM_T_82576:
2809 case WM_T_80003:
2810 case WM_T_I210:
2811 case WM_T_I211:
2812 if (wm_check_alt_mac_addr(sc) != 0) {
2813 /* reset the offset to LAN0 */
2814 offset = NVM_OFF_MACADDR;
2815 if ((sc->sc_funcid & 0x01) == 1)
2816 do_invert = 1;
2817 goto do_read;
2818 }
2819 switch (sc->sc_funcid) {
2820 case 0:
2821 /*
2822 * The offset is the value in NVM_OFF_ALT_MAC_ADDR_PTR
2823 * itself.
2824 */
2825 break;
2826 case 1:
2827 offset += NVM_OFF_MACADDR_LAN1;
2828 break;
2829 case 2:
2830 offset += NVM_OFF_MACADDR_LAN2;
2831 break;
2832 case 3:
2833 offset += NVM_OFF_MACADDR_LAN3;
2834 break;
2835 default:
2836 goto bad;
2837 /* NOTREACHED */
2838 break;
2839 }
2840 break;
2841 default:
2842 if ((sc->sc_funcid & 0x01) == 1)
2843 do_invert = 1;
2844 break;
2845 }
2846
2847 do_read:
2848 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2849 myea) != 0) {
2850 goto bad;
2851 }
2852
2853 enaddr[0] = myea[0] & 0xff;
2854 enaddr[1] = myea[0] >> 8;
2855 enaddr[2] = myea[1] & 0xff;
2856 enaddr[3] = myea[1] >> 8;
2857 enaddr[4] = myea[2] & 0xff;
2858 enaddr[5] = myea[2] >> 8;
2859
2860 /*
2861 * Toggle the LSB of the MAC address on the second port
2862 * of some dual port cards.
2863 */
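	/*
	 * For example, a port-0 address of 00:11:22:33:44:56 becomes
	 * 00:11:22:33:44:57 on port 1.
	 */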
2864 if (do_invert != 0)
2865 enaddr[5] ^= 1;
2866
2867 return 0;
2868
2869 bad:
2870 return -1;
2871 }
2872
/*
 * wm_set_ral:
 *
 *	Set an entry in the receive address list.
 */
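/*
 * For example, 00:11:22:33:44:55 packs as ral_lo = 0x33221100 and
 * ral_hi = 0x5544, with RAL_AV set to mark the entry valid.
 */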
2878 static void
2879 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2880 {
2881 uint32_t ral_lo, ral_hi;
2882
2883 if (enaddr != NULL) {
2884 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2885 (enaddr[3] << 24);
2886 ral_hi = enaddr[4] | (enaddr[5] << 8);
2887 ral_hi |= RAL_AV;
2888 } else {
2889 ral_lo = 0;
2890 ral_hi = 0;
2891 }
2892
2893 if (sc->sc_type >= WM_T_82544) {
2894 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2895 ral_lo);
2896 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2897 ral_hi);
2898 } else {
2899 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2900 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2901 }
2902 }
2903
2904 /*
2905 * wm_mchash:
2906 *
2907 * Compute the hash of the multicast address for the 4096-bit
2908 * multicast filter.
2909 */
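/*
 * For the default hash type the 12-bit hash comes from the last two
 * address octets: e.g. enaddr[4] = 0xab, enaddr[5] = 0xcd gives
 * (0xab >> 4) | (0xcd << 4) = 0xcda.  wm_set_filter() then uses
 * hash >> 5 as the MTA word index and hash & 0x1f as the bit within
 * that word.
 */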
2910 static uint32_t
2911 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2912 {
2913 static const int lo_shift[4] = { 4, 3, 2, 0 };
2914 static const int hi_shift[4] = { 4, 5, 6, 8 };
2915 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2916 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
2917 uint32_t hash;
2918
2919 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2920 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2921 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
2922 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
2923 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
2924 return (hash & 0x3ff);
2925 }
2926 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2927 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2928
2929 return (hash & 0xfff);
2930 }
2931
2932 /*
2933 * wm_set_filter:
2934 *
2935 * Set up the receive filter.
2936 */
2937 static void
2938 wm_set_filter(struct wm_softc *sc)
2939 {
2940 struct ethercom *ec = &sc->sc_ethercom;
2941 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2942 struct ether_multi *enm;
2943 struct ether_multistep step;
2944 bus_addr_t mta_reg;
2945 uint32_t hash, reg, bit;
2946 int i, size;
2947
2948 if (sc->sc_type >= WM_T_82544)
2949 mta_reg = WMREG_CORDOVA_MTA;
2950 else
2951 mta_reg = WMREG_MTA;
2952
2953 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2954
2955 if (ifp->if_flags & IFF_BROADCAST)
2956 sc->sc_rctl |= RCTL_BAM;
2957 if (ifp->if_flags & IFF_PROMISC) {
2958 sc->sc_rctl |= RCTL_UPE;
2959 goto allmulti;
2960 }
2961
2962 /*
2963 * Set the station address in the first RAL slot, and
2964 * clear the remaining slots.
2965 */
2966 if (sc->sc_type == WM_T_ICH8)
		size = WM_RAL_TABSIZE_ICH8 - 1;
2968 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
2969 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
2970 || (sc->sc_type == WM_T_PCH_LPT))
2971 size = WM_RAL_TABSIZE_ICH8;
2972 else if (sc->sc_type == WM_T_82575)
2973 size = WM_RAL_TABSIZE_82575;
2974 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
2975 size = WM_RAL_TABSIZE_82576;
2976 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2977 size = WM_RAL_TABSIZE_I350;
2978 else
2979 size = WM_RAL_TABSIZE;
2980 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
2981 for (i = 1; i < size; i++)
2982 wm_set_ral(sc, NULL, i);
2983
2984 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2985 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2986 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
2987 size = WM_ICH8_MC_TABSIZE;
2988 else
2989 size = WM_MC_TABSIZE;
2990 /* Clear out the multicast table. */
2991 for (i = 0; i < size; i++)
2992 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2993
2994 ETHER_FIRST_MULTI(step, ec, enm);
2995 while (enm != NULL) {
2996 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2997 /*
2998 * We must listen to a range of multicast addresses.
2999 * For now, just accept all multicasts, rather than
3000 * trying to set only those filter bits needed to match
3001 * the range. (At this time, the only use of address
3002 * ranges is for IP multicast routing, for which the
3003 * range is big enough to require all bits set.)
3004 */
3005 goto allmulti;
3006 }
3007
3008 hash = wm_mchash(sc, enm->enm_addrlo);
3009
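		/* hash >> 5 selects a 32-bit MTA word, hash & 0x1f a bit within it. */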
3010 reg = (hash >> 5);
3011 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3012 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3013 || (sc->sc_type == WM_T_PCH2)
3014 || (sc->sc_type == WM_T_PCH_LPT))
3015 reg &= 0x1f;
3016 else
3017 reg &= 0x7f;
3018 bit = hash & 0x1f;
3019
3020 hash = CSR_READ(sc, mta_reg + (reg << 2));
3021 hash |= 1U << bit;
3022
3023 /* XXX Hardware bug?? */
3024 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3025 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3026 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3027 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3028 } else
3029 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3030
3031 ETHER_NEXT_MULTI(step, enm);
3032 }
3033
3034 ifp->if_flags &= ~IFF_ALLMULTI;
3035 goto setit;
3036
3037 allmulti:
3038 ifp->if_flags |= IFF_ALLMULTI;
3039 sc->sc_rctl |= RCTL_MPE;
3040
3041 setit:
3042 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3043 }
3044
3045 /* Reset and init related */
3046
3047 static void
3048 wm_set_vlan(struct wm_softc *sc)
3049 {
3050 /* Deal with VLAN enables. */
3051 if (VLAN_ATTACHED(&sc->sc_ethercom))
3052 sc->sc_ctrl |= CTRL_VME;
3053 else
3054 sc->sc_ctrl &= ~CTRL_VME;
3055
3056 /* Write the control registers. */
3057 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3058 }
3059
3060 static void
3061 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3062 {
3063 uint32_t gcr;
3064 pcireg_t ctrl2;
3065
3066 gcr = CSR_READ(sc, WMREG_GCR);
3067
3068 /* Only take action if timeout value is defaulted to 0 */
3069 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3070 goto out;
3071
3072 if ((gcr & GCR_CAP_VER2) == 0) {
3073 gcr |= GCR_CMPL_TMOUT_10MS;
3074 goto out;
3075 }
3076
3077 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3078 sc->sc_pcixe_capoff + PCIE_DCSR2);
3079 ctrl2 |= WM_PCIE_DCSR2_16MS;
3080 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3081 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3082
3083 out:
3084 /* Disable completion timeout resend */
3085 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3086
3087 CSR_WRITE(sc, WMREG_GCR, gcr);
3088 }
3089
3090 void
3091 wm_get_auto_rd_done(struct wm_softc *sc)
3092 {
3093 int i;
3094
3095 /* wait for eeprom to reload */
3096 switch (sc->sc_type) {
3097 case WM_T_82571:
3098 case WM_T_82572:
3099 case WM_T_82573:
3100 case WM_T_82574:
3101 case WM_T_82583:
3102 case WM_T_82575:
3103 case WM_T_82576:
3104 case WM_T_82580:
3105 case WM_T_I350:
3106 case WM_T_I354:
3107 case WM_T_I210:
3108 case WM_T_I211:
3109 case WM_T_80003:
3110 case WM_T_ICH8:
3111 case WM_T_ICH9:
3112 for (i = 0; i < 10; i++) {
3113 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3114 break;
3115 delay(1000);
3116 }
3117 if (i == 10) {
3118 log(LOG_ERR, "%s: auto read from eeprom failed to "
3119 "complete\n", device_xname(sc->sc_dev));
3120 }
3121 break;
3122 default:
3123 break;
3124 }
3125 }
3126
3127 void
3128 wm_lan_init_done(struct wm_softc *sc)
3129 {
3130 uint32_t reg = 0;
3131 int i;
3132
	/* Wait for the hardware to finish LAN initialization */
3134 switch (sc->sc_type) {
3135 case WM_T_ICH10:
3136 case WM_T_PCH:
3137 case WM_T_PCH2:
3138 case WM_T_PCH_LPT:
3139 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3140 reg = CSR_READ(sc, WMREG_STATUS);
3141 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3142 break;
3143 delay(100);
3144 }
3145 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3146 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3147 "complete\n", device_xname(sc->sc_dev), __func__);
3148 }
3149 break;
3150 default:
3151 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3152 __func__);
3153 break;
3154 }
3155
3156 reg &= ~STATUS_LAN_INIT_DONE;
3157 CSR_WRITE(sc, WMREG_STATUS, reg);
3158 }
3159
3160 void
3161 wm_get_cfg_done(struct wm_softc *sc)
3162 {
3163 int mask;
3164 uint32_t reg;
3165 int i;
3166
	/* Wait for the PHY configuration to complete */
3168 switch (sc->sc_type) {
3169 case WM_T_82542_2_0:
3170 case WM_T_82542_2_1:
3171 /* null */
3172 break;
3173 case WM_T_82543:
3174 case WM_T_82544:
3175 case WM_T_82540:
3176 case WM_T_82545:
3177 case WM_T_82545_3:
3178 case WM_T_82546:
3179 case WM_T_82546_3:
3180 case WM_T_82541:
3181 case WM_T_82541_2:
3182 case WM_T_82547:
3183 case WM_T_82547_2:
3184 case WM_T_82573:
3185 case WM_T_82574:
3186 case WM_T_82583:
3187 /* generic */
3188 delay(10*1000);
3189 break;
3190 case WM_T_80003:
3191 case WM_T_82571:
3192 case WM_T_82572:
3193 case WM_T_82575:
3194 case WM_T_82576:
3195 case WM_T_82580:
3196 case WM_T_I350:
3197 case WM_T_I354:
3198 case WM_T_I210:
3199 case WM_T_I211:
3200 if (sc->sc_type == WM_T_82571) {
3201 /* Only 82571 shares port 0 */
3202 mask = EEMNGCTL_CFGDONE_0;
3203 } else
3204 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3205 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3206 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3207 break;
3208 delay(1000);
3209 }
3210 if (i >= WM_PHY_CFG_TIMEOUT) {
3211 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3212 device_xname(sc->sc_dev), __func__));
3213 }
3214 break;
3215 case WM_T_ICH8:
3216 case WM_T_ICH9:
3217 case WM_T_ICH10:
3218 case WM_T_PCH:
3219 case WM_T_PCH2:
3220 case WM_T_PCH_LPT:
3221 delay(10*1000);
3222 if (sc->sc_type >= WM_T_ICH10)
3223 wm_lan_init_done(sc);
3224 else
3225 wm_get_auto_rd_done(sc);
3226
3227 reg = CSR_READ(sc, WMREG_STATUS);
3228 if ((reg & STATUS_PHYRA) != 0)
3229 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3230 break;
3231 default:
3232 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3233 __func__);
3234 break;
3235 }
3236 }
3237
3238 /*
3239 * wm_reset:
3240 *
3241 * Reset the i82542 chip.
3242 */
3243 static void
3244 wm_reset(struct wm_softc *sc)
3245 {
3246 int phy_reset = 0;
3247 int error = 0;
3248 uint32_t reg, mask;
3249
3250 /*
3251 * Allocate on-chip memory according to the MTU size.
3252 * The Packet Buffer Allocation register must be written
3253 * before the chip is reset.
3254 */
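	/*
	 * On the 82547, for example, the 40KB packet buffer is split
	 * below between receive space (sc_pba) and the transmit FIFO
	 * used by the Tx-stall work-around.
	 */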
3255 switch (sc->sc_type) {
3256 case WM_T_82547:
3257 case WM_T_82547_2:
3258 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3259 PBA_22K : PBA_30K;
3260 sc->sc_txfifo_head = 0;
3261 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3262 sc->sc_txfifo_size =
3263 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3264 sc->sc_txfifo_stall = 0;
3265 break;
3266 case WM_T_82571:
3267 case WM_T_82572:
	case WM_T_82575:	/* XXX need special handling for jumbo frames */
3269 case WM_T_I350:
3270 case WM_T_I354:
3271 case WM_T_80003:
3272 sc->sc_pba = PBA_32K;
3273 break;
3274 case WM_T_82580:
3275 sc->sc_pba = PBA_35K;
3276 break;
3277 case WM_T_I210:
3278 case WM_T_I211:
3279 sc->sc_pba = PBA_34K;
3280 break;
3281 case WM_T_82576:
3282 sc->sc_pba = PBA_64K;
3283 break;
3284 case WM_T_82573:
3285 sc->sc_pba = PBA_12K;
3286 break;
3287 case WM_T_82574:
3288 case WM_T_82583:
3289 sc->sc_pba = PBA_20K;
3290 break;
3291 case WM_T_ICH8:
3292 sc->sc_pba = PBA_8K;
3293 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3294 break;
3295 case WM_T_ICH9:
3296 case WM_T_ICH10:
3297 sc->sc_pba = PBA_10K;
3298 break;
3299 case WM_T_PCH:
3300 case WM_T_PCH2:
3301 case WM_T_PCH_LPT:
3302 sc->sc_pba = PBA_26K;
3303 break;
3304 default:
3305 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3306 PBA_40K : PBA_48K;
3307 break;
3308 }
3309 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3310
3311 /* Prevent the PCI-E bus from sticking */
3312 if (sc->sc_flags & WM_F_PCIE) {
3313 int timeout = 800;
3314
3315 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3316 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3317
3318 while (timeout--) {
3319 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3320 == 0)
3321 break;
3322 delay(100);
3323 }
3324 }
3325
	/* Set the PCIe completion timeout for the interface */
3327 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3328 || (sc->sc_type == WM_T_82580)
3329 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3330 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3331 wm_set_pcie_completion_timeout(sc);
3332
3333 /* Clear interrupt */
3334 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3335
3336 /* Stop the transmit and receive processes. */
3337 CSR_WRITE(sc, WMREG_RCTL, 0);
3338 sc->sc_rctl &= ~RCTL_EN;
3339 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3340 CSR_WRITE_FLUSH(sc);
3341
3342 /* XXX set_tbi_sbp_82543() */
3343
3344 delay(10*1000);
3345
3346 /* Must acquire the MDIO ownership before MAC reset */
3347 switch (sc->sc_type) {
3348 case WM_T_82573:
3349 case WM_T_82574:
3350 case WM_T_82583:
3351 error = wm_get_hw_semaphore_82573(sc);
3352 break;
3353 default:
3354 break;
3355 }
3356
3357 /*
3358 * 82541 Errata 29? & 82547 Errata 28?
3359 * See also the description about PHY_RST bit in CTRL register
3360 * in 8254x_GBe_SDM.pdf.
3361 */
3362 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3363 CSR_WRITE(sc, WMREG_CTRL,
3364 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3365 CSR_WRITE_FLUSH(sc);
3366 delay(5000);
3367 }
3368
3369 switch (sc->sc_type) {
3370 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3371 case WM_T_82541:
3372 case WM_T_82541_2:
3373 case WM_T_82547:
3374 case WM_T_82547_2:
		/*
		 * On some chipsets, a reset through a memory-mapped write
		 * cycle can cause the chip to reset before completing the
		 * write cycle.  This causes a major headache that can be
		 * avoided by issuing the reset via indirect register writes
		 * through I/O space.
		 *
		 * So, if we successfully mapped the I/O BAR at attach time,
		 * use that.  Otherwise, try our luck with a memory-mapped
		 * reset.
		 */
3386 if (sc->sc_flags & WM_F_IOH_VALID)
3387 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3388 else
3389 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3390 break;
3391 case WM_T_82545_3:
3392 case WM_T_82546_3:
3393 /* Use the shadow control register on these chips. */
3394 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3395 break;
3396 case WM_T_80003:
3397 mask = swfwphysem[sc->sc_funcid];
3398 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3399 wm_get_swfw_semaphore(sc, mask);
3400 CSR_WRITE(sc, WMREG_CTRL, reg);
3401 wm_put_swfw_semaphore(sc, mask);
3402 break;
3403 case WM_T_ICH8:
3404 case WM_T_ICH9:
3405 case WM_T_ICH10:
3406 case WM_T_PCH:
3407 case WM_T_PCH2:
3408 case WM_T_PCH_LPT:
3409 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3410 if (wm_check_reset_block(sc) == 0) {
3411 /*
3412 * Gate automatic PHY configuration by hardware on
3413 * non-managed 82579
3414 */
3415 if ((sc->sc_type == WM_T_PCH2)
3416 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3417 != 0))
3418 wm_gate_hw_phy_config_ich8lan(sc, 1);
3419
3421 reg |= CTRL_PHY_RESET;
3422 phy_reset = 1;
3423 }
3424 wm_get_swfwhw_semaphore(sc);
3425 CSR_WRITE(sc, WMREG_CTRL, reg);
		/* Don't issue a completion barrier while the chip is resetting */
3427 delay(20*1000);
3428 wm_put_swfwhw_semaphore(sc);
3429 break;
3430 case WM_T_82542_2_0:
3431 case WM_T_82542_2_1:
3432 case WM_T_82543:
3433 case WM_T_82540:
3434 case WM_T_82545:
3435 case WM_T_82546:
3436 case WM_T_82571:
3437 case WM_T_82572:
3438 case WM_T_82573:
3439 case WM_T_82574:
3440 case WM_T_82575:
3441 case WM_T_82576:
3442 case WM_T_82580:
3443 case WM_T_82583:
3444 case WM_T_I350:
3445 case WM_T_I354:
3446 case WM_T_I210:
3447 case WM_T_I211:
3448 default:
3449 /* Everything else can safely use the documented method. */
3450 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3451 break;
3452 }
3453
3454 /* Must release the MDIO ownership after MAC reset */
3455 switch (sc->sc_type) {
3456 case WM_T_82573:
3457 case WM_T_82574:
3458 case WM_T_82583:
3459 if (error == 0)
3460 wm_put_hw_semaphore_82573(sc);
3461 break;
3462 default:
3463 break;
3464 }
3465
3466 if (phy_reset != 0)
3467 wm_get_cfg_done(sc);
3468
3469 /* reload EEPROM */
3470 switch (sc->sc_type) {
3471 case WM_T_82542_2_0:
3472 case WM_T_82542_2_1:
3473 case WM_T_82543:
3474 case WM_T_82544:
3475 delay(10);
3476 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3477 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3478 CSR_WRITE_FLUSH(sc);
3479 delay(2000);
3480 break;
3481 case WM_T_82540:
3482 case WM_T_82545:
3483 case WM_T_82545_3:
3484 case WM_T_82546:
3485 case WM_T_82546_3:
3486 delay(5*1000);
3487 /* XXX Disable HW ARPs on ASF enabled adapters */
3488 break;
3489 case WM_T_82541:
3490 case WM_T_82541_2:
3491 case WM_T_82547:
3492 case WM_T_82547_2:
3493 delay(20000);
3494 /* XXX Disable HW ARPs on ASF enabled adapters */
3495 break;
3496 case WM_T_82571:
3497 case WM_T_82572:
3498 case WM_T_82573:
3499 case WM_T_82574:
3500 case WM_T_82583:
3501 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3502 delay(10);
3503 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3504 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3505 CSR_WRITE_FLUSH(sc);
3506 }
3507 /* check EECD_EE_AUTORD */
3508 wm_get_auto_rd_done(sc);
		/*
		 * PHY configuration from the NVM starts only after
		 * EECD_AUTO_RD is set.
		 */
3513 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3514 || (sc->sc_type == WM_T_82583))
3515 delay(25*1000);
3516 break;
3517 case WM_T_82575:
3518 case WM_T_82576:
3519 case WM_T_82580:
3520 case WM_T_I350:
3521 case WM_T_I354:
3522 case WM_T_I210:
3523 case WM_T_I211:
3524 case WM_T_80003:
3525 /* check EECD_EE_AUTORD */
3526 wm_get_auto_rd_done(sc);
3527 break;
3528 case WM_T_ICH8:
3529 case WM_T_ICH9:
3530 case WM_T_ICH10:
3531 case WM_T_PCH:
3532 case WM_T_PCH2:
3533 case WM_T_PCH_LPT:
3534 break;
3535 default:
3536 panic("%s: unknown type\n", __func__);
3537 }
3538
3539 /* Check whether EEPROM is present or not */
3540 switch (sc->sc_type) {
3541 case WM_T_82575:
3542 case WM_T_82576:
3543 #if 0 /* XXX */
3544 case WM_T_82580:
3545 #endif
3546 case WM_T_I350:
3547 case WM_T_I354:
3548 case WM_T_ICH8:
3549 case WM_T_ICH9:
3550 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3551 /* Not found */
3552 sc->sc_flags |= WM_F_EEPROM_INVALID;
3553 if ((sc->sc_type == WM_T_82575)
3554 || (sc->sc_type == WM_T_82576)
3555 || (sc->sc_type == WM_T_82580)
3556 || (sc->sc_type == WM_T_I350)
3557 || (sc->sc_type == WM_T_I354))
3558 wm_reset_init_script_82575(sc);
3559 }
3560 break;
3561 default:
3562 break;
3563 }
3564
3565 if ((sc->sc_type == WM_T_82580)
3566 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3567 /* clear global device reset status bit */
3568 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3569 }
3570
3571 /* Clear any pending interrupt events. */
3572 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3573 reg = CSR_READ(sc, WMREG_ICR);
3574
3575 /* reload sc_ctrl */
3576 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3577
3578 if (sc->sc_type == WM_T_I350)
3579 wm_set_eee_i350(sc);
3580
3581 /* dummy read from WUC */
3582 if (sc->sc_type == WM_T_PCH)
3583 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
	/*
	 * For PCH, this write ensures that any noise is detected as a
	 * CRC error and dropped rather than showing up as a bad packet
	 * to the DMA engine.
	 */
3589 if (sc->sc_type == WM_T_PCH)
3590 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3591
3592 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3593 CSR_WRITE(sc, WMREG_WUC, 0);
3594
3595 /* XXX need special handling for 82580 */
3596 }
3597
3598 /*
3599 * wm_add_rxbuf:
3600 *
3601 * Add a receive buffer to the indicated descriptor.
3602 */
3603 static int
3604 wm_add_rxbuf(struct wm_softc *sc, int idx)
3605 {
3606 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3607 struct mbuf *m;
3608 int error;
3609
3610 KASSERT(WM_RX_LOCKED(sc));
3611
3612 MGETHDR(m, M_DONTWAIT, MT_DATA);
3613 if (m == NULL)
3614 return ENOBUFS;
3615
3616 MCLGET(m, M_DONTWAIT);
3617 if ((m->m_flags & M_EXT) == 0) {
3618 m_freem(m);
3619 return ENOBUFS;
3620 }
3621
3622 if (rxs->rxs_mbuf != NULL)
3623 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3624
3625 rxs->rxs_mbuf = m;
3626
3627 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3628 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3629 BUS_DMA_READ|BUS_DMA_NOWAIT);
3630 if (error) {
3631 /* XXX XXX XXX */
3632 aprint_error_dev(sc->sc_dev,
3633 "unable to load rx DMA map %d, error = %d\n",
3634 idx, error);
3635 panic("wm_add_rxbuf");
3636 }
3637
3638 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3639 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3640
3641 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3642 if ((sc->sc_rctl & RCTL_EN) != 0)
3643 WM_INIT_RXDESC(sc, idx);
3644 } else
3645 WM_INIT_RXDESC(sc, idx);
3646
3647 return 0;
3648 }
3649
3650 /*
3651 * wm_rxdrain:
3652 *
3653 * Drain the receive queue.
3654 */
3655 static void
3656 wm_rxdrain(struct wm_softc *sc)
3657 {
3658 struct wm_rxsoft *rxs;
3659 int i;
3660
3661 KASSERT(WM_RX_LOCKED(sc));
3662
3663 for (i = 0; i < WM_NRXDESC; i++) {
3664 rxs = &sc->sc_rxsoft[i];
3665 if (rxs->rxs_mbuf != NULL) {
3666 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3667 m_freem(rxs->rxs_mbuf);
3668 rxs->rxs_mbuf = NULL;
3669 }
3670 }
3671 }
3672
3673 /*
3674 * wm_init: [ifnet interface function]
3675 *
3676 * Initialize the interface.
3677 */
3678 static int
3679 wm_init(struct ifnet *ifp)
3680 {
3681 struct wm_softc *sc = ifp->if_softc;
3682 int ret;
3683
3684 WM_BOTH_LOCK(sc);
3685 ret = wm_init_locked(ifp);
3686 WM_BOTH_UNLOCK(sc);
3687
3688 return ret;
3689 }
3690
3691 static int
3692 wm_init_locked(struct ifnet *ifp)
3693 {
3694 struct wm_softc *sc = ifp->if_softc;
3695 struct wm_rxsoft *rxs;
3696 int i, j, trynum, error = 0;
3697 uint32_t reg;
3698
3699 KASSERT(WM_BOTH_LOCKED(sc));
3700 /*
3701 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3702 * There is a small but measurable benefit to avoiding the adjustment
3703 * of the descriptor so that the headers are aligned, for normal mtu,
3704 * on such platforms. One possibility is that the DMA itself is
3705 * slightly more efficient if the front of the entire packet (instead
3706 * of the front of the headers) is aligned.
3707 *
3708 * Note we must always set align_tweak to 0 if we are using
3709 * jumbo frames.
3710 */
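/*
 * Illustrative arithmetic (a sketch, not from the original source):
 * with MCLBYTES = 2048 and a standard 1500-byte MTU,
 * 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 1518, which is
 * <= MCLBYTES - 2 = 2046, so align_tweak becomes 2 and the 14-byte
 * Ethernet header is offset so that the IP header lands on a 4-byte
 * boundary.
 */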
3711 #ifdef __NO_STRICT_ALIGNMENT
3712 sc->sc_align_tweak = 0;
3713 #else
3714 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3715 sc->sc_align_tweak = 0;
3716 else
3717 sc->sc_align_tweak = 2;
3718 #endif /* __NO_STRICT_ALIGNMENT */
3719
3720 /* Cancel any pending I/O. */
3721 wm_stop_locked(ifp, 0);
3722
3723 /* update statistics before reset */
3724 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3725 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3726
3727 /* Reset the chip to a known state. */
3728 wm_reset(sc);
3729
3730 switch (sc->sc_type) {
3731 case WM_T_82571:
3732 case WM_T_82572:
3733 case WM_T_82573:
3734 case WM_T_82574:
3735 case WM_T_82583:
3736 case WM_T_80003:
3737 case WM_T_ICH8:
3738 case WM_T_ICH9:
3739 case WM_T_ICH10:
3740 case WM_T_PCH:
3741 case WM_T_PCH2:
3742 case WM_T_PCH_LPT:
3743 if (wm_check_mng_mode(sc) != 0)
3744 wm_get_hw_control(sc);
3745 break;
3746 default:
3747 break;
3748 }
3749
3750 /* Reset the PHY. */
3751 if (sc->sc_flags & WM_F_HAS_MII)
3752 wm_gmii_reset(sc);
3753
3754 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3755 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3756 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3757 || (sc->sc_type == WM_T_PCH_LPT))
3758 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3759
3760 /* Initialize the transmit descriptor ring. */
3761 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3762 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3763 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3764 sc->sc_txfree = WM_NTXDESC(sc);
3765 sc->sc_txnext = 0;
3766
3767 if (sc->sc_type < WM_T_82543) {
3768 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
3769 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
3770 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3771 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3772 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3773 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3774 } else {
3775 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
3776 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
3777 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3778 CSR_WRITE(sc, WMREG_TDH, 0);
3779 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3780 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3781
3782 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3783 /*
3784 * Don't write TDT before TCTL.EN is set.
3785 * See the documentation.
3786 */
3787 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3788 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3789 | TXDCTL_WTHRESH(0));
3790 else {
3791 CSR_WRITE(sc, WMREG_TDT, 0);
3792 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3793 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3794 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3795 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3796 }
3797 }
3798 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3799 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3800
3801 /* Initialize the transmit job descriptors. */
3802 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3803 sc->sc_txsoft[i].txs_mbuf = NULL;
3804 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3805 sc->sc_txsnext = 0;
3806 sc->sc_txsdirty = 0;
3807
3808 /*
3809 * Initialize the receive descriptor and receive job
3810 * descriptor rings.
3811 */
3812 if (sc->sc_type < WM_T_82543) {
3813 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3814 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3815 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3816 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3817 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3818 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3819
3820 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3821 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3822 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3823 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3824 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3825 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3826 } else {
3827 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3828 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3829 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3830 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3831 CSR_WRITE(sc, WMREG_EITR(0), 450);
3832 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
3833 panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
3834 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3835 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
3836 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3837 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3838 | RXDCTL_WTHRESH(1));
3839 } else {
3840 CSR_WRITE(sc, WMREG_RDH, 0);
3841 CSR_WRITE(sc, WMREG_RDT, 0);
3842 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3843 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3844 }
3845 }
3846 for (i = 0; i < WM_NRXDESC; i++) {
3847 rxs = &sc->sc_rxsoft[i];
3848 if (rxs->rxs_mbuf == NULL) {
3849 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3850 log(LOG_ERR, "%s: unable to allocate or map "
3851 "rx buffer %d, error = %d\n",
3852 device_xname(sc->sc_dev), i, error);
3853 /*
3854 * XXX Should attempt to run with fewer receive
3855 * XXX buffers instead of just failing.
3856 */
3857 wm_rxdrain(sc);
3858 goto out;
3859 }
3860 } else {
3861 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3862 WM_INIT_RXDESC(sc, i);
3863 /*
3864 * For 82575 and newer devices, the RX descriptors
3865 * must be initialized after RCTL.EN is set in
3866 * wm_set_filter().
3867 */
3868 }
3869 }
3870 sc->sc_rxptr = 0;
3871 sc->sc_rxdiscard = 0;
3872 WM_RXCHAIN_RESET(sc);
3873
3874 /*
3875 * Clear out the VLAN table -- we don't use it (yet).
3876 */
3877 CSR_WRITE(sc, WMREG_VET, 0);
3878 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3879 trynum = 10; /* Due to hw errata */
3880 else
3881 trynum = 1;
3882 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3883 for (j = 0; j < trynum; j++)
3884 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3885
3886 /*
3887 * Set up flow-control parameters.
3888 *
3889 * XXX Values could probably stand some tuning.
3890 */
3891 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3892 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
3893 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
3894 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3895 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3896 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3897 }
3898
3899 sc->sc_fcrtl = FCRTL_DFLT;
3900 if (sc->sc_type < WM_T_82543) {
3901 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3902 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3903 } else {
3904 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3905 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3906 }
3907
3908 if (sc->sc_type == WM_T_80003)
3909 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3910 else
3911 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3912
3913 /* Writes the control register. */
3914 wm_set_vlan(sc);
3915
3916 if (sc->sc_flags & WM_F_HAS_MII) {
3917 int val;
3918
3919 switch (sc->sc_type) {
3920 case WM_T_80003:
3921 case WM_T_ICH8:
3922 case WM_T_ICH9:
3923 case WM_T_ICH10:
3924 case WM_T_PCH:
3925 case WM_T_PCH2:
3926 case WM_T_PCH_LPT:
3927 /*
3928 * Set the mac to wait the maximum time between each
3929 * iteration and increase the max iterations when
3930 * polling the phy; this fixes erroneous timeouts at
3931 * 10Mbps.
3932 */
3933 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3934 0xFFFF);
3935 val = wm_kmrn_readreg(sc,
3936 KUMCTRLSTA_OFFSET_INB_PARAM);
3937 val |= 0x3F;
3938 wm_kmrn_writereg(sc,
3939 KUMCTRLSTA_OFFSET_INB_PARAM, val);
3940 break;
3941 default:
3942 break;
3943 }
3944
3945 if (sc->sc_type == WM_T_80003) {
3946 val = CSR_READ(sc, WMREG_CTRL_EXT);
3947 val &= ~CTRL_EXT_LINK_MODE_MASK;
3948 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3949
3950 /* Bypass RX and TX FIFO's */
3951 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3952 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
3953 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3954 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3955 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3956 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3957 }
3958 }
3959 #if 0
3960 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3961 #endif
3962
3963 /* Set up checksum offload parameters. */
3964 reg = CSR_READ(sc, WMREG_RXCSUM);
3965 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3966 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3967 reg |= RXCSUM_IPOFL;
3968 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3969 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3970 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3971 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3972 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3973
3974 /* Set up the interrupt registers. */
3975 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3976 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3977 ICR_RXO | ICR_RXT0;
3978 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3979
3980 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3981 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3982 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3983 reg = CSR_READ(sc, WMREG_KABGTXD);
3984 reg |= KABGTXD_BGSQLBIAS;
3985 CSR_WRITE(sc, WMREG_KABGTXD, reg);
3986 }
3987
3988 /* Set up the inter-packet gap. */
3989 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3990
3991 if (sc->sc_type >= WM_T_82543) {
3992 /*
3993 * Set up the interrupt throttling register (units of 256ns)
3994 * Note that a footnote in Intel's documentation says this
3995 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3996 * or 10Mbit mode. Empirically, this also appears to hold
3997 * for the 1024ns units of the other
3998 * interrupt-related timer registers -- so, really, we ought
3999 * to divide this value by 4 when the link speed is low.
4000 *
4001 * XXX implement this division at link speed change!
4002 */
4003
4004 /*
4005 * For N interrupts/sec, set this value to:
4006 * 1000000000 / (N * 256). Note that we set the
4007 * absolute and packet timer values to this value
4008 * divided by 4 to get "simple timer" behavior.
4009 */
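/*
 * Worked example (a sketch of the arithmetic): for sc_itr = 1500
 * below, N = 1000000000 / (1500 * 256) ~= 2604 interrupts/sec,
 * matching the comment on the assignment; the TIDV/TADV values of
 * 375 programmed earlier are exactly 1500 / 4.
 */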
4010
4011 sc->sc_itr = 1500; /* 2604 ints/sec */
4012 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4013 }
4014
4015 /* Set the VLAN ethernetype. */
4016 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4017
4018 /*
4019 * Set up the transmit control register; we start out with
4020 * a collision distance suitable for FDX, but update it when
4021 * we resolve the media type.
4022 */
4023 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4024 | TCTL_CT(TX_COLLISION_THRESHOLD)
4025 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4026 if (sc->sc_type >= WM_T_82571)
4027 sc->sc_tctl |= TCTL_MULR;
4028 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4029
4030 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4031 /* Write TDT after TCTL.EN is set. See the documentation. */
4032 CSR_WRITE(sc, WMREG_TDT, 0);
4033 }
4034
4035 if (sc->sc_type == WM_T_80003) {
4036 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4037 reg &= ~TCTL_EXT_GCEX_MASK;
4038 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4039 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4040 }
4041
4042 /* Set the media. */
4043 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4044 goto out;
4045
4046 /* Configure for OS presence */
4047 wm_init_manageability(sc);
4048
4049 /*
4050 * Set up the receive control register; we actually program
4051 * the register when we set the receive filter. Use multicast
4052 * address offset type 0.
4053 *
4054 * Only the i82544 has the ability to strip the incoming
4055 * CRC, so we don't enable that feature.
4056 */
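/*
 * Note (a sketch, assuming the usual 8254x behaviour): multicast
 * offset type 0 means the 12-bit hash that indexes the multicast
 * table is taken from bits [47:36] of the destination address.
 */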
4057 sc->sc_mchash_type = 0;
4058 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4059 | RCTL_MO(sc->sc_mchash_type);
4060
4061 /*
4062 * The I350 has a bug where it always strips the CRC whether
4063 * asked to or not. So ask for stripped CRC here and cope in rxeof
4064 */
4065 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4066 || (sc->sc_type == WM_T_I210))
4067 sc->sc_rctl |= RCTL_SECRC;
4068
4069 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4070 && (ifp->if_mtu > ETHERMTU)) {
4071 sc->sc_rctl |= RCTL_LPE;
4072 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4073 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4074 }
4075
4076 if (MCLBYTES == 2048) {
4077 sc->sc_rctl |= RCTL_2k;
4078 } else {
4079 if (sc->sc_type >= WM_T_82543) {
4080 switch (MCLBYTES) {
4081 case 4096:
4082 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4083 break;
4084 case 8192:
4085 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4086 break;
4087 case 16384:
4088 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4089 break;
4090 default:
4091 panic("wm_init: MCLBYTES %d unsupported",
4092 MCLBYTES);
4093 break;
4094 }
4095 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4096 }
4097
4098 /* Set the receive filter. */
4099 wm_set_filter(sc);
4100
4101 /* Enable ECC */
4102 switch (sc->sc_type) {
4103 case WM_T_82571:
4104 reg = CSR_READ(sc, WMREG_PBA_ECC);
4105 reg |= PBA_ECC_CORR_EN;
4106 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4107 break;
4108 case WM_T_PCH_LPT:
4109 reg = CSR_READ(sc, WMREG_PBECCSTS);
4110 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4111 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4112
4113 reg = CSR_READ(sc, WMREG_CTRL);
4114 reg |= CTRL_MEHE;
4115 CSR_WRITE(sc, WMREG_CTRL, reg);
4116 break;
4117 default:
4118 break;
4119 }
4120
4121 /* On 575 and later set RDT only if RX enabled */
4122 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4123 for (i = 0; i < WM_NRXDESC; i++)
4124 WM_INIT_RXDESC(sc, i);
4125
4126 sc->sc_stopping = false;
4127
4128 /* Start the one second link check clock. */
4129 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4130
4131 /* ...all done! */
4132 ifp->if_flags |= IFF_RUNNING;
4133 ifp->if_flags &= ~IFF_OACTIVE;
4134
4135 out:
4136 sc->sc_if_flags = ifp->if_flags;
4137 if (error)
4138 log(LOG_ERR, "%s: interface not running\n",
4139 device_xname(sc->sc_dev));
4140 return error;
4141 }
4142
4143 /*
4144 * wm_stop: [ifnet interface function]
4145 *
4146 * Stop transmission on the interface.
4147 */
4148 static void
4149 wm_stop(struct ifnet *ifp, int disable)
4150 {
4151 struct wm_softc *sc = ifp->if_softc;
4152
4153 WM_BOTH_LOCK(sc);
4154 wm_stop_locked(ifp, disable);
4155 WM_BOTH_UNLOCK(sc);
4156 }
4157
4158 static void
4159 wm_stop_locked(struct ifnet *ifp, int disable)
4160 {
4161 struct wm_softc *sc = ifp->if_softc;
4162 struct wm_txsoft *txs;
4163 int i;
4164
4165 KASSERT(WM_BOTH_LOCKED(sc));
4166
4167 sc->sc_stopping = true;
4168
4169 /* Stop the one second clock. */
4170 callout_stop(&sc->sc_tick_ch);
4171
4172 /* Stop the 82547 Tx FIFO stall check timer. */
4173 if (sc->sc_type == WM_T_82547)
4174 callout_stop(&sc->sc_txfifo_ch);
4175
4176 if (sc->sc_flags & WM_F_HAS_MII) {
4177 /* Down the MII. */
4178 mii_down(&sc->sc_mii);
4179 } else {
4180 #if 0
4181 /* Should we clear PHY's status properly? */
4182 wm_reset(sc);
4183 #endif
4184 }
4185
4186 /* Stop the transmit and receive processes. */
4187 CSR_WRITE(sc, WMREG_TCTL, 0);
4188 CSR_WRITE(sc, WMREG_RCTL, 0);
4189 sc->sc_rctl &= ~RCTL_EN;
4190
4191 /*
4192 * Clear the interrupt mask to ensure the device cannot assert its
4193 * interrupt line.
4194 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4195 * any currently pending or shared interrupt.
4196 */
4197 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4198 sc->sc_icr = 0;
4199
4200 /* Release any queued transmit buffers. */
4201 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4202 txs = &sc->sc_txsoft[i];
4203 if (txs->txs_mbuf != NULL) {
4204 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4205 m_freem(txs->txs_mbuf);
4206 txs->txs_mbuf = NULL;
4207 }
4208 }
4209
4210 /* Mark the interface as down and cancel the watchdog timer. */
4211 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4212 ifp->if_timer = 0;
4213
4214 if (disable)
4215 wm_rxdrain(sc);
4216
4217 #if 0 /* notyet */
4218 if (sc->sc_type >= WM_T_82544)
4219 CSR_WRITE(sc, WMREG_WUC, 0);
4220 #endif
4221 }
4222
4223 /*
4224 * wm_tx_offload:
4225 *
4226 * Set up TCP/IP checksumming parameters for the
4227 * specified packet.
4228 */
4229 static int
4230 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4231 uint8_t *fieldsp)
4232 {
4233 struct mbuf *m0 = txs->txs_mbuf;
4234 struct livengood_tcpip_ctxdesc *t;
4235 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4236 uint32_t ipcse;
4237 struct ether_header *eh;
4238 int offset, iphl;
4239 uint8_t fields;
4240
4241 /*
4242 * XXX It would be nice if the mbuf pkthdr had offset
4243 * fields for the protocol headers.
4244 */
4245
4246 eh = mtod(m0, struct ether_header *);
4247 switch (htons(eh->ether_type)) {
4248 case ETHERTYPE_IP:
4249 case ETHERTYPE_IPV6:
4250 offset = ETHER_HDR_LEN;
4251 break;
4252
4253 case ETHERTYPE_VLAN:
4254 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4255 break;
4256
4257 default:
4258 /*
4259 * Don't support this protocol or encapsulation.
4260 */
4261 *fieldsp = 0;
4262 *cmdp = 0;
4263 return 0;
4264 }
4265
4266 if ((m0->m_pkthdr.csum_flags &
4267 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4268 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4269 } else {
4270 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4271 }
4272 ipcse = offset + iphl - 1;
4273
4274 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4275 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4276 seg = 0;
4277 fields = 0;
4278
4279 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4280 int hlen = offset + iphl;
4281 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4282
4283 if (__predict_false(m0->m_len <
4284 (hlen + sizeof(struct tcphdr)))) {
4285 /*
4286 * TCP/IP headers are not in the first mbuf; we need
4287 * to do this the slow and painful way. Let's just
4288 * hope this doesn't happen very often.
4289 */
4290 struct tcphdr th;
4291
4292 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4293
4294 m_copydata(m0, hlen, sizeof(th), &th);
4295 if (v4) {
4296 struct ip ip;
4297
4298 m_copydata(m0, offset, sizeof(ip), &ip);
4299 ip.ip_len = 0;
4300 m_copyback(m0,
4301 offset + offsetof(struct ip, ip_len),
4302 sizeof(ip.ip_len), &ip.ip_len);
4303 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4304 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4305 } else {
4306 struct ip6_hdr ip6;
4307
4308 m_copydata(m0, offset, sizeof(ip6), &ip6);
4309 ip6.ip6_plen = 0;
4310 m_copyback(m0,
4311 offset + offsetof(struct ip6_hdr, ip6_plen),
4312 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4313 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4314 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4315 }
4316 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4317 sizeof(th.th_sum), &th.th_sum);
4318
4319 hlen += th.th_off << 2;
4320 } else {
4321 /*
4322 * TCP/IP headers are in the first mbuf; we can do
4323 * this the easy way.
4324 */
4325 struct tcphdr *th;
4326
4327 if (v4) {
4328 struct ip *ip =
4329 (void *)(mtod(m0, char *) + offset);
4330 th = (void *)(mtod(m0, char *) + hlen);
4331
4332 ip->ip_len = 0;
4333 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4334 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4335 } else {
4336 struct ip6_hdr *ip6 =
4337 (void *)(mtod(m0, char *) + offset);
4338 th = (void *)(mtod(m0, char *) + hlen);
4339
4340 ip6->ip6_plen = 0;
4341 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4342 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4343 }
4344 hlen += th->th_off << 2;
4345 }
4346
4347 if (v4) {
4348 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4349 cmdlen |= WTX_TCPIP_CMD_IP;
4350 } else {
4351 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4352 ipcse = 0;
4353 }
4354 cmd |= WTX_TCPIP_CMD_TSE;
4355 cmdlen |= WTX_TCPIP_CMD_TSE |
4356 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4357 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4358 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4359 }
4360
4361 /*
4362 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4363 * offload feature, if we load the context descriptor, we
4364 * MUST provide valid values for IPCSS and TUCSS fields.
4365 */
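/*
 * Example (a sketch): for a plain IPv4/TCP frame, offset = 14 and
 * iphl = 20, so IPCSS = 14, IPCSO = 14 + 10 (offsetof ip_sum) = 24,
 * and IPCSE = 33, the last byte of the IP header.
 */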
4366
4367 ipcs = WTX_TCPIP_IPCSS(offset) |
4368 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4369 WTX_TCPIP_IPCSE(ipcse);
4370 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
4371 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
4372 fields |= WTX_IXSM;
4373 }
4374
4375 offset += iphl;
4376
4377 if (m0->m_pkthdr.csum_flags &
4378 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
4379 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4380 fields |= WTX_TXSM;
4381 tucs = WTX_TCPIP_TUCSS(offset) |
4382 WTX_TCPIP_TUCSO(offset +
4383 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
4384 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4385 } else if ((m0->m_pkthdr.csum_flags &
4386 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
4387 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4388 fields |= WTX_TXSM;
4389 tucs = WTX_TCPIP_TUCSS(offset) |
4390 WTX_TCPIP_TUCSO(offset +
4391 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
4392 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4393 } else {
4394 /* Just initialize it to a valid TCP context. */
4395 tucs = WTX_TCPIP_TUCSS(offset) |
4396 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
4397 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4398 }
4399
4400 /* Fill in the context descriptor. */
4401 t = (struct livengood_tcpip_ctxdesc *)
4402 &sc->sc_txdescs[sc->sc_txnext];
4403 t->tcpip_ipcs = htole32(ipcs);
4404 t->tcpip_tucs = htole32(tucs);
4405 t->tcpip_cmdlen = htole32(cmdlen);
4406 t->tcpip_seg = htole32(seg);
4407 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4408
4409 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4410 txs->txs_ndesc++;
4411
4412 *cmdp = cmd;
4413 *fieldsp = fields;
4414
4415 return 0;
4416 }
4417
4418 static void
4419 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
4420 {
4421 struct mbuf *m;
4422 int i;
4423
4424 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
4425 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
4426 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
4427 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
4428 m->m_data, m->m_len, m->m_flags);
4429 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
4430 i, i == 1 ? "" : "s");
4431 }
4432
4433 /*
4434 * wm_82547_txfifo_stall:
4435 *
4436 * Callout used to wait for the 82547 Tx FIFO to drain,
4437 * reset the FIFO pointers, and restart packet transmission.
4438 */
4439 static void
4440 wm_82547_txfifo_stall(void *arg)
4441 {
4442 struct wm_softc *sc = arg;
4443 #ifndef WM_MPSAFE
4444 int s;
4445
4446 s = splnet();
4447 #endif
4448 WM_TX_LOCK(sc);
4449
4450 if (sc->sc_stopping)
4451 goto out;
4452
4453 if (sc->sc_txfifo_stall) {
4454 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
4455 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
4456 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
4457 /*
4458 * Packets have drained. Stop transmitter, reset
4459 * FIFO pointers, restart transmitter, and kick
4460 * the packet queue.
4461 */
4462 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
4463 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
4464 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
4465 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
4466 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
4467 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
4468 CSR_WRITE(sc, WMREG_TCTL, tctl);
4469 CSR_WRITE_FLUSH(sc);
4470
4471 sc->sc_txfifo_head = 0;
4472 sc->sc_txfifo_stall = 0;
4473 wm_start_locked(&sc->sc_ethercom.ec_if);
4474 } else {
4475 /*
4476 * Still waiting for packets to drain; try again in
4477 * another tick.
4478 */
4479 callout_schedule(&sc->sc_txfifo_ch, 1);
4480 }
4481 }
4482
4483 out:
4484 WM_TX_UNLOCK(sc);
4485 #ifndef WM_MPSAFE
4486 splx(s);
4487 #endif
4488 }
4489
4490 /*
4491 * wm_82547_txfifo_bugchk:
4492 *
4493 * Check for bug condition in the 82547 Tx FIFO. We need to
4494 * prevent enqueueing a packet that would wrap around the end
4495 * of the Tx FIFO ring buffer, otherwise the chip will croak.
4496 *
4497 * We do this by checking the amount of space before the end
4498 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
4499 * the Tx FIFO, wait for all remaining packets to drain, reset
4500 * the internal FIFO pointers to the beginning, and restart
4501 * transmission on the interface.
4502 */
4503 #define WM_FIFO_HDR 0x10
4504 #define WM_82547_PAD_LEN 0x3e0
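/*
 * Illustrative check (a sketch, not in the original source): a
 * maximum-size 1514-byte frame rounds up to
 * len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536, and stalls
 * the FIFO once 1536 >= WM_82547_PAD_LEN (992) + space, i.e. when
 * fewer than 545 bytes remain before the FIFO wrap point.
 */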
4505 static int
4506 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
4507 {
4508 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
4509 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
4510
4511 /* Just return if already stalled. */
4512 if (sc->sc_txfifo_stall)
4513 return 1;
4514
4515 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4516 /* Stall only occurs in half-duplex mode. */
4517 goto send_packet;
4518 }
4519
4520 if (len >= WM_82547_PAD_LEN + space) {
4521 sc->sc_txfifo_stall = 1;
4522 callout_schedule(&sc->sc_txfifo_ch, 1);
4523 return 1;
4524 }
4525
4526 send_packet:
4527 sc->sc_txfifo_head += len;
4528 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
4529 sc->sc_txfifo_head -= sc->sc_txfifo_size;
4530
4531 return 0;
4532 }
4533
4534 /*
4535 * wm_start: [ifnet interface function]
4536 *
4537 * Start packet transmission on the interface.
4538 */
4539 static void
4540 wm_start(struct ifnet *ifp)
4541 {
4542 struct wm_softc *sc = ifp->if_softc;
4543
4544 WM_TX_LOCK(sc);
4545 if (!sc->sc_stopping)
4546 wm_start_locked(ifp);
4547 WM_TX_UNLOCK(sc);
4548 }
4549
4550 static void
4551 wm_start_locked(struct ifnet *ifp)
4552 {
4553 struct wm_softc *sc = ifp->if_softc;
4554 struct mbuf *m0;
4555 struct m_tag *mtag;
4556 struct wm_txsoft *txs;
4557 bus_dmamap_t dmamap;
4558 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
4559 bus_addr_t curaddr;
4560 bus_size_t seglen, curlen;
4561 uint32_t cksumcmd;
4562 uint8_t cksumfields;
4563
4564 KASSERT(WM_TX_LOCKED(sc));
4565
4566 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4567 return;
4568
4569 /* Remember the previous number of free descriptors. */
4570 ofree = sc->sc_txfree;
4571
4572 /*
4573 * Loop through the send queue, setting up transmit descriptors
4574 * until we drain the queue, or use up all available transmit
4575 * descriptors.
4576 */
4577 for (;;) {
4578 m0 = NULL;
4579
4580 /* Get a work queue entry. */
4581 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
4582 wm_txintr(sc);
4583 if (sc->sc_txsfree == 0) {
4584 DPRINTF(WM_DEBUG_TX,
4585 ("%s: TX: no free job descriptors\n",
4586 device_xname(sc->sc_dev)));
4587 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
4588 break;
4589 }
4590 }
4591
4592 /* Grab a packet off the queue. */
4593 IFQ_DEQUEUE(&ifp->if_snd, m0);
4594 if (m0 == NULL)
4595 break;
4596
4597 DPRINTF(WM_DEBUG_TX,
4598 ("%s: TX: have packet to transmit: %p\n",
4599 device_xname(sc->sc_dev), m0));
4600
4601 txs = &sc->sc_txsoft[sc->sc_txsnext];
4602 dmamap = txs->txs_dmamap;
4603
4604 use_tso = (m0->m_pkthdr.csum_flags &
4605 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
4606
4607 /*
4608 * So says the Linux driver:
4609 * The controller does a simple calculation to make sure
4610 * there is enough room in the FIFO before initiating the
4611 * DMA for each buffer. The calc is:
4612 * 4 = ceil(buffer len / MSS)
4613 * To make sure we don't overrun the FIFO, adjust the max
4614 * buffer len if the MSS drops.
4615 */
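/*
 * Example (a sketch): with a typical TCP MSS of 1448,
 * segsz << 2 = 5792, so each DMA segment of this TSO packet is
 * capped at 5792 bytes (assuming that is below WTX_MAX_LEN).
 */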
4616 dmamap->dm_maxsegsz =
4617 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
4618 ? m0->m_pkthdr.segsz << 2
4619 : WTX_MAX_LEN;
4620
4621 /*
4622 * Load the DMA map. If this fails, the packet either
4623 * didn't fit in the allotted number of segments, or we
4624 * were short on resources. For the too-many-segments
4625 * case, we simply report an error and drop the packet,
4626 * since we can't sanely copy a jumbo packet to a single
4627 * buffer.
4628 */
4629 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
4630 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
4631 if (error) {
4632 if (error == EFBIG) {
4633 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4634 log(LOG_ERR, "%s: Tx packet consumes too many "
4635 "DMA segments, dropping...\n",
4636 device_xname(sc->sc_dev));
4637 wm_dump_mbuf_chain(sc, m0);
4638 m_freem(m0);
4639 continue;
4640 }
4641 /* Short on resources, just stop for now. */
4642 DPRINTF(WM_DEBUG_TX,
4643 ("%s: TX: dmamap load failed: %d\n",
4644 device_xname(sc->sc_dev), error));
4645 break;
4646 }
4647
4648 segs_needed = dmamap->dm_nsegs;
4649 if (use_tso) {
4650 /* For sentinel descriptor; see below. */
4651 segs_needed++;
4652 }
4653
4654 /*
4655 * Ensure we have enough descriptors free to describe
4656 * the packet. Note, we always reserve one descriptor
4657 * at the end of the ring due to the semantics of the
4658 * TDT register, plus one more in the event we need
4659 * to load offload context.
4660 */
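/*
 * Example (a sketch): a TSO packet mapped into 8 DMA segments has
 * segs_needed = 9 (8 + the sentinel), so at least 11 descriptors
 * must be free: 9 for the packet, 1 reserved for TDT semantics,
 * and 1 for a possible context descriptor.
 */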
4661 if (segs_needed > sc->sc_txfree - 2) {
4662 /*
4663 * Not enough free descriptors to transmit this
4664 * packet. We haven't committed anything yet,
4665 * so just unload the DMA map, put the packet
4666 * back on the queue, and punt. Notify the upper
4667 * layer that there are no more slots left.
4668 */
4669 DPRINTF(WM_DEBUG_TX,
4670 ("%s: TX: need %d (%d) descriptors, have %d\n",
4671 device_xname(sc->sc_dev), dmamap->dm_nsegs,
4672 segs_needed, sc->sc_txfree - 1));
4673 ifp->if_flags |= IFF_OACTIVE;
4674 bus_dmamap_unload(sc->sc_dmat, dmamap);
4675 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
4676 break;
4677 }
4678
4679 /*
4680 * Check for 82547 Tx FIFO bug. We need to do this
4681 * once we know we can transmit the packet, since we
4682 * do some internal FIFO space accounting here.
4683 */
4684 if (sc->sc_type == WM_T_82547 &&
4685 wm_82547_txfifo_bugchk(sc, m0)) {
4686 DPRINTF(WM_DEBUG_TX,
4687 ("%s: TX: 82547 Tx FIFO bug detected\n",
4688 device_xname(sc->sc_dev)));
4689 ifp->if_flags |= IFF_OACTIVE;
4690 bus_dmamap_unload(sc->sc_dmat, dmamap);
4691 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
4692 break;
4693 }
4694
4695 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
4696
4697 DPRINTF(WM_DEBUG_TX,
4698 ("%s: TX: packet has %d (%d) DMA segments\n",
4699 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
4700
4701 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
4702
4703 /*
4704 * Store a pointer to the packet so that we can free it
4705 * later.
4706 *
4707 * Initially, we consider the number of descriptors the
4708 * packet uses to be the number of DMA segments. This may be
4709 * incremented by 1 if we do checksum offload (a descriptor
4710 * is used to set the checksum context).
4711 */
4712 txs->txs_mbuf = m0;
4713 txs->txs_firstdesc = sc->sc_txnext;
4714 txs->txs_ndesc = segs_needed;
4715
4716 /* Set up offload parameters for this packet. */
4717 if (m0->m_pkthdr.csum_flags &
4718 (M_CSUM_TSOv4|M_CSUM_TSOv6|
4719 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
4720 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
4721 if (wm_tx_offload(sc, txs, &cksumcmd,
4722 &cksumfields) != 0) {
4723 /* Error message already displayed. */
4724 bus_dmamap_unload(sc->sc_dmat, dmamap);
4725 continue;
4726 }
4727 } else {
4728 cksumcmd = 0;
4729 cksumfields = 0;
4730 }
4731
4732 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
4733
4734 /* Sync the DMA map. */
4735 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
4736 BUS_DMASYNC_PREWRITE);
4737
4738 /* Initialize the transmit descriptor. */
4739 for (nexttx = sc->sc_txnext, seg = 0;
4740 seg < dmamap->dm_nsegs; seg++) {
4741 for (seglen = dmamap->dm_segs[seg].ds_len,
4742 curaddr = dmamap->dm_segs[seg].ds_addr;
4743 seglen != 0;
4744 curaddr += curlen, seglen -= curlen,
4745 nexttx = WM_NEXTTX(sc, nexttx)) {
4746 curlen = seglen;
4747
4748 /*
4749 * So says the Linux driver:
4750 * Work around for premature descriptor
4751 * write-backs in TSO mode. Append a
4752 * 4-byte sentinel descriptor.
4753 */
4754 if (use_tso &&
4755 seg == dmamap->dm_nsegs - 1 &&
4756 curlen > 8)
4757 curlen -= 4;
4758
4759 wm_set_dma_addr(
4760 &sc->sc_txdescs[nexttx].wtx_addr,
4761 curaddr);
4762 sc->sc_txdescs[nexttx].wtx_cmdlen =
4763 htole32(cksumcmd | curlen);
4764 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
4765 0;
4766 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
4767 cksumfields;
4768 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
4769 lasttx = nexttx;
4770
4771 DPRINTF(WM_DEBUG_TX,
4772 ("%s: TX: desc %d: low %#" PRIx64 ", "
4773 "len %#04zx\n",
4774 device_xname(sc->sc_dev), nexttx,
4775 (uint64_t)curaddr, curlen));
4776 }
4777 }
4778
4779 KASSERT(lasttx != -1);
4780
4781 /*
4782 * Set up the command byte on the last descriptor of
4783 * the packet. If we're in the interrupt delay window,
4784 * delay the interrupt.
4785 */
4786 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4787 htole32(WTX_CMD_EOP | WTX_CMD_RS);
4788
4789 /*
4790 * If VLANs are enabled and the packet has a VLAN tag, set
4791 * up the descriptor to encapsulate the packet for us.
4792 *
4793 * This is only valid on the last descriptor of the packet.
4794 */
4795 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4796 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4797 htole32(WTX_CMD_VLE);
4798 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
4799 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
4800 }
4801
4802 txs->txs_lastdesc = lasttx;
4803
4804 DPRINTF(WM_DEBUG_TX,
4805 ("%s: TX: desc %d: cmdlen 0x%08x\n",
4806 device_xname(sc->sc_dev),
4807 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
4808
4809 /* Sync the descriptors we're using. */
4810 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
4811 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4812
4813 /* Give the packet to the chip. */
4814 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
4815
4816 DPRINTF(WM_DEBUG_TX,
4817 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
4818
4819 DPRINTF(WM_DEBUG_TX,
4820 ("%s: TX: finished transmitting packet, job %d\n",
4821 device_xname(sc->sc_dev), sc->sc_txsnext));
4822
4823 /* Advance the tx pointer. */
4824 sc->sc_txfree -= txs->txs_ndesc;
4825 sc->sc_txnext = nexttx;
4826
4827 sc->sc_txsfree--;
4828 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
4829
4830 /* Pass the packet to any BPF listeners. */
4831 bpf_mtap(ifp, m0);
4832 }
4833
4834 if (m0 != NULL) {
4835 ifp->if_flags |= IFF_OACTIVE;
4836 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4837 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
4838 m_freem(m0);
4839 }
4840
4841 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
4842 /* No more slots; notify upper layer. */
4843 ifp->if_flags |= IFF_OACTIVE;
4844 }
4845
4846 if (sc->sc_txfree != ofree) {
4847 /* Set a watchdog timer in case the chip flakes out. */
4848 ifp->if_timer = 5;
4849 }
4850 }
4851
4852 /*
4853 * wm_nq_tx_offload:
4854 *
4855 * Set up TCP/IP checksumming parameters for the
4856 * specified packet, for NEWQUEUE devices
4857 */
4858 static int
4859 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
4860 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
4861 {
4862 struct mbuf *m0 = txs->txs_mbuf;
4863 struct m_tag *mtag;
4864 uint32_t vl_len, mssidx, cmdc;
4865 struct ether_header *eh;
4866 int offset, iphl;
4867
4868 /*
4869 * XXX It would be nice if the mbuf pkthdr had offset
4870 * fields for the protocol headers.
4871 */
4872 *cmdlenp = 0;
4873 *fieldsp = 0;
4874
4875 eh = mtod(m0, struct ether_header *);
4876 switch (htons(eh->ether_type)) {
4877 case ETHERTYPE_IP:
4878 case ETHERTYPE_IPV6:
4879 offset = ETHER_HDR_LEN;
4880 break;
4881
4882 case ETHERTYPE_VLAN:
4883 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4884 break;
4885
4886 default:
4887 /* Don't support this protocol or encapsulation. */
4888 *do_csum = false;
4889 return 0;
4890 }
4891 *do_csum = true;
4892 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
4893 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
4894
4895 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
4896 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
4897
4898 if ((m0->m_pkthdr.csum_flags &
4899 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
4900 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4901 } else {
4902 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4903 }
4904 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
4905 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
4906
4907 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4908 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
4909 << NQTXC_VLLEN_VLAN_SHIFT);
4910 *cmdlenp |= NQTX_CMD_VLE;
4911 }
4912
4913 mssidx = 0;
4914
4915 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4916 int hlen = offset + iphl;
4917 int tcp_hlen;
4918 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4919
4920 if (__predict_false(m0->m_len <
4921 (hlen + sizeof(struct tcphdr)))) {
4922 /*
4923 * TCP/IP headers are not in the first mbuf; we need
4924 * to do this the slow and painful way. Let's just
4925 * hope this doesn't happen very often.
4926 */
4927 struct tcphdr th;
4928
4929 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4930
4931 m_copydata(m0, hlen, sizeof(th), &th);
4932 if (v4) {
4933 struct ip ip;
4934
4935 m_copydata(m0, offset, sizeof(ip), &ip);
4936 ip.ip_len = 0;
4937 m_copyback(m0,
4938 offset + offsetof(struct ip, ip_len),
4939 sizeof(ip.ip_len), &ip.ip_len);
4940 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4941 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4942 } else {
4943 struct ip6_hdr ip6;
4944
4945 m_copydata(m0, offset, sizeof(ip6), &ip6);
4946 ip6.ip6_plen = 0;
4947 m_copyback(m0,
4948 offset + offsetof(struct ip6_hdr, ip6_plen),
4949 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4950 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4951 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4952 }
4953 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4954 sizeof(th.th_sum), &th.th_sum);
4955
4956 tcp_hlen = th.th_off << 2;
4957 } else {
4958 /*
4959 * TCP/IP headers are in the first mbuf; we can do
4960 * this the easy way.
4961 */
4962 struct tcphdr *th;
4963
4964 if (v4) {
4965 struct ip *ip =
4966 (void *)(mtod(m0, char *) + offset);
4967 th = (void *)(mtod(m0, char *) + hlen);
4968
4969 ip->ip_len = 0;
4970 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4971 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4972 } else {
4973 struct ip6_hdr *ip6 =
4974 (void *)(mtod(m0, char *) + offset);
4975 th = (void *)(mtod(m0, char *) + hlen);
4976
4977 ip6->ip6_plen = 0;
4978 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4979 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4980 }
4981 tcp_hlen = th->th_off << 2;
4982 }
4983 hlen += tcp_hlen;
4984 *cmdlenp |= NQTX_CMD_TSE;
4985
4986 if (v4) {
4987 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4988 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
4989 } else {
4990 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4991 *fieldsp |= NQTXD_FIELDS_TUXSM;
4992 }
4993 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
4994 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
4995 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
4996 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
4997 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
4998 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
4999 } else {
5000 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
5001 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5002 }
5003
5004 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
5005 *fieldsp |= NQTXD_FIELDS_IXSM;
5006 cmdc |= NQTXC_CMD_IP4;
5007 }
5008
5009 if (m0->m_pkthdr.csum_flags &
5010 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5011 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5012 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5013 cmdc |= NQTXC_CMD_TCP;
5014 } else {
5015 cmdc |= NQTXC_CMD_UDP;
5016 }
5017 cmdc |= NQTXC_CMD_IP4;
5018 *fieldsp |= NQTXD_FIELDS_TUXSM;
5019 }
5020 if (m0->m_pkthdr.csum_flags &
5021 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5022 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5023 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5024 cmdc |= NQTXC_CMD_TCP;
5025 } else {
5026 cmdc |= NQTXC_CMD_UDP;
5027 }
5028 cmdc |= NQTXC_CMD_IP6;
5029 *fieldsp |= NQTXD_FIELDS_TUXSM;
5030 }
5031
5032 /* Fill in the context descriptor. */
5033 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5034 htole32(vl_len);
5035 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5036 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5037 htole32(cmdc);
5038 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5039 htole32(mssidx);
5040 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5041 DPRINTF(WM_DEBUG_TX,
5042 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5043 sc->sc_txnext, 0, vl_len));
5044 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5045 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5046 txs->txs_ndesc++;
5047 return 0;
5048 }
5049
5050 /*
5051 * wm_nq_start: [ifnet interface function]
5052 *
5053 * Start packet transmission on the interface for NEWQUEUE devices
5054 */
5055 static void
5056 wm_nq_start(struct ifnet *ifp)
5057 {
5058 struct wm_softc *sc = ifp->if_softc;
5059
5060 WM_TX_LOCK(sc);
5061 if (!sc->sc_stopping)
5062 wm_nq_start_locked(ifp);
5063 WM_TX_UNLOCK(sc);
5064 }
5065
5066 static void
5067 wm_nq_start_locked(struct ifnet *ifp)
5068 {
5069 struct wm_softc *sc = ifp->if_softc;
5070 struct mbuf *m0;
5071 struct m_tag *mtag;
5072 struct wm_txsoft *txs;
5073 bus_dmamap_t dmamap;
5074 int error, nexttx, lasttx = -1, seg, segs_needed;
5075 bool do_csum, sent;
5076
5077 KASSERT(WM_TX_LOCKED(sc));
5078
5079 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5080 return;
5081
5082 sent = false;
5083
5084 /*
5085 * Loop through the send queue, setting up transmit descriptors
5086 * until we drain the queue, or use up all available transmit
5087 * descriptors.
5088 */
5089 for (;;) {
5090 m0 = NULL;
5091
5092 /* Get a work queue entry. */
5093 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5094 wm_txintr(sc);
5095 if (sc->sc_txsfree == 0) {
5096 DPRINTF(WM_DEBUG_TX,
5097 ("%s: TX: no free job descriptors\n",
5098 device_xname(sc->sc_dev)));
5099 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5100 break;
5101 }
5102 }
5103
5104 /* Grab a packet off the queue. */
5105 IFQ_DEQUEUE(&ifp->if_snd, m0);
5106 if (m0 == NULL)
5107 break;
5108
5109 DPRINTF(WM_DEBUG_TX,
5110 ("%s: TX: have packet to transmit: %p\n",
5111 device_xname(sc->sc_dev), m0));
5112
5113 txs = &sc->sc_txsoft[sc->sc_txsnext];
5114 dmamap = txs->txs_dmamap;
5115
5116 /*
5117 * Load the DMA map. If this fails, the packet either
5118 * didn't fit in the allotted number of segments, or we
5119 * were short on resources. For the too-many-segments
5120 * case, we simply report an error and drop the packet,
5121 * since we can't sanely copy a jumbo packet to a single
5122 * buffer.
5123 */
5124 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5125 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5126 if (error) {
5127 if (error == EFBIG) {
5128 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5129 log(LOG_ERR, "%s: Tx packet consumes too many "
5130 "DMA segments, dropping...\n",
5131 device_xname(sc->sc_dev));
5132 wm_dump_mbuf_chain(sc, m0);
5133 m_freem(m0);
5134 continue;
5135 }
5136 /* Short on resources, just stop for now. */
5137 DPRINTF(WM_DEBUG_TX,
5138 ("%s: TX: dmamap load failed: %d\n",
5139 device_xname(sc->sc_dev), error));
5140 break;
5141 }
5142
5143 segs_needed = dmamap->dm_nsegs;
5144
5145 /*
5146 * Ensure we have enough descriptors free to describe
5147 * the packet. Note, we always reserve one descriptor
5148 * at the end of the ring due to the semantics of the
5149 * TDT register, plus one more in the event we need
5150 * to load offload context.
5151 */
5152 if (segs_needed > sc->sc_txfree - 2) {
5153 /*
5154 * Not enough free descriptors to transmit this
5155 * packet. We haven't committed anything yet,
5156 * so just unload the DMA map, put the packet
5157 * back on the queue, and punt. Notify the upper
5158 * layer that there are no more slots left.
5159 */
5160 DPRINTF(WM_DEBUG_TX,
5161 ("%s: TX: need %d (%d) descriptors, have %d\n",
5162 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5163 segs_needed, sc->sc_txfree - 1));
5164 ifp->if_flags |= IFF_OACTIVE;
5165 bus_dmamap_unload(sc->sc_dmat, dmamap);
5166 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5167 break;
5168 }
5169
5170 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5171
5172 DPRINTF(WM_DEBUG_TX,
5173 ("%s: TX: packet has %d (%d) DMA segments\n",
5174 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5175
5176 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5177
5178 /*
5179 * Store a pointer to the packet so that we can free it
5180 * later.
5181 *
5182 * Initially, we consider the number of descriptors the
5183 * packet uses to be the number of DMA segments. This may be
5184 * incremented by 1 if we do checksum offload (a descriptor
5185 * is used to set the checksum context).
5186 */
5187 txs->txs_mbuf = m0;
5188 txs->txs_firstdesc = sc->sc_txnext;
5189 txs->txs_ndesc = segs_needed;
5190
5191 /* Set up offload parameters for this packet. */
5192 uint32_t cmdlen, fields, dcmdlen;
5193 if (m0->m_pkthdr.csum_flags &
5194 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5195 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5196 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5197 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5198 &do_csum) != 0) {
5199 /* Error message already displayed. */
5200 bus_dmamap_unload(sc->sc_dmat, dmamap);
5201 continue;
5202 }
5203 } else {
5204 do_csum = false;
5205 cmdlen = 0;
5206 fields = 0;
5207 }
5208
5209 /* Sync the DMA map. */
5210 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5211 BUS_DMASYNC_PREWRITE);
5212
5213 /* Initialize the first transmit descriptor. */
5214 nexttx = sc->sc_txnext;
5215 if (!do_csum) {
5216 /* setup a legacy descriptor */
5217 wm_set_dma_addr(
5218 &sc->sc_txdescs[nexttx].wtx_addr,
5219 dmamap->dm_segs[0].ds_addr);
5220 sc->sc_txdescs[nexttx].wtx_cmdlen =
5221 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5222 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5223 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5224 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5225 NULL) {
5226 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5227 htole32(WTX_CMD_VLE);
5228 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5229 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5230 } else {
5231 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5232 }
5233 dcmdlen = 0;
5234 } else {
5235 /* setup an advanced data descriptor */
5236 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5237 htole64(dmamap->dm_segs[0].ds_addr);
5238 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5239 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5240 htole32(dmamap->dm_segs[0].ds_len | cmdlen );
5241 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5242 htole32(fields);
5243 DPRINTF(WM_DEBUG_TX,
5244 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5245 device_xname(sc->sc_dev), nexttx,
5246 (uint64_t)dmamap->dm_segs[0].ds_addr));
5247 DPRINTF(WM_DEBUG_TX,
5248 ("\t 0x%08x%08x\n", fields,
5249 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5250 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5251 }
5252
5253 lasttx = nexttx;
5254 nexttx = WM_NEXTTX(sc, nexttx);
5255 /*
5256 * Fill in the next descriptors. The legacy and advanced
5257 * formats are the same from here on.
5258 */
5259 for (seg = 1; seg < dmamap->dm_nsegs;
5260 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5261 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5262 htole64(dmamap->dm_segs[seg].ds_addr);
5263 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5264 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5265 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5266 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5267 lasttx = nexttx;
5268
5269 DPRINTF(WM_DEBUG_TX,
5270 ("%s: TX: desc %d: %#" PRIx64 ", "
5271 "len %#04zx\n",
5272 device_xname(sc->sc_dev), nexttx,
5273 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5274 dmamap->dm_segs[seg].ds_len));
5275 }
5276
5277 KASSERT(lasttx != -1);
5278
5279 /*
5280 * Set up the command byte on the last descriptor of
5281 * the packet. If we're in the interrupt delay window,
5282 * delay the interrupt.
5283 */
5284 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5285 (NQTX_CMD_EOP | NQTX_CMD_RS));
5286 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5287 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5288
5289 txs->txs_lastdesc = lasttx;
5290
5291 DPRINTF(WM_DEBUG_TX,
5292 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5293 device_xname(sc->sc_dev),
5294 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5295
5296 /* Sync the descriptors we're using. */
5297 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5298 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5299
5300 /* Give the packet to the chip. */
5301 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5302 sent = true;
5303
5304 DPRINTF(WM_DEBUG_TX,
5305 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5306
5307 DPRINTF(WM_DEBUG_TX,
5308 ("%s: TX: finished transmitting packet, job %d\n",
5309 device_xname(sc->sc_dev), sc->sc_txsnext));
5310
5311 /* Advance the tx pointer. */
5312 sc->sc_txfree -= txs->txs_ndesc;
5313 sc->sc_txnext = nexttx;
5314
5315 sc->sc_txsfree--;
5316 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5317
5318 /* Pass the packet to any BPF listeners. */
5319 bpf_mtap(ifp, m0);
5320 }
5321
5322 if (m0 != NULL) {
5323 ifp->if_flags |= IFF_OACTIVE;
5324 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5325 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5326 m_freem(m0);
5327 }
5328
5329 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5330 /* No more slots; notify upper layer. */
5331 ifp->if_flags |= IFF_OACTIVE;
5332 }
5333
5334 if (sent) {
5335 /* Set a watchdog timer in case the chip flakes out. */
5336 ifp->if_timer = 5;
5337 }
5338 }
5339
5340 /* Interrupt */
5341
5342 /*
5343 * wm_txintr:
5344 *
5345 * Helper; handle transmit interrupts.
5346 */
5347 static void
5348 wm_txintr(struct wm_softc *sc)
5349 {
5350 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5351 struct wm_txsoft *txs;
5352 uint8_t status;
5353 int i;
5354
5355 if (sc->sc_stopping)
5356 return;
5357
5358 ifp->if_flags &= ~IFF_OACTIVE;
5359
5360 /*
5361 * Go through the Tx list and free mbufs for those
5362 * frames which have been transmitted.
5363 */
5364 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5365 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
5366 txs = &sc->sc_txsoft[i];
5367
5368 DPRINTF(WM_DEBUG_TX,
5369 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
5370
5371 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
5372 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5373
5374 status =
5375 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
5376 if ((status & WTX_ST_DD) == 0) {
5377 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
5378 BUS_DMASYNC_PREREAD);
5379 break;
5380 }
5381
5382 DPRINTF(WM_DEBUG_TX,
5383 ("%s: TX: job %d done: descs %d..%d\n",
5384 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
5385 txs->txs_lastdesc));
5386
5387 /*
5388 * XXX We should probably be using the statistics
5389 * XXX registers, but I don't know if they exist
5390 * XXX on chips before the i82544.
5391 */
5392
5393 #ifdef WM_EVENT_COUNTERS
5394 if (status & WTX_ST_TU)
5395 WM_EVCNT_INCR(&sc->sc_ev_tu);
5396 #endif /* WM_EVENT_COUNTERS */
5397
5398 if (status & (WTX_ST_EC|WTX_ST_LC)) {
5399 ifp->if_oerrors++;
5400 if (status & WTX_ST_LC)
5401 log(LOG_WARNING, "%s: late collision\n",
5402 device_xname(sc->sc_dev));
5403 else if (status & WTX_ST_EC) {
5404 ifp->if_collisions += 16;
5405 log(LOG_WARNING, "%s: excessive collisions\n",
5406 device_xname(sc->sc_dev));
5407 }
5408 } else
5409 ifp->if_opackets++;
5410
5411 sc->sc_txfree += txs->txs_ndesc;
5412 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
5413 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5414 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5415 m_freem(txs->txs_mbuf);
5416 txs->txs_mbuf = NULL;
5417 }
5418
5419 /* Update the dirty transmit buffer pointer. */
5420 sc->sc_txsdirty = i;
5421 DPRINTF(WM_DEBUG_TX,
5422 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
5423
5424 /*
5425 * If there are no more pending transmissions, cancel the watchdog
5426 * timer.
5427 */
5428 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
5429 ifp->if_timer = 0;
5430 }
5431
5432 /*
5433 * wm_rxintr:
5434 *
5435 * Helper; handle receive interrupts.
5436 */
5437 static void
5438 wm_rxintr(struct wm_softc *sc)
5439 {
5440 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5441 struct wm_rxsoft *rxs;
5442 struct mbuf *m;
5443 int i, len;
5444 uint8_t status, errors;
5445 uint16_t vlantag;
5446
5447 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
5448 rxs = &sc->sc_rxsoft[i];
5449
5450 DPRINTF(WM_DEBUG_RX,
5451 ("%s: RX: checking descriptor %d\n",
5452 device_xname(sc->sc_dev), i));
5453
5454 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5455
5456 status = sc->sc_rxdescs[i].wrx_status;
5457 errors = sc->sc_rxdescs[i].wrx_errors;
5458 len = le16toh(sc->sc_rxdescs[i].wrx_len);
5459 vlantag = sc->sc_rxdescs[i].wrx_special;
5460
5461 if ((status & WRX_ST_DD) == 0) {
5462 /* We have processed all of the receive descriptors. */
5463 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
5464 break;
5465 }
5466
5467 if (__predict_false(sc->sc_rxdiscard)) {
5468 DPRINTF(WM_DEBUG_RX,
5469 ("%s: RX: discarding contents of descriptor %d\n",
5470 device_xname(sc->sc_dev), i));
5471 WM_INIT_RXDESC(sc, i);
5472 if (status & WRX_ST_EOP) {
5473 /* Reset our state. */
5474 DPRINTF(WM_DEBUG_RX,
5475 ("%s: RX: resetting rxdiscard -> 0\n",
5476 device_xname(sc->sc_dev)));
5477 sc->sc_rxdiscard = 0;
5478 }
5479 continue;
5480 }
5481
5482 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5483 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
5484
5485 m = rxs->rxs_mbuf;
5486
5487 /*
5488 * Add a new receive buffer to the ring, unless of
5489 * course the length is zero. Treat the latter as a
5490 * failed mapping.
5491 */
5492 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
5493 /*
5494 * Failed, throw away what we've done so
5495 * far, and discard the rest of the packet.
5496 */
5497 ifp->if_ierrors++;
5498 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5499 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5500 WM_INIT_RXDESC(sc, i);
5501 if ((status & WRX_ST_EOP) == 0)
5502 sc->sc_rxdiscard = 1;
5503 if (sc->sc_rxhead != NULL)
5504 m_freem(sc->sc_rxhead);
5505 WM_RXCHAIN_RESET(sc);
5506 DPRINTF(WM_DEBUG_RX,
5507 ("%s: RX: Rx buffer allocation failed, "
5508 "dropping packet%s\n", device_xname(sc->sc_dev),
5509 sc->sc_rxdiscard ? " (discard)" : ""));
5510 continue;
5511 }
5512
5513 m->m_len = len;
5514 sc->sc_rxlen += len;
5515 DPRINTF(WM_DEBUG_RX,
5516 ("%s: RX: buffer at %p len %d\n",
5517 device_xname(sc->sc_dev), m->m_data, len));
5518
5519 /* If this is not the end of the packet, keep looking. */
5520 if ((status & WRX_ST_EOP) == 0) {
5521 WM_RXCHAIN_LINK(sc, m);
5522 DPRINTF(WM_DEBUG_RX,
5523 ("%s: RX: not yet EOP, rxlen -> %d\n",
5524 device_xname(sc->sc_dev), sc->sc_rxlen));
5525 continue;
5526 }
5527
5528 /*
5529 * Okay, we have the entire packet now. The chip is
5530 * configured to include the FCS except on the I350, I354
5531 * and I21[01] (not all chips can be configured to strip
5532 * it), so we need to trim it. We may also need to adjust
5533 * the length of the previous mbuf in the chain if the
5534 * current mbuf is too short.
5535 * Due to an erratum, the RCTL_SECRC bit in the RCTL
5536 * register is always set on the I350, so we don't trim
5537 * the FCS there.
5538 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
5539 && (sc->sc_type != WM_T_I210)
5540 && (sc->sc_type != WM_T_I211)) {
5541 if (m->m_len < ETHER_CRC_LEN) {
5542 sc->sc_rxtail->m_len
5543 -= (ETHER_CRC_LEN - m->m_len);
5544 m->m_len = 0;
5545 } else
5546 m->m_len -= ETHER_CRC_LEN;
5547 len = sc->sc_rxlen - ETHER_CRC_LEN;
5548 } else
5549 len = sc->sc_rxlen;
5550
5551 WM_RXCHAIN_LINK(sc, m);
5552
5553 *sc->sc_rxtailp = NULL;
5554 m = sc->sc_rxhead;
5555
5556 WM_RXCHAIN_RESET(sc);
5557
5558 DPRINTF(WM_DEBUG_RX,
5559 ("%s: RX: have entire packet, len -> %d\n",
5560 device_xname(sc->sc_dev), len));
5561
5562 /* If an error occurred, update stats and drop the packet. */
5563 if (errors &
5564 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
5565 if (errors & WRX_ER_SE)
5566 log(LOG_WARNING, "%s: symbol error\n",
5567 device_xname(sc->sc_dev));
5568 else if (errors & WRX_ER_SEQ)
5569 log(LOG_WARNING, "%s: receive sequence error\n",
5570 device_xname(sc->sc_dev));
5571 else if (errors & WRX_ER_CE)
5572 log(LOG_WARNING, "%s: CRC error\n",
5573 device_xname(sc->sc_dev));
5574 m_freem(m);
5575 continue;
5576 }
5577
5578 /* No errors. Receive the packet. */
5579 m->m_pkthdr.rcvif = ifp;
5580 m->m_pkthdr.len = len;
5581
5582 /*
5583 * If VLANs are enabled, VLAN packets have been unwrapped
5584 * for us. Associate the tag with the packet.
5585 */
5586 /* XXX should check for I350 and I354 */
5587 if ((status & WRX_ST_VP) != 0) {
5588 VLAN_INPUT_TAG(ifp, m,
5589 le16toh(vlantag),
5590 continue);
5591 }
5592
5593 /* Set up checksum info for this packet. */
5594 if ((status & WRX_ST_IXSM) == 0) {
5595 if (status & WRX_ST_IPCS) {
5596 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
5597 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
5598 if (errors & WRX_ER_IPE)
5599 m->m_pkthdr.csum_flags |=
5600 M_CSUM_IPv4_BAD;
5601 }
5602 if (status & WRX_ST_TCPCS) {
5603 /*
5604 * Note: we don't know if this was TCP or UDP,
5605 * so we just set both bits, and expect the
5606 * upper layers to deal.
5607 */
5608 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
5609 m->m_pkthdr.csum_flags |=
5610 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
5611 M_CSUM_TCPv6 | M_CSUM_UDPv6;
5612 if (errors & WRX_ER_TCPE)
5613 m->m_pkthdr.csum_flags |=
5614 M_CSUM_TCP_UDP_BAD;
5615 }
5616 }
5617
5618 ifp->if_ipackets++;
5619
5620 WM_RX_UNLOCK(sc);
5621
5622 /* Pass this up to any BPF listeners. */
5623 bpf_mtap(ifp, m);
5624
5625 /* Pass it on. */
5626 (*ifp->if_input)(ifp, m);
5627
5628 WM_RX_LOCK(sc);
5629
5630 if (sc->sc_stopping)
5631 break;
5632 }
5633
5634 /* Update the receive pointer. */
5635 sc->sc_rxptr = i;
5636
5637 DPRINTF(WM_DEBUG_RX,
5638 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
5639 }
5640
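/*
 * Illustrative sketch, not driver code: how the FCS trim in wm_rxintr()
 * behaves when the final mbuf of a chain is shorter than ETHER_CRC_LEN
 * (4 bytes).  The buffer lengths are made-up example values and
 * "prev"/"tail" are hypothetical names.
 */
#if 0
	/*
	 * Chain: [1514-byte mbuf] -> [2-byte mbuf], sc_rxlen = 1516.
	 * The tail mbuf holds only 2 of the 4 FCS bytes, so the other
	 * 2 are trimmed from the previous mbuf:
	 */
	if (tail->m_len < ETHER_CRC_LEN) {
		prev->m_len -= (ETHER_CRC_LEN - tail->m_len); /* 1514 -> 1512 */
		tail->m_len = 0;
	} else
		tail->m_len -= ETHER_CRC_LEN;
	len = sc->sc_rxlen - ETHER_CRC_LEN;	/* 1516 -> 1512 */
#endif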
5641 /*
5642 * wm_linkintr_gmii:
5643 *
5644 * Helper; handle link interrupts for GMII.
5645 */
5646 static void
5647 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
5648 {
5649
5650 KASSERT(WM_TX_LOCKED(sc));
5651
5652 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5653 __func__));
5654
5655 if (icr & ICR_LSC) {
5656 DPRINTF(WM_DEBUG_LINK,
5657 ("%s: LINK: LSC -> mii_pollstat\n",
5658 device_xname(sc->sc_dev)));
5659 mii_pollstat(&sc->sc_mii);
5660 if (sc->sc_type == WM_T_82543) {
5661 int miistatus, active;
5662
5663 /*
5664 * With the 82543, we need to force the MAC's
5665 * speed and duplex settings to match the PHY's
5666 * current configuration.
5667 */
5668 miistatus = sc->sc_mii.mii_media_status;
5669
5670 if (miistatus & IFM_ACTIVE) {
5671 active = sc->sc_mii.mii_media_active;
5672 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5673 switch (IFM_SUBTYPE(active)) {
5674 case IFM_10_T:
5675 sc->sc_ctrl |= CTRL_SPEED_10;
5676 break;
5677 case IFM_100_TX:
5678 sc->sc_ctrl |= CTRL_SPEED_100;
5679 break;
5680 case IFM_1000_T:
5681 sc->sc_ctrl |= CTRL_SPEED_1000;
5682 break;
5683 default:
5684 /*
5685 * fiber?
5686 * Should not enter here.
5687 */
5688 printf("unknown media (%x)\n",
5689 active);
5690 break;
5691 }
5692 if (active & IFM_FDX)
5693 sc->sc_ctrl |= CTRL_FD;
5694 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5695 }
5696 } else if ((sc->sc_type == WM_T_ICH8)
5697 && (sc->sc_phytype == WMPHY_IGP_3)) {
5698 wm_kmrn_lock_loss_workaround_ich8lan(sc);
5699 } else if (sc->sc_type == WM_T_PCH) {
5700 wm_k1_gig_workaround_hv(sc,
5701 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
5702 }
5703
5704 if ((sc->sc_phytype == WMPHY_82578)
5705 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
5706 == IFM_1000_T)) {
5707
5708 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
5709 delay(200*1000); /* XXX too big */
5710
5711 /* Link stall fix for link up */
5712 wm_gmii_hv_writereg(sc->sc_dev, 1,
5713 HV_MUX_DATA_CTRL,
5714 HV_MUX_DATA_CTRL_GEN_TO_MAC
5715 | HV_MUX_DATA_CTRL_FORCE_SPEED);
5716 wm_gmii_hv_writereg(sc->sc_dev, 1,
5717 HV_MUX_DATA_CTRL,
5718 HV_MUX_DATA_CTRL_GEN_TO_MAC);
5719 }
5720 }
5721 } else if (icr & ICR_RXSEQ) {
5722 DPRINTF(WM_DEBUG_LINK,
5723 ("%s: LINK Receive sequence error\n",
5724 device_xname(sc->sc_dev)));
5725 }
5726 }
5727
5728 /*
5729 * wm_linkintr_tbi:
5730 *
5731 * Helper; handle link interrupts for TBI mode.
5732 */
5733 static void
5734 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
5735 {
5736 uint32_t status;
5737
5738 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5739 __func__));
5740
5741 status = CSR_READ(sc, WMREG_STATUS);
5742 if (icr & ICR_LSC) {
5743 if (status & STATUS_LU) {
5744 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
5745 device_xname(sc->sc_dev),
5746 (status & STATUS_FD) ? "FDX" : "HDX"));
5747 /*
5748 * NOTE: the hardware updates TFCE and RFCE in CTRL
5749 * automatically, so we re-read CTRL to refresh sc->sc_ctrl.
5750 */
5751
5752 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5753 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5754 sc->sc_fcrtl &= ~FCRTL_XONE;
5755 if (status & STATUS_FD)
5756 sc->sc_tctl |=
5757 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5758 else
5759 sc->sc_tctl |=
5760 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5761 if (sc->sc_ctrl & CTRL_TFCE)
5762 sc->sc_fcrtl |= FCRTL_XONE;
5763 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5764 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5765 WMREG_OLD_FCRTL : WMREG_FCRTL,
5766 sc->sc_fcrtl);
5767 sc->sc_tbi_linkup = 1;
5768 } else {
5769 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
5770 device_xname(sc->sc_dev)));
5771 sc->sc_tbi_linkup = 0;
5772 }
5773 wm_tbi_set_linkled(sc);
5774 } else if (icr & ICR_RXSEQ) {
5775 DPRINTF(WM_DEBUG_LINK,
5776 ("%s: LINK: Receive sequence error\n",
5777 device_xname(sc->sc_dev)));
5778 }
5779 }
5780
5781 /*
5782 * wm_linkintr:
5783 *
5784 * Helper; handle link interrupts.
5785 */
5786 static void
5787 wm_linkintr(struct wm_softc *sc, uint32_t icr)
5788 {
5789
5790 if (sc->sc_flags & WM_F_HAS_MII)
5791 wm_linkintr_gmii(sc, icr);
5792 else
5793 wm_linkintr_tbi(sc, icr);
5794 }
5795
5796 /*
5797 * wm_intr:
5798 *
5799 * Interrupt service routine.
5800 */
5801 static int
5802 wm_intr(void *arg)
5803 {
5804 struct wm_softc *sc = arg;
5805 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5806 uint32_t icr;
5807 int handled = 0;
5808
5809 while (1 /* CONSTCOND */) {
5810 icr = CSR_READ(sc, WMREG_ICR);
5811 if ((icr & sc->sc_icr) == 0)
5812 break;
5813 rnd_add_uint32(&sc->rnd_source, icr);
5814
5815 WM_RX_LOCK(sc);
5816
5817 if (sc->sc_stopping) {
5818 WM_RX_UNLOCK(sc);
5819 break;
5820 }
5821
5822 handled = 1;
5823
5824 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
5825 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
5826 DPRINTF(WM_DEBUG_RX,
5827 ("%s: RX: got Rx intr 0x%08x\n",
5828 device_xname(sc->sc_dev),
5829 icr & (ICR_RXDMT0|ICR_RXT0)));
5830 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
5831 }
5832 #endif
5833 wm_rxintr(sc);
5834
5835 WM_RX_UNLOCK(sc);
5836 WM_TX_LOCK(sc);
5837
5838 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
5839 if (icr & ICR_TXDW) {
5840 DPRINTF(WM_DEBUG_TX,
5841 ("%s: TX: got TXDW interrupt\n",
5842 device_xname(sc->sc_dev)));
5843 WM_EVCNT_INCR(&sc->sc_ev_txdw);
5844 }
5845 #endif
5846 wm_txintr(sc);
5847
5848 if (icr & (ICR_LSC|ICR_RXSEQ)) {
5849 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
5850 wm_linkintr(sc, icr);
5851 }
5852
5853 WM_TX_UNLOCK(sc);
5854
5855 if (icr & ICR_RXO) {
5856 #if defined(WM_DEBUG)
5857 log(LOG_WARNING, "%s: Receive overrun\n",
5858 device_xname(sc->sc_dev));
5859 #endif /* defined(WM_DEBUG) */
5860 }
5861 }
5862
5863 if (handled) {
5864 /* Try to get more packets going. */
5865 ifp->if_start(ifp);
5866 }
5867
5868 return handled;
5869 }
5870
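/*
 * Illustrative sketch, not driver code: the loop in wm_intr() relies on
 * ICR being read-to-clear on this chip family, so each CSR_READ both
 * fetches and acknowledges the pending interrupt causes.
 */
#if 0
	for (;;) {
		icr = CSR_READ(sc, WMREG_ICR);	/* the read clears ICR */
		if ((icr & sc->sc_icr) == 0)	/* no causes we service */
			break;
		/* ... service Rx, Tx and link, then poll again ... */
	}
#endif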
5871 /*
5872 * Media related.
5873 * GMII, SGMII, TBI (and SERDES)
5874 */
5875
5876 /* GMII related */
5877
5878 /*
5879 * wm_gmii_reset:
5880 *
5881 * Reset the PHY.
5882 */
5883 static void
5884 wm_gmii_reset(struct wm_softc *sc)
5885 {
5886 uint32_t reg;
5887 int rv;
5888
5889 /* get phy semaphore */
5890 switch (sc->sc_type) {
5891 case WM_T_82571:
5892 case WM_T_82572:
5893 case WM_T_82573:
5894 case WM_T_82574:
5895 case WM_T_82583:
5896 /* XXX should get sw semaphore, too */
5897 rv = wm_get_swsm_semaphore(sc);
5898 break;
5899 case WM_T_82575:
5900 case WM_T_82576:
5901 case WM_T_82580:
5902 case WM_T_I350:
5903 case WM_T_I354:
5904 case WM_T_I210:
5905 case WM_T_I211:
5906 case WM_T_80003:
5907 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5908 break;
5909 case WM_T_ICH8:
5910 case WM_T_ICH9:
5911 case WM_T_ICH10:
5912 case WM_T_PCH:
5913 case WM_T_PCH2:
5914 case WM_T_PCH_LPT:
5915 rv = wm_get_swfwhw_semaphore(sc);
5916 break;
5917 default:
5918 /* nothing to do */
5919 rv = 0;
5920 break;
5921 }
5922 if (rv != 0) {
5923 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5924 __func__);
5925 return;
5926 }
5927
5928 switch (sc->sc_type) {
5929 case WM_T_82542_2_0:
5930 case WM_T_82542_2_1:
5931 /* null */
5932 break;
5933 case WM_T_82543:
5934 /*
5935 * With the 82543, we need to force the MAC's speed and duplex
5936 * settings to match the PHY's current configuration.
5937 * In addition, we need to perform a hardware reset on the PHY
5938 * to take it out of reset.
5939 */
5940 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5941 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5942
5943 /* The PHY reset pin is active-low. */
5944 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5945 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
5946 CTRL_EXT_SWDPIN(4));
5947 reg |= CTRL_EXT_SWDPIO(4);
5948
5949 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5950 CSR_WRITE_FLUSH(sc);
5951 delay(10*1000);
5952
5953 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
5954 CSR_WRITE_FLUSH(sc);
5955 delay(150);
5956 #if 0
5957 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
5958 #endif
5959 delay(20*1000); /* XXX extra delay to get PHY ID? */
5960 break;
5961 case WM_T_82544: /* reset 10000us */
5962 case WM_T_82540:
5963 case WM_T_82545:
5964 case WM_T_82545_3:
5965 case WM_T_82546:
5966 case WM_T_82546_3:
5967 case WM_T_82541:
5968 case WM_T_82541_2:
5969 case WM_T_82547:
5970 case WM_T_82547_2:
5971 case WM_T_82571: /* reset 100us */
5972 case WM_T_82572:
5973 case WM_T_82573:
5974 case WM_T_82574:
5975 case WM_T_82575:
5976 case WM_T_82576:
5977 case WM_T_82580:
5978 case WM_T_I350:
5979 case WM_T_I354:
5980 case WM_T_I210:
5981 case WM_T_I211:
5982 case WM_T_82583:
5983 case WM_T_80003:
5984 /* generic reset */
5985 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5986 CSR_WRITE_FLUSH(sc);
5987 delay(20000);
5988 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5989 CSR_WRITE_FLUSH(sc);
5990 delay(20000);
5991
5992 if ((sc->sc_type == WM_T_82541)
5993 || (sc->sc_type == WM_T_82541_2)
5994 || (sc->sc_type == WM_T_82547)
5995 || (sc->sc_type == WM_T_82547_2)) {
5996 /* workarounds for the igp PHY are done in igp_reset() */
5997 /* XXX add code to set LED after phy reset */
5998 }
5999 break;
6000 case WM_T_ICH8:
6001 case WM_T_ICH9:
6002 case WM_T_ICH10:
6003 case WM_T_PCH:
6004 case WM_T_PCH2:
6005 case WM_T_PCH_LPT:
6006 /* generic reset */
6007 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6008 CSR_WRITE_FLUSH(sc);
6009 delay(100);
6010 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6011 CSR_WRITE_FLUSH(sc);
6012 delay(150);
6013 break;
6014 default:
6015 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6016 __func__);
6017 break;
6018 }
6019
6020 /* release PHY semaphore */
6021 switch (sc->sc_type) {
6022 case WM_T_82571:
6023 case WM_T_82572:
6024 case WM_T_82573:
6025 case WM_T_82574:
6026 case WM_T_82583:
6027 /* XXX should put sw semaphore, too */
6028 wm_put_swsm_semaphore(sc);
6029 break;
6030 case WM_T_82575:
6031 case WM_T_82576:
6032 case WM_T_82580:
6033 case WM_T_I350:
6034 case WM_T_I354:
6035 case WM_T_I210:
6036 case WM_T_I211:
6037 case WM_T_80003:
6038 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6039 break;
6040 case WM_T_ICH8:
6041 case WM_T_ICH9:
6042 case WM_T_ICH10:
6043 case WM_T_PCH:
6044 case WM_T_PCH2:
6045 case WM_T_PCH_LPT:
6046 wm_put_swfwhw_semaphore(sc);
6047 break;
6048 default:
6049 /* nothing to do */
6051 break;
6052 }
6053
6054 /* get_cfg_done */
6055 wm_get_cfg_done(sc);
6056
6057 /* extra setup */
6058 switch (sc->sc_type) {
6059 case WM_T_82542_2_0:
6060 case WM_T_82542_2_1:
6061 case WM_T_82543:
6062 case WM_T_82544:
6063 case WM_T_82540:
6064 case WM_T_82545:
6065 case WM_T_82545_3:
6066 case WM_T_82546:
6067 case WM_T_82546_3:
6068 case WM_T_82541_2:
6069 case WM_T_82547_2:
6070 case WM_T_82571:
6071 case WM_T_82572:
6072 case WM_T_82573:
6073 case WM_T_82574:
6074 case WM_T_82575:
6075 case WM_T_82576:
6076 case WM_T_82580:
6077 case WM_T_I350:
6078 case WM_T_I354:
6079 case WM_T_I210:
6080 case WM_T_I211:
6081 case WM_T_82583:
6082 case WM_T_80003:
6083 /* null */
6084 break;
6085 case WM_T_82541:
6086 case WM_T_82547:
6087 /* XXX Actively configure the LED after PHY reset */
6088 break;
6089 case WM_T_ICH8:
6090 case WM_T_ICH9:
6091 case WM_T_ICH10:
6092 case WM_T_PCH:
6093 case WM_T_PCH2:
6094 case WM_T_PCH_LPT:
6095 /* Allow time for h/w to get to a quiescent state after reset */
6096 delay(10*1000);
6097
6098 if (sc->sc_type == WM_T_PCH)
6099 wm_hv_phy_workaround_ich8lan(sc);
6100
6101 if (sc->sc_type == WM_T_PCH2)
6102 wm_lv_phy_workaround_ich8lan(sc);
6103
6104 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6105 /*
6106 * Dummy read to clear the PHY wakeup bit after LCD
6107 * reset.
6108 */
6109 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6110 }
6111
6112 /*
6113 * XXX Configure the LCD with the extended configuration region
6114 * in NVM.
6115 */
6116
6117 /* Configure the LCD with the OEM bits in NVM */
6118 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6119 || (sc->sc_type == WM_T_PCH_LPT)) {
6120 /*
6121 * Disable LPLU.
6122 * XXX It seems that 82567 has LPLU, too.
6123 */
6124 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6125 reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
6126 reg |= HV_OEM_BITS_ANEGNOW;
6127 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6128 }
6129 break;
6130 default:
6131 panic("%s: unknown type\n", __func__);
6132 break;
6133 }
6134 }
6135
6136 /*
6137 * wm_get_phy_id_82575:
6138 *
6139 * Return PHY ID. Return -1 if it failed.
6140 */
6141 static int
6142 wm_get_phy_id_82575(struct wm_softc *sc)
6143 {
6144 uint32_t reg;
6145 int phyid = -1;
6146
6147 /* XXX */
6148 if ((sc->sc_flags & WM_F_SGMII) == 0)
6149 return -1;
6150
6151 if (wm_sgmii_uses_mdio(sc)) {
6152 switch (sc->sc_type) {
6153 case WM_T_82575:
6154 case WM_T_82576:
6155 reg = CSR_READ(sc, WMREG_MDIC);
6156 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6157 break;
6158 case WM_T_82580:
6159 case WM_T_I350:
6160 case WM_T_I354:
6161 case WM_T_I210:
6162 case WM_T_I211:
6163 reg = CSR_READ(sc, WMREG_MDICNFG);
6164 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6165 break;
6166 default:
6167 return -1;
6168 }
6169 }
6170
6171 return phyid;
6172 }
6173
6174
6175 /*
6176 * wm_gmii_mediainit:
6177 *
6178 * Initialize media for use on 1000BASE-T devices.
6179 */
6180 static void
6181 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6182 {
6183 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6184 struct mii_data *mii = &sc->sc_mii;
6185 uint32_t reg;
6186
6187 /* We have GMII. */
6188 sc->sc_flags |= WM_F_HAS_MII;
6189
6190 if (sc->sc_type == WM_T_80003)
6191 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6192 else
6193 sc->sc_tipg = TIPG_1000T_DFLT;
6194
6195 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
6196 if ((sc->sc_type == WM_T_82580)
6197 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6198 || (sc->sc_type == WM_T_I211)) {
6199 reg = CSR_READ(sc, WMREG_PHPM);
6200 reg &= ~PHPM_GO_LINK_D;
6201 CSR_WRITE(sc, WMREG_PHPM, reg);
6202 }
6203
6204 /*
6205 * Let the chip set speed/duplex on its own based on
6206 * signals from the PHY.
6207 * XXXbouyer - I'm not sure this is right for the 80003,
6208 * the em driver only sets CTRL_SLU here - but it seems to work.
6209 */
6210 sc->sc_ctrl |= CTRL_SLU;
6211 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6212
6213 /* Initialize our media structures and probe the GMII. */
6214 mii->mii_ifp = ifp;
6215
6216 /*
6217 * Determine the PHY access method.
6218 *
6219 * For SGMII, use SGMII specific method.
6220 *
6221 * For some devices, we can determine the PHY access method
6222 * from sc_type.
6223 *
6224 * For ICH8 variants, it's difficult to determine the PHY access
6225 * method by sc_type, so use the PCI product ID for some devices.
6226 * For other ICH8 variants, try igp's method first; if the PHY
6227 * can't be detected, fall back to bm's method.
6228 */
6229 switch (prodid) {
6230 case PCI_PRODUCT_INTEL_PCH_M_LM:
6231 case PCI_PRODUCT_INTEL_PCH_M_LC:
6232 /* 82577 */
6233 sc->sc_phytype = WMPHY_82577;
6234 mii->mii_readreg = wm_gmii_hv_readreg;
6235 mii->mii_writereg = wm_gmii_hv_writereg;
6236 break;
6237 case PCI_PRODUCT_INTEL_PCH_D_DM:
6238 case PCI_PRODUCT_INTEL_PCH_D_DC:
6239 /* 82578 */
6240 sc->sc_phytype = WMPHY_82578;
6241 mii->mii_readreg = wm_gmii_hv_readreg;
6242 mii->mii_writereg = wm_gmii_hv_writereg;
6243 break;
6244 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6245 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6246 /* 82579 */
6247 sc->sc_phytype = WMPHY_82579;
6248 mii->mii_readreg = wm_gmii_hv_readreg;
6249 mii->mii_writereg = wm_gmii_hv_writereg;
6250 break;
6251 case PCI_PRODUCT_INTEL_I217_LM:
6252 case PCI_PRODUCT_INTEL_I217_V:
6253 case PCI_PRODUCT_INTEL_I218_LM:
6254 case PCI_PRODUCT_INTEL_I218_V:
6255 /* I21[78] */
6256 mii->mii_readreg = wm_gmii_hv_readreg;
6257 mii->mii_writereg = wm_gmii_hv_writereg;
6258 break;
6259 case PCI_PRODUCT_INTEL_82801I_BM:
6260 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6261 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6262 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6263 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6264 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6265 /* 82567 */
6266 sc->sc_phytype = WMPHY_BM;
6267 mii->mii_readreg = wm_gmii_bm_readreg;
6268 mii->mii_writereg = wm_gmii_bm_writereg;
6269 break;
6270 default:
6271 if (((sc->sc_flags & WM_F_SGMII) != 0)
6272 && !wm_sgmii_uses_mdio(sc)){
6273 mii->mii_readreg = wm_sgmii_readreg;
6274 mii->mii_writereg = wm_sgmii_writereg;
6275 } else if (sc->sc_type >= WM_T_80003) {
6276 mii->mii_readreg = wm_gmii_i80003_readreg;
6277 mii->mii_writereg = wm_gmii_i80003_writereg;
6278 } else if (sc->sc_type >= WM_T_I210) {
6279 mii->mii_readreg = wm_gmii_i82544_readreg;
6280 mii->mii_writereg = wm_gmii_i82544_writereg;
6281 } else if (sc->sc_type >= WM_T_82580) {
6282 sc->sc_phytype = WMPHY_82580;
6283 mii->mii_readreg = wm_gmii_82580_readreg;
6284 mii->mii_writereg = wm_gmii_82580_writereg;
6285 } else if (sc->sc_type >= WM_T_82544) {
6286 mii->mii_readreg = wm_gmii_i82544_readreg;
6287 mii->mii_writereg = wm_gmii_i82544_writereg;
6288 } else {
6289 mii->mii_readreg = wm_gmii_i82543_readreg;
6290 mii->mii_writereg = wm_gmii_i82543_writereg;
6291 }
6292 break;
6293 }
6294 mii->mii_statchg = wm_gmii_statchg;
6295
6296 wm_gmii_reset(sc);
6297
6298 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6299 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6300 wm_gmii_mediastatus);
6301
6302 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6303 || (sc->sc_type == WM_T_82580)
6304 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6305 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6306 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6307 /* Attach only one port */
6308 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6309 MII_OFFSET_ANY, MIIF_DOPAUSE);
6310 } else {
6311 int i, id;
6312 uint32_t ctrl_ext;
6313
6314 id = wm_get_phy_id_82575(sc);
6315 if (id != -1) {
6316 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6317 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6318 }
6319 if ((id == -1)
6320 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6321 /* Power on sgmii phy if it is disabled */
6322 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6323 CSR_WRITE(sc, WMREG_CTRL_EXT,
6324 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6325 CSR_WRITE_FLUSH(sc);
6326 delay(300*1000); /* XXX too long */
6327
6328 /* Try PHY addresses 1 through 7 */
6329 for (i = 1; i < 8; i++)
6330 mii_attach(sc->sc_dev, &sc->sc_mii,
6331 0xffffffff, i, MII_OFFSET_ANY,
6332 MIIF_DOPAUSE);
6333
6334 /* restore previous sfp cage power state */
6335 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6336 }
6337 }
6338 } else {
6339 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6340 MII_OFFSET_ANY, MIIF_DOPAUSE);
6341 }
6342
6343 /*
6344 * If the MAC is a PCH2 or PCH_LPT and fails to detect the MII PHY,
6345 * call wm_set_mdio_slow_mode_hv() as a workaround and retry.
6346 */
6347 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6348 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6349 wm_set_mdio_slow_mode_hv(sc);
6350 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6351 MII_OFFSET_ANY, MIIF_DOPAUSE);
6352 }
6353
6354 /*
6355 * (For ICH8 variants)
6356 * If PHY detection failed, use BM's r/w function and retry.
6357 */
6358 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6359 /* if failed, retry with *_bm_* */
6360 mii->mii_readreg = wm_gmii_bm_readreg;
6361 mii->mii_writereg = wm_gmii_bm_writereg;
6362
6363 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6364 MII_OFFSET_ANY, MIIF_DOPAUSE);
6365 }
6366
6367 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6368 /* No PHY was found */
6369 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6370 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6371 sc->sc_phytype = WMPHY_NONE;
6372 } else {
6373 /*
6374 * PHY Found!
6375 * Check PHY type.
6376 */
6377 uint32_t model;
6378 struct mii_softc *child;
6379
6380 child = LIST_FIRST(&mii->mii_phys);
6381 if (device_is_a(child->mii_dev, "igphy")) {
6382 struct igphy_softc *isc = (struct igphy_softc *)child;
6383
6384 model = isc->sc_mii.mii_mpd_model;
6385 if (model == MII_MODEL_yyINTEL_I82566)
6386 sc->sc_phytype = WMPHY_IGP_3;
6387 }
6388
6389 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6390 }
6391 }
6392
6393 /*
6394 * wm_gmii_mediastatus: [ifmedia interface function]
6395 *
6396 * Get the current interface media status on a 1000BASE-T device.
6397 */
6398 static void
6399 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6400 {
6401 struct wm_softc *sc = ifp->if_softc;
6402
6403 ether_mediastatus(ifp, ifmr);
6404 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6405 | sc->sc_flowflags;
6406 }
6407
6408 /*
6409 * wm_gmii_mediachange: [ifmedia interface function]
6410 *
6411 * Set hardware to newly-selected media on a 1000BASE-T device.
6412 */
6413 static int
6414 wm_gmii_mediachange(struct ifnet *ifp)
6415 {
6416 struct wm_softc *sc = ifp->if_softc;
6417 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6418 int rc;
6419
6420 if ((ifp->if_flags & IFF_UP) == 0)
6421 return 0;
6422
6423 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6424 sc->sc_ctrl |= CTRL_SLU;
6425 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6426 || (sc->sc_type > WM_T_82543)) {
6427 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6428 } else {
6429 sc->sc_ctrl &= ~CTRL_ASDE;
6430 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6431 if (ife->ifm_media & IFM_FDX)
6432 sc->sc_ctrl |= CTRL_FD;
6433 switch (IFM_SUBTYPE(ife->ifm_media)) {
6434 case IFM_10_T:
6435 sc->sc_ctrl |= CTRL_SPEED_10;
6436 break;
6437 case IFM_100_TX:
6438 sc->sc_ctrl |= CTRL_SPEED_100;
6439 break;
6440 case IFM_1000_T:
6441 sc->sc_ctrl |= CTRL_SPEED_1000;
6442 break;
6443 default:
6444 panic("wm_gmii_mediachange: bad media 0x%x",
6445 ife->ifm_media);
6446 }
6447 }
6448 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6449 if (sc->sc_type <= WM_T_82543)
6450 wm_gmii_reset(sc);
6451
6452 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6453 return 0;
6454 return rc;
6455 }
6456
6457 #define MDI_IO CTRL_SWDPIN(2)
6458 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6459 #define MDI_CLK CTRL_SWDPIN(3)
6460
6461 static void
6462 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6463 {
6464 uint32_t i, v;
6465
6466 v = CSR_READ(sc, WMREG_CTRL);
6467 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6468 v |= MDI_DIR | CTRL_SWDPIO(3);
6469
6470 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6471 if (data & i)
6472 v |= MDI_IO;
6473 else
6474 v &= ~MDI_IO;
6475 CSR_WRITE(sc, WMREG_CTRL, v);
6476 CSR_WRITE_FLUSH(sc);
6477 delay(10);
6478 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6479 CSR_WRITE_FLUSH(sc);
6480 delay(10);
6481 CSR_WRITE(sc, WMREG_CTRL, v);
6482 CSR_WRITE_FLUSH(sc);
6483 delay(10);
6484 }
6485 }
6486
6487 static uint32_t
6488 wm_i82543_mii_recvbits(struct wm_softc *sc)
6489 {
6490 uint32_t v, i, data = 0;
6491
6492 v = CSR_READ(sc, WMREG_CTRL);
6493 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6494 v |= CTRL_SWDPIO(3);
6495
6496 CSR_WRITE(sc, WMREG_CTRL, v);
6497 CSR_WRITE_FLUSH(sc);
6498 delay(10);
6499 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6500 CSR_WRITE_FLUSH(sc);
6501 delay(10);
6502 CSR_WRITE(sc, WMREG_CTRL, v);
6503 CSR_WRITE_FLUSH(sc);
6504 delay(10);
6505
6506 for (i = 0; i < 16; i++) {
6507 data <<= 1;
6508 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6509 CSR_WRITE_FLUSH(sc);
6510 delay(10);
6511 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6512 data |= 1;
6513 CSR_WRITE(sc, WMREG_CTRL, v);
6514 CSR_WRITE_FLUSH(sc);
6515 delay(10);
6516 }
6517
6518 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6519 CSR_WRITE_FLUSH(sc);
6520 delay(10);
6521 CSR_WRITE(sc, WMREG_CTRL, v);
6522 CSR_WRITE_FLUSH(sc);
6523 delay(10);
6524
6525 return data;
6526 }
6527
6528 #undef MDI_IO
6529 #undef MDI_DIR
6530 #undef MDI_CLK
6531
6532 /*
6533 * wm_gmii_i82543_readreg: [mii interface function]
6534 *
6535 * Read a PHY register on the GMII (i82543 version).
6536 */
6537 static int
6538 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6539 {
6540 struct wm_softc *sc = device_private(self);
6541 int rv;
6542
6543 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6544 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
6545 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6546 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
6547
6548 DPRINTF(WM_DEBUG_GMII,
6549 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6550 device_xname(sc->sc_dev), phy, reg, rv));
6551
6552 return rv;
6553 }
6554
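/*
 * Illustrative sketch, not driver code: the 14-bit management frame
 * that wm_gmii_i82543_readreg() clocks out above, assuming the usual
 * IEEE 802.3 clause 22 encodings (start = 01, read opcode = 10).
 */
#if 0
	uint32_t frame;

	frame = reg			 /* bits  0..4:  register address */
	    | (phy << 5)		 /* bits  5..9:  PHY address */
	    | (MII_COMMAND_READ << 10)	 /* bits 10..11: opcode */
	    | (MII_COMMAND_START << 12); /* bits 12..13: start delimiter */
	wm_i82543_mii_sendbits(sc, frame, 14);
#endif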
6555 /*
6556 * wm_gmii_i82543_writereg: [mii interface function]
6557 *
6558 * Write a PHY register on the GMII (i82543 version).
6559 */
6560 static void
6561 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6562 {
6563 struct wm_softc *sc = device_private(self);
6564
6565 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6566 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6567 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6568 (MII_COMMAND_START << 30), 32);
6569 }
6570
6571 /*
6572 * wm_gmii_i82544_readreg: [mii interface function]
6573 *
6574 * Read a PHY register on the GMII.
6575 */
6576 static int
6577 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6578 {
6579 struct wm_softc *sc = device_private(self);
6580 uint32_t mdic = 0;
6581 int i, rv;
6582
6583 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6584 MDIC_REGADD(reg));
6585
6586 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6587 mdic = CSR_READ(sc, WMREG_MDIC);
6588 if (mdic & MDIC_READY)
6589 break;
6590 delay(50);
6591 }
6592
6593 if ((mdic & MDIC_READY) == 0) {
6594 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6595 device_xname(sc->sc_dev), phy, reg);
6596 rv = 0;
6597 } else if (mdic & MDIC_E) {
6598 #if 0 /* This is normal if no PHY is present. */
6599 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6600 device_xname(sc->sc_dev), phy, reg);
6601 #endif
6602 rv = 0;
6603 } else {
6604 rv = MDIC_DATA(mdic);
6605 if (rv == 0xffff)
6606 rv = 0;
6607 }
6608
6609 return rv;
6610 }
6611
6612 /*
6613 * wm_gmii_i82544_writereg: [mii interface function]
6614 *
6615 * Write a PHY register on the GMII.
6616 */
6617 static void
6618 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6619 {
6620 struct wm_softc *sc = device_private(self);
6621 uint32_t mdic = 0;
6622 int i;
6623
6624 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6625 MDIC_REGADD(reg) | MDIC_DATA(val));
6626
6627 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6628 mdic = CSR_READ(sc, WMREG_MDIC);
6629 if (mdic & MDIC_READY)
6630 break;
6631 delay(50);
6632 }
6633
6634 if ((mdic & MDIC_READY) == 0)
6635 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6636 device_xname(sc->sc_dev), phy, reg);
6637 else if (mdic & MDIC_E)
6638 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6639 device_xname(sc->sc_dev), phy, reg);
6640 }
6641
6642 /*
6643 * wm_gmii_i80003_readreg: [mii interface function]
6644 *
6645 * Read a PHY register on the Kumeran bus.
6646 * This could be handled by the PHY layer if we didn't have to lock
6647 * the resource ...
6648 */
6649 static int
6650 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6651 {
6652 struct wm_softc *sc = device_private(self);
6653 int sem;
6654 int rv;
6655
6656 if (phy != 1) /* only one PHY on kumeran bus */
6657 return 0;
6658
6659 sem = swfwphysem[sc->sc_funcid];
6660 if (wm_get_swfw_semaphore(sc, sem)) {
6661 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6662 __func__);
6663 return 0;
6664 }
6665
6666 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6667 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6668 reg >> GG82563_PAGE_SHIFT);
6669 } else {
6670 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6671 reg >> GG82563_PAGE_SHIFT);
6672 }
6673 /* Wait another 200us to work around a bug in the ready bit of the MDIC register */
6674 delay(200);
6675 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6676 delay(200);
6677
6678 wm_put_swfw_semaphore(sc, sem);
6679 return rv;
6680 }
6681
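/*
 * Illustrative sketch, not driver code: how the GG82563 paged register
 * addressing used above decomposes, assuming GG82563_PAGE_SHIFT == 5
 * and GG82563_MAX_REG_ADDRESS == 0x1f as in if_wmreg.h.
 */
#if 0
	/* e.g. reg = 0x1e5 -> page 0x0f, in-page register 0x05 */
	int page   = reg >> GG82563_PAGE_SHIFT;		/* upper bits */
	int regnum = reg & GG82563_MAX_REG_ADDRESS;	/* low 5 bits */
#endif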
6682 /*
6683 * wm_gmii_i80003_writereg: [mii interface function]
6684 *
6685 * Write a PHY register on the Kumeran bus.
6686 * This could be handled by the PHY layer if we didn't have to lock
6687 * the resource ...
6688 */
6689 static void
6690 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6691 {
6692 struct wm_softc *sc = device_private(self);
6693 int sem;
6694
6695 if (phy != 1) /* only one PHY on kumeran bus */
6696 return;
6697
6698 sem = swfwphysem[sc->sc_funcid];
6699 if (wm_get_swfw_semaphore(sc, sem)) {
6700 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6701 __func__);
6702 return;
6703 }
6704
6705 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6706 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6707 reg >> GG82563_PAGE_SHIFT);
6708 } else {
6709 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6710 reg >> GG82563_PAGE_SHIFT);
6711 }
6712 /* Wait another 200us to work around a bug in the ready bit of the MDIC register */
6713 delay(200);
6714 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6715 delay(200);
6716
6717 wm_put_swfw_semaphore(sc, sem);
6718 }
6719
6720 /*
6721 * wm_gmii_bm_readreg: [mii interface function]
6722 *
6723 * Read a PHY register on the BM PHY (82567).
6724 * This could be handled by the PHY layer if we didn't have to lock
6725 * the resource ...
6726 */
6727 static int
6728 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6729 {
6730 struct wm_softc *sc = device_private(self);
6731 int sem;
6732 int rv;
6733
6734 sem = swfwphysem[sc->sc_funcid];
6735 if (wm_get_swfw_semaphore(sc, sem)) {
6736 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6737 __func__);
6738 return 0;
6739 }
6740
6741 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6742 if (phy == 1)
6743 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6744 reg);
6745 else
6746 wm_gmii_i82544_writereg(self, phy,
6747 GG82563_PHY_PAGE_SELECT,
6748 reg >> GG82563_PAGE_SHIFT);
6749 }
6750
6751 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6752 wm_put_swfw_semaphore(sc, sem);
6753 return rv;
6754 }
6755
6756 /*
6757 * wm_gmii_bm_writereg: [mii interface function]
6758 *
6759 * Write a PHY register on the BM PHY (82567).
6760 * This could be handled by the PHY layer if we didn't have to lock
6761 * the resource ...
6762 */
6763 static void
6764 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6765 {
6766 struct wm_softc *sc = device_private(self);
6767 int sem;
6768
6769 sem = swfwphysem[sc->sc_funcid];
6770 if (wm_get_swfw_semaphore(sc, sem)) {
6771 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6772 __func__);
6773 return;
6774 }
6775
6776 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6777 if (phy == 1)
6778 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6779 reg);
6780 else
6781 wm_gmii_i82544_writereg(self, phy,
6782 GG82563_PHY_PAGE_SELECT,
6783 reg >> GG82563_PAGE_SHIFT);
6784 }
6785
6786 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6787 wm_put_swfw_semaphore(sc, sem);
6788 }
6789
6790 static void
6791 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6792 {
6793 struct wm_softc *sc = device_private(self);
6794 uint16_t regnum = BM_PHY_REG_NUM(offset);
6795 uint16_t wuce;
6796
6797 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6798 if (sc->sc_type == WM_T_PCH) {
6799 /* XXX the e1000 driver does nothing here... why? */
6800 }
6801
6802 /* Set page 769 */
6803 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6804 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6805
6806 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6807
6808 wuce &= ~BM_WUC_HOST_WU_BIT;
6809 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6810 wuce | BM_WUC_ENABLE_BIT);
6811
6812 /* Select page 800 */
6813 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6814 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6815
6816 /* Write page 800 */
6817 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6818
6819 if (rd)
6820 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6821 else
6822 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6823
6824 /* Set page 769 */
6825 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6826 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6827
6828 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6829 }
6830
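/*
 * Illustrative sketch, not driver code: reading and writing a
 * wakeup-page (800) register through the helper above.  The helper
 * performs the page 769 enable dance and restores the previous
 * enable state afterwards.
 */
#if 0
	int16_t wuc;

	wm_access_phy_wakeup_reg_bm(self, BM_WUC, &wuc, 1); /* read */
	wm_access_phy_wakeup_reg_bm(self, BM_WUC, &wuc, 0); /* write back */
#endif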
6831 /*
6832 * wm_gmii_hv_readreg: [mii interface function]
6833 *
6834 * Read a PHY register on the HV PHY (82577/8/9 and I21[78]).
6835 * This could be handled by the PHY layer if we didn't have to lock
6836 * the resource ...
6837 */
6838 static int
6839 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6840 {
6841 struct wm_softc *sc = device_private(self);
6842 uint16_t page = BM_PHY_REG_PAGE(reg);
6843 uint16_t regnum = BM_PHY_REG_NUM(reg);
6844 uint16_t val;
6845 int rv;
6846
6847 if (wm_get_swfwhw_semaphore(sc)) {
6848 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6849 __func__);
6850 return 0;
6851 }
6852
6853 /* XXX Workaround failure in MDIO access while cable is disconnected */
6854 if (sc->sc_phytype == WMPHY_82577) {
6855 /* XXX must write */
6856 }
6857
6858 /* Page 800 works differently than the rest, so it has its own function */
6859 if (page == BM_WUC_PAGE) {
6860 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
6861 return val;
6862 }
6863
6864 /*
6865 * Pages below 768 work differently than the rest, so they would
6866 * need their own function (unimplemented here, so just complain)
6867 */
6868 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6869 printf("gmii_hv_readreg!!!\n");
6870 return 0;
6871 }
6872
6873 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6874 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6875 page << BME1000_PAGE_SHIFT);
6876 }
6877
6878 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6879 wm_put_swfwhw_semaphore(sc);
6880 return rv;
6881 }
6882
6883 /*
6884 * wm_gmii_hv_writereg: [mii interface function]
6885 *
6886 * Write a PHY register on the HV PHY (82577/8/9 and I21[78]).
6887 * This could be handled by the PHY layer if we didn't have to lock
6888 * the resource ...
6889 */
6890 static void
6891 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6892 {
6893 struct wm_softc *sc = device_private(self);
6894 uint16_t page = BM_PHY_REG_PAGE(reg);
6895 uint16_t regnum = BM_PHY_REG_NUM(reg);
6896
6897 if (wm_get_swfwhw_semaphore(sc)) {
6898 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6899 __func__);
6900 return;
6901 }
6902
6903 /* XXX Workaround failure in MDIO access while cable is disconnected */
6904
6905 /* Page 800 works differently than the rest, so it has its own function */
6906 if (page == BM_WUC_PAGE) {
6907 uint16_t tmp;
6908
6909 tmp = val;
6910 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
6911 return;
6912 }
6913
6914 /*
6915 * Pages below 768 work differently than the rest, so they would
6916 * need their own function (unimplemented here, so just complain)
6917 */
6918 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6919 printf("gmii_hv_writereg!!!\n");
6920 return;
6921 }
6922
6923 /*
6924 * XXX Workaround MDIO accesses being disabled after entering IEEE
6925 * Power Down (whenever bit 11 of the PHY control register is set)
6926 */
6927
6928 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6929 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6930 page << BME1000_PAGE_SHIFT);
6931 }
6932
6933 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6934 wm_put_swfwhw_semaphore(sc);
6935 }
6936
6937 /*
6938 * wm_gmii_82580_readreg: [mii interface function]
6939 *
6940 * Read a PHY register on the 82580 and I350.
6941 * This could be handled by the PHY layer if we didn't have to lock
6942 * the resource ...
6943 */
6944 static int
6945 wm_gmii_82580_readreg(device_t self, int phy, int reg)
6946 {
6947 struct wm_softc *sc = device_private(self);
6948 int sem;
6949 int rv;
6950
6951 sem = swfwphysem[sc->sc_funcid];
6952 if (wm_get_swfw_semaphore(sc, sem)) {
6953 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6954 __func__);
6955 return 0;
6956 }
6957
6958 rv = wm_gmii_i82544_readreg(self, phy, reg);
6959
6960 wm_put_swfw_semaphore(sc, sem);
6961 return rv;
6962 }
6963
6964 /*
6965 * wm_gmii_82580_writereg: [mii interface function]
6966 *
6967 * Write a PHY register on the 82580 and I350.
6968 * This could be handled by the PHY layer if we didn't have to lock
6969 * the resource ...
6970 */
6971 static void
6972 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
6973 {
6974 struct wm_softc *sc = device_private(self);
6975 int sem;
6976
6977 sem = swfwphysem[sc->sc_funcid];
6978 if (wm_get_swfw_semaphore(sc, sem)) {
6979 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6980 __func__);
6981 return;
6982 }
6983
6984 wm_gmii_i82544_writereg(self, phy, reg, val);
6985
6986 wm_put_swfw_semaphore(sc, sem);
6987 }
6988
6989 /*
6990 * wm_gmii_statchg: [mii interface function]
6991 *
6992 * Callback from MII layer when media changes.
6993 */
6994 static void
6995 wm_gmii_statchg(struct ifnet *ifp)
6996 {
6997 struct wm_softc *sc = ifp->if_softc;
6998 struct mii_data *mii = &sc->sc_mii;
6999
7000 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7001 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7002 sc->sc_fcrtl &= ~FCRTL_XONE;
7003
7004 /*
7005 * Get flow control negotiation result.
7006 */
7007 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7008 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7009 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7010 mii->mii_media_active &= ~IFM_ETH_FMASK;
7011 }
7012
7013 if (sc->sc_flowflags & IFM_FLOW) {
7014 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7015 sc->sc_ctrl |= CTRL_TFCE;
7016 sc->sc_fcrtl |= FCRTL_XONE;
7017 }
7018 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7019 sc->sc_ctrl |= CTRL_RFCE;
7020 }
7021
7022 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7023 DPRINTF(WM_DEBUG_LINK,
7024 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7025 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7026 } else {
7027 DPRINTF(WM_DEBUG_LINK,
7028 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7029 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7030 }
7031
7032 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7033 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7034 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7035 : WMREG_FCRTL, sc->sc_fcrtl);
7036 if (sc->sc_type == WM_T_80003) {
7037 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7038 case IFM_1000_T:
7039 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7040 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7041 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7042 break;
7043 default:
7044 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7045 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7046 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7047 break;
7048 }
7049 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7050 }
7051 }
7052
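/*
 * Descriptive note, not driver code: the flow-control mapping above is
 * IFM_ETH_TXPAUSE -> CTRL_TFCE plus FCRTL_XONE (we transmit pause
 * frames) and IFM_ETH_RXPAUSE -> CTRL_RFCE (we honor received pause
 * frames); both are cleared first and re-derived from the negotiated
 * sc_flowflags.
 */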
7053 /*
7054 * wm_kmrn_readreg:
7055 *
7056 * Read a kumeran register
7057 */
7058 static int
7059 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7060 {
7061 int rv;
7062
7063 if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
7064 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7065 aprint_error_dev(sc->sc_dev,
7066 "%s: failed to get semaphore\n", __func__);
7067 return 0;
7068 }
7069 } else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
7070 if (wm_get_swfwhw_semaphore(sc)) {
7071 aprint_error_dev(sc->sc_dev,
7072 "%s: failed to get semaphore\n", __func__);
7073 return 0;
7074 }
7075 }
7076
7077 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7078 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7079 KUMCTRLSTA_REN);
7080 CSR_WRITE_FLUSH(sc);
7081 delay(2);
7082
7083 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7084
7085 if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
7086 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7087 else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
7088 wm_put_swfwhw_semaphore(sc);
7089
7090 return rv;
7091 }
7092
7093 /*
7094 * wm_kmrn_writereg:
7095 *
7096 * Write a kumeran register
7097 */
7098 static void
7099 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7100 {
7101
7102 if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
7103 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7104 aprint_error_dev(sc->sc_dev,
7105 "%s: failed to get semaphore\n", __func__);
7106 return;
7107 }
7108 } else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
7109 if (wm_get_swfwhw_semaphore(sc)) {
7110 aprint_error_dev(sc->sc_dev,
7111 "%s: failed to get semaphore\n", __func__);
7112 return;
7113 }
7114 }
7115
7116 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7117 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7118 (val & KUMCTRLSTA_MASK));
7119
7120 if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
7121 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7122 else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
7123 wm_put_swfwhw_semaphore(sc);
7124 }
7125
7126 /* SGMII related */
7127
7128 /*
7129 * wm_sgmii_uses_mdio:
7130 *
7131 * Check whether the transaction is to the internal PHY or the external
7132 * MDIO interface. Return true if it's MDIO.
7133 */
7134 static bool
7135 wm_sgmii_uses_mdio(struct wm_softc *sc)
7136 {
7137 uint32_t reg;
7138 bool ismdio = false;
7139
7140 switch (sc->sc_type) {
7141 case WM_T_82575:
7142 case WM_T_82576:
7143 reg = CSR_READ(sc, WMREG_MDIC);
7144 ismdio = ((reg & MDIC_DEST) != 0);
7145 break;
7146 case WM_T_82580:
7147 case WM_T_I350:
7148 case WM_T_I354:
7149 case WM_T_I210:
7150 case WM_T_I211:
7151 reg = CSR_READ(sc, WMREG_MDICNFG);
7152 ismdio = ((reg & MDICNFG_DEST) != 0);
7153 break;
7154 default:
7155 break;
7156 }
7157
7158 return ismdio;
7159 }
7160
7161 /*
7162 * wm_sgmii_readreg: [mii interface function]
7163 *
7164 * Read a PHY register on the SGMII.
7165 * This could be handled by the PHY layer if we didn't have to lock
7166 * the resource ...
7167 */
7168 static int
7169 wm_sgmii_readreg(device_t self, int phy, int reg)
7170 {
7171 struct wm_softc *sc = device_private(self);
7172 uint32_t i2ccmd;
7173 int i, rv;
7174
7175 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7176 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7177 __func__);
7178 return 0;
7179 }
7180
7181 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7182 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7183 | I2CCMD_OPCODE_READ;
7184 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7185
7186 /* Poll the ready bit */
7187 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7188 delay(50);
7189 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7190 if (i2ccmd & I2CCMD_READY)
7191 break;
7192 }
7193 if ((i2ccmd & I2CCMD_READY) == 0)
7194 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7195 if ((i2ccmd & I2CCMD_ERROR) != 0)
7196 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7197
7198 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7199
7200 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7201 return rv;
7202 }
7203
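/*
 * Illustrative sketch, not driver code: the byte swap applied to the
 * I2CCMD data field above.  The 16-bit PHY word arrives over I2C most
 * significant byte first, so the two data bytes are exchanged.
 */
#if 0
	uint32_t raw = 0x4063;		/* example data field */
	int rv = ((raw >> 8) & 0x00ff)	/* high byte -> low */
	    | ((raw << 8) & 0xff00);	/* low byte -> high */
	/* rv == 0x6340 */
#endif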
7204 /*
7205 * wm_sgmii_writereg: [mii interface function]
7206 *
7207 * Write a PHY register on the SGMII.
7208 * This could be handled by the PHY layer if we didn't have to lock
7209 * the resource ...
7210 */
7211 static void
7212 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7213 {
7214 struct wm_softc *sc = device_private(self);
7215 uint32_t i2ccmd;
7216 int i;
7217
7218 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7219 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7220 __func__);
7221 return;
7222 }
7223
7224 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7225 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7226 | I2CCMD_OPCODE_WRITE;
7227 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7228
7229 /* Poll the ready bit */
7230 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7231 delay(50);
7232 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7233 if (i2ccmd & I2CCMD_READY)
7234 break;
7235 }
7236 if ((i2ccmd & I2CCMD_READY) == 0)
7237 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7238 if ((i2ccmd & I2CCMD_ERROR) != 0)
7239 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7240
7241 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7242 }
7243
7244 /* TBI related */
7245
7246 /* XXX Currently TBI only */
7247 static int
7248 wm_check_for_link(struct wm_softc *sc)
7249 {
7250 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7251 uint32_t rxcw;
7252 uint32_t ctrl;
7253 uint32_t status;
7254 uint32_t sig;
7255
7256 if (sc->sc_mediatype & WMP_F_SERDES) {
7257 sc->sc_tbi_linkup = 1;
7258 return 0;
7259 }
7260
7261 rxcw = CSR_READ(sc, WMREG_RXCW);
7262 ctrl = CSR_READ(sc, WMREG_CTRL);
7263 status = CSR_READ(sc, WMREG_STATUS);
7264
7265 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7266
7267 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7268 device_xname(sc->sc_dev), __func__,
7269 ((ctrl & CTRL_SWDPIN(1)) == sig),
7270 ((status & STATUS_LU) != 0),
7271 ((rxcw & RXCW_C) != 0)
7272 ));
7273
7274 /*
7275 * SWDPIN LU RXCW
7276 * 0 0 0
7277 * 0 0 1 (should not happen)
7278 * 0 1 0 (should not happen)
7279 * 0 1 1 (should not happen)
7280 * 1 0 0 Disable autonego and force linkup
7281 * 1 0 1 got /C/ but not linkup yet
7282 * 1 1 0 (linkup)
7283 * 1 1 1 If IFM_AUTO, back to autonego
7284 *
7285 */
7286 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7287 && ((status & STATUS_LU) == 0)
7288 && ((rxcw & RXCW_C) == 0)) {
7289 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7290 __func__));
7291 sc->sc_tbi_linkup = 0;
7292 /* Disable auto-negotiation in the TXCW register */
7293 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7294
7295 /*
7296 * Force link-up and also force full-duplex.
7297 *
7298 * NOTE: the hardware updated TFCE and RFCE in CTRL
7299 * automatically, so we refresh sc->sc_ctrl from it.
7300 */
7301 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7302 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7303 } else if (((status & STATUS_LU) != 0)
7304 && ((rxcw & RXCW_C) != 0)
7305 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7306 sc->sc_tbi_linkup = 1;
7307 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7308 __func__));
7309 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7310 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7311 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7312 && ((rxcw & RXCW_C) != 0)) {
7313 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7314 } else {
7315 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7316 status));
7317 }
7318
7319 return 0;
7320 }
7321
7322 /*
7323 * wm_tbi_mediainit:
7324 *
7325 * Initialize media for use on 1000BASE-X devices.
7326 */
7327 static void
7328 wm_tbi_mediainit(struct wm_softc *sc)
7329 {
7330 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7331 const char *sep = "";
7332
7333 if (sc->sc_type < WM_T_82543)
7334 sc->sc_tipg = TIPG_WM_DFLT;
7335 else
7336 sc->sc_tipg = TIPG_LG_DFLT;
7337
7338 sc->sc_tbi_anegticks = 5;
7339
7340 /* Initialize our media structures */
7341 sc->sc_mii.mii_ifp = ifp;
7342
7343 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7344 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
7345 wm_tbi_mediastatus);
7346
7347 /*
7348 * SWD Pins:
7349 *
7350 * 0 = Link LED (output)
7351 * 1 = Loss Of Signal (input)
7352 */
7353 sc->sc_ctrl |= CTRL_SWDPIO(0);
7354 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
7355 if (sc->sc_mediatype & WMP_F_SERDES)
7356 sc->sc_ctrl &= ~CTRL_LRST;
7357
7358 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7359
7360 #define ADD(ss, mm, dd) \
7361 do { \
7362 aprint_normal("%s%s", sep, ss); \
7363 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
7364 sep = ", "; \
7365 } while (/*CONSTCOND*/0)
7366
7367 aprint_normal_dev(sc->sc_dev, "");
7368
7369 /* Only 82545 is LX */
7370 if (sc->sc_type == WM_T_82545) {
7371 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
7372 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
7373 } else {
7374 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
7375 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
7376 }
7377 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
7378 aprint_normal("\n");
7379
7380 #undef ADD
7381
7382 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
7383 }
7384
7385 /*
7386 * wm_tbi_mediastatus: [ifmedia interface function]
7387 *
7388 * Get the current interface media status on a 1000BASE-X device.
7389 */
7390 static void
7391 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7392 {
7393 struct wm_softc *sc = ifp->if_softc;
7394 uint32_t ctrl, status;
7395
7396 ifmr->ifm_status = IFM_AVALID;
7397 ifmr->ifm_active = IFM_ETHER;
7398
7399 status = CSR_READ(sc, WMREG_STATUS);
7400 if ((status & STATUS_LU) == 0) {
7401 ifmr->ifm_active |= IFM_NONE;
7402 return;
7403 }
7404
7405 ifmr->ifm_status |= IFM_ACTIVE;
7406 /* Only 82545 is LX */
7407 if (sc->sc_type == WM_T_82545)
7408 ifmr->ifm_active |= IFM_1000_LX;
7409 else
7410 ifmr->ifm_active |= IFM_1000_SX;
7411 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
7412 ifmr->ifm_active |= IFM_FDX;
7413 else
7414 ifmr->ifm_active |= IFM_HDX;
7415 ctrl = CSR_READ(sc, WMREG_CTRL);
7416 if (ctrl & CTRL_RFCE)
7417 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
7418 if (ctrl & CTRL_TFCE)
7419 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
7420 }
7421
7422 /*
7423 * wm_tbi_mediachange: [ifmedia interface function]
7424 *
7425 * Set hardware to newly-selected media on a 1000BASE-X device.
7426 */
7427 static int
7428 wm_tbi_mediachange(struct ifnet *ifp)
7429 {
7430 struct wm_softc *sc = ifp->if_softc;
7431 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7432 uint32_t status;
7433 int i;
7434
7435 if (sc->sc_mediatype & WMP_F_SERDES)
7436 return 0;
7437
7438 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
7439 || (sc->sc_type >= WM_T_82575))
7440 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
7441
7442 /* XXX power_up_serdes_link_82575() */
7443
7444 sc->sc_ctrl &= ~CTRL_LRST;
7445 sc->sc_txcw = TXCW_ANE;
7446 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7447 sc->sc_txcw |= TXCW_FD | TXCW_HD;
7448 else if (ife->ifm_media & IFM_FDX)
7449 sc->sc_txcw |= TXCW_FD;
7450 else
7451 sc->sc_txcw |= TXCW_HD;
7452
7453 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
7454 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
7455
7456 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
7457 device_xname(sc->sc_dev), sc->sc_txcw));
7458 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7459 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7460 CSR_WRITE_FLUSH(sc);
7461 delay(1000);
7462
7463 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
7464 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
7465
7466 /*
7467 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
7468 * optics detect a signal, 0 if they don't.
7469 */
7470 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
7471 /* Have signal; wait for the link to come up. */
7472 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
7473 delay(10000);
7474 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
7475 break;
7476 }
7477
7478 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
7479 device_xname(sc->sc_dev),i));
7480
7481 status = CSR_READ(sc, WMREG_STATUS);
7482 DPRINTF(WM_DEBUG_LINK,
7483 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
7484 device_xname(sc->sc_dev),status, STATUS_LU));
7485 if (status & STATUS_LU) {
7486 /* Link is up. */
7487 DPRINTF(WM_DEBUG_LINK,
7488 ("%s: LINK: set media -> link up %s\n",
7489 device_xname(sc->sc_dev),
7490 (status & STATUS_FD) ? "FDX" : "HDX"));
7491
7492 /*
7493 * NOTE: the hardware updates TFCE and RFCE in CTRL
7494 * automatically, so we re-read CTRL to refresh sc->sc_ctrl.
7495 */
7496 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7497 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7498 sc->sc_fcrtl &= ~FCRTL_XONE;
7499 if (status & STATUS_FD)
7500 sc->sc_tctl |=
7501 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7502 else
7503 sc->sc_tctl |=
7504 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7505 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
7506 sc->sc_fcrtl |= FCRTL_XONE;
7507 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7508 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7509 WMREG_OLD_FCRTL : WMREG_FCRTL,
7510 sc->sc_fcrtl);
7511 sc->sc_tbi_linkup = 1;
7512 } else {
7513 if (i == WM_LINKUP_TIMEOUT)
7514 wm_check_for_link(sc);
7515 /* Link is down. */
7516 DPRINTF(WM_DEBUG_LINK,
7517 ("%s: LINK: set media -> link down\n",
7518 device_xname(sc->sc_dev)));
7519 sc->sc_tbi_linkup = 0;
7520 }
7521 } else {
7522 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
7523 device_xname(sc->sc_dev)));
7524 sc->sc_tbi_linkup = 0;
7525 }
7526
7527 wm_tbi_set_linkled(sc);
7528
7529 return 0;
7530 }
7531
7532 /*
7533 * wm_tbi_set_linkled:
7534 *
7535 * Update the link LED on 1000BASE-X devices.
7536 */
7537 static void
7538 wm_tbi_set_linkled(struct wm_softc *sc)
7539 {
7540
7541 if (sc->sc_tbi_linkup)
7542 sc->sc_ctrl |= CTRL_SWDPIN(0);
7543 else
7544 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7545
7546 /* 82540 or newer devices are active low */
7547 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7548
7549 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7550 }
7551
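/*
 * Descriptive note, not driver code: on the 82540 and newer the link
 * LED pin is active low, so the XOR above inverts the desired state;
 * link up sets CTRL_SWDPIN(0) and the XOR then clears it, driving the
 * pin low and lighting the LED.
 */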
7552 /*
7553 * wm_tbi_check_link:
7554 *
7555 * Check the link on 1000BASE-X devices.
7556 */
7557 static void
7558 wm_tbi_check_link(struct wm_softc *sc)
7559 {
7560 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7561 uint32_t status;
7562
7563 KASSERT(WM_TX_LOCKED(sc));
7564
7565 if (sc->sc_mediatype & WMP_F_SERDES) {
7566 sc->sc_tbi_linkup = 1;
7567 return;
7568 }
7569
7570 status = CSR_READ(sc, WMREG_STATUS);
7571
7572 /* XXX is this needed? */
7573 (void)CSR_READ(sc, WMREG_RXCW);
7574 (void)CSR_READ(sc, WMREG_CTRL);
7575
7576 /* set link status */
7577 if ((status & STATUS_LU) == 0) {
7578 DPRINTF(WM_DEBUG_LINK,
7579 ("%s: LINK: checklink -> down\n",
7580 device_xname(sc->sc_dev)));
7581 sc->sc_tbi_linkup = 0;
7582 } else if (sc->sc_tbi_linkup == 0) {
7583 DPRINTF(WM_DEBUG_LINK,
7584 ("%s: LINK: checklink -> up %s\n",
7585 device_xname(sc->sc_dev),
7586 (status & STATUS_FD) ? "FDX" : "HDX"));
7587 sc->sc_tbi_linkup = 1;
7588 }
7589
7590 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
7591 && ((status & STATUS_LU) == 0)) {
7592 sc->sc_tbi_linkup = 0;
7593 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7594 /* If the timer expired, retry autonegotiation */
7595 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
7596 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
7597 sc->sc_tbi_ticks = 0;
7598 /*
7599 * Reset the link, and let autonegotiation do
7600 * its thing
7601 */
7602 sc->sc_ctrl |= CTRL_LRST;
7603 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7604 CSR_WRITE_FLUSH(sc);
7605 delay(1000);
7606 sc->sc_ctrl &= ~CTRL_LRST;
7607 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7608 CSR_WRITE_FLUSH(sc);
7609 delay(1000);
7610 CSR_WRITE(sc, WMREG_TXCW,
7611 sc->sc_txcw & ~TXCW_ANE);
7612 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7613 }
7614 }
7615 }
7616
7617 wm_tbi_set_linkled(sc);
7618 }
7619
7620 /* SFP related */
7621
7622 static int
7623 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
7624 {
7625 uint32_t i2ccmd;
7626 int i;
7627
7628 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
7629 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7630
7631 /* Poll the ready bit */
7632 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7633 delay(50);
7634 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7635 if (i2ccmd & I2CCMD_READY)
7636 break;
7637 }
7638 if ((i2ccmd & I2CCMD_READY) == 0)
7639 return -1;
7640 if ((i2ccmd & I2CCMD_ERROR) != 0)
7641 return -1;
7642
7643 *data = i2ccmd & 0x00ff;
7644
7645 return 0;
7646 }
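
/*
 * Illustrative sketch (not part of the driver flow): one I2CCMD
 * transaction fetches a single byte of the SFP module's EEPROM, e.g.
 * the identifier byte at SFF_SFP_ID_OFF:
 *
 *	uint8_t id;
 *
 *	if (wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &id) == 0)
 *		(id == SFF_SFP_ID_SFP indicates a pluggable module)
 *
 * wm_sfp_get_media_type() below does exactly this, with retries because
 * the module may still be powering up.
 */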
7647
7648 static uint32_t
7649 wm_sfp_get_media_type(struct wm_softc *sc)
7650 {
7651 uint32_t ctrl_ext;
7652 uint8_t val = 0;
7653 int timeout = 3;
7654 uint32_t mediatype = WMP_F_UNKNOWN;
7655 int rv = -1;
7656
7657 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7658 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
7659 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
7660 CSR_WRITE_FLUSH(sc);
7661
7662 /* Read SFP module data */
7663 while (timeout) {
7664 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
7665 if (rv == 0)
7666 break;
7667 delay(100*1000); /* XXX too big */
7668 timeout--;
7669 }
7670 if (rv != 0)
7671 goto out;
7672 switch (val) {
7673 case SFF_SFP_ID_SFF:
7674 aprint_normal_dev(sc->sc_dev,
7675 "Module/Connector soldered to board\n");
7676 break;
7677 case SFF_SFP_ID_SFP:
7678 aprint_normal_dev(sc->sc_dev, "SFP\n");
7679 break;
7680 case SFF_SFP_ID_UNKNOWN:
7681 goto out;
7682 default:
7683 break;
7684 }
7685
7686 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
7687 if (rv != 0) {
7688 goto out;
7689 }
7690
7691 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
7692 mediatype = WMP_F_SERDES;
7693 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
7694 		sc->sc_flags |= WM_F_SGMII;
7695 		mediatype = WMP_F_COPPER;
7696 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
7697 		sc->sc_flags |= WM_F_SGMII;
7698 		mediatype = WMP_F_SERDES;
7699 	}
7700
7701 out:
7702 /* Restore I2C interface setting */
7703 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7704
7705 return mediatype;
7706 }

7707 /*
7708 * NVM related.
7709 * Microwire, SPI (w/wo EERD) and Flash.
7710 */
7711
7712 /* Both SPI and uwire */
7713
7714 /*
7715 * wm_eeprom_sendbits:
7716 *
7717 * Send a series of bits to the EEPROM.
7718 */
7719 static void
7720 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
7721 {
7722 uint32_t reg;
7723 int x;
7724
7725 reg = CSR_READ(sc, WMREG_EECD);
7726
7727 for (x = nbits; x > 0; x--) {
7728 if (bits & (1U << (x - 1)))
7729 reg |= EECD_DI;
7730 else
7731 reg &= ~EECD_DI;
7732 CSR_WRITE(sc, WMREG_EECD, reg);
7733 CSR_WRITE_FLUSH(sc);
7734 delay(2);
7735 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7736 CSR_WRITE_FLUSH(sc);
7737 delay(2);
7738 CSR_WRITE(sc, WMREG_EECD, reg);
7739 CSR_WRITE_FLUSH(sc);
7740 delay(2);
7741 }
7742 }
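
/*
 * Note (descriptive): each bit above is clocked out MSB first in three
 * steps: drive DI with SK low, raise SK to latch the bit in the serial
 * EEPROM, then drop SK again, flushing and pausing ~2us after every
 * register write.
 */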
7743
7744 /*
7745 * wm_eeprom_recvbits:
7746 *
7747 * Receive a series of bits from the EEPROM.
7748 */
7749 static void
7750 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
7751 {
7752 uint32_t reg, val;
7753 int x;
7754
7755 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
7756
7757 val = 0;
7758 for (x = nbits; x > 0; x--) {
7759 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7760 CSR_WRITE_FLUSH(sc);
7761 delay(2);
7762 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
7763 val |= (1U << (x - 1));
7764 CSR_WRITE(sc, WMREG_EECD, reg);
7765 CSR_WRITE_FLUSH(sc);
7766 delay(2);
7767 }
7768 *valp = val;
7769 }
7770
7771 /* Microwire */
7772
7773 /*
7774 * wm_nvm_read_uwire:
7775 *
7776 * Read a word from the EEPROM using the MicroWire protocol.
7777 */
7778 static int
7779 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7780 {
7781 uint32_t reg, val;
7782 int i;
7783
7784 for (i = 0; i < wordcnt; i++) {
7785 /* Clear SK and DI. */
7786 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
7787 CSR_WRITE(sc, WMREG_EECD, reg);
7788
7789 /*
7790 * XXX: workaround for a bug in qemu-0.12.x and prior
7791 * and Xen.
7792 *
7793 		 * We use this workaround only for the 82540 because qemu's
7794 		 * e1000 acts as an 82540.
7795 */
7796 if (sc->sc_type == WM_T_82540) {
7797 reg |= EECD_SK;
7798 CSR_WRITE(sc, WMREG_EECD, reg);
7799 reg &= ~EECD_SK;
7800 CSR_WRITE(sc, WMREG_EECD, reg);
7801 CSR_WRITE_FLUSH(sc);
7802 delay(2);
7803 }
7804 /* XXX: end of workaround */
7805
7806 /* Set CHIP SELECT. */
7807 reg |= EECD_CS;
7808 CSR_WRITE(sc, WMREG_EECD, reg);
7809 CSR_WRITE_FLUSH(sc);
7810 delay(2);
7811
7812 /* Shift in the READ command. */
7813 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
7814
7815 /* Shift in address. */
7816 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
7817
7818 /* Shift out the data. */
7819 wm_eeprom_recvbits(sc, &val, 16);
7820 data[i] = val & 0xffff;
7821
7822 /* Clear CHIP SELECT. */
7823 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
7824 CSR_WRITE(sc, WMREG_EECD, reg);
7825 CSR_WRITE_FLUSH(sc);
7826 delay(2);
7827 }
7828
7829 return 0;
7830 }
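
/*
 * Illustrative sketch: with 6 address bits (the small-part case),
 * reading word 3 amounts to
 *
 *	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);	READ opcode, 3 bits
 *	wm_eeprom_sendbits(sc, 3, 6);			address, MSB first
 *	wm_eeprom_recvbits(sc, &val, 16);		D15..D0
 *
 * framed by raising and dropping CHIP SELECT as done above.
 */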
7831
7832 /* SPI */
7833
7834 /*
7835 * Set SPI and FLASH related information from the EECD register.
7836 * For 82541 and 82547, the word size is taken from EEPROM.
7837 */
7838 static int
7839 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
7840 {
7841 int size;
7842 uint32_t reg;
7843 uint16_t data;
7844
7845 reg = CSR_READ(sc, WMREG_EECD);
7846 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
7847
7848 /* Read the size of NVM from EECD by default */
7849 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
7850 switch (sc->sc_type) {
7851 case WM_T_82541:
7852 case WM_T_82541_2:
7853 case WM_T_82547:
7854 case WM_T_82547_2:
7855 /* Set dummy value to access EEPROM */
7856 sc->sc_nvm_wordsize = 64;
7857 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
7858 reg = data;
7859 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
7860 if (size == 0)
7861 size = 6; /* 64 word size */
7862 else
7863 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
7864 break;
7865 case WM_T_80003:
7866 case WM_T_82571:
7867 case WM_T_82572:
7868 case WM_T_82573: /* SPI case */
7869 case WM_T_82574: /* SPI case */
7870 case WM_T_82583: /* SPI case */
7871 size += NVM_WORD_SIZE_BASE_SHIFT;
7872 if (size > 14)
7873 size = 14;
7874 break;
7875 case WM_T_82575:
7876 case WM_T_82576:
7877 case WM_T_82580:
7878 case WM_T_I350:
7879 case WM_T_I354:
7880 case WM_T_I210:
7881 case WM_T_I211:
7882 size += NVM_WORD_SIZE_BASE_SHIFT;
7883 if (size > 15)
7884 size = 15;
7885 break;
7886 default:
7887 aprint_error_dev(sc->sc_dev,
7888 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
7889 return -1;
7890 break;
7891 }
7892
7893 sc->sc_nvm_wordsize = 1 << size;
7894
7895 return 0;
7896 }
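
/*
 * Worked example (assuming the usual NVM_WORD_SIZE_BASE_SHIFT of 6):
 * an EECD size field of 2 on an 82571 gives size = 2 + 6 = 8, so
 * sc_nvm_wordsize = 1 << 8 = 256 words; the 82541/82547 fallback of
 * size = 6 likewise yields the 64 word minimum noted above.
 */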
7897
7898 /*
7899 * wm_nvm_ready_spi:
7900 *
7901 * Wait for a SPI EEPROM to be ready for commands.
7902 */
7903 static int
7904 wm_nvm_ready_spi(struct wm_softc *sc)
7905 {
7906 uint32_t val;
7907 int usec;
7908
7909 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
7910 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
7911 wm_eeprom_recvbits(sc, &val, 8);
7912 if ((val & SPI_SR_RDY) == 0)
7913 break;
7914 }
7915 if (usec >= SPI_MAX_RETRIES) {
7916 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
7917 return 1;
7918 }
7919 return 0;
7920 }
7921
7922 /*
7923 * wm_nvm_read_spi:
7924 *
7925  * Read a word from the EEPROM using the SPI protocol.
7926 */
7927 static int
7928 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7929 {
7930 uint32_t reg, val;
7931 int i;
7932 uint8_t opc;
7933
7934 /* Clear SK and CS. */
7935 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
7936 CSR_WRITE(sc, WMREG_EECD, reg);
7937 CSR_WRITE_FLUSH(sc);
7938 delay(2);
7939
7940 if (wm_nvm_ready_spi(sc))
7941 return 1;
7942
7943 /* Toggle CS to flush commands. */
7944 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
7945 CSR_WRITE_FLUSH(sc);
7946 delay(2);
7947 CSR_WRITE(sc, WMREG_EECD, reg);
7948 CSR_WRITE_FLUSH(sc);
7949 delay(2);
7950
7951 opc = SPI_OPC_READ;
7952 if (sc->sc_nvm_addrbits == 8 && word >= 128)
7953 opc |= SPI_OPC_A8;
7954
7955 wm_eeprom_sendbits(sc, opc, 8);
7956 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
7957
7958 for (i = 0; i < wordcnt; i++) {
7959 wm_eeprom_recvbits(sc, &val, 16);
7960 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
7961 }
7962
7963 /* Raise CS and clear SK. */
7964 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
7965 CSR_WRITE(sc, WMREG_EECD, reg);
7966 CSR_WRITE_FLUSH(sc);
7967 delay(2);
7968
7969 return 0;
7970 }
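
/*
 * Note (descriptive): the SPI framing above is an 8-bit opcode (with
 * SPI_OPC_A8 supplying the extra address bit for words at 128 and up
 * on 8-bit-address parts), then the byte address (word << 1), then
 * 16 bits per word, byte-swapped on receive into host order.
 */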
7971
7972 /* Reading via the EERD register */
7973
7974 static int
7975 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
7976 {
7977 uint32_t attempts = 100000;
7978 uint32_t i, reg = 0;
7979 int32_t done = -1;
7980
7981 for (i = 0; i < attempts; i++) {
7982 reg = CSR_READ(sc, rw);
7983
7984 if (reg & EERD_DONE) {
7985 done = 0;
7986 break;
7987 }
7988 delay(5);
7989 }
7990
7991 return done;
7992 }
7993
7994 static int
7995 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
7996 uint16_t *data)
7997 {
7998 int i, eerd = 0;
7999 int error = 0;
8000
8001 for (i = 0; i < wordcnt; i++) {
8002 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
8003
8004 CSR_WRITE(sc, WMREG_EERD, eerd);
8005 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
8006 if (error != 0)
8007 break;
8008
8009 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
8010 }
8011
8012 return error;
8013 }
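
/*
 * Illustrative sketch of one EERD transaction as performed above:
 *
 *	CSR_WRITE(sc, WMREG_EERD, (off << EERD_ADDR_SHIFT) | EERD_START);
 *	if (wm_poll_eerd_eewr_done(sc, WMREG_EERD) == 0)
 *		word = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
 *
 * i.e. start the read, poll for EERD_DONE, then take the data from the
 * upper bits of the same register.
 */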
8014
8015 /* Flash */
8016
8017 static int
8018 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
8019 {
8020 uint32_t eecd;
8021 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
8022 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
8023 uint8_t sig_byte = 0;
8024
8025 switch (sc->sc_type) {
8026 case WM_T_ICH8:
8027 case WM_T_ICH9:
8028 eecd = CSR_READ(sc, WMREG_EECD);
8029 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
8030 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
8031 return 0;
8032 }
8033 /* FALLTHROUGH */
8034 default:
8035 /* Default to 0 */
8036 *bank = 0;
8037
8038 /* Check bank 0 */
8039 wm_read_ich8_byte(sc, act_offset, &sig_byte);
8040 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8041 *bank = 0;
8042 return 0;
8043 }
8044
8045 /* Check bank 1 */
8046 wm_read_ich8_byte(sc, act_offset + bank1_offset,
8047 &sig_byte);
8048 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8049 *bank = 1;
8050 return 0;
8051 }
8052 }
8053
8054 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
8055 device_xname(sc->sc_dev)));
8056 return -1;
8057 }
8058
8059 /******************************************************************************
8060 * This function does initial flash setup so that a new read/write/erase cycle
8061 * can be started.
8062 *
8063 * sc - The pointer to the hw structure
8064 ****************************************************************************/
8065 static int32_t
8066 wm_ich8_cycle_init(struct wm_softc *sc)
8067 {
8068 uint16_t hsfsts;
8069 int32_t error = 1;
8070 int32_t i = 0;
8071
8072 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8073
8074 	/* Maybe check the Flash Descriptor Valid bit in HW status */
8075 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
8076 return error;
8077 }
8078
8079 	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
8081 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
8082
8083 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8084
8085 	/*
8086 	 * Either we should have a hardware SPI cycle-in-progress bit to check
8087 	 * against in order to start a new cycle, or the FDONE bit should be
8088 	 * changed in the hardware so that it is 1 after a hardware reset,
8089 	 * which could then be used as an indication of whether a cycle is in
8090 	 * progress or has been completed... We should also have some software
8091 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit so
8092 	 * that two threads' accesses to those bits can be serialized, or some
8093 	 * way to keep two threads from starting the cycle at the same time.
8094 	 */
8095
8096 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8097 /*
8098 * There is no cycle running at present, so we can start a
8099 * cycle
8100 */
8101
8102 /* Begin by setting Flash Cycle Done. */
8103 hsfsts |= HSFSTS_DONE;
8104 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8105 error = 0;
8106 } else {
8107 /*
8108 		 * Otherwise poll for some time so the current cycle has a
8109 * chance to end before giving up.
8110 */
8111 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
8112 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8113 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8114 error = 0;
8115 break;
8116 }
8117 delay(1);
8118 }
8119 if (error == 0) {
8120 /*
8121 			 * The previous cycle ended within the timeout;
8122 * now set the Flash Cycle Done.
8123 */
8124 hsfsts |= HSFSTS_DONE;
8125 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8126 }
8127 }
8128 return error;
8129 }
8130
8131 /******************************************************************************
8132 * This function starts a flash cycle and waits for its completion
8133 *
8134 * sc - The pointer to the hw structure
8135 ****************************************************************************/
8136 static int32_t
8137 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
8138 {
8139 uint16_t hsflctl;
8140 uint16_t hsfsts;
8141 int32_t error = 1;
8142 uint32_t i = 0;
8143
8144 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
8145 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8146 hsflctl |= HSFCTL_GO;
8147 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8148
8149 /* Wait till FDONE bit is set to 1 */
8150 do {
8151 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8152 if (hsfsts & HSFSTS_DONE)
8153 break;
8154 delay(1);
8155 i++;
8156 } while (i < timeout);
8157 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
8158 error = 0;
8159
8160 return error;
8161 }
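
/*
 * Note (descriptive): wm_ich8_cycle_init() and wm_ich8_flash_cycle()
 * form the generic cycle engine used by wm_read_ich8_data() below:
 * init clears stale error/done state and waits out any cycle already
 * in progress, then the caller programs HSFCTL and fires HSFCTL_GO.
 */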
8162
8163 /******************************************************************************
8164 * Reads a byte or word from the NVM using the ICH8 flash access registers.
8165 *
8166 * sc - The pointer to the hw structure
8167 * index - The index of the byte or word to read.
8168 * size - Size of data to read, 1=byte 2=word
8169 * data - Pointer to the word to store the value read.
8170 *****************************************************************************/
8171 static int32_t
8172 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
8173 uint32_t size, uint16_t *data)
8174 {
8175 uint16_t hsfsts;
8176 uint16_t hsflctl;
8177 uint32_t flash_linear_address;
8178 uint32_t flash_data = 0;
8179 int32_t error = 1;
8180 int32_t count = 0;
8181
8182 	if (size < 1 || size > 2 || data == NULL ||
8183 index > ICH_FLASH_LINEAR_ADDR_MASK)
8184 return error;
8185
8186 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
8187 sc->sc_ich8_flash_base;
8188
8189 do {
8190 delay(1);
8191 /* Steps */
8192 error = wm_ich8_cycle_init(sc);
8193 if (error)
8194 break;
8195
8196 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8197 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
8198 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
8199 & HSFCTL_BCOUNT_MASK;
8200 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
8201 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8202
8203 /*
8204 * Write the last 24 bits of index into Flash Linear address
8205 * field in Flash Address
8206 */
8207 /* TODO: TBD maybe check the index against the size of flash */
8208
8209 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
8210
8211 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
8212
8213 		/*
8214 		 * Check if FCERR is set to 1; if so, clear it and try the
8215 		 * whole sequence a few more times. Otherwise read in (shift
8216 		 * in) the Flash Data0, least significant byte first.
8218 		 */
8219 if (error == 0) {
8220 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
8221 if (size == 1)
8222 *data = (uint8_t)(flash_data & 0x000000FF);
8223 else if (size == 2)
8224 *data = (uint16_t)(flash_data & 0x0000FFFF);
8225 break;
8226 } else {
8227 /*
8228 * If we've gotten here, then things are probably
8229 * completely hosed, but if the error condition is
8230 * detected, it won't hurt to give it another try...
8231 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
8232 */
8233 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8234 if (hsfsts & HSFSTS_ERR) {
8235 /* Repeat for some time before giving up. */
8236 continue;
8237 } else if ((hsfsts & HSFSTS_DONE) == 0)
8238 break;
8239 }
8240 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8241
8242 return error;
8243 }
8244
8245 /******************************************************************************
8246 * Reads a single byte from the NVM using the ICH8 flash access registers.
8247 *
8248  * sc - pointer to wm_softc structure
8249 * index - The index of the byte to read.
8250 * data - Pointer to a byte to store the value read.
8251 *****************************************************************************/
8252 static int32_t
8253 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
8254 {
8255 int32_t status;
8256 uint16_t word = 0;
8257
8258 status = wm_read_ich8_data(sc, index, 1, &word);
8259 if (status == 0)
8260 *data = (uint8_t)word;
8261 else
8262 *data = 0;
8263
8264 return status;
8265 }
8266
8267 /******************************************************************************
8268 * Reads a word from the NVM using the ICH8 flash access registers.
8269 *
8270  * sc - pointer to wm_softc structure
8271 * index - The starting byte index of the word to read.
8272 * data - Pointer to a word to store the value read.
8273 *****************************************************************************/
8274 static int32_t
8275 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8276 {
8277 int32_t status;
8278
8279 status = wm_read_ich8_data(sc, index, 2, data);
8280 return status;
8281 }
8282
8283 /******************************************************************************
8284 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
8285 * register.
8286 *
8287 * sc - Struct containing variables accessed by shared code
8288 * offset - offset of word in the EEPROM to read
8289  * words - number of words to read
8290  * data - word(s) read from the EEPROM
8291 *****************************************************************************/
8292 static int
8293 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
8294 {
8295 int32_t error = 0;
8296 uint32_t flash_bank = 0;
8297 uint32_t act_offset = 0;
8298 uint32_t bank_offset = 0;
8299 uint16_t word = 0;
8300 uint16_t i = 0;
8301
8302 /*
8303 * We need to know which is the valid flash bank. In the event
8304 * that we didn't allocate eeprom_shadow_ram, we may not be
8305 * managing flash_bank. So it cannot be trusted and needs
8306 * to be updated with each read.
8307 */
8308 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
8309 if (error) {
8310 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
8311 device_xname(sc->sc_dev)));
8312 flash_bank = 0;
8313 }
8314
8315 /*
8316 * Adjust offset appropriately if we're on bank 1 - adjust for word
8317 * size
8318 */
8319 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
8320
8321 error = wm_get_swfwhw_semaphore(sc);
8322 if (error) {
8323 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8324 __func__);
8325 return error;
8326 }
8327
8328 for (i = 0; i < words; i++) {
8329 /* The NVM part needs a byte offset, hence * 2 */
8330 act_offset = bank_offset + ((offset + i) * 2);
8331 error = wm_read_ich8_word(sc, act_offset, &word);
8332 if (error) {
8333 aprint_error_dev(sc->sc_dev,
8334 "%s: failed to read NVM\n", __func__);
8335 break;
8336 }
8337 data[i] = word;
8338 }
8339
8340 wm_put_swfwhw_semaphore(sc);
8341 return error;
8342 }
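
/*
 * Worked example (the bank size here is hypothetical; the real value
 * comes from sc_ich8_flash_bank_size): with flash_bank == 1 and a bank
 * size of 0x1000 words, NVM word 0x10 is read from byte offset
 * (0x1000 * 2) + (0x10 * 2) = 0x2020 within the flash region.
 */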
8343
8344 /* Lock, detect NVM type, validate checksum and read */
8345
8346 /*
8347 * wm_nvm_acquire:
8348 *
8349 * Perform the EEPROM handshake required on some chips.
8350 */
8351 static int
8352 wm_nvm_acquire(struct wm_softc *sc)
8353 {
8354 uint32_t reg;
8355 int x;
8356 int ret = 0;
8357
8358 	/* Flash needs no handshake here; always succeeds */
8359 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8360 return 0;
8361
8362 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8363 ret = wm_get_swfwhw_semaphore(sc);
8364 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
8365 /* This will also do wm_get_swsm_semaphore() if needed */
8366 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
8367 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
8368 ret = wm_get_swsm_semaphore(sc);
8369 }
8370
8371 if (ret) {
8372 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8373 __func__);
8374 return 1;
8375 }
8376
8377 if (sc->sc_flags & WM_F_LOCK_EECD) {
8378 reg = CSR_READ(sc, WMREG_EECD);
8379
8380 /* Request EEPROM access. */
8381 reg |= EECD_EE_REQ;
8382 CSR_WRITE(sc, WMREG_EECD, reg);
8383
8384 /* ..and wait for it to be granted. */
8385 for (x = 0; x < 1000; x++) {
8386 reg = CSR_READ(sc, WMREG_EECD);
8387 if (reg & EECD_EE_GNT)
8388 break;
8389 delay(5);
8390 }
8391 if ((reg & EECD_EE_GNT) == 0) {
8392 aprint_error_dev(sc->sc_dev,
8393 "could not acquire EEPROM GNT\n");
8394 reg &= ~EECD_EE_REQ;
8395 CSR_WRITE(sc, WMREG_EECD, reg);
8396 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8397 wm_put_swfwhw_semaphore(sc);
8398 if (sc->sc_flags & WM_F_LOCK_SWFW)
8399 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8400 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8401 wm_put_swsm_semaphore(sc);
8402 return 1;
8403 }
8404 }
8405
8406 return 0;
8407 }
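
/*
 * Note (descriptive): the lock order above is firmware semaphore first
 * (EXTCNF, SWFW or SWSM, per the chip's WM_F_LOCK_* flags), then the
 * per-access EECD_EE_REQ/EECD_EE_GNT handshake; wm_nvm_release() below
 * undoes the two in the reverse order.
 */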
8408
8409 /*
8410 * wm_nvm_release:
8411 *
8412 * Release the EEPROM mutex.
8413 */
8414 static void
8415 wm_nvm_release(struct wm_softc *sc)
8416 {
8417 uint32_t reg;
8418
8419 	/* Flash needed no handshake; nothing to release */
8420 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8421 return;
8422
8423 if (sc->sc_flags & WM_F_LOCK_EECD) {
8424 reg = CSR_READ(sc, WMREG_EECD);
8425 reg &= ~EECD_EE_REQ;
8426 CSR_WRITE(sc, WMREG_EECD, reg);
8427 }
8428
8429 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8430 wm_put_swfwhw_semaphore(sc);
8431 if (sc->sc_flags & WM_F_LOCK_SWFW)
8432 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8433 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8434 wm_put_swsm_semaphore(sc);
8435 }
8436
8437 static int
8438 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
8439 {
8440 uint32_t eecd = 0;
8441
8442 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
8443 || sc->sc_type == WM_T_82583) {
8444 eecd = CSR_READ(sc, WMREG_EECD);
8445
8446 /* Isolate bits 15 & 16 */
8447 eecd = ((eecd >> 15) & 0x03);
8448
8449 /* If both bits are set, device is Flash type */
8450 if (eecd == 0x03)
8451 return 0;
8452 }
8453 return 1;
8454 }
8455
8456 /*
8457  * wm_nvm_validate_checksum:
8458 *
8459 * The checksum is defined as the sum of the first 64 (16 bit) words.
8460 */
8461 static int
8462 wm_nvm_validate_checksum(struct wm_softc *sc)
8463 {
8464 uint16_t checksum;
8465 uint16_t eeprom_data;
8466 #ifdef WM_DEBUG
8467 uint16_t csum_wordaddr, valid_checksum;
8468 #endif
8469 int i;
8470
8471 checksum = 0;
8472
8473 /* Don't check for I211 */
8474 if (sc->sc_type == WM_T_I211)
8475 return 0;
8476
8477 #ifdef WM_DEBUG
8478 if (sc->sc_type == WM_T_PCH_LPT) {
8479 csum_wordaddr = NVM_OFF_COMPAT;
8480 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
8481 } else {
8482 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
8483 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
8484 }
8485
8486 /* Dump EEPROM image for debug */
8487 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8488 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8489 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
8490 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
8491 if ((eeprom_data & valid_checksum) == 0) {
8492 DPRINTF(WM_DEBUG_NVM,
8493 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
8494 device_xname(sc->sc_dev), eeprom_data,
8495 valid_checksum));
8496 }
8497 }
8498
8499 if ((wm_debug & WM_DEBUG_NVM) != 0) {
8500 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
8501 for (i = 0; i < NVM_SIZE; i++) {
8502 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8503 printf("XXXX ");
8504 else
8505 printf("%04hx ", eeprom_data);
8506 if (i % 8 == 7)
8507 printf("\n");
8508 }
8509 }
8510
8511 #endif /* WM_DEBUG */
8512
8513 for (i = 0; i < NVM_SIZE; i++) {
8514 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8515 return 1;
8516 checksum += eeprom_data;
8517 }
8518
8519 if (checksum != (uint16_t) NVM_CHECKSUM) {
8520 #ifdef WM_DEBUG
8521 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
8522 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
8523 #endif
8524 }
8525
8526 return 0;
8527 }
8528
8529 /*
8530 * wm_nvm_read:
8531 *
8532 * Read data from the serial EEPROM.
8533 */
8534 static int
8535 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8536 {
8537 int rv;
8538
8539 if (sc->sc_flags & WM_F_EEPROM_INVALID)
8540 return 1;
8541
8542 if (wm_nvm_acquire(sc))
8543 return 1;
8544
8545 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8546 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8547 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
8548 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
8549 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
8550 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
8551 else if (sc->sc_flags & WM_F_EEPROM_SPI)
8552 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
8553 else
8554 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
8555
8556 wm_nvm_release(sc);
8557 return rv;
8558 }
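
/*
 * Illustrative sketch: callers go through this single entry point, e.g.
 *
 *	uint16_t cfg2;
 *
 *	if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2) != 0)
 *		(the read failed or the EEPROM content is invalid)
 *
 * and the dispatch above picks ICH8 flash, EERD, SPI or Microwire
 * access depending on the chip type and flags.
 */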
8559
8560 /*
8561 * Hardware semaphores.
8562  * Very complex...
8563 */
8564
8565 static int
8566 wm_get_swsm_semaphore(struct wm_softc *sc)
8567 {
8568 int32_t timeout;
8569 uint32_t swsm;
8570
8571 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8572 /* Get the SW semaphore. */
8573 timeout = sc->sc_nvm_wordsize + 1;
8574 while (timeout) {
8575 swsm = CSR_READ(sc, WMREG_SWSM);
8576
8577 if ((swsm & SWSM_SMBI) == 0)
8578 break;
8579
8580 delay(50);
8581 timeout--;
8582 }
8583
8584 if (timeout == 0) {
8585 aprint_error_dev(sc->sc_dev,
8586 "could not acquire SWSM SMBI\n");
8587 return 1;
8588 }
8589 }
8590
8591 /* Get the FW semaphore. */
8592 timeout = sc->sc_nvm_wordsize + 1;
8593 while (timeout) {
8594 swsm = CSR_READ(sc, WMREG_SWSM);
8595 swsm |= SWSM_SWESMBI;
8596 CSR_WRITE(sc, WMREG_SWSM, swsm);
8597 /* If we managed to set the bit we got the semaphore. */
8598 swsm = CSR_READ(sc, WMREG_SWSM);
8599 if (swsm & SWSM_SWESMBI)
8600 break;
8601
8602 delay(50);
8603 timeout--;
8604 }
8605
8606 if (timeout == 0) {
8607 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
8608 /* Release semaphores */
8609 wm_put_swsm_semaphore(sc);
8610 return 1;
8611 }
8612 return 0;
8613 }
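
/*
 * Note (descriptive): SWSM implements a two-stage handshake. SMBI acts
 * as a test-and-set bit (the read that observes it clear also claims
 * it), while SWESMBI is claimed by writing it and reading it back; if
 * firmware holds the semaphore, the readback stays clear and we retry.
 */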
8614
8615 static void
8616 wm_put_swsm_semaphore(struct wm_softc *sc)
8617 {
8618 uint32_t swsm;
8619
8620 swsm = CSR_READ(sc, WMREG_SWSM);
8621 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
8622 CSR_WRITE(sc, WMREG_SWSM, swsm);
8623 }
8624
8625 static int
8626 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8627 {
8628 uint32_t swfw_sync;
8629 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
8630 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
8631 	int timeout;
8632
8633 for (timeout = 0; timeout < 200; timeout++) {
8634 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8635 if (wm_get_swsm_semaphore(sc)) {
8636 aprint_error_dev(sc->sc_dev,
8637 "%s: failed to get semaphore\n",
8638 __func__);
8639 return 1;
8640 }
8641 }
8642 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8643 if ((swfw_sync & (swmask | fwmask)) == 0) {
8644 swfw_sync |= swmask;
8645 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8646 if (sc->sc_flags & WM_F_LOCK_SWSM)
8647 wm_put_swsm_semaphore(sc);
8648 return 0;
8649 }
8650 if (sc->sc_flags & WM_F_LOCK_SWSM)
8651 wm_put_swsm_semaphore(sc);
8652 delay(5000);
8653 }
8654 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
8655 device_xname(sc->sc_dev), mask, swfw_sync);
8656 return 1;
8657 }
8658
8659 static void
8660 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8661 {
8662 uint32_t swfw_sync;
8663
8664 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8665 while (wm_get_swsm_semaphore(sc) != 0)
8666 continue;
8667 }
8668 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8669 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
8670 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8671 if (sc->sc_flags & WM_F_LOCK_SWSM)
8672 wm_put_swsm_semaphore(sc);
8673 }
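
/*
 * Note (descriptive): SW_FW_SYNC carries one software and one firmware
 * bit per resource (mask << SWFW_SOFT_SHIFT and mask << SWFW_FIRM_SHIFT);
 * a resource such as SWFW_EEP_SM is free only when both bits are clear,
 * and the SWSM semaphore above serializes our read-modify-write of
 * SW_FW_SYNC itself.
 */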
8674
8675 static int
8676 wm_get_swfwhw_semaphore(struct wm_softc *sc)
8677 {
8678 uint32_t ext_ctrl;
8679 	int timeout;
8680
8681 for (timeout = 0; timeout < 200; timeout++) {
8682 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8683 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
8684 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8685
8686 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8687 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
8688 return 0;
8689 delay(5000);
8690 }
8691 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
8692 device_xname(sc->sc_dev), ext_ctrl);
8693 return 1;
8694 }
8695
8696 static void
8697 wm_put_swfwhw_semaphore(struct wm_softc *sc)
8698 {
8699 uint32_t ext_ctrl;
8700 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8701 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
8702 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8703 }
8704
8705 static int
8706 wm_get_hw_semaphore_82573(struct wm_softc *sc)
8707 {
8708 int i = 0;
8709 uint32_t reg;
8710
8711 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8712 do {
8713 CSR_WRITE(sc, WMREG_EXTCNFCTR,
8714 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
8715 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8716 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
8717 break;
8718 delay(2*1000);
8719 i++;
8720 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
8721
8722 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
8723 wm_put_hw_semaphore_82573(sc);
8724 log(LOG_ERR, "%s: Driver can't access the PHY\n",
8725 device_xname(sc->sc_dev));
8726 return -1;
8727 }
8728
8729 return 0;
8730 }
8731
8732 static void
8733 wm_put_hw_semaphore_82573(struct wm_softc *sc)
8734 {
8735 uint32_t reg;
8736
8737 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8738 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
8739 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8740 }
8741
8742 /*
8743 * Management mode and power management related subroutines.
8744 * BMC, AMT, suspend/resume and EEE.
8745 */
8746
8747 static int
8748 wm_check_mng_mode(struct wm_softc *sc)
8749 {
8750 int rv;
8751
8752 switch (sc->sc_type) {
8753 case WM_T_ICH8:
8754 case WM_T_ICH9:
8755 case WM_T_ICH10:
8756 case WM_T_PCH:
8757 case WM_T_PCH2:
8758 case WM_T_PCH_LPT:
8759 rv = wm_check_mng_mode_ich8lan(sc);
8760 break;
8761 case WM_T_82574:
8762 case WM_T_82583:
8763 rv = wm_check_mng_mode_82574(sc);
8764 break;
8765 case WM_T_82571:
8766 case WM_T_82572:
8767 case WM_T_82573:
8768 case WM_T_80003:
8769 rv = wm_check_mng_mode_generic(sc);
8770 break;
8771 default:
8772 		/* nothing to do */
8773 rv = 0;
8774 break;
8775 }
8776
8777 return rv;
8778 }
8779
8780 static int
8781 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
8782 {
8783 uint32_t fwsm;
8784
8785 fwsm = CSR_READ(sc, WMREG_FWSM);
8786
8787 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
8788 return 1;
8789
8790 return 0;
8791 }
8792
8793 static int
8794 wm_check_mng_mode_82574(struct wm_softc *sc)
8795 {
8796 uint16_t data;
8797
8798 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
8799
8800 if ((data & NVM_CFG2_MNGM_MASK) != 0)
8801 return 1;
8802
8803 return 0;
8804 }
8805
8806 static int
8807 wm_check_mng_mode_generic(struct wm_softc *sc)
8808 {
8809 uint32_t fwsm;
8810
8811 fwsm = CSR_READ(sc, WMREG_FWSM);
8812
8813 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
8814 return 1;
8815
8816 return 0;
8817 }
8818
8819 static int
8820 wm_enable_mng_pass_thru(struct wm_softc *sc)
8821 {
8822 uint32_t manc, fwsm, factps;
8823
8824 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
8825 return 0;
8826
8827 manc = CSR_READ(sc, WMREG_MANC);
8828
8829 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
8830 device_xname(sc->sc_dev), manc));
8831 if ((manc & MANC_RECV_TCO_EN) == 0)
8832 return 0;
8833
8834 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
8835 fwsm = CSR_READ(sc, WMREG_FWSM);
8836 factps = CSR_READ(sc, WMREG_FACTPS);
8837 if (((factps & FACTPS_MNGCG) == 0)
8838 && ((fwsm & FWSM_MODE_MASK)
8839 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
8840 return 1;
8841 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
8842 uint16_t data;
8843
8844 factps = CSR_READ(sc, WMREG_FACTPS);
8845 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
8846 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
8847 device_xname(sc->sc_dev), factps, data));
8848 if (((factps & FACTPS_MNGCG) == 0)
8849 && ((data & NVM_CFG2_MNGM_MASK)
8850 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
8851 return 1;
8852 } else if (((manc & MANC_SMBUS_EN) != 0)
8853 && ((manc & MANC_ASF_EN) == 0))
8854 return 1;
8855
8856 return 0;
8857 }
8858
8859 static int
8860 wm_check_reset_block(struct wm_softc *sc)
8861 {
8862 uint32_t reg;
8863
8864 switch (sc->sc_type) {
8865 case WM_T_ICH8:
8866 case WM_T_ICH9:
8867 case WM_T_ICH10:
8868 case WM_T_PCH:
8869 case WM_T_PCH2:
8870 case WM_T_PCH_LPT:
8871 reg = CSR_READ(sc, WMREG_FWSM);
8872 if ((reg & FWSM_RSPCIPHY) != 0)
8873 return 0;
8874 else
8875 return -1;
8876 break;
8877 case WM_T_82571:
8878 case WM_T_82572:
8879 case WM_T_82573:
8880 case WM_T_82574:
8881 case WM_T_82583:
8882 case WM_T_80003:
8883 reg = CSR_READ(sc, WMREG_MANC);
8884 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
8885 return -1;
8886 else
8887 return 0;
8888 break;
8889 default:
8890 /* no problem */
8891 break;
8892 }
8893
8894 return 0;
8895 }
8896
8897 static void
8898 wm_get_hw_control(struct wm_softc *sc)
8899 {
8900 uint32_t reg;
8901
8902 switch (sc->sc_type) {
8903 case WM_T_82573:
8904 reg = CSR_READ(sc, WMREG_SWSM);
8905 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
8906 break;
8907 case WM_T_82571:
8908 case WM_T_82572:
8909 case WM_T_82574:
8910 case WM_T_82583:
8911 case WM_T_80003:
8912 case WM_T_ICH8:
8913 case WM_T_ICH9:
8914 case WM_T_ICH10:
8915 case WM_T_PCH:
8916 case WM_T_PCH2:
8917 case WM_T_PCH_LPT:
8918 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8919 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
8920 break;
8921 default:
8922 break;
8923 }
8924 }
8925
8926 static void
8927 wm_release_hw_control(struct wm_softc *sc)
8928 {
8929 uint32_t reg;
8930
8931 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
8932 return;
8933
8934 if (sc->sc_type == WM_T_82573) {
8935 reg = CSR_READ(sc, WMREG_SWSM);
8936 reg &= ~SWSM_DRV_LOAD;
8937 		CSR_WRITE(sc, WMREG_SWSM, reg);
8938 } else {
8939 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8940 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
8941 }
8942 }
8943
8944 static void
8945 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
8946 {
8947 uint32_t reg;
8948
8949 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8950
8951 if (on != 0)
8952 reg |= EXTCNFCTR_GATE_PHY_CFG;
8953 else
8954 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
8955
8956 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8957 }
8958
8959 static void
8960 wm_smbustopci(struct wm_softc *sc)
8961 {
8962 uint32_t fwsm;
8963
8964 fwsm = CSR_READ(sc, WMREG_FWSM);
8965 if (((fwsm & FWSM_FW_VALID) == 0)
8966 && ((wm_check_reset_block(sc) == 0))) {
8967 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8968 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8969 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8970 CSR_WRITE_FLUSH(sc);
8971 delay(10);
8972 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8973 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8974 CSR_WRITE_FLUSH(sc);
8975 delay(50*1000);
8976
8977 /*
8978 * Gate automatic PHY configuration by hardware on non-managed
8979 * 82579
8980 */
8981 if (sc->sc_type == WM_T_PCH2)
8982 wm_gate_hw_phy_config_ich8lan(sc, 1);
8983 }
8984 }
8985
8986 static void
8987 wm_init_manageability(struct wm_softc *sc)
8988 {
8989
8990 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8991 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8992 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8993
8994 /* Disable hardware interception of ARP */
8995 manc &= ~MANC_ARP_EN;
8996
8997 /* Enable receiving management packets to the host */
8998 if (sc->sc_type >= WM_T_82571) {
8999 manc |= MANC_EN_MNG2HOST;
9000 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
9001 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
9003 		}
9004
9005 CSR_WRITE(sc, WMREG_MANC, manc);
9006 }
9007 }
9008
9009 static void
9010 wm_release_manageability(struct wm_softc *sc)
9011 {
9012
9013 if (sc->sc_flags & WM_F_HAS_MANAGE) {
9014 uint32_t manc = CSR_READ(sc, WMREG_MANC);
9015
9016 manc |= MANC_ARP_EN;
9017 if (sc->sc_type >= WM_T_82571)
9018 manc &= ~MANC_EN_MNG2HOST;
9019
9020 CSR_WRITE(sc, WMREG_MANC, manc);
9021 }
9022 }
9023
9024 static void
9025 wm_get_wakeup(struct wm_softc *sc)
9026 {
9027
9028 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
9029 switch (sc->sc_type) {
9030 case WM_T_82573:
9031 case WM_T_82583:
9032 sc->sc_flags |= WM_F_HAS_AMT;
9033 /* FALLTHROUGH */
9034 case WM_T_80003:
9035 case WM_T_82541:
9036 case WM_T_82547:
9037 case WM_T_82571:
9038 case WM_T_82572:
9039 case WM_T_82574:
9040 case WM_T_82575:
9041 case WM_T_82576:
9042 case WM_T_82580:
9043 case WM_T_I350:
9044 case WM_T_I354:
9045 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
9046 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
9047 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9048 break;
9049 case WM_T_ICH8:
9050 case WM_T_ICH9:
9051 case WM_T_ICH10:
9052 case WM_T_PCH:
9053 case WM_T_PCH2:
9054 case WM_T_PCH_LPT:
9055 sc->sc_flags |= WM_F_HAS_AMT;
9056 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9057 break;
9058 default:
9059 break;
9060 }
9061
9062 /* 1: HAS_MANAGE */
9063 if (wm_enable_mng_pass_thru(sc) != 0)
9064 sc->sc_flags |= WM_F_HAS_MANAGE;
9065
9066 #ifdef WM_DEBUG
9067 printf("\n");
9068 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
9069 printf("HAS_AMT,");
9070 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
9071 printf("ARC_SUBSYS_VALID,");
9072 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
9073 printf("ASF_FIRMWARE_PRES,");
9074 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
9075 printf("HAS_MANAGE,");
9076 printf("\n");
9077 #endif
9078 /*
9079 	 * Note that the WOL flags are set after the resetting of the
9080 	 * EEPROM stuff.
9081 */
9082 }
9083
9084 #ifdef WM_WOL
9085 /* WOL in the newer chipset interfaces (pchlan) */
9086 static void
9087 wm_enable_phy_wakeup(struct wm_softc *sc)
9088 {
9089 #if 0
9090 uint16_t preg;
9091
9092 /* Copy MAC RARs to PHY RARs */
9093
9094 /* Copy MAC MTA to PHY MTA */
9095
9096 /* Configure PHY Rx Control register */
9097
9098 /* Enable PHY wakeup in MAC register */
9099
9100 /* Configure and enable PHY wakeup in PHY registers */
9101
9102 /* Activate PHY wakeup */
9103
9104 /* XXX */
9105 #endif
9106 }
9107
9108 /* Power down workaround on D3 */
9109 static void
9110 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
9111 {
9112 uint32_t reg;
9113 int i;
9114
9115 for (i = 0; i < 2; i++) {
9116 /* Disable link */
9117 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9118 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9119 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9120
9121 /*
9122 * Call gig speed drop workaround on Gig disable before
9123 * accessing any PHY registers
9124 */
9125 if (sc->sc_type == WM_T_ICH8)
9126 wm_gig_downshift_workaround_ich8lan(sc);
9127
9128 /* Write VR power-down enable */
9129 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9130 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9131 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
9132 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
9133
9134 /* Read it back and test */
9135 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9136 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9137 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
9138 break;
9139
9140 /* Issue PHY reset and repeat at most one more time */
9141 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9142 }
9143 }
9144
9145 static void
9146 wm_enable_wakeup(struct wm_softc *sc)
9147 {
9148 uint32_t reg, pmreg;
9149 pcireg_t pmode;
9150
9151 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
9152 &pmreg, NULL) == 0)
9153 return;
9154
9155 /* Advertise the wakeup capability */
9156 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
9157 | CTRL_SWDPIN(3));
9158 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
9159
9160 /* ICH workaround */
9161 switch (sc->sc_type) {
9162 case WM_T_ICH8:
9163 case WM_T_ICH9:
9164 case WM_T_ICH10:
9165 case WM_T_PCH:
9166 case WM_T_PCH2:
9167 case WM_T_PCH_LPT:
9168 /* Disable gig during WOL */
9169 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9170 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
9171 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9172 if (sc->sc_type == WM_T_PCH)
9173 wm_gmii_reset(sc);
9174
9175 /* Power down workaround */
9176 if (sc->sc_phytype == WMPHY_82577) {
9177 struct mii_softc *child;
9178
9179 /* Assume that the PHY is copper */
9180 child = LIST_FIRST(&sc->sc_mii.mii_phys);
9181 if (child->mii_mpd_rev <= 2)
9182 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
9183 (768 << 5) | 25, 0x0444); /* magic num */
9184 }
9185 break;
9186 default:
9187 break;
9188 }
9189
9190 /* Keep the laser running on fiber adapters */
9191 if (((sc->sc_mediatype & WMP_F_FIBER) != 0)
9192 || (sc->sc_mediatype & WMP_F_SERDES) != 0) {
9193 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9194 reg |= CTRL_EXT_SWDPIN(3);
9195 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9196 }
9197
9198 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
9199 #if 0 /* for the multicast packet */
9200 reg |= WUFC_MC;
9201 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
9202 #endif
9203
9204 if (sc->sc_type == WM_T_PCH) {
9205 wm_enable_phy_wakeup(sc);
9206 } else {
9207 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
9208 CSR_WRITE(sc, WMREG_WUFC, reg);
9209 }
9210
9211 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9212 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9213 || (sc->sc_type == WM_T_PCH2))
9214 && (sc->sc_phytype == WMPHY_IGP_3))
9215 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
9216
9217 /* Request PME */
9218 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
9219 #if 0
9220 /* Disable WOL */
9221 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
9222 #else
9223 /* For WOL */
9224 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
9225 #endif
9226 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
9227 }
9228 #endif /* WM_WOL */
9229
9230 /* EEE */
9231
9232 static void
9233 wm_set_eee_i350(struct wm_softc *sc)
9234 {
9235 uint32_t ipcnfg, eeer;
9236
9237 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
9238 eeer = CSR_READ(sc, WMREG_EEER);
9239
9240 if ((sc->sc_flags & WM_F_EEE) != 0) {
9241 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9242 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
9243 | EEER_LPI_FC);
9244 } else {
9245 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9246 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
9247 | EEER_LPI_FC);
9248 }
9249
9250 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
9251 CSR_WRITE(sc, WMREG_EEER, eeer);
9252 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
9253 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
9254 }
9255
9256 /*
9257 * Workarounds (mainly PHY related).
9258  * Basically, the PHY workarounds are in the PHY drivers.
9259 */
9260
9261 /* Work-around for 82566 Kumeran PCS lock loss */
9262 static void
9263 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
9264 {
9265 int miistatus, active, i;
9266 int reg;
9267
9268 miistatus = sc->sc_mii.mii_media_status;
9269
9270 /* If the link is not up, do nothing */
9271 	if ((miistatus & IFM_ACTIVE) == 0)
9272 return;
9273
9274 active = sc->sc_mii.mii_media_active;
9275
9276 /* Nothing to do if the link is other than 1Gbps */
9277 if (IFM_SUBTYPE(active) != IFM_1000_T)
9278 return;
9279
9280 for (i = 0; i < 10; i++) {
9281 /* read twice */
9282 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9283 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9284 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
9285 goto out; /* GOOD! */
9286
9287 /* Reset the PHY */
9288 wm_gmii_reset(sc);
9289 delay(5*1000);
9290 }
9291
9292 /* Disable GigE link negotiation */
9293 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9294 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9295 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9296
9297 /*
9298 * Call gig speed drop workaround on Gig disable before accessing
9299 * any PHY registers.
9300 */
9301 wm_gig_downshift_workaround_ich8lan(sc);
9302
9303 out:
9304 return;
9305 }
9306
9307 /* WOL from S5 stops working */
9308 static void
9309 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
9310 {
9311 uint16_t kmrn_reg;
9312
9313 /* Only for igp3 */
9314 if (sc->sc_phytype == WMPHY_IGP_3) {
9315 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
9316 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
9317 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9318 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
9319 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9320 }
9321 }
9322
9323 /*
9324 * Workaround for pch's PHYs
9325 * XXX should be moved to new PHY driver?
9326 */
9327 static void
9328 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
9329 {
9330 if (sc->sc_phytype == WMPHY_82577)
9331 wm_set_mdio_slow_mode_hv(sc);
9332
9333 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
9334
9335 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
9336
9337 /* 82578 */
9338 if (sc->sc_phytype == WMPHY_82578) {
9339 /* PCH rev. < 3 */
9340 if (sc->sc_rev < 3) {
9341 /* XXX 6 bit shift? Why? Is it page2? */
9342 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
9343 0x66c0);
9344 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
9345 0xffff);
9346 }
9347
9348 /* XXX phy rev. < 2 */
9349 }
9350
9351 /* Select page 0 */
9352
9353 /* XXX acquire semaphore */
9354 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
9355 /* XXX release semaphore */
9356
9357 /*
9358 * Configure the K1 Si workaround during phy reset assuming there is
9359 	 * link so that K1 is disabled if the link is running at 1Gbps.
9360 */
9361 wm_k1_gig_workaround_hv(sc, 1);
9362 }
9363
9364 static void
9365 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
9366 {
9367
9368 wm_set_mdio_slow_mode_hv(sc);
9369 }
9370
9371 static void
9372 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
9373 {
9374 int k1_enable = sc->sc_nvm_k1_enabled;
9375
9376 /* XXX acquire semaphore */
9377
9378 if (link) {
9379 k1_enable = 0;
9380
9381 /* Link stall fix for link up */
9382 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
9383 } else {
9384 /* Link stall fix for link down */
9385 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
9386 }
9387
9388 wm_configure_k1_ich8lan(sc, k1_enable);
9389
9390 /* XXX release semaphore */
9391 }
9392
9393 static void
9394 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
9395 {
9396 uint32_t reg;
9397
9398 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
9399 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
9400 reg | HV_KMRN_MDIO_SLOW);
9401 }
9402
9403 static void
9404 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
9405 {
9406 uint32_t ctrl, ctrl_ext, tmp;
9407 uint16_t kmrn_reg;
9408
9409 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
9410
9411 if (k1_enable)
9412 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
9413 else
9414 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
9415
9416 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
9417
9418 delay(20);
9419
9420 ctrl = CSR_READ(sc, WMREG_CTRL);
9421 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9422
9423 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
9424 tmp |= CTRL_FRCSPD;
9425
9426 CSR_WRITE(sc, WMREG_CTRL, tmp);
9427 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
9428 CSR_WRITE_FLUSH(sc);
9429 delay(20);
9430
9431 CSR_WRITE(sc, WMREG_CTRL, ctrl);
9432 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9433 CSR_WRITE_FLUSH(sc);
9434 delay(20);
9435 }
9436
9437 /* Special case - the 82575 needs manual init ... */
9438 static void
9439 wm_reset_init_script_82575(struct wm_softc *sc)
9440 {
9441 /*
9442 	 * Remark: this is untested code - we have no board without EEPROM.
9443 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
9444 */
9445
9446 /* SerDes configuration via SERDESCTRL */
9447 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
9448 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
9449 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
9450 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
9451
9452 /* CCM configuration via CCMCTL register */
9453 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
9454 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
9455
9456 /* PCIe lanes configuration */
9457 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
9458 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
9459 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
9460 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
9461
9462 /* PCIe PLL Configuration */
9463 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
9464 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
9465 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
9466 }
9467