/*	$NetBSD: if_wm.c,v 1.300 2014/10/06 07:31:24 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- EEE (Energy Efficient Ethernet)
 *	- MSI/MSI-X
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.300 2014/10/06 07:31:24 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
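
/*
 * Illustrative use of DPRINTF() (added commentary, not from the original
 * source): the second argument must be a parenthesized printf() argument
 * list, since the macro pastes it directly after "printf".  A debug-only
 * link message would look like:
 *
 *	DPRINTF(WM_DEBUG_LINK,
 *	    ("%s: link up\n", device_xname(sc->sc_dev)));
 */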

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
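
/*
 * Added note (commentary, not in the original): the ring and job-queue
 * sizes must be powers of two for the mask arithmetic above to work;
 * "(x + 1) & (size - 1)" is then equivalent to "(x + 1) % size".  For
 * example, with a 256-entry ring, WM_NEXTTX() advances index 255 back
 * to 0.
 */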

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t	swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
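
/*
 * Added note (commentary, not in the original): this table appears to be
 * indexed by the chip's function ID, e.g. swfwphysem[sc->sc_funcid], to
 * select the software/firmware semaphore bit for the PHY belonging to
 * that port.
 */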

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define WM_TX_LOCK(_sc)		if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define WM_RX_LOCK(_sc)		if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
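
/*
 * Illustrative use of the lock macros (added commentary, not from the
 * original source): a typical pattern is to take the Tx lock around
 * transmit-side state, e.g.
 *
 *	WM_TX_LOCK(sc);
 *	if (!sc->sc_stopping)
 *		wm_start_locked(ifp);
 *	WM_TX_UNLOCK(sc);
 *
 * Note that WM_BOTH_LOCK() acquires Tx then Rx and WM_BOTH_UNLOCK()
 * releases in the reverse order, keeping the lock ordering consistent.
 */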

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif
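
/*
 * Illustrative use (added commentary, not from the original source):
 * counting a Tx descriptor interrupt, which compiles away entirely when
 * WM_EVENT_COUNTERS is not defined:
 *
 *	WM_EVCNT_INCR(&sc->sc_ev_txdw);
 */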

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
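
/*
 * Illustrative use (added commentary, not from the original source):
 *
 *	reg = CSR_READ(sc, WMREG_STATUS);
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 *	CSR_WRITE_FLUSH(sc);
 *
 * The read-back in CSR_WRITE_FLUSH() forces posted PCI writes out to
 * the device before the driver proceeds.
 */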

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
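
/*
 * Illustrative use (added commentary, not from the original source):
 * after filling in a packet's descriptors, they are flushed to memory
 * before the hardware is told about them, e.g.
 *
 *	WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
 *	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 *
 * The two-step sync above handles the case where the descriptor range
 * wraps past the end of the ring.
 */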

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
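
/*
 * Added note (commentary, not in the original): the final CSR_WRITE()
 * of the Rx Descriptor Tail (RDT) register is what actually hands the
 * freshly initialized descriptor back to the hardware, which is why the
 * WM_CDRXSYNC() must come first.
 */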

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_mediainit(struct wm_softc *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		0x00
#define	WMP_F_FIBER		0x01
#define	WMP_F_COPPER		0x02
#define	WMP_F_SERDES		0x03 /* Internal SERDES */
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Connection",
	  WM_T_I354,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

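/*
 * Added note (commentary, not in the original): these helpers use the
 * I/O-mapped indirect access window mapped above.  The register offset
 * is written to the address port at offset 0 of the I/O BAR, and the
 * data is then read from, or written to, the data port at offset 4.
 */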
static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
			 * That's not a problem, because newer chips don't
			 * have this bug.
			 *
			 * The i8254x doesn't apparently respond when the
			 * I/O BAR is 0, which looks somewhat like it's not
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
#ifdef WM_MPSAFE
	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
#endif
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
1504 */
1505 sc->sc_flags |= WM_F_CSA;
1506 sc->sc_bus_speed = 66;
1507 aprint_verbose_dev(sc->sc_dev,
1508 "Communication Streaming Architecture\n");
1509 if (sc->sc_type == WM_T_82547) {
1510 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1511 callout_setfunc(&sc->sc_txfifo_ch,
1512 wm_82547_txfifo_stall, sc);
1513 aprint_verbose_dev(sc->sc_dev,
1514 "using 82547 Tx FIFO stall work-around\n");
1515 }
1516 } else if (sc->sc_type >= WM_T_82571) {
1517 sc->sc_flags |= WM_F_PCIE;
1518 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1519 && (sc->sc_type != WM_T_ICH10)
1520 && (sc->sc_type != WM_T_PCH)
1521 && (sc->sc_type != WM_T_PCH2)
1522 && (sc->sc_type != WM_T_PCH_LPT)) {
1523 /* ICH* and PCH* have no PCIe capability registers */
1524 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1525 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1526 NULL) == 0)
1527 aprint_error_dev(sc->sc_dev,
1528 "unable to find PCIe capability\n");
1529 }
1530 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1531 } else {
1532 reg = CSR_READ(sc, WMREG_STATUS);
1533 if (reg & STATUS_BUS64)
1534 sc->sc_flags |= WM_F_BUS64;
1535 if ((reg & STATUS_PCIX_MODE) != 0) {
1536 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1537
1538 sc->sc_flags |= WM_F_PCIX;
1539 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1540 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1541 aprint_error_dev(sc->sc_dev,
1542 "unable to find PCIX capability\n");
1543 else if (sc->sc_type != WM_T_82545_3 &&
1544 sc->sc_type != WM_T_82546_3) {
1545 /*
1546 * Work around a problem caused by the BIOS
1547 * setting the max memory read byte count
1548 * incorrectly.
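* The byte-count fields encode 512 << n bytes; e.g. a
* BIOS-programmed bytecnt of 3 (4096 bytes) against a
* maxb of 1 (1024 bytes) is rewritten to 1 below.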
1549 */
1550 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1551 sc->sc_pcixe_capoff + PCIX_CMD);
1552 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1553 sc->sc_pcixe_capoff + PCIX_STATUS);
1554
1555 bytecnt =
1556 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1557 PCIX_CMD_BYTECNT_SHIFT;
1558 maxb =
1559 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1560 PCIX_STATUS_MAXB_SHIFT;
1561 if (bytecnt > maxb) {
1562 aprint_verbose_dev(sc->sc_dev,
1563 "resetting PCI-X MMRBC: %d -> %d\n",
1564 512 << bytecnt, 512 << maxb);
1565 pcix_cmd = (pcix_cmd &
1566 ~PCIX_CMD_BYTECNT_MASK) |
1567 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1568 pci_conf_write(pa->pa_pc, pa->pa_tag,
1569 sc->sc_pcixe_capoff + PCIX_CMD,
1570 pcix_cmd);
1571 }
1572 }
1573 }
1574 /*
1575 * The quad port adapter is special; it has a PCIX-PCIX
1576 * bridge on the board, and can run the secondary bus at
1577 * a higher speed.
1578 */
1579 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1580 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1581 : 66;
1582 } else if (sc->sc_flags & WM_F_PCIX) {
1583 switch (reg & STATUS_PCIXSPD_MASK) {
1584 case STATUS_PCIXSPD_50_66:
1585 sc->sc_bus_speed = 66;
1586 break;
1587 case STATUS_PCIXSPD_66_100:
1588 sc->sc_bus_speed = 100;
1589 break;
1590 case STATUS_PCIXSPD_100_133:
1591 sc->sc_bus_speed = 133;
1592 break;
1593 default:
1594 aprint_error_dev(sc->sc_dev,
1595 "unknown PCIXSPD %d; assuming 66MHz\n",
1596 reg & STATUS_PCIXSPD_MASK);
1597 sc->sc_bus_speed = 66;
1598 break;
1599 }
1600 } else
1601 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1602 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1603 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1604 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1605 }
1606
1607 /*
1608 * Allocate the control data structures, and create and load the
1609 * DMA map for it.
1610 *
1611 * NOTE: All Tx descriptors must be in the same 4G segment of
1612 * memory. So must Rx descriptors. We simplify by allocating
1613 * both sets within the same 4G segment.
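* The constraint is enforced by the boundary argument to
* bus_dmamem_alloc() below: 0x100000000 keeps the whole
* allocation from crossing a 4G line.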
1614 */
1615 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1616 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1617 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1618 sizeof(struct wm_control_data_82542) :
1619 sizeof(struct wm_control_data_82544);
1620 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1621 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1622 &sc->sc_cd_rseg, 0)) != 0) {
1623 aprint_error_dev(sc->sc_dev,
1624 "unable to allocate control data, error = %d\n",
1625 error);
1626 goto fail_0;
1627 }
1628
1629 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1630 sc->sc_cd_rseg, sc->sc_cd_size,
1631 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1632 aprint_error_dev(sc->sc_dev,
1633 "unable to map control data, error = %d\n", error);
1634 goto fail_1;
1635 }
1636
1637 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1638 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1639 aprint_error_dev(sc->sc_dev,
1640 "unable to create control data DMA map, error = %d\n",
1641 error);
1642 goto fail_2;
1643 }
1644
1645 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1646 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1647 aprint_error_dev(sc->sc_dev,
1648 "unable to load control data DMA map, error = %d\n",
1649 error);
1650 goto fail_3;
1651 }
1652
1653 /* Create the transmit buffer DMA maps. */
1654 WM_TXQUEUELEN(sc) =
1655 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1656 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1657 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1658 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1659 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1660 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1661 aprint_error_dev(sc->sc_dev,
1662 "unable to create Tx DMA map %d, error = %d\n",
1663 i, error);
1664 goto fail_4;
1665 }
1666 }
1667
1668 /* Create the receive buffer DMA maps. */
1669 for (i = 0; i < WM_NRXDESC; i++) {
1670 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1671 MCLBYTES, 0, 0,
1672 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1673 aprint_error_dev(sc->sc_dev,
1674 "unable to create Rx DMA map %d error = %d\n",
1675 i, error);
1676 goto fail_5;
1677 }
1678 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1679 }
1680
1681 /* clear interesting stat counters */
1682 CSR_READ(sc, WMREG_COLC);
1683 CSR_READ(sc, WMREG_RXERRC);
1684
1685 /* get PHY control from SMBus to PCIe */
1686 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1687 || (sc->sc_type == WM_T_PCH_LPT))
1688 wm_smbustopci(sc);
1689
1690 /* Reset the chip to a known state. */
1691 wm_reset(sc);
1692
1693 /* Get some information about the EEPROM. */
1694 switch (sc->sc_type) {
1695 case WM_T_82542_2_0:
1696 case WM_T_82542_2_1:
1697 case WM_T_82543:
1698 case WM_T_82544:
1699 /* Microwire */
1700 sc->sc_nvm_wordsize = 64;
1701 sc->sc_nvm_addrbits = 6;
1702 break;
1703 case WM_T_82540:
1704 case WM_T_82545:
1705 case WM_T_82545_3:
1706 case WM_T_82546:
1707 case WM_T_82546_3:
1708 /* Microwire */
1709 reg = CSR_READ(sc, WMREG_EECD);
1710 if (reg & EECD_EE_SIZE) {
1711 sc->sc_nvm_wordsize = 256;
1712 sc->sc_nvm_addrbits = 8;
1713 } else {
1714 sc->sc_nvm_wordsize = 64;
1715 sc->sc_nvm_addrbits = 6;
1716 }
1717 sc->sc_flags |= WM_F_LOCK_EECD;
1718 break;
1719 case WM_T_82541:
1720 case WM_T_82541_2:
1721 case WM_T_82547:
1722 case WM_T_82547_2:
1723 reg = CSR_READ(sc, WMREG_EECD);
1724 if (reg & EECD_EE_TYPE) {
1725 /* SPI */
1726 sc->sc_flags |= WM_F_EEPROM_SPI;
1727 wm_nvm_set_addrbits_size_eecd(sc);
1728 } else {
1729 /* Microwire */
1730 if ((reg & EECD_EE_ABITS) != 0) {
1731 sc->sc_nvm_wordsize = 256;
1732 sc->sc_nvm_addrbits = 8;
1733 } else {
1734 sc->sc_nvm_wordsize = 64;
1735 sc->sc_nvm_addrbits = 6;
1736 }
1737 }
1738 sc->sc_flags |= WM_F_LOCK_EECD;
1739 break;
1740 case WM_T_82571:
1741 case WM_T_82572:
1742 /* SPI */
1743 sc->sc_flags |= WM_F_EEPROM_SPI;
1744 wm_nvm_set_addrbits_size_eecd(sc);
1745 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1746 break;
1747 case WM_T_82573:
1748 sc->sc_flags |= WM_F_LOCK_SWSM;
1749 /* FALLTHROUGH */
1750 case WM_T_82574:
1751 case WM_T_82583:
1752 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1753 sc->sc_flags |= WM_F_EEPROM_FLASH;
1754 sc->sc_nvm_wordsize = 2048;
1755 } else {
1756 /* SPI */
1757 sc->sc_flags |= WM_F_EEPROM_SPI;
1758 wm_nvm_set_addrbits_size_eecd(sc);
1759 }
1760 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1761 break;
1762 case WM_T_82575:
1763 case WM_T_82576:
1764 case WM_T_82580:
1765 case WM_T_I350:
1766 case WM_T_I354:
1767 case WM_T_80003:
1768 /* SPI */
1769 sc->sc_flags |= WM_F_EEPROM_SPI;
1770 wm_nvm_set_addrbits_size_eecd(sc);
1771 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1772 | WM_F_LOCK_SWSM;
1773 break;
1774 case WM_T_ICH8:
1775 case WM_T_ICH9:
1776 case WM_T_ICH10:
1777 case WM_T_PCH:
1778 case WM_T_PCH2:
1779 case WM_T_PCH_LPT:
1780 /* FLASH */
1781 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1782 sc->sc_nvm_wordsize = 2048;
1783 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1784 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1785 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1786 aprint_error_dev(sc->sc_dev,
1787 "can't map FLASH registers\n");
1788 goto fail_5;
1789 }
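/*
 * GFPREG gives the first and last sectors of the GbE flash
 * region.  The region holds two NVM banks, so one bank is
 * half the region; the division by 2 * sizeof(uint16_t)
 * below halves it and converts bytes to 16-bit words.
 */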
1790 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1791 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1792 ICH_FLASH_SECTOR_SIZE;
1793 sc->sc_ich8_flash_bank_size =
1794 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1795 sc->sc_ich8_flash_bank_size -=
1796 (reg & ICH_GFPREG_BASE_MASK);
1797 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1798 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1799 break;
1800 case WM_T_I210:
1801 case WM_T_I211:
1802 wm_nvm_set_addrbits_size_eecd(sc);
1803 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1804 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1805 break;
1806 default:
1807 break;
1808 }
1809
1810 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1811 switch (sc->sc_type) {
1812 case WM_T_82571:
1813 case WM_T_82572:
1814 reg = CSR_READ(sc, WMREG_SWSM2);
1815 if ((reg & SWSM2_LOCK) != 0) {
1816 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1817 force_clear_smbi = true;
1818 } else
1819 force_clear_smbi = false;
1820 break;
1821 case WM_T_82573:
1822 case WM_T_82574:
1823 case WM_T_82583:
1824 force_clear_smbi = true;
1825 break;
1826 default:
1827 force_clear_smbi = false;
1828 break;
1829 }
1830 if (force_clear_smbi) {
1831 reg = CSR_READ(sc, WMREG_SWSM);
1832 if ((reg & SWSM_SMBI) != 0)
1833 aprint_error_dev(sc->sc_dev,
1834 "Please update the Bootagent\n");
1835 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1836 }
1837
1838 /*
1839 * Defer printing the EEPROM type until after verifying the checksum.
1840 * This allows the EEPROM type to be printed correctly in the case
1841 * that no EEPROM is attached.
1842 */
1843 /*
1844 * Validate the EEPROM checksum. If the checksum fails, flag
1845 * this for later, so we can fail future reads from the EEPROM.
1846 */
1847 if (wm_nvm_validate_checksum(sc)) {
1848 /*
1849 * Validate a second time, because some PCIe parts fail the
1850 * first check when the link is in a sleep state.
1851 */
1852 if (wm_nvm_validate_checksum(sc))
1853 sc->sc_flags |= WM_F_EEPROM_INVALID;
1854 }
1855
1856 /* Set device properties (macflags) */
1857 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1858
1859 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1860 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1861 else {
1862 aprint_verbose_dev(sc->sc_dev, "%u words ",
1863 sc->sc_nvm_wordsize);
1864 if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1865 aprint_verbose("FLASH(HW)\n");
1866 } else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1867 aprint_verbose("FLASH\n");
1868 } else {
1869 if (sc->sc_flags & WM_F_EEPROM_SPI)
1870 eetype = "SPI";
1871 else
1872 eetype = "MicroWire";
1873 aprint_verbose("(%d address bits) %s EEPROM\n",
1874 sc->sc_nvm_addrbits, eetype);
1875 }
1876 }
1877
1878 switch (sc->sc_type) {
1879 case WM_T_82571:
1880 case WM_T_82572:
1881 case WM_T_82573:
1882 case WM_T_82574:
1883 case WM_T_82583:
1884 case WM_T_80003:
1885 case WM_T_ICH8:
1886 case WM_T_ICH9:
1887 case WM_T_ICH10:
1888 case WM_T_PCH:
1889 case WM_T_PCH2:
1890 case WM_T_PCH_LPT:
1891 if (wm_check_mng_mode(sc) != 0)
1892 wm_get_hw_control(sc);
1893 break;
1894 default:
1895 break;
1896 }
1897 wm_get_wakeup(sc);
1898 /*
1899 * Read the Ethernet address from the EEPROM, if not first found
1900 * in device properties.
1901 */
1902 ea = prop_dictionary_get(dict, "mac-address");
1903 if (ea != NULL) {
1904 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1905 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1906 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1907 } else {
1908 if (wm_read_mac_addr(sc, enaddr) != 0) {
1909 aprint_error_dev(sc->sc_dev,
1910 "unable to read Ethernet address\n");
1911 goto fail_5;
1912 }
1913 }
1914
1915 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1916 ether_sprintf(enaddr));
1917
1918 /*
1919 * Read the config info from the EEPROM, and set up various
1920 * bits in the control registers based on their contents.
1921 */
1922 pn = prop_dictionary_get(dict, "i82543-cfg1");
1923 if (pn != NULL) {
1924 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1925 cfg1 = (uint16_t) prop_number_integer_value(pn);
1926 } else {
1927 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
1928 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1929 goto fail_5;
1930 }
1931 }
1932
1933 pn = prop_dictionary_get(dict, "i82543-cfg2");
1934 if (pn != NULL) {
1935 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1936 cfg2 = (uint16_t) prop_number_integer_value(pn);
1937 } else {
1938 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
1939 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1940 goto fail_5;
1941 }
1942 }
1943
1944 /* check for WM_F_WOL */
1945 switch (sc->sc_type) {
1946 case WM_T_82542_2_0:
1947 case WM_T_82542_2_1:
1948 case WM_T_82543:
1949 /* dummy? */
1950 eeprom_data = 0;
1951 apme_mask = NVM_CFG3_APME;
1952 break;
1953 case WM_T_82544:
1954 apme_mask = NVM_CFG2_82544_APM_EN;
1955 eeprom_data = cfg2;
1956 break;
1957 case WM_T_82546:
1958 case WM_T_82546_3:
1959 case WM_T_82571:
1960 case WM_T_82572:
1961 case WM_T_82573:
1962 case WM_T_82574:
1963 case WM_T_82583:
1964 case WM_T_80003:
1965 default:
1966 apme_mask = NVM_CFG3_APME;
1967 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
1968 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
1969 break;
1970 case WM_T_82575:
1971 case WM_T_82576:
1972 case WM_T_82580:
1973 case WM_T_I350:
1974 case WM_T_I354: /* XXX ok? */
1975 case WM_T_ICH8:
1976 case WM_T_ICH9:
1977 case WM_T_ICH10:
1978 case WM_T_PCH:
1979 case WM_T_PCH2:
1980 case WM_T_PCH_LPT:
1981 /* XXX The funcid should be checked on some devices */
1982 apme_mask = WUC_APME;
1983 eeprom_data = CSR_READ(sc, WMREG_WUC);
1984 break;
1985 }
1986
1987 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
1988 if ((eeprom_data & apme_mask) != 0)
1989 sc->sc_flags |= WM_F_WOL;
1990 #ifdef WM_DEBUG
1991 if ((sc->sc_flags & WM_F_WOL) != 0)
1992 printf("WOL\n");
1993 #endif
1994
1995 /*
1996 * XXX need special handling for some multiple port cards
1997 * to disable a particular port.
1998 */
1999
2000 if (sc->sc_type >= WM_T_82544) {
2001 pn = prop_dictionary_get(dict, "i82543-swdpin");
2002 if (pn != NULL) {
2003 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2004 swdpin = (uint16_t) prop_number_integer_value(pn);
2005 } else {
2006 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2007 aprint_error_dev(sc->sc_dev,
2008 "unable to read SWDPIN\n");
2009 goto fail_5;
2010 }
2011 }
2012 }
2013
2014 if (cfg1 & NVM_CFG1_ILOS)
2015 sc->sc_ctrl |= CTRL_ILOS;
2016 if (sc->sc_type >= WM_T_82544) {
2017 sc->sc_ctrl |=
2018 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2019 CTRL_SWDPIO_SHIFT;
2020 sc->sc_ctrl |=
2021 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2022 CTRL_SWDPINS_SHIFT;
2023 } else {
2024 sc->sc_ctrl |=
2025 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2026 CTRL_SWDPIO_SHIFT;
2027 }
2028
2029 #if 0
2030 if (sc->sc_type >= WM_T_82544) {
2031 if (cfg1 & NVM_CFG1_IPS0)
2032 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2033 if (cfg1 & NVM_CFG1_IPS1)
2034 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2035 sc->sc_ctrl_ext |=
2036 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2037 CTRL_EXT_SWDPIO_SHIFT;
2038 sc->sc_ctrl_ext |=
2039 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2040 CTRL_EXT_SWDPINS_SHIFT;
2041 } else {
2042 sc->sc_ctrl_ext |=
2043 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2044 CTRL_EXT_SWDPIO_SHIFT;
2045 }
2046 #endif
2047
2048 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2049 #if 0
2050 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2051 #endif
2052
2053 /*
2054 * Set up some register offsets that are different between
2055 * the i82542 and the i82543 and later chips.
2056 */
2057 if (sc->sc_type < WM_T_82543) {
2058 sc->sc_rdt_reg = WMREG_OLD_RDT0;
2059 sc->sc_tdt_reg = WMREG_OLD_TDT;
2060 } else {
2061 sc->sc_rdt_reg = WMREG_RDT;
2062 sc->sc_tdt_reg = WMREG_TDT;
2063 }
2064
2065 if (sc->sc_type == WM_T_PCH) {
2066 uint16_t val;
2067
2068 /* Save the NVM K1 bit setting */
2069 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2070
2071 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2072 sc->sc_nvm_k1_enabled = 1;
2073 else
2074 sc->sc_nvm_k1_enabled = 0;
2075 }
2076
2077 /*
2078 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
2079 * media structures accordingly.
2080 */
2081 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2082 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2083 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2084 || sc->sc_type == WM_T_82573
2085 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2086 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2087 wm_gmii_mediainit(sc, wmp->wmp_product);
2088 } else if (sc->sc_type < WM_T_82543 ||
2089 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2090 if (sc->sc_mediatype & WMP_F_COPPER) {
2091 aprint_error_dev(sc->sc_dev,
2092 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2093 sc->sc_mediatype = WMP_F_FIBER;
2094 }
2095 wm_tbi_mediainit(sc);
2096 } else {
2097 switch (sc->sc_type) {
2098 case WM_T_82575:
2099 case WM_T_82576:
2100 case WM_T_82580:
2101 case WM_T_I350:
2102 case WM_T_I354:
2103 case WM_T_I210:
2104 case WM_T_I211:
2105 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2106 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2107 switch (link_mode) {
2108 case CTRL_EXT_LINK_MODE_1000KX:
2109 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2110 sc->sc_mediatype = WMP_F_SERDES;
2111 break;
2112 case CTRL_EXT_LINK_MODE_SGMII:
2113 if (wm_sgmii_uses_mdio(sc)) {
2114 aprint_verbose_dev(sc->sc_dev,
2115 "SGMII(MDIO)\n");
2116 sc->sc_flags |= WM_F_SGMII;
2117 sc->sc_mediatype = WMP_F_COPPER;
2118 break;
2119 }
2120 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2121 /*FALLTHROUGH*/
2122 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2123 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2124 if (sc->sc_mediatype == WMP_F_UNKNOWN) {
2125 if (link_mode
2126 == CTRL_EXT_LINK_MODE_SGMII) {
2127 sc->sc_mediatype
2128 = WMP_F_COPPER;
2129 sc->sc_flags |= WM_F_SGMII;
2130 } else {
2131 sc->sc_mediatype
2132 = WMP_F_SERDES;
2133 aprint_verbose_dev(sc->sc_dev,
2134 "SERDES\n");
2135 }
2136 break;
2137 }
2138 if (sc->sc_mediatype == WMP_F_SERDES)
2139 aprint_verbose_dev(sc->sc_dev,
2140 "SERDES\n");
2141
2142 /* Change current link mode setting */
2143 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2144 switch (sc->sc_mediatype) {
2145 case WMP_F_COPPER:
2146 reg |= CTRL_EXT_LINK_MODE_SGMII;
2147 break;
2148 case WMP_F_SERDES:
2149 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2150 break;
2151 default:
2152 break;
2153 }
2154 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2155 break;
2156 case CTRL_EXT_LINK_MODE_GMII:
2157 default:
2158 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2159 sc->sc_mediatype = WMP_F_COPPER;
2160 break;
2161 }
2162
2163 reg &= ~CTRL_EXT_I2C_ENA;
2164 if ((sc->sc_flags & WM_F_SGMII) != 0)
2165 reg |= CTRL_EXT_I2C_ENA;
2166 else
2167 reg &= ~CTRL_EXT_I2C_ENA;
2168 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2169
2170 if (sc->sc_mediatype == WMP_F_COPPER)
2171 wm_gmii_mediainit(sc, wmp->wmp_product);
2172 else
2173 wm_tbi_mediainit(sc);
2174 break;
2175 default:
2176 if (sc->sc_mediatype & WMP_F_FIBER)
2177 aprint_error_dev(sc->sc_dev,
2178 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2179 sc->sc_mediatype = WMP_F_COPPER;
2180 wm_gmii_mediainit(sc, wmp->wmp_product);
2181 }
2182 }
2183
2184 ifp = &sc->sc_ethercom.ec_if;
2185 xname = device_xname(sc->sc_dev);
2186 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2187 ifp->if_softc = sc;
2188 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2189 ifp->if_ioctl = wm_ioctl;
2190 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2191 ifp->if_start = wm_nq_start;
2192 else
2193 ifp->if_start = wm_start;
2194 ifp->if_watchdog = wm_watchdog;
2195 ifp->if_init = wm_init;
2196 ifp->if_stop = wm_stop;
2197 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2198 IFQ_SET_READY(&ifp->if_snd);
2199
2200 /* Check for jumbo frame */
2201 switch (sc->sc_type) {
2202 case WM_T_82573:
2203 /* XXX limited to 9234 if ASPM is disabled */
2204 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &io3);
2205 if ((io3 & NVM_3GIO_3_ASPM_MASK) != 0)
2206 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2207 break;
2208 case WM_T_82571:
2209 case WM_T_82572:
2210 case WM_T_82574:
2211 case WM_T_82575:
2212 case WM_T_82576:
2213 case WM_T_82580:
2214 case WM_T_I350:
2215 case WM_T_I354: /* XXXX ok? */
2216 case WM_T_I210:
2217 case WM_T_I211:
2218 case WM_T_80003:
2219 case WM_T_ICH9:
2220 case WM_T_ICH10:
2221 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2222 case WM_T_PCH_LPT:
2223 /* XXX limited to 9234 */
2224 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2225 break;
2226 case WM_T_PCH:
2227 /* XXX limited to 4096 */
2228 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2229 break;
2230 case WM_T_82542_2_0:
2231 case WM_T_82542_2_1:
2232 case WM_T_82583:
2233 case WM_T_ICH8:
2234 /* No support for jumbo frame */
2235 break;
2236 default:
2237 /* ETHER_MAX_LEN_JUMBO */
2238 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2239 break;
2240 }
2241
2242 /* If we're a i82543 or greater, we can support VLANs. */
2243 if (sc->sc_type >= WM_T_82543)
2244 sc->sc_ethercom.ec_capabilities |=
2245 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2246
2247 /*
2248 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
2249 * on i82543 and later.
2250 */
2251 if (sc->sc_type >= WM_T_82543) {
2252 ifp->if_capabilities |=
2253 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2254 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2255 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2256 IFCAP_CSUM_TCPv6_Tx |
2257 IFCAP_CSUM_UDPv6_Tx;
2258 }
2259
2260 /*
2261 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2262 *
2263 * 82541GI (8086:1076) ... no
2264 * 82572EI (8086:10b9) ... yes
2265 */
2266 if (sc->sc_type >= WM_T_82571) {
2267 ifp->if_capabilities |=
2268 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2269 }
2270
2271 /*
2272 * If we're a i82544 or greater (except i82547), we can do
2273 * TCP segmentation offload.
2274 */
2275 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2276 ifp->if_capabilities |= IFCAP_TSOv4;
2277 }
2278
2279 if (sc->sc_type >= WM_T_82571) {
2280 ifp->if_capabilities |= IFCAP_TSOv6;
2281 }
2282
2283 #ifdef WM_MPSAFE
2284 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2285 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2286 #else
2287 sc->sc_tx_lock = NULL;
2288 sc->sc_rx_lock = NULL;
2289 #endif
2290
2291 /* Attach the interface. */
2292 if_attach(ifp);
2293 ether_ifattach(ifp, enaddr);
2294 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2295 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2296 RND_FLAG_DEFAULT);
2297
2298 #ifdef WM_EVENT_COUNTERS
2299 /* Attach event counters. */
2300 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2301 NULL, xname, "txsstall");
2302 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2303 NULL, xname, "txdstall");
2304 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2305 NULL, xname, "txfifo_stall");
2306 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2307 NULL, xname, "txdw");
2308 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2309 NULL, xname, "txqe");
2310 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2311 NULL, xname, "rxintr");
2312 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2313 NULL, xname, "linkintr");
2314
2315 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2316 NULL, xname, "rxipsum");
2317 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2318 NULL, xname, "rxtusum");
2319 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2320 NULL, xname, "txipsum");
2321 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2322 NULL, xname, "txtusum");
2323 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2324 NULL, xname, "txtusum6");
2325
2326 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2327 NULL, xname, "txtso");
2328 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2329 NULL, xname, "txtso6");
2330 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2331 NULL, xname, "txtsopain");
2332
2333 for (i = 0; i < WM_NTXSEGS; i++) {
2334 snprintf(wm_txseg_evcnt_names[i],
2335 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2336 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2337 NULL, xname, wm_txseg_evcnt_names[i]);
2338 }
2339
2340 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2341 NULL, xname, "txdrop");
2342
2343 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2344 NULL, xname, "tu");
2345
2346 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2347 NULL, xname, "tx_xoff");
2348 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2349 NULL, xname, "tx_xon");
2350 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2351 NULL, xname, "rx_xoff");
2352 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2353 NULL, xname, "rx_xon");
2354 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2355 NULL, xname, "rx_macctl");
2356 #endif /* WM_EVENT_COUNTERS */
2357
2358 if (pmf_device_register(self, wm_suspend, wm_resume))
2359 pmf_class_network_register(self, ifp);
2360 else
2361 aprint_error_dev(self, "couldn't establish power handler\n");
2362
2363 sc->sc_flags |= WM_F_ATTACHED;
2364 return;
2365
2366 /*
2367 * Free any resources we've allocated during the failed attach
2368 * attempt. Do this in reverse order and fall through.
2369 */
2370 fail_5:
2371 for (i = 0; i < WM_NRXDESC; i++) {
2372 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2373 bus_dmamap_destroy(sc->sc_dmat,
2374 sc->sc_rxsoft[i].rxs_dmamap);
2375 }
2376 fail_4:
2377 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2378 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2379 bus_dmamap_destroy(sc->sc_dmat,
2380 sc->sc_txsoft[i].txs_dmamap);
2381 }
2382 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2383 fail_3:
2384 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2385 fail_2:
2386 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2387 sc->sc_cd_size);
2388 fail_1:
2389 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2390 fail_0:
2391 return;
2392 }
2393
2394 /* The detach function (ca_detach) */
2395 static int
2396 wm_detach(device_t self, int flags __unused)
2397 {
2398 struct wm_softc *sc = device_private(self);
2399 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2400 int i;
2401 #ifndef WM_MPSAFE
2402 int s;
2403 #endif
2404
2405 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2406 return 0;
2407
2408 #ifndef WM_MPSAFE
2409 s = splnet();
2410 #endif
2411 /* Stop the interface. Callouts are stopped in it. */
2412 wm_stop(ifp, 1);
2413
2414 #ifndef WM_MPSAFE
2415 splx(s);
2416 #endif
2417
2418 pmf_device_deregister(self);
2419
2420 /* Tell the firmware about the release */
2421 WM_BOTH_LOCK(sc);
2422 wm_release_manageability(sc);
2423 wm_release_hw_control(sc);
2424 WM_BOTH_UNLOCK(sc);
2425
2426 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2427
2428 /* Delete all remaining media. */
2429 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2430
2431 ether_ifdetach(ifp);
2432 if_detach(ifp);
2433
2434
2435 /* Unload RX dmamaps and free mbufs */
2436 WM_RX_LOCK(sc);
2437 wm_rxdrain(sc);
2438 WM_RX_UNLOCK(sc);
2439 /* Must unlock here */
2440
2441 /* Free dmamap. It's the same as the end of the wm_attach() function */
2442 for (i = 0; i < WM_NRXDESC; i++) {
2443 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2444 bus_dmamap_destroy(sc->sc_dmat,
2445 sc->sc_rxsoft[i].rxs_dmamap);
2446 }
2447 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2448 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2449 bus_dmamap_destroy(sc->sc_dmat,
2450 sc->sc_txsoft[i].txs_dmamap);
2451 }
2452 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2453 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2454 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2455 sc->sc_cd_size);
2456 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2457
2458 /* Disestablish the interrupt handler */
2459 if (sc->sc_ih != NULL) {
2460 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2461 sc->sc_ih = NULL;
2462 }
2463
2464 /* Unmap the registers */
2465 if (sc->sc_ss) {
2466 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2467 sc->sc_ss = 0;
2468 }
2469
2470 if (sc->sc_ios) {
2471 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2472 sc->sc_ios = 0;
2473 }
2474
2475 if (sc->sc_tx_lock)
2476 mutex_obj_free(sc->sc_tx_lock);
2477 if (sc->sc_rx_lock)
2478 mutex_obj_free(sc->sc_rx_lock);
2479
2480 return 0;
2481 }
2482
2483 static bool
2484 wm_suspend(device_t self, const pmf_qual_t *qual)
2485 {
2486 struct wm_softc *sc = device_private(self);
2487
2488 wm_release_manageability(sc);
2489 wm_release_hw_control(sc);
2490 #ifdef WM_WOL
2491 wm_enable_wakeup(sc);
2492 #endif
2493
2494 return true;
2495 }
2496
2497 static bool
2498 wm_resume(device_t self, const pmf_qual_t *qual)
2499 {
2500 struct wm_softc *sc = device_private(self);
2501
2502 wm_init_manageability(sc);
2503
2504 return true;
2505 }
2506
2507 /*
2508 * wm_watchdog: [ifnet interface function]
2509 *
2510 * Watchdog timer handler.
2511 */
2512 static void
2513 wm_watchdog(struct ifnet *ifp)
2514 {
2515 struct wm_softc *sc = ifp->if_softc;
2516
2517 /*
2518 * Since we're using delayed interrupts, sweep up
2519 * before we report an error.
2520 */
2521 WM_TX_LOCK(sc);
2522 wm_txintr(sc);
2523 WM_TX_UNLOCK(sc);
2524
2525 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2526 #ifdef WM_DEBUG
2527 int i, j;
2528 struct wm_txsoft *txs;
2529 #endif
2530 log(LOG_ERR,
2531 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2532 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2533 sc->sc_txnext);
2534 ifp->if_oerrors++;
2535 #ifdef WM_DEBUG
2536 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2537 i = WM_NEXTTXS(sc, i)) {
2538 txs = &sc->sc_txsoft[i];
2539 printf("txs %d tx %d -> %d\n",
2540 i, txs->txs_firstdesc, txs->txs_lastdesc);
2541 for (j = txs->txs_firstdesc; ;
2542 j = WM_NEXTTX(sc, j)) {
2543 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2544 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2545 printf("\t %#08x%08x\n",
2546 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2547 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2548 if (j == txs->txs_lastdesc)
2549 break;
2550 }
2551 }
2552 #endif
2553 /* Reset the interface. */
2554 (void) wm_init(ifp);
2555 }
2556
2557 /* Try to get more packets going. */
2558 ifp->if_start(ifp);
2559 }
2560
2561 /*
2562 * wm_tick:
2563 *
2564 * One second timer, used to check link status, sweep up
2565 * completed transmit jobs, etc.
2566 */
2567 static void
2568 wm_tick(void *arg)
2569 {
2570 struct wm_softc *sc = arg;
2571 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2572 #ifndef WM_MPSAFE
2573 int s;
2574
2575 s = splnet();
2576 #endif
2577
2578 WM_TX_LOCK(sc);
2579
2580 if (sc->sc_stopping)
2581 goto out;
2582
2583 if (sc->sc_type >= WM_T_82542_2_1) {
2584 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2585 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2586 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2587 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2588 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2589 }
2590
2591 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2592 ifp->if_ierrors += 0ULL /* ensure quad_t */
2593 + CSR_READ(sc, WMREG_CRCERRS)
2594 + CSR_READ(sc, WMREG_ALGNERRC)
2595 + CSR_READ(sc, WMREG_SYMERRC)
2596 + CSR_READ(sc, WMREG_RXERRC)
2597 + CSR_READ(sc, WMREG_SEC)
2598 + CSR_READ(sc, WMREG_CEXTERR)
2599 + CSR_READ(sc, WMREG_RLEC);
2600 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2601
2602 if (sc->sc_flags & WM_F_HAS_MII)
2603 mii_tick(&sc->sc_mii);
2604 else
2605 wm_tbi_check_link(sc);
2606
2607 out:
2608 WM_TX_UNLOCK(sc);
2609 #ifndef WM_MPSAFE
2610 splx(s);
2611 #endif
2612
2613 if (!sc->sc_stopping)
2614 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2615 }
2616
2617 static int
2618 wm_ifflags_cb(struct ethercom *ec)
2619 {
2620 struct ifnet *ifp = &ec->ec_if;
2621 struct wm_softc *sc = ifp->if_softc;
2622 int change = ifp->if_flags ^ sc->sc_if_flags;
2623 int rc = 0;
2624
2625 WM_BOTH_LOCK(sc);
2626
2627 if (change != 0)
2628 sc->sc_if_flags = ifp->if_flags;
2629
2630 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2631 rc = ENETRESET;
2632 goto out;
2633 }
2634
2635 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2636 wm_set_filter(sc);
2637
2638 wm_set_vlan(sc);
2639
2640 out:
2641 WM_BOTH_UNLOCK(sc);
2642
2643 return rc;
2644 }
2645
2646 /*
2647 * wm_ioctl: [ifnet interface function]
2648 *
2649 * Handle control requests from the operator.
2650 */
2651 static int
2652 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2653 {
2654 struct wm_softc *sc = ifp->if_softc;
2655 struct ifreq *ifr = (struct ifreq *) data;
2656 struct ifaddr *ifa = (struct ifaddr *)data;
2657 struct sockaddr_dl *sdl;
2658 int s, error;
2659
2660 #ifndef WM_MPSAFE
2661 s = splnet();
2662 #endif
2663 WM_BOTH_LOCK(sc);
2664
2665 switch (cmd) {
2666 case SIOCSIFMEDIA:
2667 case SIOCGIFMEDIA:
2668 /* Flow control requires full-duplex mode. */
2669 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2670 (ifr->ifr_media & IFM_FDX) == 0)
2671 ifr->ifr_media &= ~IFM_ETH_FMASK;
2672 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2673 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2674 /* We can do both TXPAUSE and RXPAUSE. */
2675 ifr->ifr_media |=
2676 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2677 }
2678 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2679 }
2680 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2681 break;
2682 case SIOCINITIFADDR:
2683 if (ifa->ifa_addr->sa_family == AF_LINK) {
2684 sdl = satosdl(ifp->if_dl->ifa_addr);
2685 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2686 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2687 /* unicast address is first multicast entry */
2688 wm_set_filter(sc);
2689 error = 0;
2690 break;
2691 }
2692 /*FALLTHROUGH*/
2693 default:
2694 WM_BOTH_UNLOCK(sc);
2695 #ifdef WM_MPSAFE
2696 s = splnet();
2697 #endif
2698 /* It may call wm_start, so unlock here */
2699 error = ether_ioctl(ifp, cmd, data);
2700 #ifdef WM_MPSAFE
2701 splx(s);
2702 #endif
2703 WM_BOTH_LOCK(sc);
2704
2705 if (error != ENETRESET)
2706 break;
2707
2708 error = 0;
2709
2710 if (cmd == SIOCSIFCAP) {
2711 WM_BOTH_UNLOCK(sc);
2712 error = (*ifp->if_init)(ifp);
2713 WM_BOTH_LOCK(sc);
2714 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2715 ;
2716 else if (ifp->if_flags & IFF_RUNNING) {
2717 /*
2718 * Multicast list has changed; set the hardware filter
2719 * accordingly.
2720 */
2721 wm_set_filter(sc);
2722 }
2723 break;
2724 }
2725
2726 WM_BOTH_UNLOCK(sc);
2727
2728 /* Try to get more packets going. */
2729 ifp->if_start(ifp);
2730
2731 #ifndef WM_MPSAFE
2732 splx(s);
2733 #endif
2734 return error;
2735 }
2736
2737 /* MAC address related */
2738
2739 static int
2740 wm_check_alt_mac_addr(struct wm_softc *sc)
2741 {
2742 uint16_t myea[ETHER_ADDR_LEN / 2];
2743 uint16_t offset = NVM_OFF_MACADDR;
2744
2745 /* Try to read alternative MAC address pointer */
2746 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2747 return -1;
2748
2749 /* Check pointer */
2750 if (offset == 0xffff)
2751 return -1;
2752
2753 /*
2754 * Check whether the alternative MAC address is valid.
2755 * Some cards have a non-0xffff pointer but don't actually
2756 * use an alternative MAC address.
2757 *
2758 * A valid station address never has the broadcast bit set.
2759 */
2760 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2761 if (((myea[0] & 0xff) & 0x01) == 0)
2762 return 0; /* found! */
2763
2764 /* not found */
2765 return -1;
2766 }
2767
2768 static int
2769 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2770 {
2771 uint16_t myea[ETHER_ADDR_LEN / 2];
2772 uint16_t offset = NVM_OFF_MACADDR;
2773 int do_invert = 0;
2774
2775 switch (sc->sc_type) {
2776 case WM_T_82580:
2777 case WM_T_I350:
2778 case WM_T_I354:
2779 switch (sc->sc_funcid) {
2780 case 0:
2781 /* default value (== NVM_OFF_MACADDR) */
2782 break;
2783 case 1:
2784 offset = NVM_OFF_LAN1;
2785 break;
2786 case 2:
2787 offset = NVM_OFF_LAN2;
2788 break;
2789 case 3:
2790 offset = NVM_OFF_LAN3;
2791 break;
2792 default:
2793 goto bad;
2794 /* NOTREACHED */
2795 break;
2796 }
2797 break;
2798 case WM_T_82571:
2799 case WM_T_82575:
2800 case WM_T_82576:
2801 case WM_T_80003:
2802 case WM_T_I210:
2803 case WM_T_I211:
2804 if (wm_check_alt_mac_addr(sc) != 0) {
2805 /* reset the offset to LAN0 */
2806 offset = NVM_OFF_MACADDR;
2807 if ((sc->sc_funcid & 0x01) == 1)
2808 do_invert = 1;
2809 goto do_read;
2810 }
2811 switch (sc->sc_funcid) {
2812 case 0:
2813 /*
2814 * The offset is the value in NVM_OFF_ALT_MAC_ADDR_PTR
2815 * itself.
2816 */
2817 break;
2818 case 1:
2819 offset += NVM_OFF_MACADDR_LAN1;
2820 break;
2821 case 2:
2822 offset += NVM_OFF_MACADDR_LAN2;
2823 break;
2824 case 3:
2825 offset += NVM_OFF_MACADDR_LAN3;
2826 break;
2827 default:
2828 goto bad;
2829 /* NOTREACHED */
2830 break;
2831 }
2832 break;
2833 default:
2834 if ((sc->sc_funcid & 0x01) == 1)
2835 do_invert = 1;
2836 break;
2837 }
2838
2839 do_read:
2840 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2841 myea) != 0) {
2842 goto bad;
2843 }
2844
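/*
 * NVM words are little-endian; e.g. for 00:11:22:33:44:55 the
 * three words read above are { 0x1100, 0x3322, 0x5544 }.
 */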
2845 enaddr[0] = myea[0] & 0xff;
2846 enaddr[1] = myea[0] >> 8;
2847 enaddr[2] = myea[1] & 0xff;
2848 enaddr[3] = myea[1] >> 8;
2849 enaddr[4] = myea[2] & 0xff;
2850 enaddr[5] = myea[2] >> 8;
2851
2852 /*
2853 * Toggle the LSB of the MAC address on the second port
2854 * of some dual port cards.
2855 */
2856 if (do_invert != 0)
2857 enaddr[5] ^= 1;
2858
2859 return 0;
2860
2861 bad:
2862 return -1;
2863 }
2864
2865 /*
2866 * wm_set_ral:
2867 *
2868 * Set an entry in the receive address list.
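* The low register holds the first four octets of the address and
* the high register the last two plus the address-valid bit; e.g.
* 00:11:22:33:44:55 becomes ral_lo 0x33221100 and
* ral_hi 0x5544 | RAL_AV.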
2869 */
2870 static void
2871 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2872 {
2873 uint32_t ral_lo, ral_hi;
2874
2875 if (enaddr != NULL) {
2876 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2877 (enaddr[3] << 24);
2878 ral_hi = enaddr[4] | (enaddr[5] << 8);
2879 ral_hi |= RAL_AV;
2880 } else {
2881 ral_lo = 0;
2882 ral_hi = 0;
2883 }
2884
2885 if (sc->sc_type >= WM_T_82544) {
2886 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2887 ral_lo);
2888 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2889 ral_hi);
2890 } else {
2891 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2892 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2893 }
2894 }
2895
2896 /*
2897 * wm_mchash:
2898 *
2899 * Compute the hash of the multicast address for the multicast
2900 * filter (4096-bit on most chips, 1024-bit on ICH/PCH).
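*
* e.g. for 01:00:5e:00:00:01 with mchash_type 0 this yields
* (0x00 >> 4) | (0x01 << 4) = 0x010.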
2901 */
2902 static uint32_t
2903 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2904 {
2905 static const int lo_shift[4] = { 4, 3, 2, 0 };
2906 static const int hi_shift[4] = { 4, 5, 6, 8 };
2907 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2908 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
2909 uint32_t hash;
2910
2911 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2912 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2913 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
2914 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
2915 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
2916 return (hash & 0x3ff);
2917 }
2918 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2919 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2920
2921 return (hash & 0xfff);
2922 }
2923
2924 /*
2925 * wm_set_filter:
2926 *
2927 * Set up the receive filter.
2928 */
2929 static void
2930 wm_set_filter(struct wm_softc *sc)
2931 {
2932 struct ethercom *ec = &sc->sc_ethercom;
2933 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2934 struct ether_multi *enm;
2935 struct ether_multistep step;
2936 bus_addr_t mta_reg;
2937 uint32_t hash, reg, bit;
2938 int i, size;
2939
2940 if (sc->sc_type >= WM_T_82544)
2941 mta_reg = WMREG_CORDOVA_MTA;
2942 else
2943 mta_reg = WMREG_MTA;
2944
2945 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2946
2947 if (ifp->if_flags & IFF_BROADCAST)
2948 sc->sc_rctl |= RCTL_BAM;
2949 if (ifp->if_flags & IFF_PROMISC) {
2950 sc->sc_rctl |= RCTL_UPE;
2951 goto allmulti;
2952 }
2953
2954 /*
2955 * Set the station address in the first RAL slot, and
2956 * clear the remaining slots.
2957 */
2958 if (sc->sc_type == WM_T_ICH8)
2959 size = WM_RAL_TABSIZE_ICH8 - 1;
2960 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
2961 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
2962 || (sc->sc_type == WM_T_PCH_LPT))
2963 size = WM_RAL_TABSIZE_ICH8;
2964 else if (sc->sc_type == WM_T_82575)
2965 size = WM_RAL_TABSIZE_82575;
2966 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
2967 size = WM_RAL_TABSIZE_82576;
2968 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2969 size = WM_RAL_TABSIZE_I350;
2970 else
2971 size = WM_RAL_TABSIZE;
2972 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
2973 for (i = 1; i < size; i++)
2974 wm_set_ral(sc, NULL, i);
2975
2976 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2977 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2978 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
2979 size = WM_ICH8_MC_TABSIZE;
2980 else
2981 size = WM_MC_TABSIZE;
2982 /* Clear out the multicast table. */
2983 for (i = 0; i < size; i++)
2984 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2985
2986 ETHER_FIRST_MULTI(step, ec, enm);
2987 while (enm != NULL) {
2988 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2989 /*
2990 * We must listen to a range of multicast addresses.
2991 * For now, just accept all multicasts, rather than
2992 * trying to set only those filter bits needed to match
2993 * the range. (At this time, the only use of address
2994 * ranges is for IP multicast routing, for which the
2995 * range is big enough to require all bits set.)
2996 */
2997 goto allmulti;
2998 }
2999
3000 hash = wm_mchash(sc, enm->enm_addrlo);
3001
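/*
 * The multicast table is an array of 32-bit registers: the
 * upper bits of the hash select the register and the low
 * five bits the bit within it, so hash 0x010 hits register
 * 0, bit 16.
 */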
3002 reg = (hash >> 5);
3003 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3004 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3005 || (sc->sc_type == WM_T_PCH2)
3006 || (sc->sc_type == WM_T_PCH_LPT))
3007 reg &= 0x1f;
3008 else
3009 reg &= 0x7f;
3010 bit = hash & 0x1f;
3011
3012 hash = CSR_READ(sc, mta_reg + (reg << 2));
3013 hash |= 1U << bit;
3014
3015 /* XXX Hardware bug?? */
3016 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3017 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3018 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3019 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3020 } else
3021 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3022
3023 ETHER_NEXT_MULTI(step, enm);
3024 }
3025
3026 ifp->if_flags &= ~IFF_ALLMULTI;
3027 goto setit;
3028
3029 allmulti:
3030 ifp->if_flags |= IFF_ALLMULTI;
3031 sc->sc_rctl |= RCTL_MPE;
3032
3033 setit:
3034 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3035 }
3036
3037 /* Reset and init related */
3038
3039 static void
3040 wm_set_vlan(struct wm_softc *sc)
3041 {
3042 /* Deal with VLAN enables. */
3043 if (VLAN_ATTACHED(&sc->sc_ethercom))
3044 sc->sc_ctrl |= CTRL_VME;
3045 else
3046 sc->sc_ctrl &= ~CTRL_VME;
3047
3048 /* Write the control registers. */
3049 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3050 }
3051
3052 static void
3053 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3054 {
3055 uint32_t gcr;
3056 pcireg_t ctrl2;
3057
3058 gcr = CSR_READ(sc, WMREG_GCR);
3059
3060 /* Only take action if timeout value is defaulted to 0 */
3061 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3062 goto out;
3063
3064 if ((gcr & GCR_CAP_VER2) == 0) {
3065 gcr |= GCR_CMPL_TMOUT_10MS;
3066 goto out;
3067 }
3068
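/*
 * Capability-version-2 parts use the standard PCIe Device
 * Control 2 completion timeout field instead of GCR.
 */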
3069 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3070 sc->sc_pcixe_capoff + PCIE_DCSR2);
3071 ctrl2 |= WM_PCIE_DCSR2_16MS;
3072 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3073 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3074
3075 out:
3076 /* Disable completion timeout resend */
3077 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3078
3079 CSR_WRITE(sc, WMREG_GCR, gcr);
3080 }
3081
3082 void
3083 wm_get_auto_rd_done(struct wm_softc *sc)
3084 {
3085 int i;
3086
3087 /* wait for eeprom to reload */
3088 switch (sc->sc_type) {
3089 case WM_T_82571:
3090 case WM_T_82572:
3091 case WM_T_82573:
3092 case WM_T_82574:
3093 case WM_T_82583:
3094 case WM_T_82575:
3095 case WM_T_82576:
3096 case WM_T_82580:
3097 case WM_T_I350:
3098 case WM_T_I354:
3099 case WM_T_I210:
3100 case WM_T_I211:
3101 case WM_T_80003:
3102 case WM_T_ICH8:
3103 case WM_T_ICH9:
3104 for (i = 0; i < 10; i++) {
3105 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3106 break;
3107 delay(1000);
3108 }
3109 if (i == 10) {
3110 log(LOG_ERR, "%s: auto read from eeprom failed to "
3111 "complete\n", device_xname(sc->sc_dev));
3112 }
3113 break;
3114 default:
3115 break;
3116 }
3117 }
3118
3119 void
3120 wm_lan_init_done(struct wm_softc *sc)
3121 {
3122 uint32_t reg = 0;
3123 int i;
3124
3125 /* wait for eeprom to reload */
3126 switch (sc->sc_type) {
3127 case WM_T_ICH10:
3128 case WM_T_PCH:
3129 case WM_T_PCH2:
3130 case WM_T_PCH_LPT:
3131 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3132 reg = CSR_READ(sc, WMREG_STATUS);
3133 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3134 break;
3135 delay(100);
3136 }
3137 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3138 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3139 "complete\n", device_xname(sc->sc_dev), __func__);
3140 }
3141 break;
3142 default:
3143 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3144 __func__);
3145 break;
3146 }
3147
3148 reg &= ~STATUS_LAN_INIT_DONE;
3149 CSR_WRITE(sc, WMREG_STATUS, reg);
3150 }
3151
3152 void
3153 wm_get_cfg_done(struct wm_softc *sc)
3154 {
3155 int mask;
3156 uint32_t reg;
3157 int i;
3158
3159 /* wait for eeprom to reload */
3160 switch (sc->sc_type) {
3161 case WM_T_82542_2_0:
3162 case WM_T_82542_2_1:
3163 /* null */
3164 break;
3165 case WM_T_82543:
3166 case WM_T_82544:
3167 case WM_T_82540:
3168 case WM_T_82545:
3169 case WM_T_82545_3:
3170 case WM_T_82546:
3171 case WM_T_82546_3:
3172 case WM_T_82541:
3173 case WM_T_82541_2:
3174 case WM_T_82547:
3175 case WM_T_82547_2:
3176 case WM_T_82573:
3177 case WM_T_82574:
3178 case WM_T_82583:
3179 /* generic */
3180 delay(10*1000);
3181 break;
3182 case WM_T_80003:
3183 case WM_T_82571:
3184 case WM_T_82572:
3185 case WM_T_82575:
3186 case WM_T_82576:
3187 case WM_T_82580:
3188 case WM_T_I350:
3189 case WM_T_I354:
3190 case WM_T_I210:
3191 case WM_T_I211:
3192 if (sc->sc_type == WM_T_82571) {
3193 /* Only the 82571 shares port 0's CFGDONE bit */
3194 mask = EEMNGCTL_CFGDONE_0;
3195 } else
3196 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3197 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3198 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3199 break;
3200 delay(1000);
3201 }
3202 if (i >= WM_PHY_CFG_TIMEOUT) {
3203 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3204 device_xname(sc->sc_dev), __func__));
3205 }
3206 break;
3207 case WM_T_ICH8:
3208 case WM_T_ICH9:
3209 case WM_T_ICH10:
3210 case WM_T_PCH:
3211 case WM_T_PCH2:
3212 case WM_T_PCH_LPT:
3213 delay(10*1000);
3214 if (sc->sc_type >= WM_T_ICH10)
3215 wm_lan_init_done(sc);
3216 else
3217 wm_get_auto_rd_done(sc);
3218
3219 reg = CSR_READ(sc, WMREG_STATUS);
3220 if ((reg & STATUS_PHYRA) != 0)
3221 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3222 break;
3223 default:
3224 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3225 __func__);
3226 break;
3227 }
3228 }
3229
3230 /*
3231 * wm_reset:
3232 *
3233 * Reset the i82542 chip.
3234 */
3235 static void
3236 wm_reset(struct wm_softc *sc)
3237 {
3238 int phy_reset = 0;
3239 int error = 0;
3240 uint32_t reg, mask;
3241
3242 /*
3243 * Allocate on-chip memory according to the MTU size.
3244 * The Packet Buffer Allocation register must be written
3245 * before the chip is reset.
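*
* e.g. on the 82547 below, a standard MTU gets a 30K receive
* packet buffer (PBA_30K) and the remaining 40K - 30K = 10K of
* on-chip memory becomes the Tx FIFO.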
3246 */
3247 switch (sc->sc_type) {
3248 case WM_T_82547:
3249 case WM_T_82547_2:
3250 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3251 PBA_22K : PBA_30K;
3252 sc->sc_txfifo_head = 0;
3253 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3254 sc->sc_txfifo_size =
3255 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3256 sc->sc_txfifo_stall = 0;
3257 break;
3258 case WM_T_82571:
3259 case WM_T_82572:
3260 case WM_T_82575: /* XXX need special handing for jumbo frames */
3261 case WM_T_I350:
3262 case WM_T_I354:
3263 case WM_T_80003:
3264 sc->sc_pba = PBA_32K;
3265 break;
3266 case WM_T_82580:
3267 sc->sc_pba = PBA_35K;
3268 break;
3269 case WM_T_I210:
3270 case WM_T_I211:
3271 sc->sc_pba = PBA_34K;
3272 break;
3273 case WM_T_82576:
3274 sc->sc_pba = PBA_64K;
3275 break;
3276 case WM_T_82573:
3277 sc->sc_pba = PBA_12K;
3278 break;
3279 case WM_T_82574:
3280 case WM_T_82583:
3281 sc->sc_pba = PBA_20K;
3282 break;
3283 case WM_T_ICH8:
3284 sc->sc_pba = PBA_8K;
3285 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3286 break;
3287 case WM_T_ICH9:
3288 case WM_T_ICH10:
3289 sc->sc_pba = PBA_10K;
3290 break;
3291 case WM_T_PCH:
3292 case WM_T_PCH2:
3293 case WM_T_PCH_LPT:
3294 sc->sc_pba = PBA_26K;
3295 break;
3296 default:
3297 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3298 PBA_40K : PBA_48K;
3299 break;
3300 }
3301 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3302
3303 /* Prevent the PCI-E bus from sticking */
3304 if (sc->sc_flags & WM_F_PCIE) {
3305 int timeout = 800;
3306
3307 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3308 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3309
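/* Wait up to 800 * 100us = 80ms for pending requests to drain. */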
3310 while (timeout--) {
3311 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3312 == 0)
3313 break;
3314 delay(100);
3315 }
3316 }
3317
3318 /* Set the completion timeout for interface */
3319 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3320 || (sc->sc_type == WM_T_82580)
3321 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3322 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3323 wm_set_pcie_completion_timeout(sc);
3324
3325 /* Clear interrupt */
3326 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3327
3328 /* Stop the transmit and receive processes. */
3329 CSR_WRITE(sc, WMREG_RCTL, 0);
3330 sc->sc_rctl &= ~RCTL_EN;
3331 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3332 CSR_WRITE_FLUSH(sc);
3333
3334 /* XXX set_tbi_sbp_82543() */
3335
3336 delay(10*1000);
3337
3338 /* Must acquire the MDIO ownership before MAC reset */
3339 switch (sc->sc_type) {
3340 case WM_T_82573:
3341 case WM_T_82574:
3342 case WM_T_82583:
3343 error = wm_get_hw_semaphore_82573(sc);
3344 break;
3345 default:
3346 break;
3347 }
3348
3349 /*
3350 * 82541 Errata 29? & 82547 Errata 28?
3351 * See also the description about PHY_RST bit in CTRL register
3352 * in 8254x_GBe_SDM.pdf.
3353 */
3354 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3355 CSR_WRITE(sc, WMREG_CTRL,
3356 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3357 CSR_WRITE_FLUSH(sc);
3358 delay(5000);
3359 }
3360
3361 switch (sc->sc_type) {
3362 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3363 case WM_T_82541:
3364 case WM_T_82541_2:
3365 case WM_T_82547:
3366 case WM_T_82547_2:
3367 /*
3368 * On some chipsets, a reset through a memory-mapped write
3369 * cycle can cause the chip to reset before completing the
3370 * write cycle. This causes major headaches that can be
3371 * avoided by issuing the reset via indirect register writes
3372 * through I/O space.
3373 *
3374 * So, if we successfully mapped the I/O BAR at attach time,
3375 * use that. Otherwise, try our luck with a memory-mapped
3376 * reset.
3377 */
3378 if (sc->sc_flags & WM_F_IOH_VALID)
3379 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3380 else
3381 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3382 break;
3383 case WM_T_82545_3:
3384 case WM_T_82546_3:
3385 /* Use the shadow control register on these chips. */
3386 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3387 break;
3388 case WM_T_80003:
3389 mask = swfwphysem[sc->sc_funcid];
3390 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3391 wm_get_swfw_semaphore(sc, mask);
3392 CSR_WRITE(sc, WMREG_CTRL, reg);
3393 wm_put_swfw_semaphore(sc, mask);
3394 break;
3395 case WM_T_ICH8:
3396 case WM_T_ICH9:
3397 case WM_T_ICH10:
3398 case WM_T_PCH:
3399 case WM_T_PCH2:
3400 case WM_T_PCH_LPT:
3401 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3402 if (wm_check_reset_block(sc) == 0) {
3403 /*
3404 * Gate automatic PHY configuration by hardware on
3405 * non-managed 82579
3406 */
3407 if ((sc->sc_type == WM_T_PCH2)
3408 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3409 != 0))
3410 wm_gate_hw_phy_config_ich8lan(sc, 1);
3411
3412
3413 reg |= CTRL_PHY_RESET;
3414 phy_reset = 1;
3415 }
3416 wm_get_swfwhw_semaphore(sc);
3417 CSR_WRITE(sc, WMREG_CTRL, reg);
3418 /* Don't insert a completion barrier during reset; just delay */
3419 delay(20*1000);
3420 wm_put_swfwhw_semaphore(sc);
3421 break;
3422 case WM_T_82542_2_0:
3423 case WM_T_82542_2_1:
3424 case WM_T_82543:
3425 case WM_T_82540:
3426 case WM_T_82545:
3427 case WM_T_82546:
3428 case WM_T_82571:
3429 case WM_T_82572:
3430 case WM_T_82573:
3431 case WM_T_82574:
3432 case WM_T_82575:
3433 case WM_T_82576:
3434 case WM_T_82580:
3435 case WM_T_82583:
3436 case WM_T_I350:
3437 case WM_T_I354:
3438 case WM_T_I210:
3439 case WM_T_I211:
3440 default:
3441 /* Everything else can safely use the documented method. */
3442 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3443 break;
3444 }
3445
3446 /* Must release the MDIO ownership after MAC reset */
3447 switch (sc->sc_type) {
3448 case WM_T_82573:
3449 case WM_T_82574:
3450 case WM_T_82583:
3451 if (error == 0)
3452 wm_put_hw_semaphore_82573(sc);
3453 break;
3454 default:
3455 break;
3456 }
3457
3458 if (phy_reset != 0)
3459 wm_get_cfg_done(sc);
3460
3461 /* reload EEPROM */
3462 switch (sc->sc_type) {
3463 case WM_T_82542_2_0:
3464 case WM_T_82542_2_1:
3465 case WM_T_82543:
3466 case WM_T_82544:
3467 delay(10);
3468 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3469 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3470 CSR_WRITE_FLUSH(sc);
3471 delay(2000);
3472 break;
3473 case WM_T_82540:
3474 case WM_T_82545:
3475 case WM_T_82545_3:
3476 case WM_T_82546:
3477 case WM_T_82546_3:
3478 delay(5*1000);
3479 /* XXX Disable HW ARPs on ASF enabled adapters */
3480 break;
3481 case WM_T_82541:
3482 case WM_T_82541_2:
3483 case WM_T_82547:
3484 case WM_T_82547_2:
3485 delay(20000);
3486 /* XXX Disable HW ARPs on ASF enabled adapters */
3487 break;
3488 case WM_T_82571:
3489 case WM_T_82572:
3490 case WM_T_82573:
3491 case WM_T_82574:
3492 case WM_T_82583:
3493 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3494 delay(10);
3495 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3496 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3497 CSR_WRITE_FLUSH(sc);
3498 }
3499 /* check EECD_EE_AUTORD */
3500 wm_get_auto_rd_done(sc);
3501 /*
3502 * PHY configuration from the NVM starts just after EECD_EE_AUTORD
3503 * is set.
3504 */
3505 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3506 || (sc->sc_type == WM_T_82583))
3507 delay(25*1000);
3508 break;
3509 case WM_T_82575:
3510 case WM_T_82576:
3511 case WM_T_82580:
3512 case WM_T_I350:
3513 case WM_T_I354:
3514 case WM_T_I210:
3515 case WM_T_I211:
3516 case WM_T_80003:
3517 /* check EECD_EE_AUTORD */
3518 wm_get_auto_rd_done(sc);
3519 break;
3520 case WM_T_ICH8:
3521 case WM_T_ICH9:
3522 case WM_T_ICH10:
3523 case WM_T_PCH:
3524 case WM_T_PCH2:
3525 case WM_T_PCH_LPT:
3526 break;
3527 default:
3528 panic("%s: unknown type\n", __func__);
3529 }
3530
3531 /* Check whether EEPROM is present or not */
3532 switch (sc->sc_type) {
3533 case WM_T_82575:
3534 case WM_T_82576:
3535 #if 0 /* XXX */
3536 case WM_T_82580:
3537 #endif
3538 case WM_T_I350:
3539 case WM_T_I354:
3540 case WM_T_ICH8:
3541 case WM_T_ICH9:
3542 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3543 /* Not found */
3544 sc->sc_flags |= WM_F_EEPROM_INVALID;
3545 if ((sc->sc_type == WM_T_82575)
3546 || (sc->sc_type == WM_T_82576)
3547 || (sc->sc_type == WM_T_82580)
3548 || (sc->sc_type == WM_T_I350)
3549 || (sc->sc_type == WM_T_I354))
3550 wm_reset_init_script_82575(sc);
3551 }
3552 break;
3553 default:
3554 break;
3555 }
3556
3557 if ((sc->sc_type == WM_T_82580)
3558 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3559 /* clear global device reset status bit */
3560 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3561 }
3562
3563 /* Clear any pending interrupt events. */
3564 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3565 reg = CSR_READ(sc, WMREG_ICR);
3566
3567 /* reload sc_ctrl */
3568 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3569
3570 if (sc->sc_type == WM_T_I350)
3571 wm_set_eee_i350(sc);
3572
3573 /* dummy read from WUC */
3574 if (sc->sc_type == WM_T_PCH)
3575 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3576 /*
3577 * For PCH, this write will make sure that any noise will be detected
3578 * as a CRC error and be dropped rather than show up as a bad packet
3579 * to the DMA engine
3580 */
3581 if (sc->sc_type == WM_T_PCH)
3582 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3583
3584 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3585 CSR_WRITE(sc, WMREG_WUC, 0);
3586
3587 /* XXX need special handling for 82580 */
3588 }
3589
3590 /*
3591 * wm_add_rxbuf:
3592 *
3593  *	Add a receive buffer to the indicated descriptor.
3594 */
3595 static int
3596 wm_add_rxbuf(struct wm_softc *sc, int idx)
3597 {
3598 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3599 struct mbuf *m;
3600 int error;
3601
3602 KASSERT(WM_RX_LOCKED(sc));
3603
3604 MGETHDR(m, M_DONTWAIT, MT_DATA);
3605 if (m == NULL)
3606 return ENOBUFS;
3607
3608 MCLGET(m, M_DONTWAIT);
3609 if ((m->m_flags & M_EXT) == 0) {
3610 m_freem(m);
3611 return ENOBUFS;
3612 }
3613
3614 if (rxs->rxs_mbuf != NULL)
3615 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3616
3617 rxs->rxs_mbuf = m;
3618
3619 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3620 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3621 BUS_DMA_READ|BUS_DMA_NOWAIT);
3622 if (error) {
3623 /* XXX XXX XXX */
3624 aprint_error_dev(sc->sc_dev,
3625 "unable to load rx DMA map %d, error = %d\n",
3626 idx, error);
3627 panic("wm_add_rxbuf");
3628 }
3629
3630 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3631 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3632
3633 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3634 if ((sc->sc_rctl & RCTL_EN) != 0)
3635 WM_INIT_RXDESC(sc, idx);
3636 } else
3637 WM_INIT_RXDESC(sc, idx);
3638
3639 return 0;
3640 }
3641
3642 /*
3643 * wm_rxdrain:
3644 *
3645 * Drain the receive queue.
3646 */
3647 static void
3648 wm_rxdrain(struct wm_softc *sc)
3649 {
3650 struct wm_rxsoft *rxs;
3651 int i;
3652
3653 KASSERT(WM_RX_LOCKED(sc));
3654
3655 for (i = 0; i < WM_NRXDESC; i++) {
3656 rxs = &sc->sc_rxsoft[i];
3657 if (rxs->rxs_mbuf != NULL) {
3658 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3659 m_freem(rxs->rxs_mbuf);
3660 rxs->rxs_mbuf = NULL;
3661 }
3662 }
3663 }
3664
3665 /*
3666 * wm_init: [ifnet interface function]
3667 *
3668 * Initialize the interface.
3669 */
3670 static int
3671 wm_init(struct ifnet *ifp)
3672 {
3673 struct wm_softc *sc = ifp->if_softc;
3674 int ret;
3675
3676 WM_BOTH_LOCK(sc);
3677 ret = wm_init_locked(ifp);
3678 WM_BOTH_UNLOCK(sc);
3679
3680 return ret;
3681 }
3682
3683 static int
3684 wm_init_locked(struct ifnet *ifp)
3685 {
3686 struct wm_softc *sc = ifp->if_softc;
3687 struct wm_rxsoft *rxs;
3688 int i, j, trynum, error = 0;
3689 uint32_t reg;
3690
3691 KASSERT(WM_BOTH_LOCKED(sc));
3692 /*
3693	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3694	 * There is a small but measurable benefit to avoiding the adjustment
3695	 * of the descriptor so that the headers are aligned, for normal MTU,
3696 * on such platforms. One possibility is that the DMA itself is
3697 * slightly more efficient if the front of the entire packet (instead
3698 * of the front of the headers) is aligned.
3699 *
3700 * Note we must always set align_tweak to 0 if we are using
3701 * jumbo frames.
3702 */
3703 #ifdef __NO_STRICT_ALIGNMENT
3704 sc->sc_align_tweak = 0;
3705 #else
3706 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3707 sc->sc_align_tweak = 0;
3708 else
3709 sc->sc_align_tweak = 2;
3710 #endif /* __NO_STRICT_ALIGNMENT */
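	/*
	 * An illustrative sketch of the effect of sc_align_tweak (not part
	 * of the driver; 'm' is a hypothetical Rx mbuf): with a 14-byte
	 * Ethernet header, offsetting the buffer by 2 bytes makes the IP
	 * header land on a 4-byte boundary, which is what the
	 * strict-alignment platforms above require.
	 */
#if 0
	struct mbuf *m;			/* hypothetical Rx mbuf */
	char *buf;

	buf = mtod(m, char *) + sc->sc_align_tweak;	/* 0 or 2 */
	/* 2 + ETHER_HDR_LEN (14) == 16, so the IP header is aligned. */
	KASSERT((((uintptr_t)buf + ETHER_HDR_LEN) & 3) == 0 ||
	    sc->sc_align_tweak == 0);
#endif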
3711
3712 /* Cancel any pending I/O. */
3713 wm_stop_locked(ifp, 0);
3714
3715 /* update statistics before reset */
3716 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3717 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3718
3719 /* Reset the chip to a known state. */
3720 wm_reset(sc);
3721
3722 switch (sc->sc_type) {
3723 case WM_T_82571:
3724 case WM_T_82572:
3725 case WM_T_82573:
3726 case WM_T_82574:
3727 case WM_T_82583:
3728 case WM_T_80003:
3729 case WM_T_ICH8:
3730 case WM_T_ICH9:
3731 case WM_T_ICH10:
3732 case WM_T_PCH:
3733 case WM_T_PCH2:
3734 case WM_T_PCH_LPT:
3735 if (wm_check_mng_mode(sc) != 0)
3736 wm_get_hw_control(sc);
3737 break;
3738 default:
3739 break;
3740 }
3741
3742 /* Reset the PHY. */
3743 if (sc->sc_flags & WM_F_HAS_MII)
3744 wm_gmii_reset(sc);
3745
3746 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3747 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3748 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3749 || (sc->sc_type == WM_T_PCH_LPT))
3750 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3751
3752 /* Initialize the transmit descriptor ring. */
3753 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3754 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3755 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3756 sc->sc_txfree = WM_NTXDESC(sc);
3757 sc->sc_txnext = 0;
3758
3759 if (sc->sc_type < WM_T_82543) {
3760 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
3761 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
3762 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3763 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3764 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3765 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3766 } else {
3767 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
3768 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
3769 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3770 CSR_WRITE(sc, WMREG_TDH, 0);
3771 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3772 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3773
3774 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3775 /*
3776 * Don't write TDT before TCTL.EN is set.
3777 * See the document.
3778 */
3779 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3780 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3781 | TXDCTL_WTHRESH(0));
3782 else {
3783 CSR_WRITE(sc, WMREG_TDT, 0);
3784 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3785 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3786 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3787 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3788 }
3789 }
3790 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3791 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3792
3793 /* Initialize the transmit job descriptors. */
3794 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3795 sc->sc_txsoft[i].txs_mbuf = NULL;
3796 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3797 sc->sc_txsnext = 0;
3798 sc->sc_txsdirty = 0;
3799
3800 /*
3801 * Initialize the receive descriptor and receive job
3802 * descriptor rings.
3803 */
3804 if (sc->sc_type < WM_T_82543) {
3805 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3806 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3807 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3808 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3809 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3810 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3811
3812 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3813 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3814 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3815 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3816 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3817 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3818 } else {
3819 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3820 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3821 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3822 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3823 CSR_WRITE(sc, WMREG_EITR(0), 450);
3824 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
3825				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
3826 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3827 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
3828 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3829 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3830 | RXDCTL_WTHRESH(1));
3831 } else {
3832 CSR_WRITE(sc, WMREG_RDH, 0);
3833 CSR_WRITE(sc, WMREG_RDT, 0);
3834 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3835 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3836 }
3837 }
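	/*
	 * Worked instance of the SRRCTL sizing above (assuming the usual
	 * 1 KB units for the BSIZEPKT field, i.e. an
	 * SRRCTL_BSIZEPKT_SHIFT of 10): MCLBYTES = 2048 yields
	 * 2048 >> 10 = 2, programming 2 KB receive buffers; the panic
	 * fires when MCLBYTES is not a multiple of that unit.
	 */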
3838 for (i = 0; i < WM_NRXDESC; i++) {
3839 rxs = &sc->sc_rxsoft[i];
3840 if (rxs->rxs_mbuf == NULL) {
3841 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3842 log(LOG_ERR, "%s: unable to allocate or map "
3843 "rx buffer %d, error = %d\n",
3844 device_xname(sc->sc_dev), i, error);
3845 /*
3846 * XXX Should attempt to run with fewer receive
3847 * XXX buffers instead of just failing.
3848 */
3849 wm_rxdrain(sc);
3850 goto out;
3851 }
3852 } else {
3853 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3854 WM_INIT_RXDESC(sc, i);
3855 /*
3856			 * For 82575 and newer devices, the RX descriptors
3857			 * must be initialized after RCTL.EN is set in
3858			 * wm_set_filter().
3859 */
3860 }
3861 }
3862 sc->sc_rxptr = 0;
3863 sc->sc_rxdiscard = 0;
3864 WM_RXCHAIN_RESET(sc);
3865
3866 /*
3867 * Clear out the VLAN table -- we don't use it (yet).
3868 */
3869 CSR_WRITE(sc, WMREG_VET, 0);
3870 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3871 trynum = 10; /* Due to hw errata */
3872 else
3873 trynum = 1;
3874 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3875 for (j = 0; j < trynum; j++)
3876 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
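	/*
	 * For reference, a sketch of how one entry of the VLAN filter
	 * table would be set if the driver used it (an assumption based
	 * on the usual Intel VFTA layout -- one bit per VLAN ID -- not
	 * code taken from this driver; 'vid' is a hypothetical VLAN ID):
	 */
#if 0
	uint32_t vfta;

	vfta = CSR_READ(sc, WMREG_VFTA + ((vid >> 5) << 2));
	vfta |= 1U << (vid & 31);
	CSR_WRITE(sc, WMREG_VFTA + ((vid >> 5) << 2), vfta);
#endif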
3877
3878 /*
3879 * Set up flow-control parameters.
3880 *
3881 * XXX Values could probably stand some tuning.
3882 */
3883 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3884 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
3885 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
3886 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3887 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3888 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3889 }
3890
3891 sc->sc_fcrtl = FCRTL_DFLT;
3892 if (sc->sc_type < WM_T_82543) {
3893 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3894 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3895 } else {
3896 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3897 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3898 }
3899
3900 if (sc->sc_type == WM_T_80003)
3901 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3902 else
3903 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3904
3905 /* Writes the control register. */
3906 wm_set_vlan(sc);
3907
3908 if (sc->sc_flags & WM_F_HAS_MII) {
3909 int val;
3910
3911 switch (sc->sc_type) {
3912 case WM_T_80003:
3913 case WM_T_ICH8:
3914 case WM_T_ICH9:
3915 case WM_T_ICH10:
3916 case WM_T_PCH:
3917 case WM_T_PCH2:
3918 case WM_T_PCH_LPT:
3919 /*
3920			 * Set the MAC to wait the maximum time between
3921			 * iterations and increase the maximum number of
3922			 * iterations when polling the PHY; this fixes
3923			 * erroneous timeouts at 10Mbps.
3924 */
3925 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3926 0xFFFF);
3927 val = wm_kmrn_readreg(sc,
3928 KUMCTRLSTA_OFFSET_INB_PARAM);
3929 val |= 0x3F;
3930 wm_kmrn_writereg(sc,
3931 KUMCTRLSTA_OFFSET_INB_PARAM, val);
3932 break;
3933 default:
3934 break;
3935 }
3936
3937 if (sc->sc_type == WM_T_80003) {
3938 val = CSR_READ(sc, WMREG_CTRL_EXT);
3939 val &= ~CTRL_EXT_LINK_MODE_MASK;
3940 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3941
3942 /* Bypass RX and TX FIFO's */
3943 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3944 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
3945 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3946 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3947 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3948 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3949 }
3950 }
3951 #if 0
3952 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3953 #endif
3954
3955 /* Set up checksum offload parameters. */
3956 reg = CSR_READ(sc, WMREG_RXCSUM);
3957 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3958 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3959 reg |= RXCSUM_IPOFL;
3960 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3961 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3962 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3963 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3964 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3965
3966 /* Set up the interrupt registers. */
3967 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3968 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3969 ICR_RXO | ICR_RXT0;
3970 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
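	/*
	 * Note on the interrupt registers used here (behavior common to
	 * this MAC family): a write to IMC disables the given interrupt
	 * causes, a write to IMS enables them, and a read of ICR returns
	 * and clears the currently asserted causes -- hence the IMC write
	 * followed by an ICR read used elsewhere to quiesce the device.
	 */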
3971
3972 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3973 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3974 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3975 reg = CSR_READ(sc, WMREG_KABGTXD);
3976 reg |= KABGTXD_BGSQLBIAS;
3977 CSR_WRITE(sc, WMREG_KABGTXD, reg);
3978 }
3979
3980 /* Set up the inter-packet gap. */
3981 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3982
3983 if (sc->sc_type >= WM_T_82543) {
3984 /*
3985 * Set up the interrupt throttling register (units of 256ns)
3986 * Note that a footnote in Intel's documentation says this
3987 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3988 * or 10Mbit mode. Empirically, it appears to be the case
3989 * that that is also true for the 1024ns units of the other
3990 * interrupt-related timer registers -- so, really, we ought
3991 * to divide this value by 4 when the link speed is low.
3992 *
3993 * XXX implement this division at link speed change!
3994 */
3995
3996 /*
3997 * For N interrupts/sec, set this value to:
3998 * 1000000000 / (N * 256). Note that we set the
3999 * absolute and packet timer values to this value
4000 * divided by 4 to get "simple timer" behavior.
4001 */
4002
4003 sc->sc_itr = 1500; /* 2604 ints/sec */
4004 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4005 }
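	/*
	 * Worked instance of the formula above: sc_itr = 1500 in 256 ns
	 * units is a 384 us minimum gap, i.e. 1000000000 / (1500 * 256)
	 * ~= 2604 interrupts/sec, matching the comment at the assignment.
	 * A sketch of going the other way, from a target rate to an ITR
	 * value (hypothetical, not part of this driver):
	 */
#if 0
	uint32_t target = 2604;		/* hypothetical interrupt rate */

	sc->sc_itr = 1000000000U / (target * 256U);	/* ~= 1500 */
#endif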
4006
4007 /* Set the VLAN ethernetype. */
4008 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4009
4010 /*
4011 * Set up the transmit control register; we start out with
4012	 * a collision distance suitable for FDX, but update it when
4013 * we resolve the media type.
4014 */
4015 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4016 | TCTL_CT(TX_COLLISION_THRESHOLD)
4017 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4018 if (sc->sc_type >= WM_T_82571)
4019 sc->sc_tctl |= TCTL_MULR;
4020 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4021
4022 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4023 /* Write TDT after TCTL.EN is set. See the document. */
4024 CSR_WRITE(sc, WMREG_TDT, 0);
4025 }
4026
4027 if (sc->sc_type == WM_T_80003) {
4028 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4029 reg &= ~TCTL_EXT_GCEX_MASK;
4030 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4031 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4032 }
4033
4034 /* Set the media. */
4035 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4036 goto out;
4037
4038 /* Configure for OS presence */
4039 wm_init_manageability(sc);
4040
4041 /*
4042 * Set up the receive control register; we actually program
4043 * the register when we set the receive filter. Use multicast
4044 * address offset type 0.
4045 *
4046 * Only the i82544 has the ability to strip the incoming
4047 * CRC, so we don't enable that feature.
4048 */
4049 sc->sc_mchash_type = 0;
4050 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4051 | RCTL_MO(sc->sc_mchash_type);
4052
4053 /*
4054	 * The I350 and its relatives have a bug where the CRC is always
4055	 * stripped, so request stripped CRC here and cope in wm_rxintr().
4056 */
4057 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4058 || (sc->sc_type == WM_T_I210))
4059 sc->sc_rctl |= RCTL_SECRC;
4060
4061 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4062 && (ifp->if_mtu > ETHERMTU)) {
4063 sc->sc_rctl |= RCTL_LPE;
4064 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4065 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4066 }
4067
4068 if (MCLBYTES == 2048) {
4069 sc->sc_rctl |= RCTL_2k;
4070 } else {
4071 if (sc->sc_type >= WM_T_82543) {
4072 switch (MCLBYTES) {
4073 case 4096:
4074 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4075 break;
4076 case 8192:
4077 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4078 break;
4079 case 16384:
4080 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4081 break;
4082 default:
4083 panic("wm_init: MCLBYTES %d unsupported",
4084 MCLBYTES);
4085 break;
4086 }
4087		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
4088 }
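	/*
	 * The cluster sizes accepted above could equivalently be checked
	 * at build time; a sketch (assuming CTASSERT is available here,
	 * as it is elsewhere in the kernel):
	 */
#if 0
	CTASSERT(MCLBYTES == 2048 || MCLBYTES == 4096 ||
	    MCLBYTES == 8192 || MCLBYTES == 16384);
#endif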
4089
4090 /* Set the receive filter. */
4091 wm_set_filter(sc);
4092
4093 /* Enable ECC */
4094 switch (sc->sc_type) {
4095 case WM_T_82571:
4096 reg = CSR_READ(sc, WMREG_PBA_ECC);
4097 reg |= PBA_ECC_CORR_EN;
4098 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4099 break;
4100 case WM_T_PCH_LPT:
4101 reg = CSR_READ(sc, WMREG_PBECCSTS);
4102 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4103 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4104
4105 reg = CSR_READ(sc, WMREG_CTRL);
4106 reg |= CTRL_MEHE;
4107 CSR_WRITE(sc, WMREG_CTRL, reg);
4108 break;
4109 default:
4110 break;
4111 }
4112
4113 /* On 575 and later set RDT only if RX enabled */
4114 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4115 for (i = 0; i < WM_NRXDESC; i++)
4116 WM_INIT_RXDESC(sc, i);
4117
4118 sc->sc_stopping = false;
4119
4120 /* Start the one second link check clock. */
4121 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4122
4123 /* ...all done! */
4124 ifp->if_flags |= IFF_RUNNING;
4125 ifp->if_flags &= ~IFF_OACTIVE;
4126
4127 out:
4128 sc->sc_if_flags = ifp->if_flags;
4129 if (error)
4130 log(LOG_ERR, "%s: interface not running\n",
4131 device_xname(sc->sc_dev));
4132 return error;
4133 }
4134
4135 /*
4136 * wm_stop: [ifnet interface function]
4137 *
4138 * Stop transmission on the interface.
4139 */
4140 static void
4141 wm_stop(struct ifnet *ifp, int disable)
4142 {
4143 struct wm_softc *sc = ifp->if_softc;
4144
4145 WM_BOTH_LOCK(sc);
4146 wm_stop_locked(ifp, disable);
4147 WM_BOTH_UNLOCK(sc);
4148 }
4149
4150 static void
4151 wm_stop_locked(struct ifnet *ifp, int disable)
4152 {
4153 struct wm_softc *sc = ifp->if_softc;
4154 struct wm_txsoft *txs;
4155 int i;
4156
4157 KASSERT(WM_BOTH_LOCKED(sc));
4158
4159 sc->sc_stopping = true;
4160
4161 /* Stop the one second clock. */
4162 callout_stop(&sc->sc_tick_ch);
4163
4164 /* Stop the 82547 Tx FIFO stall check timer. */
4165 if (sc->sc_type == WM_T_82547)
4166 callout_stop(&sc->sc_txfifo_ch);
4167
4168 if (sc->sc_flags & WM_F_HAS_MII) {
4169 /* Down the MII. */
4170 mii_down(&sc->sc_mii);
4171 } else {
4172 #if 0
4173 /* Should we clear PHY's status properly? */
4174 wm_reset(sc);
4175 #endif
4176 }
4177
4178 /* Stop the transmit and receive processes. */
4179 CSR_WRITE(sc, WMREG_TCTL, 0);
4180 CSR_WRITE(sc, WMREG_RCTL, 0);
4181 sc->sc_rctl &= ~RCTL_EN;
4182
4183 /*
4184 * Clear the interrupt mask to ensure the device cannot assert its
4185 * interrupt line.
4186 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4187 * any currently pending or shared interrupt.
4188 */
4189 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4190 sc->sc_icr = 0;
4191
4192 /* Release any queued transmit buffers. */
4193 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4194 txs = &sc->sc_txsoft[i];
4195 if (txs->txs_mbuf != NULL) {
4196 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4197 m_freem(txs->txs_mbuf);
4198 txs->txs_mbuf = NULL;
4199 }
4200 }
4201
4202 /* Mark the interface as down and cancel the watchdog timer. */
4203 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4204 ifp->if_timer = 0;
4205
4206 if (disable)
4207 wm_rxdrain(sc);
4208
4209 #if 0 /* notyet */
4210 if (sc->sc_type >= WM_T_82544)
4211 CSR_WRITE(sc, WMREG_WUC, 0);
4212 #endif
4213 }
4214
4215 /*
4216 * wm_tx_offload:
4217 *
4218 * Set up TCP/IP checksumming parameters for the
4219 * specified packet.
4220 */
4221 static int
4222 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4223 uint8_t *fieldsp)
4224 {
4225 struct mbuf *m0 = txs->txs_mbuf;
4226 struct livengood_tcpip_ctxdesc *t;
4227 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4228 uint32_t ipcse;
4229 struct ether_header *eh;
4230 int offset, iphl;
4231 uint8_t fields;
4232
4233 /*
4234 * XXX It would be nice if the mbuf pkthdr had offset
4235 * fields for the protocol headers.
4236 */
4237
4238 eh = mtod(m0, struct ether_header *);
4239 switch (htons(eh->ether_type)) {
4240 case ETHERTYPE_IP:
4241 case ETHERTYPE_IPV6:
4242 offset = ETHER_HDR_LEN;
4243 break;
4244
4245 case ETHERTYPE_VLAN:
4246 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4247 break;
4248
4249 default:
4250 /*
4251 * Don't support this protocol or encapsulation.
4252 */
4253 *fieldsp = 0;
4254 *cmdp = 0;
4255 return 0;
4256 }
4257
4258 if ((m0->m_pkthdr.csum_flags &
4259 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4260 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4261 } else {
4262 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4263 }
4264 ipcse = offset + iphl - 1;
4265
4266 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4267 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4268 seg = 0;
4269 fields = 0;
4270
4271 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4272 int hlen = offset + iphl;
4273 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4274
4275 if (__predict_false(m0->m_len <
4276 (hlen + sizeof(struct tcphdr)))) {
4277 /*
4278 * TCP/IP headers are not in the first mbuf; we need
4279 * to do this the slow and painful way. Let's just
4280 * hope this doesn't happen very often.
4281 */
4282 struct tcphdr th;
4283
4284 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4285
4286 m_copydata(m0, hlen, sizeof(th), &th);
4287 if (v4) {
4288 struct ip ip;
4289
4290 m_copydata(m0, offset, sizeof(ip), &ip);
4291 ip.ip_len = 0;
4292 m_copyback(m0,
4293 offset + offsetof(struct ip, ip_len),
4294 sizeof(ip.ip_len), &ip.ip_len);
4295 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4296 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4297 } else {
4298 struct ip6_hdr ip6;
4299
4300 m_copydata(m0, offset, sizeof(ip6), &ip6);
4301 ip6.ip6_plen = 0;
4302 m_copyback(m0,
4303 offset + offsetof(struct ip6_hdr, ip6_plen),
4304 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4305 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4306 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4307 }
4308 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4309 sizeof(th.th_sum), &th.th_sum);
4310
4311 hlen += th.th_off << 2;
4312 } else {
4313 /*
4314 * TCP/IP headers are in the first mbuf; we can do
4315 * this the easy way.
4316 */
4317 struct tcphdr *th;
4318
4319 if (v4) {
4320 struct ip *ip =
4321 (void *)(mtod(m0, char *) + offset);
4322 th = (void *)(mtod(m0, char *) + hlen);
4323
4324 ip->ip_len = 0;
4325 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4326 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4327 } else {
4328 struct ip6_hdr *ip6 =
4329 (void *)(mtod(m0, char *) + offset);
4330 th = (void *)(mtod(m0, char *) + hlen);
4331
4332 ip6->ip6_plen = 0;
4333 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4334 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4335 }
4336 hlen += th->th_off << 2;
4337 }
4338
4339 if (v4) {
4340 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4341 cmdlen |= WTX_TCPIP_CMD_IP;
4342 } else {
4343 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4344 ipcse = 0;
4345 }
4346 cmd |= WTX_TCPIP_CMD_TSE;
4347 cmdlen |= WTX_TCPIP_CMD_TSE |
4348 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4349 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4350 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4351 }
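	/*
	 * Background for the TSO setup above: the controller rewrites the
	 * IP length field and TCP checksum for every segment it emits, so
	 * the driver zeroes ip_len/ip6_plen and seeds th_sum with only the
	 * pseudo-header checksum (addresses and protocol, no length),
	 * which is what in_cksum_phdr()/in6_cksum_phdr() return when
	 * handed a zero length.
	 */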
4352
4353 /*
4354 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4355 * offload feature, if we load the context descriptor, we
4356 * MUST provide valid values for IPCSS and TUCSS fields.
4357 */
4358
4359 ipcs = WTX_TCPIP_IPCSS(offset) |
4360 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4361 WTX_TCPIP_IPCSE(ipcse);
4362 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
4363 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
4364 fields |= WTX_IXSM;
4365 }
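	/*
	 * Concrete instance of the IP context fields above for a plain
	 * Ethernet/IPv4 frame (offsets assumed for illustration):
	 * offset = 14 and iphl = 20 give IPCSS = 14 (start of the IP
	 * header), IPCSO = 14 + 10 = 24 (ip_sum sits 10 bytes into the
	 * header), and IPCSE = 14 + 20 - 1 = 33 (last byte the checksum
	 * covers).
	 */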
4366
4367 offset += iphl;
4368
4369 if (m0->m_pkthdr.csum_flags &
4370 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
4371 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4372 fields |= WTX_TXSM;
4373 tucs = WTX_TCPIP_TUCSS(offset) |
4374 WTX_TCPIP_TUCSO(offset +
4375 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
4376 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4377 } else if ((m0->m_pkthdr.csum_flags &
4378 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
4379 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4380 fields |= WTX_TXSM;
4381 tucs = WTX_TCPIP_TUCSS(offset) |
4382 WTX_TCPIP_TUCSO(offset +
4383 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
4384 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4385 } else {
4386 /* Just initialize it to a valid TCP context. */
4387 tucs = WTX_TCPIP_TUCSS(offset) |
4388 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
4389 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4390 }
4391
4392 /* Fill in the context descriptor. */
4393 t = (struct livengood_tcpip_ctxdesc *)
4394 &sc->sc_txdescs[sc->sc_txnext];
4395 t->tcpip_ipcs = htole32(ipcs);
4396 t->tcpip_tucs = htole32(tucs);
4397 t->tcpip_cmdlen = htole32(cmdlen);
4398 t->tcpip_seg = htole32(seg);
4399 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4400
4401 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4402 txs->txs_ndesc++;
4403
4404 *cmdp = cmd;
4405 *fieldsp = fields;
4406
4407 return 0;
4408 }
4409
4410 static void
4411 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
4412 {
4413 struct mbuf *m;
4414 int i;
4415
4416 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
4417 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
4418 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
4419 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
4420 m->m_data, m->m_len, m->m_flags);
4421 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
4422 i, i == 1 ? "" : "s");
4423 }
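/*
 * Sample of what the routine above logs for a one-mbuf chain (the
 * pointer and flag values are made up for illustration):
 *
 *	wm0: mbuf chain:
 *	wm0:	m_data = 0xffff800012345000, m_len = 1514, m_flags = 0x00000003
 *	wm0:	1 mbuf in chain
 */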
4424
4425 /*
4426 * wm_82547_txfifo_stall:
4427 *
4428 * Callout used to wait for the 82547 Tx FIFO to drain,
4429 * reset the FIFO pointers, and restart packet transmission.
4430 */
4431 static void
4432 wm_82547_txfifo_stall(void *arg)
4433 {
4434 struct wm_softc *sc = arg;
4435 #ifndef WM_MPSAFE
4436 int s;
4437
4438 s = splnet();
4439 #endif
4440 WM_TX_LOCK(sc);
4441
4442 if (sc->sc_stopping)
4443 goto out;
4444
4445 if (sc->sc_txfifo_stall) {
4446 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
4447 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
4448 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
4449 /*
4450 * Packets have drained. Stop transmitter, reset
4451 * FIFO pointers, restart transmitter, and kick
4452 * the packet queue.
4453 */
4454 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
4455 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
4456 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
4457 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
4458 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
4459 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
4460 CSR_WRITE(sc, WMREG_TCTL, tctl);
4461 CSR_WRITE_FLUSH(sc);
4462
4463 sc->sc_txfifo_head = 0;
4464 sc->sc_txfifo_stall = 0;
4465 wm_start_locked(&sc->sc_ethercom.ec_if);
4466 } else {
4467 /*
4468 * Still waiting for packets to drain; try again in
4469 * another tick.
4470 */
4471 callout_schedule(&sc->sc_txfifo_ch, 1);
4472 }
4473 }
4474
4475 out:
4476 WM_TX_UNLOCK(sc);
4477 #ifndef WM_MPSAFE
4478 splx(s);
4479 #endif
4480 }
4481
4482 /*
4483 * wm_82547_txfifo_bugchk:
4484 *
4485 * Check for bug condition in the 82547 Tx FIFO. We need to
4486 * prevent enqueueing a packet that would wrap around the end
4487  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
4488 *
4489 * We do this by checking the amount of space before the end
4490 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
4491 * the Tx FIFO, wait for all remaining packets to drain, reset
4492 * the internal FIFO pointers to the beginning, and restart
4493 * transmission on the interface.
4494 */
4495 #define WM_FIFO_HDR 0x10
4496 #define WM_82547_PAD_LEN 0x3e0
4497 static int
4498 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
4499 {
4500 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
4501 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
4502
4503 /* Just return if already stalled. */
4504 if (sc->sc_txfifo_stall)
4505 return 1;
4506
4507 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4508 /* Stall only occurs in half-duplex mode. */
4509 goto send_packet;
4510 }
4511
4512 if (len >= WM_82547_PAD_LEN + space) {
4513 sc->sc_txfifo_stall = 1;
4514 callout_schedule(&sc->sc_txfifo_ch, 1);
4515 return 1;
4516 }
4517
4518 send_packet:
4519 sc->sc_txfifo_head += len;
4520 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
4521 sc->sc_txfifo_head -= sc->sc_txfifo_size;
4522
4523 return 0;
4524 }
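/*
 * Worked example of the check above (all numbers are illustrative
 * assumptions): with sc_txfifo_size = 0x2000 and sc_txfifo_head =
 * 0x1f00, space = 0x100.  A 128-byte packet rounds up to len = 0x90
 * (128 + 0x10 of header, rounded to 0x10), and 0x90 <
 * WM_82547_PAD_LEN + 0x100, so it is sent and the head advances to
 * 0x1f90.  A packet with len >= 0x4e0 would instead stall the FIFO
 * until it drains.
 */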
4525
4526 /*
4527 * wm_start: [ifnet interface function]
4528 *
4529 * Start packet transmission on the interface.
4530 */
4531 static void
4532 wm_start(struct ifnet *ifp)
4533 {
4534 struct wm_softc *sc = ifp->if_softc;
4535
4536 WM_TX_LOCK(sc);
4537 if (!sc->sc_stopping)
4538 wm_start_locked(ifp);
4539 WM_TX_UNLOCK(sc);
4540 }
4541
4542 static void
4543 wm_start_locked(struct ifnet *ifp)
4544 {
4545 struct wm_softc *sc = ifp->if_softc;
4546 struct mbuf *m0;
4547 struct m_tag *mtag;
4548 struct wm_txsoft *txs;
4549 bus_dmamap_t dmamap;
4550 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
4551 bus_addr_t curaddr;
4552 bus_size_t seglen, curlen;
4553 uint32_t cksumcmd;
4554 uint8_t cksumfields;
4555
4556 KASSERT(WM_TX_LOCKED(sc));
4557
4558 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4559 return;
4560
4561 /* Remember the previous number of free descriptors. */
4562 ofree = sc->sc_txfree;
4563
4564 /*
4565 * Loop through the send queue, setting up transmit descriptors
4566 * until we drain the queue, or use up all available transmit
4567 * descriptors.
4568 */
4569 for (;;) {
4570 m0 = NULL;
4571
4572 /* Get a work queue entry. */
4573 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
4574 wm_txintr(sc);
4575 if (sc->sc_txsfree == 0) {
4576 DPRINTF(WM_DEBUG_TX,
4577 ("%s: TX: no free job descriptors\n",
4578 device_xname(sc->sc_dev)));
4579 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
4580 break;
4581 }
4582 }
4583
4584 /* Grab a packet off the queue. */
4585 IFQ_DEQUEUE(&ifp->if_snd, m0);
4586 if (m0 == NULL)
4587 break;
4588
4589 DPRINTF(WM_DEBUG_TX,
4590 ("%s: TX: have packet to transmit: %p\n",
4591 device_xname(sc->sc_dev), m0));
4592
4593 txs = &sc->sc_txsoft[sc->sc_txsnext];
4594 dmamap = txs->txs_dmamap;
4595
4596 use_tso = (m0->m_pkthdr.csum_flags &
4597 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
4598
4599 /*
4600 * So says the Linux driver:
4601 * The controller does a simple calculation to make sure
4602 * there is enough room in the FIFO before initiating the
4603 * DMA for each buffer. The calc is:
4604 * 4 = ceil(buffer len / MSS)
4605 * To make sure we don't overrun the FIFO, adjust the max
4606 * buffer len if the MSS drops.
4607 */
4608 dmamap->dm_maxsegsz =
4609 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
4610 ? m0->m_pkthdr.segsz << 2
4611 : WTX_MAX_LEN;
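		/*
		 * Illustrative instance of the clamp above (1460 is an
		 * assumed MSS): segsz = 1460 gives 1460 << 2 = 5840, so
		 * the map's maximum segment size becomes 5840 when that
		 * is still below WTX_MAX_LEN, and WTX_MAX_LEN otherwise.
		 */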
4612
4613 /*
4614 * Load the DMA map. If this fails, the packet either
4615 * didn't fit in the allotted number of segments, or we
4616 * were short on resources. For the too-many-segments
4617 * case, we simply report an error and drop the packet,
4618 * since we can't sanely copy a jumbo packet to a single
4619 * buffer.
4620 */
4621 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
4622 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
4623 if (error) {
4624 if (error == EFBIG) {
4625 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4626 log(LOG_ERR, "%s: Tx packet consumes too many "
4627 "DMA segments, dropping...\n",
4628 device_xname(sc->sc_dev));
4629 wm_dump_mbuf_chain(sc, m0);
4630 m_freem(m0);
4631 continue;
4632 }
4633 /* Short on resources, just stop for now. */
4634 DPRINTF(WM_DEBUG_TX,
4635 ("%s: TX: dmamap load failed: %d\n",
4636 device_xname(sc->sc_dev), error));
4637 break;
4638 }
4639
4640 segs_needed = dmamap->dm_nsegs;
4641 if (use_tso) {
4642 /* For sentinel descriptor; see below. */
4643 segs_needed++;
4644 }
4645
4646 /*
4647 * Ensure we have enough descriptors free to describe
4648 * the packet. Note, we always reserve one descriptor
4649 * at the end of the ring due to the semantics of the
4650 * TDT register, plus one more in the event we need
4651 * to load offload context.
4652 */
4653 if (segs_needed > sc->sc_txfree - 2) {
4654 /*
4655 * Not enough free descriptors to transmit this
4656 * packet. We haven't committed anything yet,
4657 * so just unload the DMA map, put the packet
4658 * pack on the queue, and punt. Notify the upper
4659 * layer that there are no more slots left.
4660 */
4661 DPRINTF(WM_DEBUG_TX,
4662 ("%s: TX: need %d (%d) descriptors, have %d\n",
4663 device_xname(sc->sc_dev), dmamap->dm_nsegs,
4664 segs_needed, sc->sc_txfree - 1));
4665 ifp->if_flags |= IFF_OACTIVE;
4666 bus_dmamap_unload(sc->sc_dmat, dmamap);
4667 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
4668 break;
4669 }
4670
4671 /*
4672 * Check for 82547 Tx FIFO bug. We need to do this
4673 * once we know we can transmit the packet, since we
4674 * do some internal FIFO space accounting here.
4675 */
4676 if (sc->sc_type == WM_T_82547 &&
4677 wm_82547_txfifo_bugchk(sc, m0)) {
4678 DPRINTF(WM_DEBUG_TX,
4679 ("%s: TX: 82547 Tx FIFO bug detected\n",
4680 device_xname(sc->sc_dev)));
4681 ifp->if_flags |= IFF_OACTIVE;
4682 bus_dmamap_unload(sc->sc_dmat, dmamap);
4683 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
4684 break;
4685 }
4686
4687 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
4688
4689 DPRINTF(WM_DEBUG_TX,
4690 ("%s: TX: packet has %d (%d) DMA segments\n",
4691 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
4692
4693 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
4694
4695 /*
4696 * Store a pointer to the packet so that we can free it
4697 * later.
4698 *
4699 * Initially, we consider the number of descriptors the
4700 * packet uses the number of DMA segments. This may be
4701 * incremented by 1 if we do checksum offload (a descriptor
4702 * is used to set the checksum context).
4703 */
4704 txs->txs_mbuf = m0;
4705 txs->txs_firstdesc = sc->sc_txnext;
4706 txs->txs_ndesc = segs_needed;
4707
4708 /* Set up offload parameters for this packet. */
4709 if (m0->m_pkthdr.csum_flags &
4710 (M_CSUM_TSOv4|M_CSUM_TSOv6|
4711 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
4712 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
4713 if (wm_tx_offload(sc, txs, &cksumcmd,
4714 &cksumfields) != 0) {
4715 /* Error message already displayed. */
4716 bus_dmamap_unload(sc->sc_dmat, dmamap);
4717 continue;
4718 }
4719 } else {
4720 cksumcmd = 0;
4721 cksumfields = 0;
4722 }
4723
4724 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
4725
4726 /* Sync the DMA map. */
4727 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
4728 BUS_DMASYNC_PREWRITE);
4729
4730 /* Initialize the transmit descriptor. */
4731 for (nexttx = sc->sc_txnext, seg = 0;
4732 seg < dmamap->dm_nsegs; seg++) {
4733 for (seglen = dmamap->dm_segs[seg].ds_len,
4734 curaddr = dmamap->dm_segs[seg].ds_addr;
4735 seglen != 0;
4736 curaddr += curlen, seglen -= curlen,
4737 nexttx = WM_NEXTTX(sc, nexttx)) {
4738 curlen = seglen;
4739
4740 /*
4741 * So says the Linux driver:
4742 * Work around for premature descriptor
4743 * write-backs in TSO mode. Append a
4744 * 4-byte sentinel descriptor.
4745 */
4746 if (use_tso &&
4747 seg == dmamap->dm_nsegs - 1 &&
4748 curlen > 8)
4749 curlen -= 4;
4750
4751 wm_set_dma_addr(
4752 &sc->sc_txdescs[nexttx].wtx_addr,
4753 curaddr);
4754 sc->sc_txdescs[nexttx].wtx_cmdlen =
4755 htole32(cksumcmd | curlen);
4756 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
4757 0;
4758 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
4759 cksumfields;
4760 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
4761 lasttx = nexttx;
4762
4763 DPRINTF(WM_DEBUG_TX,
4764 ("%s: TX: desc %d: low %#" PRIx64 ", "
4765 "len %#04zx\n",
4766 device_xname(sc->sc_dev), nexttx,
4767 (uint64_t)curaddr, curlen));
4768 }
4769 }
4770
4771 KASSERT(lasttx != -1);
4772
4773 /*
4774 * Set up the command byte on the last descriptor of
4775 * the packet. If we're in the interrupt delay window,
4776 * delay the interrupt.
4777 */
4778 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4779 htole32(WTX_CMD_EOP | WTX_CMD_RS);
4780
4781 /*
4782 * If VLANs are enabled and the packet has a VLAN tag, set
4783 * up the descriptor to encapsulate the packet for us.
4784 *
4785 * This is only valid on the last descriptor of the packet.
4786 */
4787 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4788 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4789 htole32(WTX_CMD_VLE);
4790 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
4791 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
4792 }
4793
4794 txs->txs_lastdesc = lasttx;
4795
4796 DPRINTF(WM_DEBUG_TX,
4797 ("%s: TX: desc %d: cmdlen 0x%08x\n",
4798 device_xname(sc->sc_dev),
4799 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
4800
4801 /* Sync the descriptors we're using. */
4802 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
4803 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4804
4805 /* Give the packet to the chip. */
4806 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
4807
4808 DPRINTF(WM_DEBUG_TX,
4809 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
4810
4811 DPRINTF(WM_DEBUG_TX,
4812 ("%s: TX: finished transmitting packet, job %d\n",
4813 device_xname(sc->sc_dev), sc->sc_txsnext));
4814
4815 /* Advance the tx pointer. */
4816 sc->sc_txfree -= txs->txs_ndesc;
4817 sc->sc_txnext = nexttx;
4818
4819 sc->sc_txsfree--;
4820 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
4821
4822 /* Pass the packet to any BPF listeners. */
4823 bpf_mtap(ifp, m0);
4824 }
4825
4826 if (m0 != NULL) {
4827 ifp->if_flags |= IFF_OACTIVE;
4828 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4829		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
4830 m_freem(m0);
4831 }
4832
4833 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
4834 /* No more slots; notify upper layer. */
4835 ifp->if_flags |= IFF_OACTIVE;
4836 }
4837
4838 if (sc->sc_txfree != ofree) {
4839 /* Set a watchdog timer in case the chip flakes out. */
4840 ifp->if_timer = 5;
4841 }
4842 }
4843
4844 /*
4845 * wm_nq_tx_offload:
4846 *
4847 * Set up TCP/IP checksumming parameters for the
4848 * specified packet, for NEWQUEUE devices
4849 */
4850 static int
4851 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
4852 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
4853 {
4854 struct mbuf *m0 = txs->txs_mbuf;
4855 struct m_tag *mtag;
4856 uint32_t vl_len, mssidx, cmdc;
4857 struct ether_header *eh;
4858 int offset, iphl;
4859
4860 /*
4861 * XXX It would be nice if the mbuf pkthdr had offset
4862 * fields for the protocol headers.
4863 */
4864 *cmdlenp = 0;
4865 *fieldsp = 0;
4866
4867 eh = mtod(m0, struct ether_header *);
4868 switch (htons(eh->ether_type)) {
4869 case ETHERTYPE_IP:
4870 case ETHERTYPE_IPV6:
4871 offset = ETHER_HDR_LEN;
4872 break;
4873
4874 case ETHERTYPE_VLAN:
4875 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4876 break;
4877
4878 default:
4879 /* Don't support this protocol or encapsulation. */
4880 *do_csum = false;
4881 return 0;
4882 }
4883 *do_csum = true;
4884 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
4885 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
4886
4887 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
4888 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
4889
4890 if ((m0->m_pkthdr.csum_flags &
4891 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
4892 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4893 } else {
4894 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4895 }
4896 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
4897 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
4898
4899 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4900 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
4901 << NQTXC_VLLEN_VLAN_SHIFT);
4902 *cmdlenp |= NQTX_CMD_VLE;
4903 }
4904
4905 mssidx = 0;
4906
4907 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4908 int hlen = offset + iphl;
4909 int tcp_hlen;
4910 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4911
4912 if (__predict_false(m0->m_len <
4913 (hlen + sizeof(struct tcphdr)))) {
4914 /*
4915 * TCP/IP headers are not in the first mbuf; we need
4916 * to do this the slow and painful way. Let's just
4917 * hope this doesn't happen very often.
4918 */
4919 struct tcphdr th;
4920
4921 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4922
4923 m_copydata(m0, hlen, sizeof(th), &th);
4924 if (v4) {
4925 struct ip ip;
4926
4927 m_copydata(m0, offset, sizeof(ip), &ip);
4928 ip.ip_len = 0;
4929 m_copyback(m0,
4930 offset + offsetof(struct ip, ip_len),
4931 sizeof(ip.ip_len), &ip.ip_len);
4932 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4933 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4934 } else {
4935 struct ip6_hdr ip6;
4936
4937 m_copydata(m0, offset, sizeof(ip6), &ip6);
4938 ip6.ip6_plen = 0;
4939 m_copyback(m0,
4940 offset + offsetof(struct ip6_hdr, ip6_plen),
4941 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4942 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4943 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4944 }
4945 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4946 sizeof(th.th_sum), &th.th_sum);
4947
4948 tcp_hlen = th.th_off << 2;
4949 } else {
4950 /*
4951 * TCP/IP headers are in the first mbuf; we can do
4952 * this the easy way.
4953 */
4954 struct tcphdr *th;
4955
4956 if (v4) {
4957 struct ip *ip =
4958 (void *)(mtod(m0, char *) + offset);
4959 th = (void *)(mtod(m0, char *) + hlen);
4960
4961 ip->ip_len = 0;
4962 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4963 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4964 } else {
4965 struct ip6_hdr *ip6 =
4966 (void *)(mtod(m0, char *) + offset);
4967 th = (void *)(mtod(m0, char *) + hlen);
4968
4969 ip6->ip6_plen = 0;
4970 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4971 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4972 }
4973 tcp_hlen = th->th_off << 2;
4974 }
4975 hlen += tcp_hlen;
4976 *cmdlenp |= NQTX_CMD_TSE;
4977
4978 if (v4) {
4979 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4980 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
4981 } else {
4982 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4983 *fieldsp |= NQTXD_FIELDS_TUXSM;
4984 }
4985 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
4986 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
4987 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
4988 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
4989 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
4990 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
4991 } else {
4992 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
4993 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
4994 }
4995
4996 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
4997 *fieldsp |= NQTXD_FIELDS_IXSM;
4998 cmdc |= NQTXC_CMD_IP4;
4999 }
5000
5001 if (m0->m_pkthdr.csum_flags &
5002 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5003 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5004 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5005 cmdc |= NQTXC_CMD_TCP;
5006 } else {
5007 cmdc |= NQTXC_CMD_UDP;
5008 }
5009 cmdc |= NQTXC_CMD_IP4;
5010 *fieldsp |= NQTXD_FIELDS_TUXSM;
5011 }
5012 if (m0->m_pkthdr.csum_flags &
5013 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5014 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5015 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5016 cmdc |= NQTXC_CMD_TCP;
5017 } else {
5018 cmdc |= NQTXC_CMD_UDP;
5019 }
5020 cmdc |= NQTXC_CMD_IP6;
5021 *fieldsp |= NQTXD_FIELDS_TUXSM;
5022 }
5023
5024 /* Fill in the context descriptor. */
5025 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5026 htole32(vl_len);
5027 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5028 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5029 htole32(cmdc);
5030 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5031 htole32(mssidx);
5032 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5033 DPRINTF(WM_DEBUG_TX,
5034 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5035 sc->sc_txnext, 0, vl_len));
5036 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5037 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5038 txs->txs_ndesc++;
5039 return 0;
5040 }
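/*
 * Worked example of the packing above for an untagged IPv4/TCP frame
 * with plain checksum offload (field widths per the macros; the
 * numeric offsets are illustrative): offset = ETHER_HDR_LEN = 14 and
 * iphl = 20, so vl_len = (14 << NQTXC_VLLEN_MACLEN_SHIFT) |
 * (20 << NQTXC_VLLEN_IPLEN_SHIFT) with the VLAN bits left zero; cmdc
 * adds NQTXC_CMD_IP4 | NQTXC_CMD_TCP to the DTYP_C/DEXT bits set at
 * the top, mssidx stays 0 (no TSO), and *fieldsp carries the payload
 * length plus NQTXD_FIELDS_TUXSM.
 */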
5041
5042 /*
5043 * wm_nq_start: [ifnet interface function]
5044 *
5045 * Start packet transmission on the interface for NEWQUEUE devices
5046 */
5047 static void
5048 wm_nq_start(struct ifnet *ifp)
5049 {
5050 struct wm_softc *sc = ifp->if_softc;
5051
5052 WM_TX_LOCK(sc);
5053 if (!sc->sc_stopping)
5054 wm_nq_start_locked(ifp);
5055 WM_TX_UNLOCK(sc);
5056 }
5057
5058 static void
5059 wm_nq_start_locked(struct ifnet *ifp)
5060 {
5061 struct wm_softc *sc = ifp->if_softc;
5062 struct mbuf *m0;
5063 struct m_tag *mtag;
5064 struct wm_txsoft *txs;
5065 bus_dmamap_t dmamap;
5066 int error, nexttx, lasttx = -1, seg, segs_needed;
5067 bool do_csum, sent;
5068
5069 KASSERT(WM_TX_LOCKED(sc));
5070
5071 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5072 return;
5073
5074 sent = false;
5075
5076 /*
5077 * Loop through the send queue, setting up transmit descriptors
5078 * until we drain the queue, or use up all available transmit
5079 * descriptors.
5080 */
5081 for (;;) {
5082 m0 = NULL;
5083
5084 /* Get a work queue entry. */
5085 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5086 wm_txintr(sc);
5087 if (sc->sc_txsfree == 0) {
5088 DPRINTF(WM_DEBUG_TX,
5089 ("%s: TX: no free job descriptors\n",
5090 device_xname(sc->sc_dev)));
5091 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5092 break;
5093 }
5094 }
5095
5096 /* Grab a packet off the queue. */
5097 IFQ_DEQUEUE(&ifp->if_snd, m0);
5098 if (m0 == NULL)
5099 break;
5100
5101 DPRINTF(WM_DEBUG_TX,
5102 ("%s: TX: have packet to transmit: %p\n",
5103 device_xname(sc->sc_dev), m0));
5104
5105 txs = &sc->sc_txsoft[sc->sc_txsnext];
5106 dmamap = txs->txs_dmamap;
5107
5108 /*
5109 * Load the DMA map. If this fails, the packet either
5110 * didn't fit in the allotted number of segments, or we
5111 * were short on resources. For the too-many-segments
5112 * case, we simply report an error and drop the packet,
5113 * since we can't sanely copy a jumbo packet to a single
5114 * buffer.
5115 */
5116 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5117 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5118 if (error) {
5119 if (error == EFBIG) {
5120 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5121 log(LOG_ERR, "%s: Tx packet consumes too many "
5122 "DMA segments, dropping...\n",
5123 device_xname(sc->sc_dev));
5124 wm_dump_mbuf_chain(sc, m0);
5125 m_freem(m0);
5126 continue;
5127 }
5128 /* Short on resources, just stop for now. */
5129 DPRINTF(WM_DEBUG_TX,
5130 ("%s: TX: dmamap load failed: %d\n",
5131 device_xname(sc->sc_dev), error));
5132 break;
5133 }
5134
5135 segs_needed = dmamap->dm_nsegs;
5136
5137 /*
5138 * Ensure we have enough descriptors free to describe
5139 * the packet. Note, we always reserve one descriptor
5140 * at the end of the ring due to the semantics of the
5141 * TDT register, plus one more in the event we need
5142 * to load offload context.
5143 */
5144 if (segs_needed > sc->sc_txfree - 2) {
5145 /*
5146 * Not enough free descriptors to transmit this
5147 * packet. We haven't committed anything yet,
5148 * so just unload the DMA map, put the packet
5149			 * back on the queue, and punt. Notify the upper
5150 * layer that there are no more slots left.
5151 */
5152 DPRINTF(WM_DEBUG_TX,
5153 ("%s: TX: need %d (%d) descriptors, have %d\n",
5154 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5155 segs_needed, sc->sc_txfree - 1));
5156 ifp->if_flags |= IFF_OACTIVE;
5157 bus_dmamap_unload(sc->sc_dmat, dmamap);
5158 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5159 break;
5160 }
5161
5162 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5163
5164 DPRINTF(WM_DEBUG_TX,
5165 ("%s: TX: packet has %d (%d) DMA segments\n",
5166 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5167
5168 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5169
5170 /*
5171 * Store a pointer to the packet so that we can free it
5172 * later.
5173 *
5174 * Initially, we consider the number of descriptors the
5175 * packet uses the number of DMA segments. This may be
5176 * incremented by 1 if we do checksum offload (a descriptor
5177 * is used to set the checksum context).
5178 */
5179 txs->txs_mbuf = m0;
5180 txs->txs_firstdesc = sc->sc_txnext;
5181 txs->txs_ndesc = segs_needed;
5182
5183 /* Set up offload parameters for this packet. */
5184 uint32_t cmdlen, fields, dcmdlen;
5185 if (m0->m_pkthdr.csum_flags &
5186 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5187 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5188 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5189 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5190 &do_csum) != 0) {
5191 /* Error message already displayed. */
5192 bus_dmamap_unload(sc->sc_dmat, dmamap);
5193 continue;
5194 }
5195 } else {
5196 do_csum = false;
5197 cmdlen = 0;
5198 fields = 0;
5199 }
5200
5201 /* Sync the DMA map. */
5202 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5203 BUS_DMASYNC_PREWRITE);
5204
5205 /* Initialize the first transmit descriptor. */
5206 nexttx = sc->sc_txnext;
5207 if (!do_csum) {
5208 /* setup a legacy descriptor */
5209 wm_set_dma_addr(
5210 &sc->sc_txdescs[nexttx].wtx_addr,
5211 dmamap->dm_segs[0].ds_addr);
5212 sc->sc_txdescs[nexttx].wtx_cmdlen =
5213 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5214 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5215 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5216 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5217 NULL) {
5218 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5219 htole32(WTX_CMD_VLE);
5220 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5221 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5222 } else {
5223 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5224 }
5225 dcmdlen = 0;
5226 } else {
5227 /* setup an advanced data descriptor */
5228 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5229 htole64(dmamap->dm_segs[0].ds_addr);
5230 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5231 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5232			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
5233 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5234 htole32(fields);
5235 DPRINTF(WM_DEBUG_TX,
5236 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5237 device_xname(sc->sc_dev), nexttx,
5238 (uint64_t)dmamap->dm_segs[0].ds_addr));
5239 DPRINTF(WM_DEBUG_TX,
5240 ("\t 0x%08x%08x\n", fields,
5241 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5242 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5243 }
5244
5245 lasttx = nexttx;
5246 nexttx = WM_NEXTTX(sc, nexttx);
5247 /*
5248		 * Fill in the next descriptors; the legacy and advanced
5249		 * formats are the same from here on.
5250 */
5251 for (seg = 1; seg < dmamap->dm_nsegs;
5252 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5253 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5254 htole64(dmamap->dm_segs[seg].ds_addr);
5255 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5256 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5257 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5258 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5259 lasttx = nexttx;
5260
5261 DPRINTF(WM_DEBUG_TX,
5262 ("%s: TX: desc %d: %#" PRIx64 ", "
5263 "len %#04zx\n",
5264 device_xname(sc->sc_dev), nexttx,
5265 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5266 dmamap->dm_segs[seg].ds_len));
5267 }
5268
5269 KASSERT(lasttx != -1);
5270
5271 /*
5272 * Set up the command byte on the last descriptor of
5273 * the packet. If we're in the interrupt delay window,
5274 * delay the interrupt.
5275 */
5276 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5277 (NQTX_CMD_EOP | NQTX_CMD_RS));
5278 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5279 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5280
5281 txs->txs_lastdesc = lasttx;
5282
5283 DPRINTF(WM_DEBUG_TX,
5284 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5285 device_xname(sc->sc_dev),
5286 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5287
5288 /* Sync the descriptors we're using. */
5289 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5290 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5291
5292 /* Give the packet to the chip. */
5293 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5294 sent = true;
5295
5296 DPRINTF(WM_DEBUG_TX,
5297 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5298
5299 DPRINTF(WM_DEBUG_TX,
5300 ("%s: TX: finished transmitting packet, job %d\n",
5301 device_xname(sc->sc_dev), sc->sc_txsnext));
5302
5303 /* Advance the tx pointer. */
5304 sc->sc_txfree -= txs->txs_ndesc;
5305 sc->sc_txnext = nexttx;
5306
5307 sc->sc_txsfree--;
5308 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5309
5310 /* Pass the packet to any BPF listeners. */
5311 bpf_mtap(ifp, m0);
5312 }
5313
5314 if (m0 != NULL) {
5315 ifp->if_flags |= IFF_OACTIVE;
5316 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5317		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5318 m_freem(m0);
5319 }
5320
5321 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5322 /* No more slots; notify upper layer. */
5323 ifp->if_flags |= IFF_OACTIVE;
5324 }
5325
5326 if (sent) {
5327 /* Set a watchdog timer in case the chip flakes out. */
5328 ifp->if_timer = 5;
5329 }
5330 }
5331
5332 /* Interrupt */
5333
5334 /*
5335 * wm_txintr:
5336 *
5337 * Helper; handle transmit interrupts.
5338 */
5339 static void
5340 wm_txintr(struct wm_softc *sc)
5341 {
5342 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5343 struct wm_txsoft *txs;
5344 uint8_t status;
5345 int i;
5346
5347 if (sc->sc_stopping)
5348 return;
5349
5350 ifp->if_flags &= ~IFF_OACTIVE;
5351
5352 /*
5353 * Go through the Tx list and free mbufs for those
5354 * frames which have been transmitted.
5355 */
5356 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5357 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
5358 txs = &sc->sc_txsoft[i];
5359
5360 DPRINTF(WM_DEBUG_TX,
5361 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
5362
5363 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
5364 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5365
5366 status =
5367 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
5368 if ((status & WTX_ST_DD) == 0) {
5369 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
5370 BUS_DMASYNC_PREREAD);
5371 break;
5372 }
5373
5374 DPRINTF(WM_DEBUG_TX,
5375 ("%s: TX: job %d done: descs %d..%d\n",
5376 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
5377 txs->txs_lastdesc));
5378
5379 /*
5380 * XXX We should probably be using the statistics
5381 * XXX registers, but I don't know if they exist
5382 * XXX on chips before the i82544.
5383 */
5384
5385 #ifdef WM_EVENT_COUNTERS
5386 if (status & WTX_ST_TU)
5387 WM_EVCNT_INCR(&sc->sc_ev_tu);
5388 #endif /* WM_EVENT_COUNTERS */
5389
5390 if (status & (WTX_ST_EC|WTX_ST_LC)) {
5391 ifp->if_oerrors++;
5392 if (status & WTX_ST_LC)
5393 log(LOG_WARNING, "%s: late collision\n",
5394 device_xname(sc->sc_dev));
5395 else if (status & WTX_ST_EC) {
5396 ifp->if_collisions += 16;
5397 log(LOG_WARNING, "%s: excessive collisions\n",
5398 device_xname(sc->sc_dev));
5399 }
5400 } else
5401 ifp->if_opackets++;
5402
5403 sc->sc_txfree += txs->txs_ndesc;
5404 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
5405 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5406 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5407 m_freem(txs->txs_mbuf);
5408 txs->txs_mbuf = NULL;
5409 }
5410
5411 /* Update the dirty transmit buffer pointer. */
5412 sc->sc_txsdirty = i;
5413 DPRINTF(WM_DEBUG_TX,
5414 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
5415
5416 /*
5417 * If there are no more pending transmissions, cancel the watchdog
5418 * timer.
5419 */
5420 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
5421 ifp->if_timer = 0;
5422 }
5423
5424 /*
5425 * wm_rxintr:
5426 *
5427 * Helper; handle receive interrupts.
5428 */
5429 static void
5430 wm_rxintr(struct wm_softc *sc)
5431 {
5432 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5433 struct wm_rxsoft *rxs;
5434 struct mbuf *m;
5435 int i, len;
5436 uint8_t status, errors;
5437 uint16_t vlantag;
5438
5439 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
5440 rxs = &sc->sc_rxsoft[i];
5441
5442 DPRINTF(WM_DEBUG_RX,
5443 ("%s: RX: checking descriptor %d\n",
5444 device_xname(sc->sc_dev), i));
5445
5446 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5447
5448 status = sc->sc_rxdescs[i].wrx_status;
5449 errors = sc->sc_rxdescs[i].wrx_errors;
5450 len = le16toh(sc->sc_rxdescs[i].wrx_len);
5451 vlantag = sc->sc_rxdescs[i].wrx_special;
5452
5453 if ((status & WRX_ST_DD) == 0) {
5454 /* We have processed all of the receive descriptors. */
5455 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
5456 break;
5457 }
5458
5459 if (__predict_false(sc->sc_rxdiscard)) {
5460 DPRINTF(WM_DEBUG_RX,
5461 ("%s: RX: discarding contents of descriptor %d\n",
5462 device_xname(sc->sc_dev), i));
5463 WM_INIT_RXDESC(sc, i);
5464 if (status & WRX_ST_EOP) {
5465 /* Reset our state. */
5466 DPRINTF(WM_DEBUG_RX,
5467 ("%s: RX: resetting rxdiscard -> 0\n",
5468 device_xname(sc->sc_dev)));
5469 sc->sc_rxdiscard = 0;
5470 }
5471 continue;
5472 }
5473
5474 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5475 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
5476
5477 m = rxs->rxs_mbuf;
5478
5479 /*
5480 * Add a new receive buffer to the ring, unless of
5481 * course the length is zero. Treat the latter as a
5482 * failed mapping.
5483 */
5484 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
5485 /*
5486 * Failed, throw away what we've done so
5487 * far, and discard the rest of the packet.
5488 */
5489 ifp->if_ierrors++;
5490 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5491 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5492 WM_INIT_RXDESC(sc, i);
5493 if ((status & WRX_ST_EOP) == 0)
5494 sc->sc_rxdiscard = 1;
5495 if (sc->sc_rxhead != NULL)
5496 m_freem(sc->sc_rxhead);
5497 WM_RXCHAIN_RESET(sc);
5498 DPRINTF(WM_DEBUG_RX,
5499 ("%s: RX: Rx buffer allocation failed, "
5500 "dropping packet%s\n", device_xname(sc->sc_dev),
5501 sc->sc_rxdiscard ? " (discard)" : ""));
5502 continue;
5503 }
5504
5505 m->m_len = len;
5506 sc->sc_rxlen += len;
5507 DPRINTF(WM_DEBUG_RX,
5508 ("%s: RX: buffer at %p len %d\n",
5509 device_xname(sc->sc_dev), m->m_data, len));
5510
5511 /* If this is not the end of the packet, keep looking. */
5512 if ((status & WRX_ST_EOP) == 0) {
5513 WM_RXCHAIN_LINK(sc, m);
5514 DPRINTF(WM_DEBUG_RX,
5515 ("%s: RX: not yet EOP, rxlen -> %d\n",
5516 device_xname(sc->sc_dev), sc->sc_rxlen));
5517 continue;
5518 }
5519
5520 /*
5521 * Okay, we have the entire packet now. The chip is
5522 * configured to include the FCS except on the I350, I354 and
5523 * I21[01] (not all chips can be configured to strip it), so we
5524 * need to trim it. We may also need to adjust the length of the
5525 * previous mbuf in the chain if the current mbuf is too short.
5526 * Due to an erratum, the RCTL_SECRC bit in the RCTL register is
5527 * always set on the I350, so the FCS is already stripped there
5528 * and must not be trimmed.
5529 */
5530 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
5531 && (sc->sc_type != WM_T_I210)
5532 && (sc->sc_type != WM_T_I211)) {
5533 if (m->m_len < ETHER_CRC_LEN) {
5534 sc->sc_rxtail->m_len
5535 -= (ETHER_CRC_LEN - m->m_len);
5536 m->m_len = 0;
5537 } else
5538 m->m_len -= ETHER_CRC_LEN;
5539 len = sc->sc_rxlen - ETHER_CRC_LEN;
5540 } else
5541 len = sc->sc_rxlen;
5542
5543 WM_RXCHAIN_LINK(sc, m);
5544
5545 *sc->sc_rxtailp = NULL;
5546 m = sc->sc_rxhead;
5547
5548 WM_RXCHAIN_RESET(sc);
5549
5550 DPRINTF(WM_DEBUG_RX,
5551 ("%s: RX: have entire packet, len -> %d\n",
5552 device_xname(sc->sc_dev), len));
5553
5554 /* If an error occurred, update stats and drop the packet. */
5555 if (errors &
5556 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
5557 if (errors & WRX_ER_SE)
5558 log(LOG_WARNING, "%s: symbol error\n",
5559 device_xname(sc->sc_dev));
5560 else if (errors & WRX_ER_SEQ)
5561 log(LOG_WARNING, "%s: receive sequence error\n",
5562 device_xname(sc->sc_dev));
5563 else if (errors & WRX_ER_CE)
5564 log(LOG_WARNING, "%s: CRC error\n",
5565 device_xname(sc->sc_dev));
5566 m_freem(m);
5567 continue;
5568 }
5569
5570 /* No errors. Receive the packet. */
5571 m->m_pkthdr.rcvif = ifp;
5572 m->m_pkthdr.len = len;
5573
5574 /*
5575 * If VLANs are enabled, VLAN packets have been unwrapped
5576 * for us. Associate the tag with the packet.
5577 */
5578 /* XXX Should also check for I350 and I354 */
5579 if ((status & WRX_ST_VP) != 0) {
5580 VLAN_INPUT_TAG(ifp, m,
5581 le16toh(vlantag),
5582 continue);
5583 }
5584
5585 /* Set up checksum info for this packet. */
5586 if ((status & WRX_ST_IXSM) == 0) {
5587 if (status & WRX_ST_IPCS) {
5588 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
5589 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
5590 if (errors & WRX_ER_IPE)
5591 m->m_pkthdr.csum_flags |=
5592 M_CSUM_IPv4_BAD;
5593 }
5594 if (status & WRX_ST_TCPCS) {
5595 /*
5596 * Note: we don't know if this was TCP or UDP,
5597 * so we just set both bits, and expect the
5598 * upper layers to deal.
5599 */
5600 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
5601 m->m_pkthdr.csum_flags |=
5602 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
5603 M_CSUM_TCPv6 | M_CSUM_UDPv6;
5604 if (errors & WRX_ER_TCPE)
5605 m->m_pkthdr.csum_flags |=
5606 M_CSUM_TCP_UDP_BAD;
5607 }
5608 }
5609
5610 ifp->if_ipackets++;
5611
5612 WM_RX_UNLOCK(sc);
5613
5614 /* Pass this up to any BPF listeners. */
5615 bpf_mtap(ifp, m);
5616
5617 /* Pass it on. */
5618 (*ifp->if_input)(ifp, m);
5619
5620 WM_RX_LOCK(sc);
5621
5622 if (sc->sc_stopping)
5623 break;
5624 }
5625
5626 /* Update the receive pointer. */
5627 sc->sc_rxptr = i;
5628
5629 DPRINTF(WM_DEBUG_RX,
5630 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
5631 }
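/*
 * For reference, wm_rxintr() above strings the fragments of a
 * multi-descriptor packet together with the WM_RXCHAIN_* helpers.
 * A minimal sketch of how such helpers could be defined follows; the
 * real definitions live earlier in this file, and the exact form here
 * is an illustrative assumption only.
 */
#if 0
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
#endif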
5632
5633 /*
5634 * wm_linkintr_gmii:
5635 *
5636 * Helper; handle link interrupts for GMII.
5637 */
5638 static void
5639 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
5640 {
5641
5642 KASSERT(WM_TX_LOCKED(sc));
5643
5644 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5645 __func__));
5646
5647 if (icr & ICR_LSC) {
5648 DPRINTF(WM_DEBUG_LINK,
5649 ("%s: LINK: LSC -> mii_pollstat\n",
5650 device_xname(sc->sc_dev)));
5651 mii_pollstat(&sc->sc_mii);
5652 if (sc->sc_type == WM_T_82543) {
5653 int miistatus, active;
5654
5655 /*
5656 * With 82543, we need to force speed and
5657 * duplex on the MAC equal to what the PHY
5658 * speed and duplex configuration is.
5659 */
5660 miistatus = sc->sc_mii.mii_media_status;
5661
5662 if (miistatus & IFM_ACTIVE) {
5663 active = sc->sc_mii.mii_media_active;
5664 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5665 switch (IFM_SUBTYPE(active)) {
5666 case IFM_10_T:
5667 sc->sc_ctrl |= CTRL_SPEED_10;
5668 break;
5669 case IFM_100_TX:
5670 sc->sc_ctrl |= CTRL_SPEED_100;
5671 break;
5672 case IFM_1000_T:
5673 sc->sc_ctrl |= CTRL_SPEED_1000;
5674 break;
5675 default:
5676 /*
5677 * Fiber?
5678 * Should not enter here.
5679 */
5680 printf("unknown media (%x)\n",
5681 active);
5682 break;
5683 }
5684 if (active & IFM_FDX)
5685 sc->sc_ctrl |= CTRL_FD;
5686 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5687 }
5688 } else if ((sc->sc_type == WM_T_ICH8)
5689 && (sc->sc_phytype == WMPHY_IGP_3)) {
5690 wm_kmrn_lock_loss_workaround_ich8lan(sc);
5691 } else if (sc->sc_type == WM_T_PCH) {
5692 wm_k1_gig_workaround_hv(sc,
5693 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
5694 }
5695
5696 if ((sc->sc_phytype == WMPHY_82578)
5697 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
5698 == IFM_1000_T)) {
5699
5700 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
5701 delay(200*1000); /* XXX too big */
5702
5703 /* Link stall fix for link up */
5704 wm_gmii_hv_writereg(sc->sc_dev, 1,
5705 HV_MUX_DATA_CTRL,
5706 HV_MUX_DATA_CTRL_GEN_TO_MAC
5707 | HV_MUX_DATA_CTRL_FORCE_SPEED);
5708 wm_gmii_hv_writereg(sc->sc_dev, 1,
5709 HV_MUX_DATA_CTRL,
5710 HV_MUX_DATA_CTRL_GEN_TO_MAC);
5711 }
5712 }
5713 } else if (icr & ICR_RXSEQ) {
5714 DPRINTF(WM_DEBUG_LINK,
5715 ("%s: LINK Receive sequence error\n",
5716 device_xname(sc->sc_dev)));
5717 }
5718 }
5719
5720 /*
5721 * wm_linkintr_tbi:
5722 *
5723 * Helper; handle link interrupts for TBI mode.
5724 */
5725 static void
5726 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
5727 {
5728 uint32_t status;
5729
5730 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5731 __func__));
5732
5733 status = CSR_READ(sc, WMREG_STATUS);
5734 if (icr & ICR_LSC) {
5735 if (status & STATUS_LU) {
5736 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
5737 device_xname(sc->sc_dev),
5738 (status & STATUS_FD) ? "FDX" : "HDX"));
5739 /*
5740 * NOTE: The hardware updates TFCE and RFCE in CTRL
5741 * automatically, so we must re-read it into sc->sc_ctrl.
5742 */
5743
5744 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5745 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5746 sc->sc_fcrtl &= ~FCRTL_XONE;
5747 if (status & STATUS_FD)
5748 sc->sc_tctl |=
5749 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5750 else
5751 sc->sc_tctl |=
5752 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5753 if (sc->sc_ctrl & CTRL_TFCE)
5754 sc->sc_fcrtl |= FCRTL_XONE;
5755 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5756 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5757 WMREG_OLD_FCRTL : WMREG_FCRTL,
5758 sc->sc_fcrtl);
5759 sc->sc_tbi_linkup = 1;
5760 } else {
5761 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
5762 device_xname(sc->sc_dev)));
5763 sc->sc_tbi_linkup = 0;
5764 }
5765 wm_tbi_set_linkled(sc);
5766 } else if (icr & ICR_RXSEQ) {
5767 DPRINTF(WM_DEBUG_LINK,
5768 ("%s: LINK: Receive sequence error\n",
5769 device_xname(sc->sc_dev)));
5770 }
5771 }
5772
5773 /*
5774 * wm_linkintr:
5775 *
5776 * Helper; handle link interrupts.
5777 */
5778 static void
5779 wm_linkintr(struct wm_softc *sc, uint32_t icr)
5780 {
5781
5782 if (sc->sc_flags & WM_F_HAS_MII)
5783 wm_linkintr_gmii(sc, icr);
5784 else
5785 wm_linkintr_tbi(sc, icr);
5786 }
5787
5788 /*
5789 * wm_intr:
5790 *
5791 * Interrupt service routine.
5792 */
5793 static int
5794 wm_intr(void *arg)
5795 {
5796 struct wm_softc *sc = arg;
5797 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5798 uint32_t icr;
5799 int handled = 0;
5800
5801 while (1 /* CONSTCOND */) {
5802 icr = CSR_READ(sc, WMREG_ICR);
5803 if ((icr & sc->sc_icr) == 0)
5804 break;
5805 rnd_add_uint32(&sc->rnd_source, icr);
5806
5807 WM_RX_LOCK(sc);
5808
5809 if (sc->sc_stopping) {
5810 WM_RX_UNLOCK(sc);
5811 break;
5812 }
5813
5814 handled = 1;
5815
5816 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
5817 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
5818 DPRINTF(WM_DEBUG_RX,
5819 ("%s: RX: got Rx intr 0x%08x\n",
5820 device_xname(sc->sc_dev),
5821 icr & (ICR_RXDMT0|ICR_RXT0)));
5822 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
5823 }
5824 #endif
5825 wm_rxintr(sc);
5826
5827 WM_RX_UNLOCK(sc);
5828 WM_TX_LOCK(sc);
5829
5830 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
5831 if (icr & ICR_TXDW) {
5832 DPRINTF(WM_DEBUG_TX,
5833 ("%s: TX: got TXDW interrupt\n",
5834 device_xname(sc->sc_dev)));
5835 WM_EVCNT_INCR(&sc->sc_ev_txdw);
5836 }
5837 #endif
5838 wm_txintr(sc);
5839
5840 if (icr & (ICR_LSC|ICR_RXSEQ)) {
5841 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
5842 wm_linkintr(sc, icr);
5843 }
5844
5845 WM_TX_UNLOCK(sc);
5846
5847 if (icr & ICR_RXO) {
5848 #if defined(WM_DEBUG)
5849 log(LOG_WARNING, "%s: Receive overrun\n",
5850 device_xname(sc->sc_dev));
5851 #endif /* defined(WM_DEBUG) */
5852 }
5853 }
5854
5855 if (handled) {
5856 /* Try to get more packets going. */
5857 ifp->if_start(ifp);
5858 }
5859
5860 return handled;
5861 }
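/*
 * For context: a legacy INTx handler such as wm_intr() is normally
 * registered from the attach routine with pci_intr_establish(9).
 * A minimal sketch is below, assuming the usual struct pci_attach_args
 * *pa in attach scope and an sc_ih softc member; error handling and
 * interrupt-string reporting are omitted.
 */
#if 0
	pci_intr_handle_t ih;

	if (pci_intr_map(pa, &ih) == 0)
		sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET,
		    wm_intr, sc);
#endif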
5862
5863 /*
5864 * Media related.
5865 * GMII, SGMII, TBI (and SERDES)
5866 */
5867
5868 /* GMII related */
5869
5870 /*
5871 * wm_gmii_reset:
5872 *
5873 * Reset the PHY.
5874 */
5875 static void
5876 wm_gmii_reset(struct wm_softc *sc)
5877 {
5878 uint32_t reg;
5879 int rv;
5880
5881 /* get phy semaphore */
5882 switch (sc->sc_type) {
5883 case WM_T_82571:
5884 case WM_T_82572:
5885 case WM_T_82573:
5886 case WM_T_82574:
5887 case WM_T_82583:
5888 /* XXX should get sw semaphore, too */
5889 rv = wm_get_swsm_semaphore(sc);
5890 break;
5891 case WM_T_82575:
5892 case WM_T_82576:
5893 case WM_T_82580:
5894 case WM_T_I350:
5895 case WM_T_I354:
5896 case WM_T_I210:
5897 case WM_T_I211:
5898 case WM_T_80003:
5899 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5900 break;
5901 case WM_T_ICH8:
5902 case WM_T_ICH9:
5903 case WM_T_ICH10:
5904 case WM_T_PCH:
5905 case WM_T_PCH2:
5906 case WM_T_PCH_LPT:
5907 rv = wm_get_swfwhw_semaphore(sc);
5908 break;
5909 default:
5910 /* Nothing to do */
5911 rv = 0;
5912 break;
5913 }
5914 if (rv != 0) {
5915 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5916 __func__);
5917 return;
5918 }
5919
5920 switch (sc->sc_type) {
5921 case WM_T_82542_2_0:
5922 case WM_T_82542_2_1:
5923 /* null */
5924 break;
5925 case WM_T_82543:
5926 /*
5927 * With 82543, we need to force speed and duplex on the MAC
5928 * equal to what the PHY speed and duplex configuration is.
5929 * In addition, we need to perform a hardware reset on the PHY
5930 * to take it out of reset.
5931 */
5932 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5933 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5934
5935 /* The PHY reset pin is active-low. */
5936 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5937 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
5938 CTRL_EXT_SWDPIN(4));
5939 reg |= CTRL_EXT_SWDPIO(4);
5940
5941 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5942 CSR_WRITE_FLUSH(sc);
5943 delay(10*1000);
5944
5945 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
5946 CSR_WRITE_FLUSH(sc);
5947 delay(150);
5948 #if 0
5949 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
5950 #endif
5951 delay(20*1000); /* XXX extra delay to get PHY ID? */
5952 break;
5953 case WM_T_82544: /* reset 10000us */
5954 case WM_T_82540:
5955 case WM_T_82545:
5956 case WM_T_82545_3:
5957 case WM_T_82546:
5958 case WM_T_82546_3:
5959 case WM_T_82541:
5960 case WM_T_82541_2:
5961 case WM_T_82547:
5962 case WM_T_82547_2:
5963 case WM_T_82571: /* reset 100us */
5964 case WM_T_82572:
5965 case WM_T_82573:
5966 case WM_T_82574:
5967 case WM_T_82575:
5968 case WM_T_82576:
5969 case WM_T_82580:
5970 case WM_T_I350:
5971 case WM_T_I354:
5972 case WM_T_I210:
5973 case WM_T_I211:
5974 case WM_T_82583:
5975 case WM_T_80003:
5976 /* generic reset */
5977 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5978 CSR_WRITE_FLUSH(sc);
5979 delay(20000);
5980 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5981 CSR_WRITE_FLUSH(sc);
5982 delay(20000);
5983
5984 if ((sc->sc_type == WM_T_82541)
5985 || (sc->sc_type == WM_T_82541_2)
5986 || (sc->sc_type == WM_T_82547)
5987 || (sc->sc_type == WM_T_82547_2)) {
5988 /* Workarounds for IGP are done in igp_reset() */
5989 /* XXX add code to set LED after phy reset */
5990 }
5991 break;
5992 case WM_T_ICH8:
5993 case WM_T_ICH9:
5994 case WM_T_ICH10:
5995 case WM_T_PCH:
5996 case WM_T_PCH2:
5997 case WM_T_PCH_LPT:
5998 /* generic reset */
5999 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6000 CSR_WRITE_FLUSH(sc);
6001 delay(100);
6002 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6003 CSR_WRITE_FLUSH(sc);
6004 delay(150);
6005 break;
6006 default:
6007 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6008 __func__);
6009 break;
6010 }
6011
6012 /* release PHY semaphore */
6013 switch (sc->sc_type) {
6014 case WM_T_82571:
6015 case WM_T_82572:
6016 case WM_T_82573:
6017 case WM_T_82574:
6018 case WM_T_82583:
6019 /* XXX should put sw semaphore, too */
6020 wm_put_swsm_semaphore(sc);
6021 break;
6022 case WM_T_82575:
6023 case WM_T_82576:
6024 case WM_T_82580:
6025 case WM_T_I350:
6026 case WM_T_I354:
6027 case WM_T_I210:
6028 case WM_T_I211:
6029 case WM_T_80003:
6030 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6031 break;
6032 case WM_T_ICH8:
6033 case WM_T_ICH9:
6034 case WM_T_ICH10:
6035 case WM_T_PCH:
6036 case WM_T_PCH2:
6037 case WM_T_PCH_LPT:
6038 wm_put_swfwhw_semaphore(sc);
6039 break;
6040 default:
6041 /* Nothing to do */
6043 break;
6044 }
6045
6046 /* get_cfg_done */
6047 wm_get_cfg_done(sc);
6048
6049 /* extra setup */
6050 switch (sc->sc_type) {
6051 case WM_T_82542_2_0:
6052 case WM_T_82542_2_1:
6053 case WM_T_82543:
6054 case WM_T_82544:
6055 case WM_T_82540:
6056 case WM_T_82545:
6057 case WM_T_82545_3:
6058 case WM_T_82546:
6059 case WM_T_82546_3:
6060 case WM_T_82541_2:
6061 case WM_T_82547_2:
6062 case WM_T_82571:
6063 case WM_T_82572:
6064 case WM_T_82573:
6065 case WM_T_82574:
6066 case WM_T_82575:
6067 case WM_T_82576:
6068 case WM_T_82580:
6069 case WM_T_I350:
6070 case WM_T_I354:
6071 case WM_T_I210:
6072 case WM_T_I211:
6073 case WM_T_82583:
6074 case WM_T_80003:
6075 /* null */
6076 break;
6077 case WM_T_82541:
6078 case WM_T_82547:
6079 /* XXX Configure the activity LED after PHY reset */
6080 break;
6081 case WM_T_ICH8:
6082 case WM_T_ICH9:
6083 case WM_T_ICH10:
6084 case WM_T_PCH:
6085 case WM_T_PCH2:
6086 case WM_T_PCH_LPT:
6087 /* Allow time for h/w to get to a quiescent state after reset */
6088 delay(10*1000);
6089
6090 if (sc->sc_type == WM_T_PCH)
6091 wm_hv_phy_workaround_ich8lan(sc);
6092
6093 if (sc->sc_type == WM_T_PCH2)
6094 wm_lv_phy_workaround_ich8lan(sc);
6095
6096 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6097 /*
6098 * dummy read to clear the phy wakeup bit after lcd
6099 * reset
6100 */
6101 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6102 }
6103
6104 /*
6105 * XXX Configure the LCD with the extended configuration region
6106 * in NVM
6107 */
6108
6109 /* Configure the LCD with the OEM bits in NVM */
6110 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6111 || (sc->sc_type == WM_T_PCH_LPT)) {
6112 /*
6113 * Disable LPLU.
6114 * XXX It seems that 82567 has LPLU, too.
6115 */
6116 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6117 reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
6118 reg |= HV_OEM_BITS_ANEGNOW;
6119 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6120 }
6121 break;
6122 default:
6123 panic("%s: unknown type\n", __func__);
6124 break;
6125 }
6126 }
6127
6128 /*
6129 * wm_get_phy_id_82575:
6130 *
6131 * Return PHY ID. Return -1 if it failed.
6132 */
6133 static int
6134 wm_get_phy_id_82575(struct wm_softc *sc)
6135 {
6136 uint32_t reg;
6137 int phyid = -1;
6138
6139 /* XXX */
6140 if ((sc->sc_flags & WM_F_SGMII) == 0)
6141 return -1;
6142
6143 if (wm_sgmii_uses_mdio(sc)) {
6144 switch (sc->sc_type) {
6145 case WM_T_82575:
6146 case WM_T_82576:
6147 reg = CSR_READ(sc, WMREG_MDIC);
6148 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6149 break;
6150 case WM_T_82580:
6151 case WM_T_I350:
6152 case WM_T_I354:
6153 case WM_T_I210:
6154 case WM_T_I211:
6155 reg = CSR_READ(sc, WMREG_MDICNFG);
6156 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6157 break;
6158 default:
6159 return -1;
6160 }
6161 }
6162
6163 return phyid;
6164 }
6165
6166
6167 /*
6168 * wm_gmii_mediainit:
6169 *
6170 * Initialize media for use on 1000BASE-T devices.
6171 */
6172 static void
6173 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6174 {
6175 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6176 struct mii_data *mii = &sc->sc_mii;
6177 uint32_t reg;
6178
6179 /* We have GMII. */
6180 sc->sc_flags |= WM_F_HAS_MII;
6181
6182 if (sc->sc_type == WM_T_80003)
6183 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6184 else
6185 sc->sc_tipg = TIPG_1000T_DFLT;
6186
6187 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
6188 if ((sc->sc_type == WM_T_82580)
6189 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6190 || (sc->sc_type == WM_T_I211)) {
6191 reg = CSR_READ(sc, WMREG_PHPM);
6192 reg &= ~PHPM_GO_LINK_D;
6193 CSR_WRITE(sc, WMREG_PHPM, reg);
6194 }
6195
6196 /*
6197 * Let the chip set speed/duplex on its own based on
6198 * signals from the PHY.
6199 * XXXbouyer - I'm not sure this is right for the 80003,
6200 * the em driver only sets CTRL_SLU here - but it seems to work.
6201 */
6202 sc->sc_ctrl |= CTRL_SLU;
6203 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6204
6205 /* Initialize our media structures and probe the GMII. */
6206 mii->mii_ifp = ifp;
6207
6208 /*
6209 * Determine the PHY access method.
6210 *
6211 * For SGMII, use the SGMII-specific method.
6212 *
6213 * For some devices, we can determine the PHY access method
6214 * from sc_type.
6215 *
6216 * For ICH8 variants, it's difficult to determine the PHY access
6217 * method from sc_type, so use the PCI product ID for some devices.
6218 * For the other ICH8 variants, try the igp method first; if the PHY
6219 * can't be detected that way, fall back to the bm method.
6220 */
6221 switch (prodid) {
6222 case PCI_PRODUCT_INTEL_PCH_M_LM:
6223 case PCI_PRODUCT_INTEL_PCH_M_LC:
6224 /* 82577 */
6225 sc->sc_phytype = WMPHY_82577;
6226 mii->mii_readreg = wm_gmii_hv_readreg;
6227 mii->mii_writereg = wm_gmii_hv_writereg;
6228 break;
6229 case PCI_PRODUCT_INTEL_PCH_D_DM:
6230 case PCI_PRODUCT_INTEL_PCH_D_DC:
6231 /* 82578 */
6232 sc->sc_phytype = WMPHY_82578;
6233 mii->mii_readreg = wm_gmii_hv_readreg;
6234 mii->mii_writereg = wm_gmii_hv_writereg;
6235 break;
6236 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6237 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6238 /* 82579 */
6239 sc->sc_phytype = WMPHY_82579;
6240 mii->mii_readreg = wm_gmii_hv_readreg;
6241 mii->mii_writereg = wm_gmii_hv_writereg;
6242 break;
6243 case PCI_PRODUCT_INTEL_I217_LM:
6244 case PCI_PRODUCT_INTEL_I217_V:
6245 case PCI_PRODUCT_INTEL_I218_LM:
6246 case PCI_PRODUCT_INTEL_I218_V:
6247 /* I21[78] */
6248 mii->mii_readreg = wm_gmii_hv_readreg;
6249 mii->mii_writereg = wm_gmii_hv_writereg;
6250 break;
6251 case PCI_PRODUCT_INTEL_82801I_BM:
6252 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6253 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6254 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6255 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6256 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6257 /* 82567 */
6258 sc->sc_phytype = WMPHY_BM;
6259 mii->mii_readreg = wm_gmii_bm_readreg;
6260 mii->mii_writereg = wm_gmii_bm_writereg;
6261 break;
6262 default:
6263 if (((sc->sc_flags & WM_F_SGMII) != 0)
6264 && !wm_sgmii_uses_mdio(sc)){
6265 mii->mii_readreg = wm_sgmii_readreg;
6266 mii->mii_writereg = wm_sgmii_writereg;
6267 } else if (sc->sc_type >= WM_T_80003) {
6268 mii->mii_readreg = wm_gmii_i80003_readreg;
6269 mii->mii_writereg = wm_gmii_i80003_writereg;
6270 } else if (sc->sc_type >= WM_T_I210) {
6271 mii->mii_readreg = wm_gmii_i82544_readreg;
6272 mii->mii_writereg = wm_gmii_i82544_writereg;
6273 } else if (sc->sc_type >= WM_T_82580) {
6274 sc->sc_phytype = WMPHY_82580;
6275 mii->mii_readreg = wm_gmii_82580_readreg;
6276 mii->mii_writereg = wm_gmii_82580_writereg;
6277 } else if (sc->sc_type >= WM_T_82544) {
6278 mii->mii_readreg = wm_gmii_i82544_readreg;
6279 mii->mii_writereg = wm_gmii_i82544_writereg;
6280 } else {
6281 mii->mii_readreg = wm_gmii_i82543_readreg;
6282 mii->mii_writereg = wm_gmii_i82543_writereg;
6283 }
6284 break;
6285 }
6286 mii->mii_statchg = wm_gmii_statchg;
6287
6288 wm_gmii_reset(sc);
6289
6290 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6291 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6292 wm_gmii_mediastatus);
6293
6294 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6295 || (sc->sc_type == WM_T_82580)
6296 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6297 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6298 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6299 /* Attach only one port */
6300 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6301 MII_OFFSET_ANY, MIIF_DOPAUSE);
6302 } else {
6303 int i, id;
6304 uint32_t ctrl_ext;
6305
6306 id = wm_get_phy_id_82575(sc);
6307 if (id != -1) {
6308 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6309 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6310 }
6311 if ((id == -1)
6312 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6313 /* Power on sgmii phy if it is disabled */
6314 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6315 CSR_WRITE(sc, WMREG_CTRL_EXT,
6316 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6317 CSR_WRITE_FLUSH(sc);
6318 delay(300*1000); /* XXX too long */
6319
6320 /* Try PHY addresses 1 through 7 */
6321 for (i = 1; i < 8; i++)
6322 mii_attach(sc->sc_dev, &sc->sc_mii,
6323 0xffffffff, i, MII_OFFSET_ANY,
6324 MIIF_DOPAUSE);
6325
6326 /* restore previous sfp cage power state */
6327 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6328 }
6329 }
6330 } else {
6331 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6332 MII_OFFSET_ANY, MIIF_DOPAUSE);
6333 }
6334
6335 /*
6336 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6337 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6338 */
6339 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6340 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6341 wm_set_mdio_slow_mode_hv(sc);
6342 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6343 MII_OFFSET_ANY, MIIF_DOPAUSE);
6344 }
6345
6346 /*
6347 * (For ICH8 variants)
6348 * If PHY detection failed, use BM's r/w function and retry.
6349 */
6350 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6351 /* if failed, retry with *_bm_* */
6352 mii->mii_readreg = wm_gmii_bm_readreg;
6353 mii->mii_writereg = wm_gmii_bm_writereg;
6354
6355 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6356 MII_OFFSET_ANY, MIIF_DOPAUSE);
6357 }
6358
6359 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6360 /* No PHY was found */
6361 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6362 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6363 sc->sc_phytype = WMPHY_NONE;
6364 } else {
6365 /*
6366 * PHY Found!
6367 * Check PHY type.
6368 */
6369 uint32_t model;
6370 struct mii_softc *child;
6371
6372 child = LIST_FIRST(&mii->mii_phys);
6373 if (device_is_a(child->mii_dev, "igphy")) {
6374 struct igphy_softc *isc = (struct igphy_softc *)child;
6375
6376 model = isc->sc_mii.mii_mpd_model;
6377 if (model == MII_MODEL_yyINTEL_I82566)
6378 sc->sc_phytype = WMPHY_IGP_3;
6379 }
6380
6381 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6382 }
6383 }
6384
6385 /*
6386 * wm_gmii_mediastatus: [ifmedia interface function]
6387 *
6388 * Get the current interface media status on a 1000BASE-T device.
6389 */
6390 static void
6391 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6392 {
6393 struct wm_softc *sc = ifp->if_softc;
6394
6395 ether_mediastatus(ifp, ifmr);
6396 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6397 | sc->sc_flowflags;
6398 }
6399
6400 /*
6401 * wm_gmii_mediachange: [ifmedia interface function]
6402 *
6403 * Set hardware to newly-selected media on a 1000BASE-T device.
6404 */
6405 static int
6406 wm_gmii_mediachange(struct ifnet *ifp)
6407 {
6408 struct wm_softc *sc = ifp->if_softc;
6409 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6410 int rc;
6411
6412 if ((ifp->if_flags & IFF_UP) == 0)
6413 return 0;
6414
6415 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6416 sc->sc_ctrl |= CTRL_SLU;
6417 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6418 || (sc->sc_type > WM_T_82543)) {
6419 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6420 } else {
6421 sc->sc_ctrl &= ~CTRL_ASDE;
6422 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6423 if (ife->ifm_media & IFM_FDX)
6424 sc->sc_ctrl |= CTRL_FD;
6425 switch (IFM_SUBTYPE(ife->ifm_media)) {
6426 case IFM_10_T:
6427 sc->sc_ctrl |= CTRL_SPEED_10;
6428 break;
6429 case IFM_100_TX:
6430 sc->sc_ctrl |= CTRL_SPEED_100;
6431 break;
6432 case IFM_1000_T:
6433 sc->sc_ctrl |= CTRL_SPEED_1000;
6434 break;
6435 default:
6436 panic("wm_gmii_mediachange: bad media 0x%x",
6437 ife->ifm_media);
6438 }
6439 }
6440 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6441 if (sc->sc_type <= WM_T_82543)
6442 wm_gmii_reset(sc);
6443
6444 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6445 return 0;
6446 return rc;
6447 }
6448
6449 #define MDI_IO CTRL_SWDPIN(2)
6450 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6451 #define MDI_CLK CTRL_SWDPIN(3)
6452
6453 static void
6454 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6455 {
6456 uint32_t i, v;
6457
6458 v = CSR_READ(sc, WMREG_CTRL);
6459 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6460 v |= MDI_DIR | CTRL_SWDPIO(3);
6461
6462 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6463 if (data & i)
6464 v |= MDI_IO;
6465 else
6466 v &= ~MDI_IO;
6467 CSR_WRITE(sc, WMREG_CTRL, v);
6468 CSR_WRITE_FLUSH(sc);
6469 delay(10);
6470 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6471 CSR_WRITE_FLUSH(sc);
6472 delay(10);
6473 CSR_WRITE(sc, WMREG_CTRL, v);
6474 CSR_WRITE_FLUSH(sc);
6475 delay(10);
6476 }
6477 }
6478
6479 static uint32_t
6480 wm_i82543_mii_recvbits(struct wm_softc *sc)
6481 {
6482 uint32_t v, i, data = 0;
6483
6484 v = CSR_READ(sc, WMREG_CTRL);
6485 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6486 v |= CTRL_SWDPIO(3);
6487
6488 CSR_WRITE(sc, WMREG_CTRL, v);
6489 CSR_WRITE_FLUSH(sc);
6490 delay(10);
6491 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6492 CSR_WRITE_FLUSH(sc);
6493 delay(10);
6494 CSR_WRITE(sc, WMREG_CTRL, v);
6495 CSR_WRITE_FLUSH(sc);
6496 delay(10);
6497
6498 for (i = 0; i < 16; i++) {
6499 data <<= 1;
6500 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6501 CSR_WRITE_FLUSH(sc);
6502 delay(10);
6503 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6504 data |= 1;
6505 CSR_WRITE(sc, WMREG_CTRL, v);
6506 CSR_WRITE_FLUSH(sc);
6507 delay(10);
6508 }
6509
6510 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6511 CSR_WRITE_FLUSH(sc);
6512 delay(10);
6513 CSR_WRITE(sc, WMREG_CTRL, v);
6514 CSR_WRITE_FLUSH(sc);
6515 delay(10);
6516
6517 return data;
6518 }
6519
6520 #undef MDI_IO
6521 #undef MDI_DIR
6522 #undef MDI_CLK
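/*
 * For reference, wm_gmii_i82543_readreg() below bit-bangs a standard
 * IEEE 802.3 clause 22 management frame: a 32-bit preamble of ones,
 * then ST(01), OP(10 = read), PHYAD (5 bits) and REGAD (5 bits),
 * i.e. the 14 bits it composes as
 *
 *	reg | (phy << 5) | (MII_COMMAND_READ << 10)
 *	    | (MII_COMMAND_START << 12)
 *
 * followed by 16 data bits clocked back in by wm_i82543_mii_recvbits().
 */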
6523
6524 /*
6525 * wm_gmii_i82543_readreg: [mii interface function]
6526 *
6527 * Read a PHY register on the GMII (i82543 version).
6528 */
6529 static int
6530 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6531 {
6532 struct wm_softc *sc = device_private(self);
6533 int rv;
6534
6535 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6536 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
6537 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6538 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
6539
6540 DPRINTF(WM_DEBUG_GMII,
6541 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6542 device_xname(sc->sc_dev), phy, reg, rv));
6543
6544 return rv;
6545 }
6546
6547 /*
6548 * wm_gmii_i82543_writereg: [mii interface function]
6549 *
6550 * Write a PHY register on the GMII (i82543 version).
6551 */
6552 static void
6553 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6554 {
6555 struct wm_softc *sc = device_private(self);
6556
6557 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6558 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6559 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6560 (MII_COMMAND_START << 30), 32);
6561 }
6562
6563 /*
6564 * wm_gmii_i82544_readreg: [mii interface function]
6565 *
6566 * Read a PHY register on the GMII.
6567 */
6568 static int
6569 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6570 {
6571 struct wm_softc *sc = device_private(self);
6572 uint32_t mdic = 0;
6573 int i, rv;
6574
6575 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6576 MDIC_REGADD(reg));
6577
6578 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6579 mdic = CSR_READ(sc, WMREG_MDIC);
6580 if (mdic & MDIC_READY)
6581 break;
6582 delay(50);
6583 }
6584
6585 if ((mdic & MDIC_READY) == 0) {
6586 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6587 device_xname(sc->sc_dev), phy, reg);
6588 rv = 0;
6589 } else if (mdic & MDIC_E) {
6590 #if 0 /* This is normal if no PHY is present. */
6591 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6592 device_xname(sc->sc_dev), phy, reg);
6593 #endif
6594 rv = 0;
6595 } else {
6596 rv = MDIC_DATA(mdic);
6597 if (rv == 0xffff)
6598 rv = 0;
6599 }
6600
6601 return rv;
6602 }
6603
6604 /*
6605 * wm_gmii_i82544_writereg: [mii interface function]
6606 *
6607 * Write a PHY register on the GMII.
6608 */
6609 static void
6610 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6611 {
6612 struct wm_softc *sc = device_private(self);
6613 uint32_t mdic = 0;
6614 int i;
6615
6616 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6617 MDIC_REGADD(reg) | MDIC_DATA(val));
6618
6619 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6620 mdic = CSR_READ(sc, WMREG_MDIC);
6621 if (mdic & MDIC_READY)
6622 break;
6623 delay(50);
6624 }
6625
6626 if ((mdic & MDIC_READY) == 0)
6627 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6628 device_xname(sc->sc_dev), phy, reg);
6629 else if (mdic & MDIC_E)
6630 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6631 device_xname(sc->sc_dev), phy, reg);
6632 }
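/*
 * Usage sketch (hypothetical): the MDIC pair above gives simple
 * read-modify-write access to PHY registers, e.g. to set a bit in the
 * basic mode control register:
 */
#if 0
	int bmcr;

	bmcr = wm_gmii_i82544_readreg(self, phy, MII_BMCR);
	wm_gmii_i82544_writereg(self, phy, MII_BMCR, bmcr | BMCR_LOOP);
#endif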
6633
6634 /*
6635 * wm_gmii_i80003_readreg: [mii interface function]
6636 *
6637 * Read a PHY register on the Kumeran bus (80003).
6638 * This could be handled by the PHY layer if we didn't have to lock the
6639 * resource ...
6640 */
6641 static int
6642 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6643 {
6644 struct wm_softc *sc = device_private(self);
6645 int sem;
6646 int rv;
6647
6648 if (phy != 1) /* only one PHY on kumeran bus */
6649 return 0;
6650
6651 sem = swfwphysem[sc->sc_funcid];
6652 if (wm_get_swfw_semaphore(sc, sem)) {
6653 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6654 __func__);
6655 return 0;
6656 }
6657
6658 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6659 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6660 reg >> GG82563_PAGE_SHIFT);
6661 } else {
6662 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6663 reg >> GG82563_PAGE_SHIFT);
6664 }
6665 /* Wait 200us more to work around a ready-bit bug in the MDIC register */
6666 delay(200);
6667 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6668 delay(200);
6669
6670 wm_put_swfw_semaphore(sc, sem);
6671 return rv;
6672 }
6673
6674 /*
6675 * wm_gmii_i80003_writereg: [mii interface function]
6676 *
6677 * Write a PHY register on the Kumeran bus (80003).
6678 * This could be handled by the PHY layer if we didn't have to lock the
6679 * resource ...
6680 */
6681 static void
6682 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6683 {
6684 struct wm_softc *sc = device_private(self);
6685 int sem;
6686
6687 if (phy != 1) /* only one PHY on kumeran bus */
6688 return;
6689
6690 sem = swfwphysem[sc->sc_funcid];
6691 if (wm_get_swfw_semaphore(sc, sem)) {
6692 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6693 __func__);
6694 return;
6695 }
6696
6697 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6698 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6699 reg >> GG82563_PAGE_SHIFT);
6700 } else {
6701 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6702 reg >> GG82563_PAGE_SHIFT);
6703 }
6704 /* Wait 200us more to work around a ready-bit bug in the MDIC register */
6705 delay(200);
6706 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6707 delay(200);
6708
6709 wm_put_swfw_semaphore(sc, sem);
6710 }
6711
6712 /*
6713 * wm_gmii_bm_readreg: [mii interface function]
6714 *
6715 * Read a PHY register on the BM PHYs (82567 family).
6716 * This could be handled by the PHY layer if we didn't have to lock the
6717 * resource ...
6718 */
6719 static int
6720 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6721 {
6722 struct wm_softc *sc = device_private(self);
6723 int sem;
6724 int rv;
6725
6726 sem = swfwphysem[sc->sc_funcid];
6727 if (wm_get_swfw_semaphore(sc, sem)) {
6728 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6729 __func__);
6730 return 0;
6731 }
6732
6733 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6734 if (phy == 1)
6735 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6736 reg);
6737 else
6738 wm_gmii_i82544_writereg(self, phy,
6739 GG82563_PHY_PAGE_SELECT,
6740 reg >> GG82563_PAGE_SHIFT);
6741 }
6742
6743 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6744 wm_put_swfw_semaphore(sc, sem);
6745 return rv;
6746 }
6747
6748 /*
6749 * wm_gmii_bm_writereg: [mii interface function]
6750 *
6751 * Write a PHY register on the BM PHYs (82567 family).
6752 * This could be handled by the PHY layer if we didn't have to lock the
6753 * resource ...
6754 */
6755 static void
6756 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6757 {
6758 struct wm_softc *sc = device_private(self);
6759 int sem;
6760
6761 sem = swfwphysem[sc->sc_funcid];
6762 if (wm_get_swfw_semaphore(sc, sem)) {
6763 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6764 __func__);
6765 return;
6766 }
6767
6768 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6769 if (phy == 1)
6770 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6771 reg);
6772 else
6773 wm_gmii_i82544_writereg(self, phy,
6774 GG82563_PHY_PAGE_SELECT,
6775 reg >> GG82563_PAGE_SHIFT);
6776 }
6777
6778 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6779 wm_put_swfw_semaphore(sc, sem);
6780 }
6781
6782 static void
6783 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6784 {
6785 struct wm_softc *sc = device_private(self);
6786 uint16_t regnum = BM_PHY_REG_NUM(offset);
6787 uint16_t wuce;
6788
6789 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6790 if (sc->sc_type == WM_T_PCH) {
6791 /* XXX The e1000 driver does nothing here... why? */
6792 }
6793
6794 /* Set page 769 */
6795 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6796 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6797
6798 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6799
6800 wuce &= ~BM_WUC_HOST_WU_BIT;
6801 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6802 wuce | BM_WUC_ENABLE_BIT);
6803
6804 /* Select page 800 */
6805 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6806 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6807
6808 /* Write page 800 */
6809 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6810
6811 if (rd)
6812 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6813 else
6814 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6815
6816 /* Set page 769 */
6817 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6818 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6819
6820 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6821 }
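/*
 * Usage sketch (hypothetical): the rd flag selects the direction, so a
 * wakeup-page register such as BM_WUC can be read back like this:
 */
#if 0
	int16_t val;

	wm_access_phy_wakeup_reg_bm(sc->sc_dev, BM_WUC, &val, 1);
#endif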
6822
6823 /*
6824 * wm_gmii_hv_readreg: [mii interface function]
6825 *
6826 * Read a PHY register on the HV (PCH family) PHYs.
6827 * This could be handled by the PHY layer if we didn't have to lock the
6828 * resource ...
6829 */
6830 static int
6831 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6832 {
6833 struct wm_softc *sc = device_private(self);
6834 uint16_t page = BM_PHY_REG_PAGE(reg);
6835 uint16_t regnum = BM_PHY_REG_NUM(reg);
6836 uint16_t val;
6837 int rv;
6838
6839 if (wm_get_swfwhw_semaphore(sc)) {
6840 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6841 __func__);
6842 return 0;
6843 }
6844
6845 /* XXX Workaround failure in MDIO access while cable is disconnected */
6846 if (sc->sc_phytype == WMPHY_82577) {
6847 /* XXX must write */
6848 }
6849
6850 /* Page 800 works differently from the rest, so it has its own function */
6851 if (page == BM_WUC_PAGE) {
6852 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
6853 return val;
6854 }
6855
6856 /*
6857 * Pages lower than 768 work differently from the rest and would
6858 * need their own function; that case is not handled here.
6859 */
6860 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6861 printf("gmii_hv_readreg!!!\n");
6862 return 0;
6863 }
6864
6865 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6866 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6867 page << BME1000_PAGE_SHIFT);
6868 }
6869
6870 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6871 wm_put_swfwhw_semaphore(sc);
6872 return rv;
6873 }
6874
6875 /*
6876 * wm_gmii_hv_writereg: [mii interface function]
6877 *
6878 * Write a PHY register on the HV (PCH family) PHYs.
6879 * This could be handled by the PHY layer if we didn't have to lock the
6880 * resource ...
6881 */
6882 static void
6883 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6884 {
6885 struct wm_softc *sc = device_private(self);
6886 uint16_t page = BM_PHY_REG_PAGE(reg);
6887 uint16_t regnum = BM_PHY_REG_NUM(reg);
6888
6889 if (wm_get_swfwhw_semaphore(sc)) {
6890 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6891 __func__);
6892 return;
6893 }
6894
6895 /* XXX Workaround failure in MDIO access while cable is disconnected */
6896
6897 /* Page 800 works differently from the rest, so it has its own function */
6898 if (page == BM_WUC_PAGE) {
6899 uint16_t tmp;
6900
6901 tmp = val;
6902 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
6903 return;
6904 }
6905
6906 /*
6907 * Pages lower than 768 work differently from the rest and would
6908 * need their own function; that case is not handled here.
6909 */
6910 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6911 printf("gmii_hv_writereg!!!\n");
6912 return;
6913 }
6914
6915 /*
6916 * XXX Workaround MDIO accesses being disabled after entering IEEE
6917 * Power Down (whenever bit 11 of the PHY control register is set)
6918 */
6919
6920 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6921 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6922 page << BME1000_PAGE_SHIFT);
6923 }
6924
6925 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6926 wm_put_swfwhw_semaphore(sc);
6927 }
6928
6929 /*
6930 * wm_gmii_82580_readreg: [mii interface function]
6931 *
6932 * Read a PHY register on the 82580 and I350.
6933 * This could be handled by the PHY layer if we didn't have to lock the
6934 * resource ...
6935 */
6936 static int
6937 wm_gmii_82580_readreg(device_t self, int phy, int reg)
6938 {
6939 struct wm_softc *sc = device_private(self);
6940 int sem;
6941 int rv;
6942
6943 sem = swfwphysem[sc->sc_funcid];
6944 if (wm_get_swfw_semaphore(sc, sem)) {
6945 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6946 __func__);
6947 return 0;
6948 }
6949
6950 rv = wm_gmii_i82544_readreg(self, phy, reg);
6951
6952 wm_put_swfw_semaphore(sc, sem);
6953 return rv;
6954 }
6955
6956 /*
6957 * wm_gmii_82580_writereg: [mii interface function]
6958 *
6959 * Write a PHY register on the 82580 and I350.
6960 * This could be handled by the PHY layer if we didn't have to lock the
6961 * resource ...
6962 */
6963 static void
6964 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
6965 {
6966 struct wm_softc *sc = device_private(self);
6967 int sem;
6968
6969 sem = swfwphysem[sc->sc_funcid];
6970 if (wm_get_swfw_semaphore(sc, sem)) {
6971 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6972 __func__);
6973 return;
6974 }
6975
6976 wm_gmii_i82544_writereg(self, phy, reg, val);
6977
6978 wm_put_swfw_semaphore(sc, sem);
6979 }
6980
6981 /*
6982 * wm_gmii_statchg: [mii interface function]
6983 *
6984 * Callback from MII layer when media changes.
6985 */
6986 static void
6987 wm_gmii_statchg(struct ifnet *ifp)
6988 {
6989 struct wm_softc *sc = ifp->if_softc;
6990 struct mii_data *mii = &sc->sc_mii;
6991
6992 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6993 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6994 sc->sc_fcrtl &= ~FCRTL_XONE;
6995
6996 /*
6997 * Get flow control negotiation result.
6998 */
6999 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7000 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7001 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7002 mii->mii_media_active &= ~IFM_ETH_FMASK;
7003 }
7004
7005 if (sc->sc_flowflags & IFM_FLOW) {
7006 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7007 sc->sc_ctrl |= CTRL_TFCE;
7008 sc->sc_fcrtl |= FCRTL_XONE;
7009 }
7010 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7011 sc->sc_ctrl |= CTRL_RFCE;
7012 }
7013
7014 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7015 DPRINTF(WM_DEBUG_LINK,
7016 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7017 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7018 } else {
7019 DPRINTF(WM_DEBUG_LINK,
7020 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7021 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7022 }
7023
7024 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7025 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7026 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7027 : WMREG_FCRTL, sc->sc_fcrtl);
7028 if (sc->sc_type == WM_T_80003) {
7029 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7030 case IFM_1000_T:
7031 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7032 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7033 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7034 break;
7035 default:
7036 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7037 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7038 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7039 break;
7040 }
7041 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7042 }
7043 }
7044
7045 /*
7046 * wm_kmrn_readreg:
7047 *
7048 * Read a kumeran register
7049 */
7050 static int
7051 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7052 {
7053 int rv;
7054
7055 if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
7056 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7057 aprint_error_dev(sc->sc_dev,
7058 "%s: failed to get semaphore\n", __func__);
7059 return 0;
7060 }
7061 } else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
7062 if (wm_get_swfwhw_semaphore(sc)) {
7063 aprint_error_dev(sc->sc_dev,
7064 "%s: failed to get semaphore\n", __func__);
7065 return 0;
7066 }
7067 }
7068
7069 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7070 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7071 KUMCTRLSTA_REN);
7072 CSR_WRITE_FLUSH(sc);
7073 delay(2);
7074
7075 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7076
7077 if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
7078 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7079 else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
7080 wm_put_swfwhw_semaphore(sc);
7081
7082 return rv;
7083 }
7084
7085 /*
7086 * wm_kmrn_writereg:
7087 *
7088 * Write a kumeran register
7089 */
7090 static void
7091 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7092 {
7093
7094 if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
7095 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7096 aprint_error_dev(sc->sc_dev,
7097 "%s: failed to get semaphore\n", __func__);
7098 return;
7099 }
7100 } else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
7101 if (wm_get_swfwhw_semaphore(sc)) {
7102 aprint_error_dev(sc->sc_dev,
7103 "%s: failed to get semaphore\n", __func__);
7104 return;
7105 }
7106 }
7107
7108 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7109 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7110 (val & KUMCTRLSTA_MASK));
7111
7112 if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
7113 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7114 else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
7115 wm_put_swfwhw_semaphore(sc);
7116 }
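/*
 * Usage example: wm_gmii_statchg() above uses this pair to retune the
 * 80003's half-duplex control for the negotiated speed, e.g.:
 */
#if 0
	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
	    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
#endif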
7117
7118 /* SGMII related */
7119
7120 /*
7121 * wm_sgmii_uses_mdio
7122 *
7123 * Check whether the transaction is to the internal PHY or the external
7124 * MDIO interface. Return true if it's MDIO.
7125 */
7126 static bool
7127 wm_sgmii_uses_mdio(struct wm_softc *sc)
7128 {
7129 uint32_t reg;
7130 bool ismdio = false;
7131
7132 switch (sc->sc_type) {
7133 case WM_T_82575:
7134 case WM_T_82576:
7135 reg = CSR_READ(sc, WMREG_MDIC);
7136 ismdio = ((reg & MDIC_DEST) != 0);
7137 break;
7138 case WM_T_82580:
7139 case WM_T_I350:
7140 case WM_T_I354:
7141 case WM_T_I210:
7142 case WM_T_I211:
7143 reg = CSR_READ(sc, WMREG_MDICNFG);
7144 ismdio = ((reg & MDICNFG_DEST) != 0);
7145 break;
7146 default:
7147 break;
7148 }
7149
7150 return ismdio;
7151 }
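/*
 * Usage example: wm_gmii_mediainit() above consults this to decide
 * between the SGMII/I2C and MDIO register access methods:
 */
#if 0
	if (((sc->sc_flags & WM_F_SGMII) != 0)
	    && !wm_sgmii_uses_mdio(sc)) {
		mii->mii_readreg = wm_sgmii_readreg;
		mii->mii_writereg = wm_sgmii_writereg;
	}
#endif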
7152
7153 /*
7154 * wm_sgmii_readreg: [mii interface function]
7155 *
7156 * Read a PHY register on the SGMII
7157 * This could be handled by the PHY layer if we didn't have to lock the
7158 * ressource ...
7159 */
7160 static int
7161 wm_sgmii_readreg(device_t self, int phy, int reg)
7162 {
7163 struct wm_softc *sc = device_private(self);
7164 uint32_t i2ccmd;
7165 int i, rv;
7166
7167 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7168 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7169 __func__);
7170 return 0;
7171 }
7172
7173 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7174 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7175 | I2CCMD_OPCODE_READ;
7176 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7177
7178 /* Poll the ready bit */
7179 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7180 delay(50);
7181 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7182 if (i2ccmd & I2CCMD_READY)
7183 break;
7184 }
7185 if ((i2ccmd & I2CCMD_READY) == 0)
7186 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7187 if ((i2ccmd & I2CCMD_ERROR) != 0)
7188 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7189
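	/* The I2C command interface returns the data byte-swapped; undo that. */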
7190 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7191
7192 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7193 return rv;
7194 }
7195
7196 /*
7197 * wm_sgmii_writereg: [mii interface function]
7198 *
7199 * Write a PHY register on the SGMII.
7200 * This could be handled by the PHY layer if we didn't have to lock the
7201 * ressource ...
7202 */
7203 static void
7204 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7205 {
7206 struct wm_softc *sc = device_private(self);
7207 uint32_t i2ccmd, swapdata;
7208 int i;
7209
7210 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7211 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7212 __func__);
7213 return;
7214 }
7215
 /* The I2C command interface expects the data byte-swapped */
 swapdata = ((val >> 8) & 0x00ff) | ((val << 8) & 0xff00);
7216 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7217 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7218 | I2CCMD_OPCODE_WRITE | swapdata;
7219 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7220
7221 /* Poll the ready bit */
7222 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7223 delay(50);
7224 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7225 if (i2ccmd & I2CCMD_READY)
7226 break;
7227 }
7228 if ((i2ccmd & I2CCMD_READY) == 0)
7229 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7230 if ((i2ccmd & I2CCMD_ERROR) != 0)
7231 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7232
7233 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7234 }
7235
7236 /* TBI related */
7237
7238 /* XXX Currently TBI only */
7239 static int
7240 wm_check_for_link(struct wm_softc *sc)
7241 {
7242 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7243 uint32_t rxcw;
7244 uint32_t ctrl;
7245 uint32_t status;
7246 uint32_t sig;
7247
7248 if (sc->sc_mediatype & WMP_F_SERDES) {
7249 sc->sc_tbi_linkup = 1;
7250 return 0;
7251 }
7252
7253 rxcw = CSR_READ(sc, WMREG_RXCW);
7254 ctrl = CSR_READ(sc, WMREG_CTRL);
7255 status = CSR_READ(sc, WMREG_STATUS);
7256
7257 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7258
7259 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7260 device_xname(sc->sc_dev), __func__,
7261 ((ctrl & CTRL_SWDPIN(1)) == sig),
7262 ((status & STATUS_LU) != 0),
7263 ((rxcw & RXCW_C) != 0)
7264 ));
7265
7266 /*
7267 * SWDPIN LU RXCW
7268 * 0 0 0
7269 * 0 0 1 (should not happen)
7270 * 0 1 0 (should not happen)
7271 * 0 1 1 (should not happen)
7272 * 1 0 0 Disable autonego and force linkup
7273 * 1 0 1 got /C/ but not linkup yet
7274 * 1 1 0 (linkup)
7275 * 1 1 1 If IFM_AUTO, back to autonego
7276 *
7277 */
7278 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7279 && ((status & STATUS_LU) == 0)
7280 && ((rxcw & RXCW_C) == 0)) {
7281 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7282 __func__));
7283 sc->sc_tbi_linkup = 0;
7284 /* Disable auto-negotiation in the TXCW register */
7285 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7286
7287 /*
7288 * Force link-up and also force full-duplex.
7289 *
7290 * NOTE: The hardware updated TFCE and RFCE in CTRL
7291 * automatically, so re-read it into sc->sc_ctrl.
7292 */
7293 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7294 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7295 } else if (((status & STATUS_LU) != 0)
7296 && ((rxcw & RXCW_C) != 0)
7297 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7298 sc->sc_tbi_linkup = 1;
7299 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7300 __func__));
7301 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7302 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7303 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7304 && ((rxcw & RXCW_C) != 0)) {
7305 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7306 } else {
7307 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7308 status));
7309 }
7310
7311 return 0;
7312 }
7313
7314 /*
7315 * wm_tbi_mediainit:
7316 *
7317 * Initialize media for use on 1000BASE-X devices.
7318 */
7319 static void
7320 wm_tbi_mediainit(struct wm_softc *sc)
7321 {
7322 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7323 const char *sep = "";
7324
7325 if (sc->sc_type < WM_T_82543)
7326 sc->sc_tipg = TIPG_WM_DFLT;
7327 else
7328 sc->sc_tipg = TIPG_LG_DFLT;
7329
7330 sc->sc_tbi_anegticks = 5;
7331
7332 /* Initialize our media structures */
7333 sc->sc_mii.mii_ifp = ifp;
7334
7335 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7336 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
7337 wm_tbi_mediastatus);
7338
7339 /*
7340 * SWD Pins:
7341 *
7342 * 0 = Link LED (output)
7343 * 1 = Loss Of Signal (input)
7344 */
7345 sc->sc_ctrl |= CTRL_SWDPIO(0);
7346 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
7347 if (sc->sc_mediatype & WMP_F_SERDES)
7348 sc->sc_ctrl &= ~CTRL_LRST;
7349
7350 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7351
7352 #define ADD(ss, mm, dd) \
7353 do { \
7354 aprint_normal("%s%s", sep, ss); \
7355 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
7356 sep = ", "; \
7357 } while (/*CONSTCOND*/0)
7358
7359 aprint_normal_dev(sc->sc_dev, "");
7360
7361 /* Only 82545 is LX */
7362 if (sc->sc_type == WM_T_82545) {
7363 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
7364 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
7365 } else {
7366 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
7367 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
7368 }
7369 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
7370 aprint_normal("\n");
7371
7372 #undef ADD
7373
7374 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
7375 }
7376
7377 /*
7378 * wm_tbi_mediastatus: [ifmedia interface function]
7379 *
7380 * Get the current interface media status on a 1000BASE-X device.
7381 */
7382 static void
7383 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7384 {
7385 struct wm_softc *sc = ifp->if_softc;
7386 uint32_t ctrl, status;
7387
7388 ifmr->ifm_status = IFM_AVALID;
7389 ifmr->ifm_active = IFM_ETHER;
7390
7391 status = CSR_READ(sc, WMREG_STATUS);
7392 if ((status & STATUS_LU) == 0) {
7393 ifmr->ifm_active |= IFM_NONE;
7394 return;
7395 }
7396
7397 ifmr->ifm_status |= IFM_ACTIVE;
7398 /* Only 82545 is LX */
7399 if (sc->sc_type == WM_T_82545)
7400 ifmr->ifm_active |= IFM_1000_LX;
7401 else
7402 ifmr->ifm_active |= IFM_1000_SX;
7403 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
7404 ifmr->ifm_active |= IFM_FDX;
7405 else
7406 ifmr->ifm_active |= IFM_HDX;
7407 ctrl = CSR_READ(sc, WMREG_CTRL);
7408 if (ctrl & CTRL_RFCE)
7409 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
7410 if (ctrl & CTRL_TFCE)
7411 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
7412 }
7413
7414 /*
7415 * wm_tbi_mediachange: [ifmedia interface function]
7416 *
7417 * Set hardware to newly-selected media on a 1000BASE-X device.
7418 */
7419 static int
7420 wm_tbi_mediachange(struct ifnet *ifp)
7421 {
7422 struct wm_softc *sc = ifp->if_softc;
7423 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7424 uint32_t status;
7425 int i;
7426
7427 if (sc->sc_mediatype & WMP_F_SERDES)
7428 return 0;
7429
7430 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
7431 || (sc->sc_type >= WM_T_82575))
7432 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
7433
7434 /* XXX power_up_serdes_link_82575() */
7435
7436 sc->sc_ctrl &= ~CTRL_LRST;
7437 sc->sc_txcw = TXCW_ANE;
7438 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7439 sc->sc_txcw |= TXCW_FD | TXCW_HD;
7440 else if (ife->ifm_media & IFM_FDX)
7441 sc->sc_txcw |= TXCW_FD;
7442 else
7443 sc->sc_txcw |= TXCW_HD;
7444
7445 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
7446 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
7447
7448 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
7449 device_xname(sc->sc_dev), sc->sc_txcw));
7450 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7451 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7452 CSR_WRITE_FLUSH(sc);
7453 delay(1000);
7454
7455 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
7456 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
7457
7458 /*
7459 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
7460 * optics detect a signal, 0 if they don't.
7461 */
7462 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
7463 /* Have signal; wait for the link to come up. */
7464 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
7465 delay(10000);
7466 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
7467 break;
7468 }
7469
7470 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
7471 device_xname(sc->sc_dev),i));
7472
7473 status = CSR_READ(sc, WMREG_STATUS);
7474 DPRINTF(WM_DEBUG_LINK,
7475 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
7476 device_xname(sc->sc_dev),status, STATUS_LU));
7477 if (status & STATUS_LU) {
7478 /* Link is up. */
7479 DPRINTF(WM_DEBUG_LINK,
7480 ("%s: LINK: set media -> link up %s\n",
7481 device_xname(sc->sc_dev),
7482 (status & STATUS_FD) ? "FDX" : "HDX"));
7483
7484 /*
7485 * NOTE: The hardware updates TFCE and RFCE in CTRL
7486 * automatically, so we must re-read it into sc->sc_ctrl.
7487 */
7488 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7489 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7490 sc->sc_fcrtl &= ~FCRTL_XONE;
7491 if (status & STATUS_FD)
7492 sc->sc_tctl |=
7493 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7494 else
7495 sc->sc_tctl |=
7496 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7497 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
7498 sc->sc_fcrtl |= FCRTL_XONE;
7499 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7500 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7501 WMREG_OLD_FCRTL : WMREG_FCRTL,
7502 sc->sc_fcrtl);
7503 sc->sc_tbi_linkup = 1;
7504 } else {
7505 if (i == WM_LINKUP_TIMEOUT)
7506 wm_check_for_link(sc);
7507 /* Link is down. */
7508 DPRINTF(WM_DEBUG_LINK,
7509 ("%s: LINK: set media -> link down\n",
7510 device_xname(sc->sc_dev)));
7511 sc->sc_tbi_linkup = 0;
7512 }
7513 } else {
7514 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
7515 device_xname(sc->sc_dev)));
7516 sc->sc_tbi_linkup = 0;
7517 }
7518
7519 wm_tbi_set_linkled(sc);
7520
7521 return 0;
7522 }
7523
7524 /*
7525 * wm_tbi_set_linkled:
7526 *
7527 * Update the link LED on 1000BASE-X devices.
7528 */
7529 static void
7530 wm_tbi_set_linkled(struct wm_softc *sc)
7531 {
7532
7533 if (sc->sc_tbi_linkup)
7534 sc->sc_ctrl |= CTRL_SWDPIN(0);
7535 else
7536 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7537
7538 /* 82540 or newer devices are active low */
7539 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7540
7541 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7542 }
7543
7544 /*
7545 * wm_tbi_check_link:
7546 *
7547 * Check the link on 1000BASE-X devices.
7548 */
7549 static void
7550 wm_tbi_check_link(struct wm_softc *sc)
7551 {
7552 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7553 uint32_t status;
7554
7555 KASSERT(WM_TX_LOCKED(sc));
7556
7557 if (sc->sc_mediatype & WMP_F_SERDES) {
7558 sc->sc_tbi_linkup = 1;
7559 return;
7560 }
7561
7562 status = CSR_READ(sc, WMREG_STATUS);
7563
7564 /* XXX is this needed? */
7565 (void)CSR_READ(sc, WMREG_RXCW);
7566 (void)CSR_READ(sc, WMREG_CTRL);
7567
7568 /* set link status */
7569 if ((status & STATUS_LU) == 0) {
7570 DPRINTF(WM_DEBUG_LINK,
7571 ("%s: LINK: checklink -> down\n",
7572 device_xname(sc->sc_dev)));
7573 sc->sc_tbi_linkup = 0;
7574 } else if (sc->sc_tbi_linkup == 0) {
7575 DPRINTF(WM_DEBUG_LINK,
7576 ("%s: LINK: checklink -> up %s\n",
7577 device_xname(sc->sc_dev),
7578 (status & STATUS_FD) ? "FDX" : "HDX"));
7579 sc->sc_tbi_linkup = 1;
7580 }
7581
7582 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
7583 && ((status & STATUS_LU) == 0)) {
7584 sc->sc_tbi_linkup = 0;
7585 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7586 /* If the timer expired, retry autonegotiation */
7587 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
7588 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
7589 sc->sc_tbi_ticks = 0;
7590 /*
7591 * Reset the link, and let autonegotiation do
7592 * its thing
7593 */
7594 sc->sc_ctrl |= CTRL_LRST;
7595 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7596 CSR_WRITE_FLUSH(sc);
7597 delay(1000);
7598 sc->sc_ctrl &= ~CTRL_LRST;
7599 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7600 CSR_WRITE_FLUSH(sc);
7601 delay(1000);
7602 CSR_WRITE(sc, WMREG_TXCW,
7603 sc->sc_txcw & ~TXCW_ANE);
7604 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7605 }
7606 }
7607 }
7608
7609 wm_tbi_set_linkled(sc);
7610 }
7611
7612 /* SFP related */
7613
7614 static int
7615 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
7616 {
7617 uint32_t i2ccmd;
7618 int i;
7619
7620 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
7621 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7622
7623 /* Poll the ready bit */
7624 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7625 delay(50);
7626 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7627 if (i2ccmd & I2CCMD_READY)
7628 break;
7629 }
7630 if ((i2ccmd & I2CCMD_READY) == 0)
7631 return -1;
7632 if ((i2ccmd & I2CCMD_ERROR) != 0)
7633 return -1;
7634
7635 *data = i2ccmd & 0x00ff;
7636
7637 return 0;
7638 }
7639
7640 static uint32_t
7641 wm_sfp_get_media_type(struct wm_softc *sc)
7642 {
7643 uint32_t ctrl_ext;
7644 uint8_t val = 0;
7645 int timeout = 3;
7646 uint32_t mediatype = WMP_F_UNKNOWN;
7647 int rv = -1;
7648
7649 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7650 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
7651 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
7652 CSR_WRITE_FLUSH(sc);
7653
7654 /* Read SFP module data */
7655 while (timeout) {
7656 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
7657 if (rv == 0)
7658 break;
7659 delay(100*1000); /* XXX too big */
7660 timeout--;
7661 }
7662 if (rv != 0)
7663 goto out;
7664 switch (val) {
7665 case SFF_SFP_ID_SFF:
7666 aprint_normal_dev(sc->sc_dev,
7667 "Module/Connector soldered to board\n");
7668 break;
7669 case SFF_SFP_ID_SFP:
7670 aprint_normal_dev(sc->sc_dev, "SFP\n");
7671 break;
7672 case SFF_SFP_ID_UNKNOWN:
7673 goto out;
7674 default:
7675 break;
7676 }
7677
7678 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
7679 if (rv != 0) {
7680 goto out;
7681 }
7682
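	/*
	 * Map the module's Ethernet compliance codes to a media type:
	 * 1000BASE-SX/LX modules attach via SerDes, while 1000BASE-T
	 * and 100BASE-FX modules are reached through SGMII.
	 */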
7683 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
7684 mediatype = WMP_F_SERDES;
7685 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
7686 		sc->sc_flags |= WM_F_SGMII;
7687 		mediatype = WMP_F_COPPER;
7688 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
7689 sc->sc_flags |= WM_F_SGMII;
7690 mediatype = WMP_F_SERDES;
7691 }
7692
7693 out:
7694 /* Restore I2C interface setting */
7695 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7696
7697 return mediatype;
7698 }

7699 /*
7700 * NVM related.
7701 * Microwire, SPI (w/wo EERD) and Flash.
7702 */
7703
7704 /* Both spi and uwire */
7705
7706 /*
7707 * wm_eeprom_sendbits:
7708 *
7709 * Send a series of bits to the EEPROM.
7710 */
7711 static void
7712 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
7713 {
7714 uint32_t reg;
7715 int x;
7716
7717 reg = CSR_READ(sc, WMREG_EECD);
7718
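	/*
	 * Clock each bit out MSB first: present the bit on DI, then
	 * pulse SK high and low; the serial EEPROM samples DI on the
	 * rising edge of SK.
	 */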
7719 for (x = nbits; x > 0; x--) {
7720 if (bits & (1U << (x - 1)))
7721 reg |= EECD_DI;
7722 else
7723 reg &= ~EECD_DI;
7724 CSR_WRITE(sc, WMREG_EECD, reg);
7725 CSR_WRITE_FLUSH(sc);
7726 delay(2);
7727 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7728 CSR_WRITE_FLUSH(sc);
7729 delay(2);
7730 CSR_WRITE(sc, WMREG_EECD, reg);
7731 CSR_WRITE_FLUSH(sc);
7732 delay(2);
7733 }
7734 }
7735
7736 /*
7737 * wm_eeprom_recvbits:
7738 *
7739 * Receive a series of bits from the EEPROM.
7740 */
7741 static void
7742 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
7743 {
7744 uint32_t reg, val;
7745 int x;
7746
7747 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
7748
7749 val = 0;
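	/*
	 * Clock each bit in MSB first: raise SK, sample DO while the
	 * clock is high, then drop SK for the next bit.
	 */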
7750 for (x = nbits; x > 0; x--) {
7751 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7752 CSR_WRITE_FLUSH(sc);
7753 delay(2);
7754 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
7755 val |= (1U << (x - 1));
7756 CSR_WRITE(sc, WMREG_EECD, reg);
7757 CSR_WRITE_FLUSH(sc);
7758 delay(2);
7759 }
7760 *valp = val;
7761 }
7762
7763 /* Microwire */
7764
7765 /*
7766 * wm_nvm_read_uwire:
7767 *
7768 * Read a word from the EEPROM using the MicroWire protocol.
7769 */
7770 static int
7771 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7772 {
7773 uint32_t reg, val;
7774 int i;
7775
7776 for (i = 0; i < wordcnt; i++) {
7777 /* Clear SK and DI. */
7778 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
7779 CSR_WRITE(sc, WMREG_EECD, reg);
7780
7781 /*
7782 * XXX: workaround for a bug in qemu-0.12.x and prior
7783 * and Xen.
7784 *
7785 		 * We use this workaround only for the 82540 because qemu's
7786 		 * e1000 acts as an 82540.
7787 */
7788 if (sc->sc_type == WM_T_82540) {
7789 reg |= EECD_SK;
7790 CSR_WRITE(sc, WMREG_EECD, reg);
7791 reg &= ~EECD_SK;
7792 CSR_WRITE(sc, WMREG_EECD, reg);
7793 CSR_WRITE_FLUSH(sc);
7794 delay(2);
7795 }
7796 /* XXX: end of workaround */
7797
7798 /* Set CHIP SELECT. */
7799 reg |= EECD_CS;
7800 CSR_WRITE(sc, WMREG_EECD, reg);
7801 CSR_WRITE_FLUSH(sc);
7802 delay(2);
7803
7804 /* Shift in the READ command. */
7805 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
7806
7807 /* Shift in address. */
7808 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
7809
7810 /* Shift out the data. */
7811 wm_eeprom_recvbits(sc, &val, 16);
7812 data[i] = val & 0xffff;
7813
7814 /* Clear CHIP SELECT. */
7815 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
7816 CSR_WRITE(sc, WMREG_EECD, reg);
7817 CSR_WRITE_FLUSH(sc);
7818 delay(2);
7819 }
7820
7821 return 0;
7822 }
7823
7824 /* SPI */
7825
7826 /*
7827 * Set SPI and FLASH related information from the EECD register.
7828 * For 82541 and 82547, the word size is taken from EEPROM.
7829 */
7830 static int
7831 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
7832 {
7833 int size;
7834 uint32_t reg;
7835 uint16_t data;
7836
7837 reg = CSR_READ(sc, WMREG_EECD);
7838 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
7839
7840 /* Read the size of NVM from EECD by default */
7841 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
7842 switch (sc->sc_type) {
7843 case WM_T_82541:
7844 case WM_T_82541_2:
7845 case WM_T_82547:
7846 case WM_T_82547_2:
7847 /* Set dummy value to access EEPROM */
7848 sc->sc_nvm_wordsize = 64;
7849 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
7850 reg = data;
7851 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
7852 if (size == 0)
7853 size = 6; /* 64 word size */
7854 else
7855 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
7856 break;
7857 case WM_T_80003:
7858 case WM_T_82571:
7859 case WM_T_82572:
7860 case WM_T_82573: /* SPI case */
7861 case WM_T_82574: /* SPI case */
7862 case WM_T_82583: /* SPI case */
7863 size += NVM_WORD_SIZE_BASE_SHIFT;
7864 if (size > 14)
7865 size = 14;
7866 break;
7867 case WM_T_82575:
7868 case WM_T_82576:
7869 case WM_T_82580:
7870 case WM_T_I350:
7871 case WM_T_I354:
7872 case WM_T_I210:
7873 case WM_T_I211:
7874 size += NVM_WORD_SIZE_BASE_SHIFT;
7875 if (size > 15)
7876 size = 15;
7877 break;
7878 default:
7879 aprint_error_dev(sc->sc_dev,
7880 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
7881 		return -1;
7883 }
7884
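	/* The size value encodes the NVM word count as a power of two. */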
7885 sc->sc_nvm_wordsize = 1 << size;
7886
7887 return 0;
7888 }
7889
7890 /*
7891 * wm_nvm_ready_spi:
7892 *
7893 * Wait for a SPI EEPROM to be ready for commands.
7894 */
7895 static int
7896 wm_nvm_ready_spi(struct wm_softc *sc)
7897 {
7898 uint32_t val;
7899 int usec;
7900
7901 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
7902 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
7903 wm_eeprom_recvbits(sc, &val, 8);
7904 if ((val & SPI_SR_RDY) == 0)
7905 break;
7906 }
7907 if (usec >= SPI_MAX_RETRIES) {
7908 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
7909 return 1;
7910 }
7911 return 0;
7912 }
7913
7914 /*
7915 * wm_nvm_read_spi:
7916 *
7917  *	Read a word from the EEPROM using the SPI protocol.
7918 */
7919 static int
7920 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7921 {
7922 uint32_t reg, val;
7923 int i;
7924 uint8_t opc;
7925
7926 /* Clear SK and CS. */
7927 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
7928 CSR_WRITE(sc, WMREG_EECD, reg);
7929 CSR_WRITE_FLUSH(sc);
7930 delay(2);
7931
7932 if (wm_nvm_ready_spi(sc))
7933 return 1;
7934
7935 /* Toggle CS to flush commands. */
7936 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
7937 CSR_WRITE_FLUSH(sc);
7938 delay(2);
7939 CSR_WRITE(sc, WMREG_EECD, reg);
7940 CSR_WRITE_FLUSH(sc);
7941 delay(2);
7942
7943 opc = SPI_OPC_READ;
7944 if (sc->sc_nvm_addrbits == 8 && word >= 128)
7945 opc |= SPI_OPC_A8;
7946
7947 wm_eeprom_sendbits(sc, opc, 8);
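	/* The SPI EEPROM is byte addressed, so convert the word address. */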
7948 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
7949
7950 for (i = 0; i < wordcnt; i++) {
7951 wm_eeprom_recvbits(sc, &val, 16);
7952 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
7953 }
7954
7955 /* Raise CS and clear SK. */
7956 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
7957 CSR_WRITE(sc, WMREG_EECD, reg);
7958 CSR_WRITE_FLUSH(sc);
7959 delay(2);
7960
7961 return 0;
7962 }
7963
7964 /* Read using the EERD register */
7965
7966 static int
7967 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
7968 {
7969 uint32_t attempts = 100000;
7970 uint32_t i, reg = 0;
7971 int32_t done = -1;
7972
7973 for (i = 0; i < attempts; i++) {
7974 reg = CSR_READ(sc, rw);
7975
7976 if (reg & EERD_DONE) {
7977 done = 0;
7978 break;
7979 }
7980 delay(5);
7981 }
7982
7983 return done;
7984 }
7985
7986 static int
7987 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
7988 uint16_t *data)
7989 {
7990 int i, eerd = 0;
7991 int error = 0;
7992
7993 for (i = 0; i < wordcnt; i++) {
7994 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
7995
7996 CSR_WRITE(sc, WMREG_EERD, eerd);
7997 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
7998 if (error != 0)
7999 break;
8000
8001 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
8002 }
8003
8004 return error;
8005 }
8006
8007 /* Flash */
8008
8009 static int
8010 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
8011 {
8012 uint32_t eecd;
8013 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
8014 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
8015 uint8_t sig_byte = 0;
8016
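	/*
	 * The signature lives in the high byte of the word at
	 * ICH_NVM_SIG_WORD; a bank is valid when that byte matches
	 * ICH_NVM_SIG_VALUE.
	 */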
8017 switch (sc->sc_type) {
8018 case WM_T_ICH8:
8019 case WM_T_ICH9:
8020 eecd = CSR_READ(sc, WMREG_EECD);
8021 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
8022 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
8023 return 0;
8024 }
8025 /* FALLTHROUGH */
8026 default:
8027 /* Default to 0 */
8028 *bank = 0;
8029
8030 /* Check bank 0 */
8031 wm_read_ich8_byte(sc, act_offset, &sig_byte);
8032 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8033 *bank = 0;
8034 return 0;
8035 }
8036
8037 /* Check bank 1 */
8038 wm_read_ich8_byte(sc, act_offset + bank1_offset,
8039 &sig_byte);
8040 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8041 *bank = 1;
8042 return 0;
8043 }
8044 }
8045
8046 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
8047 device_xname(sc->sc_dev)));
8048 return -1;
8049 }
8050
8051 /******************************************************************************
8052 * This function does initial flash setup so that a new read/write/erase cycle
8053 * can be started.
8054 *
8055 * sc - The pointer to the hw structure
8056 ****************************************************************************/
8057 static int32_t
8058 wm_ich8_cycle_init(struct wm_softc *sc)
8059 {
8060 uint16_t hsfsts;
8061 int32_t error = 1;
8062 int32_t i = 0;
8063
8064 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8065
8066 	/* Check the Flash Descriptor Valid bit in Hw status */
8067 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
8068 return error;
8069 }
8070
8071 	/* Clear FCERR and DAEL in Hw status by writing 1 to them */
8073 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
8074
8075 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8076
8077 	/*
8078 	 * Either a hardware "SPI cycle in progress" bit should exist to
8079 	 * check against before starting a new cycle, or the FDONE bit
8080 	 * should read as 1 after a hardware reset so it can indicate
8081 	 * whether a cycle is in progress or has completed.  There should
8082 	 * also be a software semaphore guarding FDONE and the in-progress
8083 	 * bit, so that accesses by two threads are serialized and they
8084 	 * cannot start a cycle at the same time.
8085 	 */
8087
8088 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8089 /*
8090 * There is no cycle running at present, so we can start a
8091 * cycle
8092 */
8093
8094 /* Begin by setting Flash Cycle Done. */
8095 hsfsts |= HSFSTS_DONE;
8096 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8097 error = 0;
8098 } else {
8099 /*
8100 		 * Otherwise poll for a while so the current cycle has a
8101 		 * chance to end before giving up.
8102 */
8103 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
8104 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8105 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8106 error = 0;
8107 break;
8108 }
8109 delay(1);
8110 }
8111 if (error == 0) {
8112 			/*
8113 			 * The previous cycle ended in time; now set the
8114 			 * Flash Cycle Done bit.
8115 			 */
8116 hsfsts |= HSFSTS_DONE;
8117 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8118 }
8119 }
8120 return error;
8121 }
8122
8123 /******************************************************************************
8124 * This function starts a flash cycle and waits for its completion
8125 *
8126 * sc - The pointer to the hw structure
8127 ****************************************************************************/
8128 static int32_t
8129 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
8130 {
8131 uint16_t hsflctl;
8132 uint16_t hsfsts;
8133 int32_t error = 1;
8134 uint32_t i = 0;
8135
8136 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
8137 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8138 hsflctl |= HSFCTL_GO;
8139 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8140
8141 /* Wait till FDONE bit is set to 1 */
8142 do {
8143 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8144 if (hsfsts & HSFSTS_DONE)
8145 break;
8146 delay(1);
8147 i++;
8148 } while (i < timeout);
8149 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
8150 error = 0;
8151
8152 return error;
8153 }
8154
8155 /******************************************************************************
8156 * Reads a byte or word from the NVM using the ICH8 flash access registers.
8157 *
8158 * sc - The pointer to the hw structure
8159 * index - The index of the byte or word to read.
8160 * size - Size of data to read, 1=byte 2=word
8161 * data - Pointer to the word to store the value read.
8162 *****************************************************************************/
8163 static int32_t
8164 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
8165 uint32_t size, uint16_t *data)
8166 {
8167 uint16_t hsfsts;
8168 uint16_t hsflctl;
8169 uint32_t flash_linear_address;
8170 uint32_t flash_data = 0;
8171 int32_t error = 1;
8172 int32_t count = 0;
8173
8174 	if (size < 1 || size > 2 || data == NULL ||
8175 index > ICH_FLASH_LINEAR_ADDR_MASK)
8176 return error;
8177
8178 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
8179 sc->sc_ich8_flash_base;
8180
8181 do {
8182 delay(1);
8183 /* Steps */
8184 error = wm_ich8_cycle_init(sc);
8185 if (error)
8186 break;
8187
8188 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8189 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
8190 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
8191 & HSFCTL_BCOUNT_MASK;
8192 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
8193 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8194
8195 /*
8196 * Write the last 24 bits of index into Flash Linear address
8197 * field in Flash Address
8198 */
8199 /* TODO: TBD maybe check the index against the size of flash */
8200
8201 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
8202
8203 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
8204
8205 		/*
8206 		 * If FCERR is set, clear it and retry the whole sequence
8207 		 * a few more times; otherwise read (shift in) Flash Data0,
8208 		 * least significant byte first.
8209 		 */
8211 if (error == 0) {
8212 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
8213 if (size == 1)
8214 *data = (uint8_t)(flash_data & 0x000000FF);
8215 else if (size == 2)
8216 *data = (uint16_t)(flash_data & 0x0000FFFF);
8217 break;
8218 } else {
8219 /*
8220 * If we've gotten here, then things are probably
8221 * completely hosed, but if the error condition is
8222 * detected, it won't hurt to give it another try...
8223 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
8224 */
8225 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8226 if (hsfsts & HSFSTS_ERR) {
8227 /* Repeat for some time before giving up. */
8228 continue;
8229 } else if ((hsfsts & HSFSTS_DONE) == 0)
8230 break;
8231 }
8232 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8233
8234 return error;
8235 }
8236
8237 /******************************************************************************
8238 * Reads a single byte from the NVM using the ICH8 flash access registers.
8239 *
8240 * sc - pointer to wm_hw structure
8241 * index - The index of the byte to read.
8242 * data - Pointer to a byte to store the value read.
8243 *****************************************************************************/
8244 static int32_t
8245 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
8246 {
8247 int32_t status;
8248 uint16_t word = 0;
8249
8250 status = wm_read_ich8_data(sc, index, 1, &word);
8251 if (status == 0)
8252 *data = (uint8_t)word;
8253 else
8254 *data = 0;
8255
8256 return status;
8257 }
8258
8259 /******************************************************************************
8260 * Reads a word from the NVM using the ICH8 flash access registers.
8261 *
8262 * sc - pointer to wm_hw structure
8263 * index - The starting byte index of the word to read.
8264 * data - Pointer to a word to store the value read.
8265 *****************************************************************************/
8266 static int32_t
8267 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8268 {
8269 int32_t status;
8270
8271 status = wm_read_ich8_data(sc, index, 2, data);
8272 return status;
8273 }
8274
8275 /******************************************************************************
8276 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
8277 * register.
8278 *
8279 * sc - Struct containing variables accessed by shared code
8280 * offset - offset of word in the EEPROM to read
8281 * data - word read from the EEPROM
8282 * words - number of words to read
8283 *****************************************************************************/
8284 static int
8285 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
8286 {
8287 int32_t error = 0;
8288 uint32_t flash_bank = 0;
8289 uint32_t act_offset = 0;
8290 uint32_t bank_offset = 0;
8291 uint16_t word = 0;
8292 uint16_t i = 0;
8293
8294 /*
8295 * We need to know which is the valid flash bank. In the event
8296 * that we didn't allocate eeprom_shadow_ram, we may not be
8297 * managing flash_bank. So it cannot be trusted and needs
8298 * to be updated with each read.
8299 */
8300 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
8301 if (error) {
8302 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
8303 device_xname(sc->sc_dev)));
8304 flash_bank = 0;
8305 }
8306
8307 /*
8308 * Adjust offset appropriately if we're on bank 1 - adjust for word
8309 * size
8310 */
8311 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
8312
8313 error = wm_get_swfwhw_semaphore(sc);
8314 if (error) {
8315 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8316 __func__);
8317 return error;
8318 }
8319
8320 for (i = 0; i < words; i++) {
8321 /* The NVM part needs a byte offset, hence * 2 */
8322 act_offset = bank_offset + ((offset + i) * 2);
8323 error = wm_read_ich8_word(sc, act_offset, &word);
8324 if (error) {
8325 aprint_error_dev(sc->sc_dev,
8326 "%s: failed to read NVM\n", __func__);
8327 break;
8328 }
8329 data[i] = word;
8330 }
8331
8332 wm_put_swfwhw_semaphore(sc);
8333 return error;
8334 }
8335
8336 /* Lock, detect the NVM type, validate the checksum and read */
8337
8338 /*
8339 * wm_nvm_acquire:
8340 *
8341 * Perform the EEPROM handshake required on some chips.
8342 */
8343 static int
8344 wm_nvm_acquire(struct wm_softc *sc)
8345 {
8346 uint32_t reg;
8347 int x;
8348 int ret = 0;
8349
8350 	/* Always succeeds: flash-based NVM needs no handshake */
8351 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8352 return 0;
8353
8354 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8355 ret = wm_get_swfwhw_semaphore(sc);
8356 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
8357 /* This will also do wm_get_swsm_semaphore() if needed */
8358 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
8359 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
8360 ret = wm_get_swsm_semaphore(sc);
8361 }
8362
8363 if (ret) {
8364 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8365 __func__);
8366 return 1;
8367 }
8368
8369 if (sc->sc_flags & WM_F_LOCK_EECD) {
8370 reg = CSR_READ(sc, WMREG_EECD);
8371
8372 /* Request EEPROM access. */
8373 reg |= EECD_EE_REQ;
8374 CSR_WRITE(sc, WMREG_EECD, reg);
8375
8376 /* ..and wait for it to be granted. */
8377 for (x = 0; x < 1000; x++) {
8378 reg = CSR_READ(sc, WMREG_EECD);
8379 if (reg & EECD_EE_GNT)
8380 break;
8381 delay(5);
8382 }
8383 if ((reg & EECD_EE_GNT) == 0) {
8384 aprint_error_dev(sc->sc_dev,
8385 "could not acquire EEPROM GNT\n");
8386 reg &= ~EECD_EE_REQ;
8387 CSR_WRITE(sc, WMREG_EECD, reg);
8388 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8389 wm_put_swfwhw_semaphore(sc);
8390 if (sc->sc_flags & WM_F_LOCK_SWFW)
8391 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8392 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8393 wm_put_swsm_semaphore(sc);
8394 return 1;
8395 }
8396 }
8397
8398 return 0;
8399 }
8400
8401 /*
8402 * wm_nvm_release:
8403 *
8404 * Release the EEPROM mutex.
8405 */
8406 static void
8407 wm_nvm_release(struct wm_softc *sc)
8408 {
8409 uint32_t reg;
8410
8411 	/* Nothing to release for flash-based NVM */
8412 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8413 return;
8414
8415 if (sc->sc_flags & WM_F_LOCK_EECD) {
8416 reg = CSR_READ(sc, WMREG_EECD);
8417 reg &= ~EECD_EE_REQ;
8418 CSR_WRITE(sc, WMREG_EECD, reg);
8419 }
8420
8421 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8422 wm_put_swfwhw_semaphore(sc);
8423 if (sc->sc_flags & WM_F_LOCK_SWFW)
8424 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8425 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8426 wm_put_swsm_semaphore(sc);
8427 }
8428
8429 static int
8430 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
8431 {
8432 uint32_t eecd = 0;
8433
8434 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
8435 || sc->sc_type == WM_T_82583) {
8436 eecd = CSR_READ(sc, WMREG_EECD);
8437
8438 /* Isolate bits 15 & 16 */
8439 eecd = ((eecd >> 15) & 0x03);
8440
8441 /* If both bits are set, device is Flash type */
8442 if (eecd == 0x03)
8443 return 0;
8444 }
8445 return 1;
8446 }
8447
8448 /*
8449 * wm_nvm_validate_checksum
8450 *
8451 * The checksum is defined as the sum of the first 64 (16 bit) words.
8452 */
8453 static int
8454 wm_nvm_validate_checksum(struct wm_softc *sc)
8455 {
8456 uint16_t checksum;
8457 uint16_t eeprom_data;
8458 #ifdef WM_DEBUG
8459 uint16_t csum_wordaddr, valid_checksum;
8460 #endif
8461 int i;
8462
8463 checksum = 0;
8464
8465 /* Don't check for I211 */
8466 if (sc->sc_type == WM_T_I211)
8467 return 0;
8468
8469 #ifdef WM_DEBUG
8470 if (sc->sc_type == WM_T_PCH_LPT) {
8471 csum_wordaddr = NVM_OFF_COMPAT;
8472 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
8473 } else {
8474 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
8475 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
8476 }
8477
8478 /* Dump EEPROM image for debug */
8479 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8480 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8481 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
8482 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
8483 if ((eeprom_data & valid_checksum) == 0) {
8484 DPRINTF(WM_DEBUG_NVM,
8485 ("%s: NVM need to be updated (%04x != %04x)\n",
8486 device_xname(sc->sc_dev), eeprom_data,
8487 valid_checksum));
8488 }
8489 }
8490
8491 if ((wm_debug & WM_DEBUG_NVM) != 0) {
8492 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
8493 for (i = 0; i < NVM_SIZE; i++) {
8494 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8495 printf("XX ");
8496 else
8497 printf("%04x ", eeprom_data);
8498 if (i % 8 == 7)
8499 printf("\n");
8500 }
8501 }
8502
8503 #endif /* WM_DEBUG */
8504
8505 for (i = 0; i < NVM_SIZE; i++) {
8506 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8507 return 1;
8508 checksum += eeprom_data;
8509 }
8510
8511 if (checksum != (uint16_t) NVM_CHECKSUM) {
8512 #ifdef WM_DEBUG
8513 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
8514 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
8515 #endif
8516 }
8517
8518 return 0;
8519 }
8520
8521 /*
8522 * wm_nvm_read:
8523 *
8524 * Read data from the serial EEPROM.
8525 */
8526 static int
8527 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8528 {
8529 int rv;
8530
8531 if (sc->sc_flags & WM_F_EEPROM_INVALID)
8532 return 1;
8533
8534 if (wm_nvm_acquire(sc))
8535 return 1;
8536
8537 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8538 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8539 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
8540 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
8541 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
8542 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
8543 else if (sc->sc_flags & WM_F_EEPROM_SPI)
8544 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
8545 else
8546 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
8547
8548 wm_nvm_release(sc);
8549 return rv;
8550 }
8551
8552 /*
8553 * Hardware semaphores.
8554  * Very complex...
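 * Depending on the chip, the lock is the SWSM SMBI/SWESMBI bits,
 * the SW_FW_SYNC register (arbitrated via SWSM), or the EXTCNFCTR
 * SWFLAG/MDIO ownership bits.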
8555 */
8556
8557 static int
8558 wm_get_swsm_semaphore(struct wm_softc *sc)
8559 {
8560 int32_t timeout;
8561 uint32_t swsm;
8562
8563 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8564 /* Get the SW semaphore. */
8565 timeout = sc->sc_nvm_wordsize + 1;
8566 while (timeout) {
8567 swsm = CSR_READ(sc, WMREG_SWSM);
8568
8569 if ((swsm & SWSM_SMBI) == 0)
8570 break;
8571
8572 delay(50);
8573 timeout--;
8574 }
8575
8576 if (timeout == 0) {
8577 aprint_error_dev(sc->sc_dev,
8578 "could not acquire SWSM SMBI\n");
8579 return 1;
8580 }
8581 }
8582
8583 /* Get the FW semaphore. */
8584 timeout = sc->sc_nvm_wordsize + 1;
8585 while (timeout) {
8586 swsm = CSR_READ(sc, WMREG_SWSM);
8587 swsm |= SWSM_SWESMBI;
8588 CSR_WRITE(sc, WMREG_SWSM, swsm);
8589 /* If we managed to set the bit we got the semaphore. */
8590 swsm = CSR_READ(sc, WMREG_SWSM);
8591 if (swsm & SWSM_SWESMBI)
8592 break;
8593
8594 delay(50);
8595 timeout--;
8596 }
8597
8598 if (timeout == 0) {
8599 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
8600 /* Release semaphores */
8601 wm_put_swsm_semaphore(sc);
8602 return 1;
8603 }
8604 return 0;
8605 }
8606
8607 static void
8608 wm_put_swsm_semaphore(struct wm_softc *sc)
8609 {
8610 uint32_t swsm;
8611
8612 swsm = CSR_READ(sc, WMREG_SWSM);
8613 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
8614 CSR_WRITE(sc, WMREG_SWSM, swsm);
8615 }
8616
8617 static int
8618 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8619 {
8620 uint32_t swfw_sync;
8621 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
8622 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
8623 	int timeout;
8624
8625 for (timeout = 0; timeout < 200; timeout++) {
8626 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8627 if (wm_get_swsm_semaphore(sc)) {
8628 aprint_error_dev(sc->sc_dev,
8629 "%s: failed to get semaphore\n",
8630 __func__);
8631 return 1;
8632 }
8633 }
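		/*
		 * The resource is free only when neither the software nor
		 * the firmware bit for this mask is set; claim it by
		 * setting the software bit.
		 */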
8634 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8635 if ((swfw_sync & (swmask | fwmask)) == 0) {
8636 swfw_sync |= swmask;
8637 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8638 if (sc->sc_flags & WM_F_LOCK_SWSM)
8639 wm_put_swsm_semaphore(sc);
8640 return 0;
8641 }
8642 if (sc->sc_flags & WM_F_LOCK_SWSM)
8643 wm_put_swsm_semaphore(sc);
8644 delay(5000);
8645 }
8646 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
8647 device_xname(sc->sc_dev), mask, swfw_sync);
8648 return 1;
8649 }
8650
8651 static void
8652 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8653 {
8654 uint32_t swfw_sync;
8655
8656 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8657 while (wm_get_swsm_semaphore(sc) != 0)
8658 continue;
8659 }
8660 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8661 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
8662 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8663 if (sc->sc_flags & WM_F_LOCK_SWSM)
8664 wm_put_swsm_semaphore(sc);
8665 }
8666
8667 static int
8668 wm_get_swfwhw_semaphore(struct wm_softc *sc)
8669 {
8670 uint32_t ext_ctrl;
8671 	int timeout;
8672
8673 for (timeout = 0; timeout < 200; timeout++) {
8674 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8675 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
8676 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8677
8678 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8679 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
8680 return 0;
8681 delay(5000);
8682 }
8683 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
8684 device_xname(sc->sc_dev), ext_ctrl);
8685 return 1;
8686 }
8687
8688 static void
8689 wm_put_swfwhw_semaphore(struct wm_softc *sc)
8690 {
8691 uint32_t ext_ctrl;
8692 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8693 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
8694 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8695 }
8696
8697 static int
8698 wm_get_hw_semaphore_82573(struct wm_softc *sc)
8699 {
8700 int i = 0;
8701 uint32_t reg;
8702
8703 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8704 do {
8705 CSR_WRITE(sc, WMREG_EXTCNFCTR,
8706 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
8707 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8708 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
8709 break;
8710 delay(2*1000);
8711 i++;
8712 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
8713
8714 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
8715 wm_put_hw_semaphore_82573(sc);
8716 log(LOG_ERR, "%s: Driver can't access the PHY\n",
8717 device_xname(sc->sc_dev));
8718 return -1;
8719 }
8720
8721 return 0;
8722 }
8723
8724 static void
8725 wm_put_hw_semaphore_82573(struct wm_softc *sc)
8726 {
8727 uint32_t reg;
8728
8729 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8730 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
8731 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8732 }
8733
8734 /*
8735 * Management mode and power management related subroutines.
8736 * BMC, AMT, suspend/resume and EEE.
8737 */
8738
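/*
 * wm_check_mng_mode:
 *
 *	Return non-zero if management mode (e.g. IAMT) is enabled;
 *	dispatches on chip family.
 */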
8739 static int
8740 wm_check_mng_mode(struct wm_softc *sc)
8741 {
8742 int rv;
8743
8744 switch (sc->sc_type) {
8745 case WM_T_ICH8:
8746 case WM_T_ICH9:
8747 case WM_T_ICH10:
8748 case WM_T_PCH:
8749 case WM_T_PCH2:
8750 case WM_T_PCH_LPT:
8751 rv = wm_check_mng_mode_ich8lan(sc);
8752 break;
8753 case WM_T_82574:
8754 case WM_T_82583:
8755 rv = wm_check_mng_mode_82574(sc);
8756 break;
8757 case WM_T_82571:
8758 case WM_T_82572:
8759 case WM_T_82573:
8760 case WM_T_80003:
8761 rv = wm_check_mng_mode_generic(sc);
8762 break;
8763 default:
8764 		/* nothing to do */
8765 rv = 0;
8766 break;
8767 }
8768
8769 return rv;
8770 }
8771
8772 static int
8773 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
8774 {
8775 uint32_t fwsm;
8776
8777 fwsm = CSR_READ(sc, WMREG_FWSM);
8778
8779 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
8780 return 1;
8781
8782 return 0;
8783 }
8784
8785 static int
8786 wm_check_mng_mode_82574(struct wm_softc *sc)
8787 {
8788 uint16_t data;
8789
8790 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
8791
8792 if ((data & NVM_CFG2_MNGM_MASK) != 0)
8793 return 1;
8794
8795 return 0;
8796 }
8797
8798 static int
8799 wm_check_mng_mode_generic(struct wm_softc *sc)
8800 {
8801 uint32_t fwsm;
8802
8803 fwsm = CSR_READ(sc, WMREG_FWSM);
8804
8805 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
8806 return 1;
8807
8808 return 0;
8809 }
8810
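/*
 * wm_enable_mng_pass_thru:
 *
 *	Return non-zero if management packets should be passed through to
 *	the host: firmware must be present, TCO receive enabled and the
 *	firmware in pass-through (not ASF-only) mode.
 */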
8811 static int
8812 wm_enable_mng_pass_thru(struct wm_softc *sc)
8813 {
8814 uint32_t manc, fwsm, factps;
8815
8816 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
8817 return 0;
8818
8819 manc = CSR_READ(sc, WMREG_MANC);
8820
8821 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
8822 device_xname(sc->sc_dev), manc));
8823 if ((manc & MANC_RECV_TCO_EN) == 0)
8824 return 0;
8825
8826 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
8827 fwsm = CSR_READ(sc, WMREG_FWSM);
8828 factps = CSR_READ(sc, WMREG_FACTPS);
8829 if (((factps & FACTPS_MNGCG) == 0)
8830 && ((fwsm & FWSM_MODE_MASK)
8831 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
8832 return 1;
8833 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
8834 uint16_t data;
8835
8836 factps = CSR_READ(sc, WMREG_FACTPS);
8837 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
8838 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
8839 device_xname(sc->sc_dev), factps, data));
8840 if (((factps & FACTPS_MNGCG) == 0)
8841 && ((data & NVM_CFG2_MNGM_MASK)
8842 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
8843 return 1;
8844 } else if (((manc & MANC_SMBUS_EN) != 0)
8845 && ((manc & MANC_ASF_EN) == 0))
8846 return 1;
8847
8848 return 0;
8849 }
8850
8851 static int
8852 wm_check_reset_block(struct wm_softc *sc)
8853 {
8854 uint32_t reg;
8855
8856 switch (sc->sc_type) {
8857 case WM_T_ICH8:
8858 case WM_T_ICH9:
8859 case WM_T_ICH10:
8860 case WM_T_PCH:
8861 case WM_T_PCH2:
8862 case WM_T_PCH_LPT:
8863 reg = CSR_READ(sc, WMREG_FWSM);
8864 		if ((reg & FWSM_RSPCIPHY) != 0)
8865 			return 0;
8866 		else
8867 			return -1;
8869 case WM_T_82571:
8870 case WM_T_82572:
8871 case WM_T_82573:
8872 case WM_T_82574:
8873 case WM_T_82583:
8874 case WM_T_80003:
8875 reg = CSR_READ(sc, WMREG_MANC);
8876 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
8877 			return -1;
8878 		else
8879 			return 0;
8881 default:
8882 /* no problem */
8883 break;
8884 }
8885
8886 return 0;
8887 }
8888
8889 static void
8890 wm_get_hw_control(struct wm_softc *sc)
8891 {
8892 uint32_t reg;
8893
8894 switch (sc->sc_type) {
8895 case WM_T_82573:
8896 reg = CSR_READ(sc, WMREG_SWSM);
8897 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
8898 break;
8899 case WM_T_82571:
8900 case WM_T_82572:
8901 case WM_T_82574:
8902 case WM_T_82583:
8903 case WM_T_80003:
8904 case WM_T_ICH8:
8905 case WM_T_ICH9:
8906 case WM_T_ICH10:
8907 case WM_T_PCH:
8908 case WM_T_PCH2:
8909 case WM_T_PCH_LPT:
8910 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8911 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
8912 break;
8913 default:
8914 break;
8915 }
8916 }
8917
8918 static void
8919 wm_release_hw_control(struct wm_softc *sc)
8920 {
8921 uint32_t reg;
8922
8923 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
8924 return;
8925
8926 if (sc->sc_type == WM_T_82573) {
8927 reg = CSR_READ(sc, WMREG_SWSM);
8928 		reg &= ~SWSM_DRV_LOAD;
8929 		CSR_WRITE(sc, WMREG_SWSM, reg);
8930 } else {
8931 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8932 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
8933 }
8934 }
8935
8936 static void
8937 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
8938 {
8939 uint32_t reg;
8940
8941 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8942
8943 if (on != 0)
8944 reg |= EXTCNFCTR_GATE_PHY_CFG;
8945 else
8946 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
8947
8948 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8949 }
8950
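/*
 * wm_smbustopci:
 *
 *	When no valid firmware is present, toggle the LANPHYPC value
 *	override so the PHY is switched from SMBus to PCIe mode.
 */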
8951 static void
8952 wm_smbustopci(struct wm_softc *sc)
8953 {
8954 uint32_t fwsm;
8955
8956 fwsm = CSR_READ(sc, WMREG_FWSM);
8957 if (((fwsm & FWSM_FW_VALID) == 0)
8958 && ((wm_check_reset_block(sc) == 0))) {
8959 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8960 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8961 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8962 CSR_WRITE_FLUSH(sc);
8963 delay(10);
8964 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8965 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8966 CSR_WRITE_FLUSH(sc);
8967 delay(50*1000);
8968
8969 /*
8970 * Gate automatic PHY configuration by hardware on non-managed
8971 * 82579
8972 */
8973 if (sc->sc_type == WM_T_PCH2)
8974 wm_gate_hw_phy_config_ich8lan(sc, 1);
8975 }
8976 }
8977
8978 static void
8979 wm_init_manageability(struct wm_softc *sc)
8980 {
8981
8982 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8983 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8984 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8985
8986 /* Disable hardware interception of ARP */
8987 manc &= ~MANC_ARP_EN;
8988
8989 /* Enable receiving management packets to the host */
8990 if (sc->sc_type >= WM_T_82571) {
8991 manc |= MANC_EN_MNG2HOST;
8992 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
8993 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
8995 		}
8996
8997 CSR_WRITE(sc, WMREG_MANC, manc);
8998 }
8999 }
9000
9001 static void
9002 wm_release_manageability(struct wm_softc *sc)
9003 {
9004
9005 if (sc->sc_flags & WM_F_HAS_MANAGE) {
9006 uint32_t manc = CSR_READ(sc, WMREG_MANC);
9007
9008 manc |= MANC_ARP_EN;
9009 if (sc->sc_type >= WM_T_82571)
9010 manc &= ~MANC_EN_MNG2HOST;
9011
9012 CSR_WRITE(sc, WMREG_MANC, manc);
9013 }
9014 }
9015
9016 static void
9017 wm_get_wakeup(struct wm_softc *sc)
9018 {
9019
9020 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
9021 switch (sc->sc_type) {
9022 case WM_T_82573:
9023 case WM_T_82583:
9024 sc->sc_flags |= WM_F_HAS_AMT;
9025 /* FALLTHROUGH */
9026 case WM_T_80003:
9027 case WM_T_82541:
9028 case WM_T_82547:
9029 case WM_T_82571:
9030 case WM_T_82572:
9031 case WM_T_82574:
9032 case WM_T_82575:
9033 case WM_T_82576:
9034 case WM_T_82580:
9035 case WM_T_I350:
9036 case WM_T_I354:
9037 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
9038 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
9039 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9040 break;
9041 case WM_T_ICH8:
9042 case WM_T_ICH9:
9043 case WM_T_ICH10:
9044 case WM_T_PCH:
9045 case WM_T_PCH2:
9046 case WM_T_PCH_LPT:
9047 sc->sc_flags |= WM_F_HAS_AMT;
9048 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9049 break;
9050 default:
9051 break;
9052 }
9053
9054 /* 1: HAS_MANAGE */
9055 if (wm_enable_mng_pass_thru(sc) != 0)
9056 sc->sc_flags |= WM_F_HAS_MANAGE;
9057
9058 #ifdef WM_DEBUG
9059 printf("\n");
9060 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
9061 printf("HAS_AMT,");
9062 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
9063 printf("ARC_SUBSYS_VALID,");
9064 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
9065 printf("ASF_FIRMWARE_PRES,");
9066 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
9067 printf("HAS_MANAGE,");
9068 printf("\n");
9069 #endif
9070 	/*
9071 	 * Note that the WOL flags are set after the EEPROM stuff above has
9072 	 * been reset.
9073 	 */
9074 }
9075
9076 #ifdef WM_WOL
9077 /* WOL in the newer chipset interfaces (pchlan) */
9078 static void
9079 wm_enable_phy_wakeup(struct wm_softc *sc)
9080 {
9081 #if 0
9082 uint16_t preg;
9083
9084 /* Copy MAC RARs to PHY RARs */
9085
9086 /* Copy MAC MTA to PHY MTA */
9087
9088 /* Configure PHY Rx Control register */
9089
9090 /* Enable PHY wakeup in MAC register */
9091
9092 /* Configure and enable PHY wakeup in PHY registers */
9093
9094 /* Activate PHY wakeup */
9095
9096 /* XXX */
9097 #endif
9098 }
9099
9100 /* Power down workaround on D3 */
9101 static void
9102 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
9103 {
9104 uint32_t reg;
9105 int i;
9106
9107 for (i = 0; i < 2; i++) {
9108 /* Disable link */
9109 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9110 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9111 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9112
9113 /*
9114 * Call gig speed drop workaround on Gig disable before
9115 * accessing any PHY registers
9116 */
9117 if (sc->sc_type == WM_T_ICH8)
9118 wm_gig_downshift_workaround_ich8lan(sc);
9119
9120 /* Write VR power-down enable */
9121 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9122 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9123 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
9124 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
9125
9126 /* Read it back and test */
9127 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9128 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9129 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
9130 break;
9131
9132 /* Issue PHY reset and repeat at most one more time */
9133 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9134 }
9135 }
9136
9137 static void
9138 wm_enable_wakeup(struct wm_softc *sc)
9139 {
9140 uint32_t reg, pmreg;
9141 pcireg_t pmode;
9142
9143 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
9144 &pmreg, NULL) == 0)
9145 return;
9146
9147 /* Advertise the wakeup capability */
9148 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
9149 | CTRL_SWDPIN(3));
9150 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
9151
9152 /* ICH workaround */
9153 switch (sc->sc_type) {
9154 case WM_T_ICH8:
9155 case WM_T_ICH9:
9156 case WM_T_ICH10:
9157 case WM_T_PCH:
9158 case WM_T_PCH2:
9159 case WM_T_PCH_LPT:
9160 /* Disable gig during WOL */
9161 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9162 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
9163 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9164 if (sc->sc_type == WM_T_PCH)
9165 wm_gmii_reset(sc);
9166
9167 /* Power down workaround */
9168 if (sc->sc_phytype == WMPHY_82577) {
9169 struct mii_softc *child;
9170
9171 /* Assume that the PHY is copper */
9172 child = LIST_FIRST(&sc->sc_mii.mii_phys);
9173 if (child->mii_mpd_rev <= 2)
9174 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
9175 (768 << 5) | 25, 0x0444); /* magic num */
9176 }
9177 break;
9178 default:
9179 break;
9180 }
9181
9182 /* Keep the laser running on fiber adapters */
9183 	if (((sc->sc_mediatype & WMP_F_FIBER) != 0)
9184 	    || ((sc->sc_mediatype & WMP_F_SERDES) != 0)) {
9185 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9186 reg |= CTRL_EXT_SWDPIN(3);
9187 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9188 }
9189
9190 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
9191 #if 0 /* for the multicast packet */
9192 reg |= WUFC_MC;
9193 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
9194 #endif
9195
9196 if (sc->sc_type == WM_T_PCH) {
9197 wm_enable_phy_wakeup(sc);
9198 } else {
9199 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
9200 CSR_WRITE(sc, WMREG_WUFC, reg);
9201 }
9202
9203 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9204 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9205 || (sc->sc_type == WM_T_PCH2))
9206 && (sc->sc_phytype == WMPHY_IGP_3))
9207 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
9208
9209 /* Request PME */
9210 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
9211 #if 0
9212 /* Disable WOL */
9213 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
9214 #else
9215 /* For WOL */
9216 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
9217 #endif
9218 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
9219 }
9220 #endif /* WM_WOL */
9221
9222 /* EEE */
9223
9224 static void
9225 wm_set_eee_i350(struct wm_softc *sc)
9226 {
9227 uint32_t ipcnfg, eeer;
9228
9229 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
9230 eeer = CSR_READ(sc, WMREG_EEER);
9231
9232 if ((sc->sc_flags & WM_F_EEE) != 0) {
9233 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9234 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
9235 | EEER_LPI_FC);
9236 } else {
9237 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9238 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
9239 | EEER_LPI_FC);
9240 }
9241
9242 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
9243 CSR_WRITE(sc, WMREG_EEER, eeer);
9244 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
9245 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
9246 }
9247
9248 /*
9249 * Workarounds (mainly PHY related).
9250  * Basically, workarounds for the PHYs themselves live in the PHY drivers.
9251 */
9252
9253 /* Work-around for 82566 Kumeran PCS lock loss */
9254 static void
9255 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
9256 {
9257 int miistatus, active, i;
9258 int reg;
9259
9260 miistatus = sc->sc_mii.mii_media_status;
9261
9262 	/* If the link is not up, do nothing */
9263 	if ((miistatus & IFM_ACTIVE) == 0)
9264 		return;
9265
9266 active = sc->sc_mii.mii_media_active;
9267
9268 /* Nothing to do if the link is other than 1Gbps */
9269 if (IFM_SUBTYPE(active) != IFM_1000_T)
9270 return;
9271
9272 for (i = 0; i < 10; i++) {
9273 /* read twice */
9274 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9275 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9276 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
9277 			goto out; /* GOOD! */
9278
9279 /* Reset the PHY */
9280 wm_gmii_reset(sc);
9281 delay(5*1000);
9282 }
9283
9284 /* Disable GigE link negotiation */
9285 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9286 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9287 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9288
9289 /*
9290 * Call gig speed drop workaround on Gig disable before accessing
9291 * any PHY registers.
9292 */
9293 wm_gig_downshift_workaround_ich8lan(sc);
9294
9295 out:
9296 return;
9297 }
9298
9299 /* WOL from S5 stops working */
9300 static void
9301 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
9302 {
9303 uint16_t kmrn_reg;
9304
9305 /* Only for igp3 */
9306 if (sc->sc_phytype == WMPHY_IGP_3) {
9307 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
9308 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
9309 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9310 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
9311 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9312 }
9313 }
9314
9315 /*
9316 * Workaround for pch's PHYs
9317 * XXX should be moved to new PHY driver?
9318 */
9319 static void
9320 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
9321 {
9322 if (sc->sc_phytype == WMPHY_82577)
9323 wm_set_mdio_slow_mode_hv(sc);
9324
9325 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
9326
9327 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
9328
9329 /* 82578 */
9330 if (sc->sc_phytype == WMPHY_82578) {
9331 /* PCH rev. < 3 */
9332 if (sc->sc_rev < 3) {
9333 /* XXX 6 bit shift? Why? Is it page2? */
9334 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
9335 0x66c0);
9336 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
9337 0xffff);
9338 }
9339
9340 /* XXX phy rev. < 2 */
9341 }
9342
9343 /* Select page 0 */
9344
9345 /* XXX acquire semaphore */
9346 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
9347 /* XXX release semaphore */
9348
9349 /*
9350 * Configure the K1 Si workaround during phy reset assuming there is
9351 * link so that it disables K1 if link is in 1Gbps.
9352 */
9353 wm_k1_gig_workaround_hv(sc, 1);
9354 }
9355
9356 static void
9357 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
9358 {
9359
9360 wm_set_mdio_slow_mode_hv(sc);
9361 }
9362
9363 static void
9364 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
9365 {
9366 int k1_enable = sc->sc_nvm_k1_enabled;
9367
9368 /* XXX acquire semaphore */
9369
9370 if (link) {
9371 k1_enable = 0;
9372
9373 /* Link stall fix for link up */
9374 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
9375 } else {
9376 /* Link stall fix for link down */
9377 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
9378 }
9379
9380 wm_configure_k1_ich8lan(sc, k1_enable);
9381
9382 /* XXX release semaphore */
9383 }
9384
9385 static void
9386 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
9387 {
9388 uint32_t reg;
9389
9390 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
9391 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
9392 reg | HV_KMRN_MDIO_SLOW);
9393 }
9394
9395 static void
9396 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
9397 {
9398 uint32_t ctrl, ctrl_ext, tmp;
9399 uint16_t kmrn_reg;
9400
9401 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
9402
9403 if (k1_enable)
9404 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
9405 else
9406 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
9407
9408 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
9409
9410 delay(20);
9411
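	/*
	 * Briefly force the MAC speed configuration (with the speed-bypass
	 * bit set), presumably so the new K1 setting latches, then restore
	 * the original CTRL and CTRL_EXT values.
	 */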
9412 ctrl = CSR_READ(sc, WMREG_CTRL);
9413 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9414
9415 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
9416 tmp |= CTRL_FRCSPD;
9417
9418 CSR_WRITE(sc, WMREG_CTRL, tmp);
9419 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
9420 CSR_WRITE_FLUSH(sc);
9421 delay(20);
9422
9423 CSR_WRITE(sc, WMREG_CTRL, ctrl);
9424 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9425 CSR_WRITE_FLUSH(sc);
9426 delay(20);
9427 }
9428
9429 /* Special case - the 82575 needs manual init ... */
9430 static void
9431 wm_reset_init_script_82575(struct wm_softc *sc)
9432 {
9433 	/*
9434 	 * Remark: this is untested code - we have no board without EEPROM.
9435 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
9436 	 */
9437
9438 /* SerDes configuration via SERDESCTRL */
9439 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
9440 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
9441 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
9442 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
9443
9444 /* CCM configuration via CCMCTL register */
9445 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
9446 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
9447
9448 /* PCIe lanes configuration */
9449 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
9450 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
9451 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
9452 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
9453
9454 /* PCIe PLL Configuration */
9455 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
9456 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
9457 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
9458 }
9459