/*	$NetBSD: if_wm.c,v 1.303 2014/10/07 08:45:02 ozaki-r Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- EEE (Energy Efficient Ethernet)
 *	- MSI/MSI-X
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.303 2014/10/07 08:45:02 ozaki-r Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define	WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */
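
/*
 * Illustrative note (added commentary, not original driver text): since
 * the ring sizes above are powers of two, the WM_NEXT*() macros wrap an
 * index with a bitwise AND instead of a modulus.  For example, with
 * WM_NTXDESC(sc) == 4096 (mask 4095):
 *
 *	WM_NEXTTX(sc, 4094) == 4095
 *	WM_NEXTTX(sc, 4095) == 0	(4096 & 4095 == 0)
 */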

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
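
/*
 * Illustrative note (added commentary, not original driver text): these
 * macros turn a descriptor index into a byte offset within the control
 * data clump; the offset is later added to the clump's DMA base address
 * by WM_CDTXADDR()/WM_CDRXADDR().  E.g. WM_CDRXOFF(3) is
 * offsetof(struct wm_control_data_82544, wcd_rxdescs[3]), i.e.
 * 3 * sizeof(wiseman_rxdesc_t) bytes from the start of the clump.
 */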

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t	swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
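
/*
 * Illustrative note (an inference from the shape of this table, not a
 * statement by the original author): on multi-port chips, the per-PHY
 * software/firmware semaphore bit is selected by indexing this table
 * with the port's function ID, as in swfwphysem[sc->sc_funcid].
 */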

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segments */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall; /* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define	WM_TX_LOCK(_sc)		if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define	WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define	WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define	WM_RX_LOCK(_sc)		if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define	WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define	WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define	WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define	WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define	WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
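
/*
 * Illustrative usage sketch (added commentary, not original driver
 * text): the lock macros degrade to no-ops when the mutexes were never
 * allocated (the non-MPSAFE case), so callers can use one pattern
 * unconditionally, e.g.:
 *
 *	WM_TX_LOCK(sc);
 *	if (!sc->sc_stopping)
 *		... modify Tx state ...
 *	WM_TX_UNLOCK(sc);
 */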

#ifdef WM_MPSAFE
#define	CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define	CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
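
/*
 * Illustrative note (added commentary, not original driver text):
 * sc_rxtailp always points at the pointer cell where the next mbuf
 * should be stored (first &sc_rxhead, then &last->m_next), so
 * WM_RXCHAIN_LINK() appends in O(1).  After RESET, LINK(m1), LINK(m2):
 *
 *	sc_rxhead == m1, m1->m_next == m2,
 *	sc_rxtail == m2, sc_rxtailp == &m2->m_next
 */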

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
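
/*
 * Illustrative note (added commentary, not original driver text): the
 * hardware takes descriptor base addresses as separate low/high 32-bit
 * register writes, so a 64-bit bus_addr_t is split in two.  E.g. for
 * address 0x0000000123456780, _LO yields 0x23456780 and _HI yields 0x1;
 * with a 32-bit bus_addr_t the high half is always 0.
 */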

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
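
/*
 * Illustrative worked example (added commentary, not original driver
 * text): with WM_NTXDESC(sc) == 4096, WM_CDTXSYNC(sc, 4094, 4, ops)
 * issues two bus_dmamap_sync() calls, one for descriptors 4094-4095 at
 * the end of the ring and one for descriptors 0-1 at the start, because
 * the sync API takes a contiguous byte range and the ring wraps.
 */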

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
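
/*
 * Illustrative arithmetic (added commentary, not original driver text):
 * an Ethernet header is 14 bytes, so with a 2-byte align_tweak the IP
 * header starts at buffer offset 2 + 14 = 16, a 4-byte boundary.  With
 * align_tweak == 0 it would start at offset 14, which is misaligned and
 * slow (or faulting) on strict-alignment CPUs reading 32-bit fields.
 */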

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_mediainit(struct wm_softc *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, the PHY workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		0x00
#define	WMP_F_FIBER		0x01
#define	WMP_F_COPPER		0x02
#define	WMP_F_SERDES		0x03 /* Internal SERDES */
#define	WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Connection",
	  WM_T_I354,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
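
/*
 * Illustrative note (an inference from the two functions above, not a
 * statement by the original author): the I/O BAR exposes an indirect
 * window of two 32-bit registers; offset 0 latches the target CSR
 * offset and offset 4 transfers the data, so every indirect register
 * access costs two bus_space operations.
 */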

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
			 * That's not a problem, because those newer chips
			 * don't have the bug this mapping works around.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
#ifdef WM_MPSAFE
	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
#endif
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
1504 */
1505 sc->sc_flags |= WM_F_CSA;
1506 sc->sc_bus_speed = 66;
1507 aprint_verbose_dev(sc->sc_dev,
1508 "Communication Streaming Architecture\n");
1509 if (sc->sc_type == WM_T_82547) {
1510 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1511 callout_setfunc(&sc->sc_txfifo_ch,
1512 wm_82547_txfifo_stall, sc);
1513 aprint_verbose_dev(sc->sc_dev,
1514 "using 82547 Tx FIFO stall work-around\n");
1515 }
1516 } else if (sc->sc_type >= WM_T_82571) {
1517 sc->sc_flags |= WM_F_PCIE;
1518 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1519 && (sc->sc_type != WM_T_ICH10)
1520 && (sc->sc_type != WM_T_PCH)
1521 && (sc->sc_type != WM_T_PCH2)
1522 && (sc->sc_type != WM_T_PCH_LPT)) {
1523 /* ICH* and PCH* have no PCIe capability registers */
1524 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1525 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1526 NULL) == 0)
1527 aprint_error_dev(sc->sc_dev,
1528 "unable to find PCIe capability\n");
1529 }
1530 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1531 } else {
1532 reg = CSR_READ(sc, WMREG_STATUS);
1533 if (reg & STATUS_BUS64)
1534 sc->sc_flags |= WM_F_BUS64;
1535 if ((reg & STATUS_PCIX_MODE) != 0) {
1536 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1537
1538 sc->sc_flags |= WM_F_PCIX;
1539 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1540 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1541 aprint_error_dev(sc->sc_dev,
1542 "unable to find PCIX capability\n");
1543 else if (sc->sc_type != WM_T_82545_3 &&
1544 sc->sc_type != WM_T_82546_3) {
1545 /*
1546 * Work around a problem caused by the BIOS
1547 * setting the max memory read byte count
1548 * incorrectly.
1549 */
1550 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1551 sc->sc_pcixe_capoff + PCIX_CMD);
1552 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1553 sc->sc_pcixe_capoff + PCIX_STATUS);
1554
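				/*
				 * The MMRBC fields encode the max memory
				 * read byte count as 512 << n, so n = 0..3
				 * selects 512, 1024, 2048 or 4096 bytes.
				 */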
1555 bytecnt =
1556 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1557 PCIX_CMD_BYTECNT_SHIFT;
1558 maxb =
1559 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1560 PCIX_STATUS_MAXB_SHIFT;
1561 if (bytecnt > maxb) {
1562 aprint_verbose_dev(sc->sc_dev,
1563 "resetting PCI-X MMRBC: %d -> %d\n",
1564 512 << bytecnt, 512 << maxb);
1565 pcix_cmd = (pcix_cmd &
1566 ~PCIX_CMD_BYTECNT_MASK) |
1567 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1568 pci_conf_write(pa->pa_pc, pa->pa_tag,
1569 sc->sc_pcixe_capoff + PCIX_CMD,
1570 pcix_cmd);
1571 }
1572 }
1573 }
1574 /*
1575 * The quad port adapter is special; it has a PCIX-PCIX
1576 * bridge on the board, and can run the secondary bus at
1577 * a higher speed.
1578 */
1579 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1580 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1581 : 66;
1582 } else if (sc->sc_flags & WM_F_PCIX) {
1583 switch (reg & STATUS_PCIXSPD_MASK) {
1584 case STATUS_PCIXSPD_50_66:
1585 sc->sc_bus_speed = 66;
1586 break;
1587 case STATUS_PCIXSPD_66_100:
1588 sc->sc_bus_speed = 100;
1589 break;
1590 case STATUS_PCIXSPD_100_133:
1591 sc->sc_bus_speed = 133;
1592 break;
1593 default:
1594 aprint_error_dev(sc->sc_dev,
1595 "unknown PCIXSPD %d; assuming 66MHz\n",
1596 reg & STATUS_PCIXSPD_MASK);
1597 sc->sc_bus_speed = 66;
1598 break;
1599 }
1600 } else
1601 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1602 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1603 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1604 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1605 }
1606
1607 /*
1608 * Allocate the control data structures, and create and load the
1609 * DMA map for it.
1610 *
1611 * NOTE: All Tx descriptors must be in the same 4G segment of
1612 * memory. So must Rx descriptors. We simplify by allocating
1613 * both sets within the same 4G segment.
1614 */
1615 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1616 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1617 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1618 sizeof(struct wm_control_data_82542) :
1619 sizeof(struct wm_control_data_82544);
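	/*
	 * The 4GB boundary passed to bus_dmamem_alloc() below guarantees
	 * the allocation never crosses a 4GB line, satisfying the note
	 * above.
	 */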
1620 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1621 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1622 &sc->sc_cd_rseg, 0)) != 0) {
1623 aprint_error_dev(sc->sc_dev,
1624 "unable to allocate control data, error = %d\n",
1625 error);
1626 goto fail_0;
1627 }
1628
1629 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1630 sc->sc_cd_rseg, sc->sc_cd_size,
1631 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1632 aprint_error_dev(sc->sc_dev,
1633 "unable to map control data, error = %d\n", error);
1634 goto fail_1;
1635 }
1636
1637 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1638 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1639 aprint_error_dev(sc->sc_dev,
1640 "unable to create control data DMA map, error = %d\n",
1641 error);
1642 goto fail_2;
1643 }
1644
1645 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1646 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1647 aprint_error_dev(sc->sc_dev,
1648 "unable to load control data DMA map, error = %d\n",
1649 error);
1650 goto fail_3;
1651 }
1652
1653 /* Create the transmit buffer DMA maps. */
1654 WM_TXQUEUELEN(sc) =
1655 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1656 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1657 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1658 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1659 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1660 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1661 aprint_error_dev(sc->sc_dev,
1662 "unable to create Tx DMA map %d, error = %d\n",
1663 i, error);
1664 goto fail_4;
1665 }
1666 }
1667
1668 /* Create the receive buffer DMA maps. */
1669 for (i = 0; i < WM_NRXDESC; i++) {
1670 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1671 MCLBYTES, 0, 0,
1672 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1673 aprint_error_dev(sc->sc_dev,
1674 "unable to create Rx DMA map %d error = %d\n",
1675 i, error);
1676 goto fail_5;
1677 }
1678 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1679 }
1680
1681 /* clear interesting stat counters */
1682 CSR_READ(sc, WMREG_COLC);
1683 CSR_READ(sc, WMREG_RXERRC);
1684
1685	/* Switch PHY control from SMBus to PCIe */
1686 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1687 || (sc->sc_type == WM_T_PCH_LPT))
1688 wm_smbustopci(sc);
1689
1690 /* Reset the chip to a known state. */
1691 wm_reset(sc);
1692
1693 /* Get some information about the EEPROM. */
1694 switch (sc->sc_type) {
1695 case WM_T_82542_2_0:
1696 case WM_T_82542_2_1:
1697 case WM_T_82543:
1698 case WM_T_82544:
1699 /* Microwire */
1700 sc->sc_nvm_wordsize = 64;
1701 sc->sc_nvm_addrbits = 6;
1702 break;
1703 case WM_T_82540:
1704 case WM_T_82545:
1705 case WM_T_82545_3:
1706 case WM_T_82546:
1707 case WM_T_82546_3:
1708 /* Microwire */
1709 reg = CSR_READ(sc, WMREG_EECD);
1710 if (reg & EECD_EE_SIZE) {
1711 sc->sc_nvm_wordsize = 256;
1712 sc->sc_nvm_addrbits = 8;
1713 } else {
1714 sc->sc_nvm_wordsize = 64;
1715 sc->sc_nvm_addrbits = 6;
1716 }
1717 sc->sc_flags |= WM_F_LOCK_EECD;
1718 break;
1719 case WM_T_82541:
1720 case WM_T_82541_2:
1721 case WM_T_82547:
1722 case WM_T_82547_2:
1723 reg = CSR_READ(sc, WMREG_EECD);
1724 if (reg & EECD_EE_TYPE) {
1725 /* SPI */
1726 sc->sc_flags |= WM_F_EEPROM_SPI;
1727 wm_nvm_set_addrbits_size_eecd(sc);
1728 } else {
1729 /* Microwire */
1730 if ((reg & EECD_EE_ABITS) != 0) {
1731 sc->sc_nvm_wordsize = 256;
1732 sc->sc_nvm_addrbits = 8;
1733 } else {
1734 sc->sc_nvm_wordsize = 64;
1735 sc->sc_nvm_addrbits = 6;
1736 }
1737 }
1738 sc->sc_flags |= WM_F_LOCK_EECD;
1739 break;
1740 case WM_T_82571:
1741 case WM_T_82572:
1742 /* SPI */
1743 sc->sc_flags |= WM_F_EEPROM_SPI;
1744 wm_nvm_set_addrbits_size_eecd(sc);
1745 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1746 break;
1747 case WM_T_82573:
1748 sc->sc_flags |= WM_F_LOCK_SWSM;
1749 /* FALLTHROUGH */
1750 case WM_T_82574:
1751 case WM_T_82583:
1752 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1753 sc->sc_flags |= WM_F_EEPROM_FLASH;
1754 sc->sc_nvm_wordsize = 2048;
1755 } else {
1756 /* SPI */
1757 sc->sc_flags |= WM_F_EEPROM_SPI;
1758 wm_nvm_set_addrbits_size_eecd(sc);
1759 }
1760 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1761 break;
1762 case WM_T_82575:
1763 case WM_T_82576:
1764 case WM_T_82580:
1765 case WM_T_I350:
1766 case WM_T_I354:
1767 case WM_T_80003:
1768 /* SPI */
1769 sc->sc_flags |= WM_F_EEPROM_SPI;
1770 wm_nvm_set_addrbits_size_eecd(sc);
1771 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1772 | WM_F_LOCK_SWSM;
1773 break;
1774 case WM_T_ICH8:
1775 case WM_T_ICH9:
1776 case WM_T_ICH10:
1777 case WM_T_PCH:
1778 case WM_T_PCH2:
1779 case WM_T_PCH_LPT:
1780 /* FLASH */
1781 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1782 sc->sc_nvm_wordsize = 2048;
1783 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1784 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1785 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1786 aprint_error_dev(sc->sc_dev,
1787 "can't map FLASH registers\n");
1788 goto fail_5;
1789 }
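		/*
		 * GFPREG describes the gigabit flash region as first and
		 * last 4KB sectors.  Below, the region size is converted
		 * to bytes, halved for the two NVM banks, and expressed
		 * in 16-bit words.
		 */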
1790 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1791 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1792 ICH_FLASH_SECTOR_SIZE;
1793 sc->sc_ich8_flash_bank_size =
1794 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1795 sc->sc_ich8_flash_bank_size -=
1796 (reg & ICH_GFPREG_BASE_MASK);
1797 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1798 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1799 break;
1800 case WM_T_I210:
1801 case WM_T_I211:
1802 wm_nvm_set_addrbits_size_eecd(sc);
1803 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1804 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1805 break;
1806 default:
1807 break;
1808 }
1809
1810 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1811 switch (sc->sc_type) {
1812 case WM_T_82571:
1813 case WM_T_82572:
1814 reg = CSR_READ(sc, WMREG_SWSM2);
1815		if ((reg & SWSM2_LOCK) == 0) {
1816			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1817			force_clear_smbi = true;
1818		} else
1819			force_clear_smbi = false;
1820 break;
1821 case WM_T_82573:
1822 case WM_T_82574:
1823 case WM_T_82583:
1824 force_clear_smbi = true;
1825 break;
1826 default:
1827 force_clear_smbi = false;
1828 break;
1829 }
1830 if (force_clear_smbi) {
1831 reg = CSR_READ(sc, WMREG_SWSM);
1832 if ((reg & SWSM_SMBI) != 0)
1833 aprint_error_dev(sc->sc_dev,
1834 "Please update the Bootagent\n");
1835 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1836 }
1837
1838	/*
1839	 * Defer printing the EEPROM type until after verifying the checksum.
1840	 * This allows the EEPROM type to be printed correctly in the case
1841	 * that no EEPROM is attached.
1842	 */
1843 /*
1844 * Validate the EEPROM checksum. If the checksum fails, flag
1845 * this for later, so we can fail future reads from the EEPROM.
1846 */
1847 if (wm_nvm_validate_checksum(sc)) {
1848		/*
1849		 * Check a second time because some PCIe parts fail the
1850		 * first check due to the link being in a sleep state.
1851		 */
1852 if (wm_nvm_validate_checksum(sc))
1853 sc->sc_flags |= WM_F_EEPROM_INVALID;
1854 }
1855
1856 /* Set device properties (macflags) */
1857 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1858
1859 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1860 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1861 else {
1862 aprint_verbose_dev(sc->sc_dev, "%u words ",
1863 sc->sc_nvm_wordsize);
1864 if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1865 aprint_verbose("FLASH(HW)\n");
1866 } else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1867 aprint_verbose("FLASH\n");
1868 } else {
1869 if (sc->sc_flags & WM_F_EEPROM_SPI)
1870 eetype = "SPI";
1871 else
1872 eetype = "MicroWire";
1873 aprint_verbose("(%d address bits) %s EEPROM\n",
1874 sc->sc_nvm_addrbits, eetype);
1875 }
1876 }
1877
1878 switch (sc->sc_type) {
1879 case WM_T_82571:
1880 case WM_T_82572:
1881 case WM_T_82573:
1882 case WM_T_82574:
1883 case WM_T_82583:
1884 case WM_T_80003:
1885 case WM_T_ICH8:
1886 case WM_T_ICH9:
1887 case WM_T_ICH10:
1888 case WM_T_PCH:
1889 case WM_T_PCH2:
1890 case WM_T_PCH_LPT:
1891 if (wm_check_mng_mode(sc) != 0)
1892 wm_get_hw_control(sc);
1893 break;
1894 default:
1895 break;
1896 }
1897 wm_get_wakeup(sc);
1898	/*
1899	 * Read the Ethernet address from device properties if present,
1900	 * otherwise from the EEPROM.
1901	 */
1902 ea = prop_dictionary_get(dict, "mac-address");
1903 if (ea != NULL) {
1904 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1905 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1906 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1907 } else {
1908 if (wm_read_mac_addr(sc, enaddr) != 0) {
1909 aprint_error_dev(sc->sc_dev,
1910 "unable to read Ethernet address\n");
1911 goto fail_5;
1912 }
1913 }
1914
1915 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1916 ether_sprintf(enaddr));
1917
1918 /*
1919 * Read the config info from the EEPROM, and set up various
1920 * bits in the control registers based on their contents.
1921 */
1922 pn = prop_dictionary_get(dict, "i82543-cfg1");
1923 if (pn != NULL) {
1924 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1925 cfg1 = (uint16_t) prop_number_integer_value(pn);
1926 } else {
1927 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
1928 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1929 goto fail_5;
1930 }
1931 }
1932
1933 pn = prop_dictionary_get(dict, "i82543-cfg2");
1934 if (pn != NULL) {
1935 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1936 cfg2 = (uint16_t) prop_number_integer_value(pn);
1937 } else {
1938 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
1939 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1940 goto fail_5;
1941 }
1942 }
1943
1944 /* check for WM_F_WOL */
1945 switch (sc->sc_type) {
1946 case WM_T_82542_2_0:
1947 case WM_T_82542_2_1:
1948 case WM_T_82543:
1949 /* dummy? */
1950 eeprom_data = 0;
1951 apme_mask = NVM_CFG3_APME;
1952 break;
1953 case WM_T_82544:
1954 apme_mask = NVM_CFG2_82544_APM_EN;
1955 eeprom_data = cfg2;
1956 break;
1957 case WM_T_82546:
1958 case WM_T_82546_3:
1959 case WM_T_82571:
1960 case WM_T_82572:
1961 case WM_T_82573:
1962 case WM_T_82574:
1963 case WM_T_82583:
1964 case WM_T_80003:
1965 default:
1966 apme_mask = NVM_CFG3_APME;
1967 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
1968 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
1969 break;
1970 case WM_T_82575:
1971 case WM_T_82576:
1972 case WM_T_82580:
1973 case WM_T_I350:
1974 case WM_T_I354: /* XXX ok? */
1975 case WM_T_ICH8:
1976 case WM_T_ICH9:
1977 case WM_T_ICH10:
1978 case WM_T_PCH:
1979 case WM_T_PCH2:
1980 case WM_T_PCH_LPT:
1981 /* XXX The funcid should be checked on some devices */
1982 apme_mask = WUC_APME;
1983 eeprom_data = CSR_READ(sc, WMREG_WUC);
1984 break;
1985 }
1986
1987	/* Check the WM_F_WOL flag after the EEPROM settings have been read */
1988 if ((eeprom_data & apme_mask) != 0)
1989 sc->sc_flags |= WM_F_WOL;
1990 #ifdef WM_DEBUG
1991 if ((sc->sc_flags & WM_F_WOL) != 0)
1992 printf("WOL\n");
1993 #endif
1994
1995	/*
1996	 * XXX need special handling for some multiple port cards
1997	 * to disable a particular port.
1998	 */
1999
2000 if (sc->sc_type >= WM_T_82544) {
2001 pn = prop_dictionary_get(dict, "i82543-swdpin");
2002 if (pn != NULL) {
2003 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2004 swdpin = (uint16_t) prop_number_integer_value(pn);
2005 } else {
2006 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2007 aprint_error_dev(sc->sc_dev,
2008 "unable to read SWDPIN\n");
2009 goto fail_5;
2010 }
2011 }
2012 }
2013
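	/*
	 * Seed the CTRL register image with the invert-loss-of-signal
	 * (ILOS) and software-definable pin (SWDPIO/SWDPIN) settings
	 * from the NVM.
	 */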
2014 if (cfg1 & NVM_CFG1_ILOS)
2015 sc->sc_ctrl |= CTRL_ILOS;
2016 if (sc->sc_type >= WM_T_82544) {
2017 sc->sc_ctrl |=
2018 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2019 CTRL_SWDPIO_SHIFT;
2020 sc->sc_ctrl |=
2021 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2022 CTRL_SWDPINS_SHIFT;
2023 } else {
2024 sc->sc_ctrl |=
2025 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2026 CTRL_SWDPIO_SHIFT;
2027 }
2028
2029 #if 0
2030 if (sc->sc_type >= WM_T_82544) {
2031 if (cfg1 & NVM_CFG1_IPS0)
2032 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2033 if (cfg1 & NVM_CFG1_IPS1)
2034 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2035 sc->sc_ctrl_ext |=
2036 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2037 CTRL_EXT_SWDPIO_SHIFT;
2038 sc->sc_ctrl_ext |=
2039 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2040 CTRL_EXT_SWDPINS_SHIFT;
2041 } else {
2042 sc->sc_ctrl_ext |=
2043 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2044 CTRL_EXT_SWDPIO_SHIFT;
2045 }
2046 #endif
2047
2048 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2049 #if 0
2050 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2051 #endif
2052
2053 /*
2054 * Set up some register offsets that are different between
2055 * the i82542 and the i82543 and later chips.
2056 */
2057 if (sc->sc_type < WM_T_82543) {
2058 sc->sc_rdt_reg = WMREG_OLD_RDT0;
2059 sc->sc_tdt_reg = WMREG_OLD_TDT;
2060 } else {
2061 sc->sc_rdt_reg = WMREG_RDT;
2062 sc->sc_tdt_reg = WMREG_TDT;
2063 }
2064
2065 if (sc->sc_type == WM_T_PCH) {
2066 uint16_t val;
2067
2068		/* Save the NVM K1 bit setting (K1 is a power state of the MAC/PHY interconnect) */
2069 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2070
2071 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2072 sc->sc_nvm_k1_enabled = 1;
2073 else
2074 sc->sc_nvm_k1_enabled = 0;
2075 }
2076
2077	/*
2078	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize
2079	 * the media structures accordingly.
2080	 */
2081 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2082 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2083 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2084 || sc->sc_type == WM_T_82573
2085 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2086 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2087 wm_gmii_mediainit(sc, wmp->wmp_product);
2088 } else if (sc->sc_type < WM_T_82543 ||
2089 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2090 if (sc->sc_mediatype & WMP_F_COPPER) {
2091 aprint_error_dev(sc->sc_dev,
2092 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2093 sc->sc_mediatype = WMP_F_FIBER;
2094 }
2095 wm_tbi_mediainit(sc);
2096 } else {
2097 switch (sc->sc_type) {
2098 case WM_T_82575:
2099 case WM_T_82576:
2100 case WM_T_82580:
2101 case WM_T_I350:
2102 case WM_T_I354:
2103 case WM_T_I210:
2104 case WM_T_I211:
2105 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2106 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2107 switch (link_mode) {
2108 case CTRL_EXT_LINK_MODE_1000KX:
2109 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2110 sc->sc_mediatype = WMP_F_SERDES;
2111 break;
2112 case CTRL_EXT_LINK_MODE_SGMII:
2113 if (wm_sgmii_uses_mdio(sc)) {
2114 aprint_verbose_dev(sc->sc_dev,
2115 "SGMII(MDIO)\n");
2116 sc->sc_flags |= WM_F_SGMII;
2117 sc->sc_mediatype = WMP_F_COPPER;
2118 break;
2119 }
2120 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2121 /*FALLTHROUGH*/
2122 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2123 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2124 if (sc->sc_mediatype == WMP_F_UNKNOWN) {
2125 if (link_mode
2126 == CTRL_EXT_LINK_MODE_SGMII) {
2127 sc->sc_mediatype
2128 = WMP_F_COPPER;
2129 sc->sc_flags |= WM_F_SGMII;
2130 } else {
2131 sc->sc_mediatype
2132 = WMP_F_SERDES;
2133 aprint_verbose_dev(sc->sc_dev,
2134 "SERDES\n");
2135 }
2136 break;
2137 }
2138 if (sc->sc_mediatype == WMP_F_SERDES)
2139 aprint_verbose_dev(sc->sc_dev,
2140 "SERDES\n");
2141
2142 /* Change current link mode setting */
2143 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2144 switch (sc->sc_mediatype) {
2145 case WMP_F_COPPER:
2146 reg |= CTRL_EXT_LINK_MODE_SGMII;
2147 break;
2148 case WMP_F_SERDES:
2149 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2150 break;
2151 default:
2152 break;
2153 }
2154 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2155 break;
2156 case CTRL_EXT_LINK_MODE_GMII:
2157 default:
2158 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2159 sc->sc_mediatype = WMP_F_COPPER;
2160 break;
2161 }
2162
2163 reg &= ~CTRL_EXT_I2C_ENA;
2164 if ((sc->sc_flags & WM_F_SGMII) != 0)
2165 reg |= CTRL_EXT_I2C_ENA;
2166 else
2167 reg &= ~CTRL_EXT_I2C_ENA;
2168 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2169
2170 if (sc->sc_mediatype == WMP_F_COPPER)
2171 wm_gmii_mediainit(sc, wmp->wmp_product);
2172 else
2173 wm_tbi_mediainit(sc);
2174 break;
2175 default:
2176 if (sc->sc_mediatype & WMP_F_FIBER)
2177 aprint_error_dev(sc->sc_dev,
2178 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2179 sc->sc_mediatype = WMP_F_COPPER;
2180 wm_gmii_mediainit(sc, wmp->wmp_product);
2181 }
2182 }
2183
2184 ifp = &sc->sc_ethercom.ec_if;
2185 xname = device_xname(sc->sc_dev);
2186 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2187 ifp->if_softc = sc;
2188 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2189 ifp->if_ioctl = wm_ioctl;
2190 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2191 ifp->if_start = wm_nq_start;
2192 else
2193 ifp->if_start = wm_start;
2194 ifp->if_watchdog = wm_watchdog;
2195 ifp->if_init = wm_init;
2196 ifp->if_stop = wm_stop;
2197 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2198 IFQ_SET_READY(&ifp->if_snd);
2199
2200 /* Check for jumbo frame */
2201 switch (sc->sc_type) {
2202 case WM_T_82573:
2203 /* XXX limited to 9234 if ASPM is disabled */
2204 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &io3);
2205 if ((io3 & NVM_3GIO_3_ASPM_MASK) != 0)
2206 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2207 break;
2208 case WM_T_82571:
2209 case WM_T_82572:
2210 case WM_T_82574:
2211 case WM_T_82575:
2212 case WM_T_82576:
2213 case WM_T_82580:
2214 case WM_T_I350:
2215 case WM_T_I354: /* XXXX ok? */
2216 case WM_T_I210:
2217 case WM_T_I211:
2218 case WM_T_80003:
2219 case WM_T_ICH9:
2220 case WM_T_ICH10:
2221 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2222 case WM_T_PCH_LPT:
2223 /* XXX limited to 9234 */
2224 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2225 break;
2226 case WM_T_PCH:
2227 /* XXX limited to 4096 */
2228 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2229 break;
2230 case WM_T_82542_2_0:
2231 case WM_T_82542_2_1:
2232 case WM_T_82583:
2233 case WM_T_ICH8:
2234 /* No support for jumbo frame */
2235 break;
2236 default:
2237 /* ETHER_MAX_LEN_JUMBO */
2238 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2239 break;
2240 }
2241
2242	/* If we're an i82543 or greater, we can support VLANs. */
2243 if (sc->sc_type >= WM_T_82543)
2244 sc->sc_ethercom.ec_capabilities |=
2245 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2246
2247	/*
2248	 * We can offload TCPv4 and UDPv4 checksums, but only
2249	 * on the i82543 and later.
2250	 */
2251 if (sc->sc_type >= WM_T_82543) {
2252 ifp->if_capabilities |=
2253 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2254 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2255 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2256 IFCAP_CSUM_TCPv6_Tx |
2257 IFCAP_CSUM_UDPv6_Tx;
2258 }
2259
2260 /*
2261 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2262 *
2263 * 82541GI (8086:1076) ... no
2264 * 82572EI (8086:10b9) ... yes
2265 */
2266 if (sc->sc_type >= WM_T_82571) {
2267 ifp->if_capabilities |=
2268 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2269 }
2270
2271 /*
2272	 * If we're an i82544 or greater (except i82547), we can do
2273 * TCP segmentation offload.
2274 */
2275 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2276 ifp->if_capabilities |= IFCAP_TSOv4;
2277 }
2278
2279 if (sc->sc_type >= WM_T_82571) {
2280 ifp->if_capabilities |= IFCAP_TSOv6;
2281 }
2282
2283 #ifdef WM_MPSAFE
2284 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2285 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2286 #else
2287 sc->sc_tx_lock = NULL;
2288 sc->sc_rx_lock = NULL;
2289 #endif
2290
2291 /* Attach the interface. */
2292 if_attach(ifp);
2293 ether_ifattach(ifp, enaddr);
2294 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2295 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2296 RND_FLAG_DEFAULT);
2297
2298 #ifdef WM_EVENT_COUNTERS
2299 /* Attach event counters. */
2300 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2301 NULL, xname, "txsstall");
2302 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2303 NULL, xname, "txdstall");
2304 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2305 NULL, xname, "txfifo_stall");
2306 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2307 NULL, xname, "txdw");
2308 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2309 NULL, xname, "txqe");
2310 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2311 NULL, xname, "rxintr");
2312 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2313 NULL, xname, "linkintr");
2314
2315 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2316 NULL, xname, "rxipsum");
2317 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2318 NULL, xname, "rxtusum");
2319 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2320 NULL, xname, "txipsum");
2321 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2322 NULL, xname, "txtusum");
2323 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2324 NULL, xname, "txtusum6");
2325
2326 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2327 NULL, xname, "txtso");
2328 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2329 NULL, xname, "txtso6");
2330 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2331 NULL, xname, "txtsopain");
2332
2333 for (i = 0; i < WM_NTXSEGS; i++) {
2334 snprintf(wm_txseg_evcnt_names[i],
2335 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2336 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2337 NULL, xname, wm_txseg_evcnt_names[i]);
2338 }
2339
2340 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2341 NULL, xname, "txdrop");
2342
2343 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2344 NULL, xname, "tu");
2345
2346 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2347 NULL, xname, "tx_xoff");
2348 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2349 NULL, xname, "tx_xon");
2350 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2351 NULL, xname, "rx_xoff");
2352 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2353 NULL, xname, "rx_xon");
2354 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2355 NULL, xname, "rx_macctl");
2356 #endif /* WM_EVENT_COUNTERS */
2357
2358 if (pmf_device_register(self, wm_suspend, wm_resume))
2359 pmf_class_network_register(self, ifp);
2360 else
2361 aprint_error_dev(self, "couldn't establish power handler\n");
2362
2363 sc->sc_flags |= WM_F_ATTACHED;
2364 return;
2365
2366 /*
2367 * Free any resources we've allocated during the failed attach
2368 * attempt. Do this in reverse order and fall through.
2369 */
2370 fail_5:
2371 for (i = 0; i < WM_NRXDESC; i++) {
2372 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2373 bus_dmamap_destroy(sc->sc_dmat,
2374 sc->sc_rxsoft[i].rxs_dmamap);
2375 }
2376 fail_4:
2377 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2378 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2379 bus_dmamap_destroy(sc->sc_dmat,
2380 sc->sc_txsoft[i].txs_dmamap);
2381 }
2382 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2383 fail_3:
2384 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2385 fail_2:
2386 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2387 sc->sc_cd_size);
2388 fail_1:
2389 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2390 fail_0:
2391 return;
2392 }
2393
2394 /* The detach function (ca_detach) */
2395 static int
2396 wm_detach(device_t self, int flags __unused)
2397 {
2398 struct wm_softc *sc = device_private(self);
2399 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2400 int i;
2401 #ifndef WM_MPSAFE
2402 int s;
2403 #endif
2404
2405 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2406 return 0;
2407
2408 #ifndef WM_MPSAFE
2409 s = splnet();
2410 #endif
2411 /* Stop the interface. Callouts are stopped in it. */
2412 wm_stop(ifp, 1);
2413
2414 #ifndef WM_MPSAFE
2415 splx(s);
2416 #endif
2417
2418 pmf_device_deregister(self);
2419
2420 /* Tell the firmware about the release */
2421 WM_BOTH_LOCK(sc);
2422 wm_release_manageability(sc);
2423 wm_release_hw_control(sc);
2424 WM_BOTH_UNLOCK(sc);
2425
2426 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2427
2428 /* Delete all remaining media. */
2429 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2430
2431 ether_ifdetach(ifp);
2432 if_detach(ifp);
2433
2435 /* Unload RX dmamaps and free mbufs */
2436 WM_RX_LOCK(sc);
2437 wm_rxdrain(sc);
2438 WM_RX_UNLOCK(sc);
2439 /* Must unlock here */
2440
2441	/* Free the DMA maps; this mirrors the failure path at the end of wm_attach() */
2442 for (i = 0; i < WM_NRXDESC; i++) {
2443 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2444 bus_dmamap_destroy(sc->sc_dmat,
2445 sc->sc_rxsoft[i].rxs_dmamap);
2446 }
2447 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2448 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2449 bus_dmamap_destroy(sc->sc_dmat,
2450 sc->sc_txsoft[i].txs_dmamap);
2451 }
2452 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2453 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2454 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2455 sc->sc_cd_size);
2456 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2457
2458 /* Disestablish the interrupt handler */
2459 if (sc->sc_ih != NULL) {
2460 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2461 sc->sc_ih = NULL;
2462 }
2463
2464 /* Unmap the registers */
2465 if (sc->sc_ss) {
2466 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2467 sc->sc_ss = 0;
2468 }
2469
2470 if (sc->sc_ios) {
2471 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2472 sc->sc_ios = 0;
2473 }
2474
2475 if (sc->sc_tx_lock)
2476 mutex_obj_free(sc->sc_tx_lock);
2477 if (sc->sc_rx_lock)
2478 mutex_obj_free(sc->sc_rx_lock);
2479
2480 return 0;
2481 }
2482
2483 static bool
2484 wm_suspend(device_t self, const pmf_qual_t *qual)
2485 {
2486 struct wm_softc *sc = device_private(self);
2487
2488 wm_release_manageability(sc);
2489 wm_release_hw_control(sc);
2490 #ifdef WM_WOL
2491 wm_enable_wakeup(sc);
2492 #endif
2493
2494 return true;
2495 }
2496
2497 static bool
2498 wm_resume(device_t self, const pmf_qual_t *qual)
2499 {
2500 struct wm_softc *sc = device_private(self);
2501
2502 wm_init_manageability(sc);
2503
2504 return true;
2505 }
2506
2507 /*
2508 * wm_watchdog: [ifnet interface function]
2509 *
2510 * Watchdog timer handler.
2511 */
2512 static void
2513 wm_watchdog(struct ifnet *ifp)
2514 {
2515 struct wm_softc *sc = ifp->if_softc;
2516
2517 /*
2518 * Since we're using delayed interrupts, sweep up
2519 * before we report an error.
2520 */
2521 WM_TX_LOCK(sc);
2522 wm_txintr(sc);
2523 WM_TX_UNLOCK(sc);
2524
2525 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2526 #ifdef WM_DEBUG
2527 int i, j;
2528 struct wm_txsoft *txs;
2529 #endif
2530 log(LOG_ERR,
2531 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2532 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2533 sc->sc_txnext);
2534 ifp->if_oerrors++;
2535 #ifdef WM_DEBUG
2536 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2537 i = WM_NEXTTXS(sc, i)) {
2538 txs = &sc->sc_txsoft[i];
2539 printf("txs %d tx %d -> %d\n",
2540 i, txs->txs_firstdesc, txs->txs_lastdesc);
2541 for (j = txs->txs_firstdesc; ;
2542 j = WM_NEXTTX(sc, j)) {
2543 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2544 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2545 printf("\t %#08x%08x\n",
2546 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2547 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2548 if (j == txs->txs_lastdesc)
2549 break;
2550 }
2551 }
2552 #endif
2553 /* Reset the interface. */
2554 (void) wm_init(ifp);
2555 }
2556
2557 /* Try to get more packets going. */
2558 ifp->if_start(ifp);
2559 }
2560
2561 /*
2562 * wm_tick:
2563 *
2564 * One second timer, used to check link status, sweep up
2565 * completed transmit jobs, etc.
2566 */
2567 static void
2568 wm_tick(void *arg)
2569 {
2570 struct wm_softc *sc = arg;
2571 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2572 #ifndef WM_MPSAFE
2573 int s;
2574
2575 s = splnet();
2576 #endif
2577
2578 WM_TX_LOCK(sc);
2579
2580 if (sc->sc_stopping)
2581 goto out;
2582
2583 if (sc->sc_type >= WM_T_82542_2_1) {
2584 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2585 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2586 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2587 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2588 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2589 }
2590
2591 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2592 ifp->if_ierrors += 0ULL + /* ensure quad_t */
2593 + CSR_READ(sc, WMREG_CRCERRS)
2594 + CSR_READ(sc, WMREG_ALGNERRC)
2595 + CSR_READ(sc, WMREG_SYMERRC)
2596 + CSR_READ(sc, WMREG_RXERRC)
2597 + CSR_READ(sc, WMREG_SEC)
2598 + CSR_READ(sc, WMREG_CEXTERR)
2599 + CSR_READ(sc, WMREG_RLEC);
2600 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2601
2602 if (sc->sc_flags & WM_F_HAS_MII)
2603 mii_tick(&sc->sc_mii);
2604 else
2605 wm_tbi_check_link(sc);
2606
2607 out:
2608 WM_TX_UNLOCK(sc);
2609 #ifndef WM_MPSAFE
2610 splx(s);
2611 #endif
2612
2613 if (!sc->sc_stopping)
2614 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2615 }
2616
2617 static int
2618 wm_ifflags_cb(struct ethercom *ec)
2619 {
2620 struct ifnet *ifp = &ec->ec_if;
2621 struct wm_softc *sc = ifp->if_softc;
2622 int change = ifp->if_flags ^ sc->sc_if_flags;
2623 int rc = 0;
2624
2625 WM_BOTH_LOCK(sc);
2626
2627 if (change != 0)
2628 sc->sc_if_flags = ifp->if_flags;
2629
2630 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2631 rc = ENETRESET;
2632 goto out;
2633 }
2634
2635 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2636 wm_set_filter(sc);
2637
2638 wm_set_vlan(sc);
2639
2640 out:
2641 WM_BOTH_UNLOCK(sc);
2642
2643 return rc;
2644 }
2645
2646 /*
2647 * wm_ioctl: [ifnet interface function]
2648 *
2649 * Handle control requests from the operator.
2650 */
2651 static int
2652 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2653 {
2654 struct wm_softc *sc = ifp->if_softc;
2655 struct ifreq *ifr = (struct ifreq *) data;
2656 struct ifaddr *ifa = (struct ifaddr *)data;
2657 struct sockaddr_dl *sdl;
2658 int s, error;
2659
2660 #ifndef WM_MPSAFE
2661 s = splnet();
2662 #endif
2663 switch (cmd) {
2664 case SIOCSIFMEDIA:
2665 case SIOCGIFMEDIA:
2666 WM_BOTH_LOCK(sc);
2667 /* Flow control requires full-duplex mode. */
2668 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2669 (ifr->ifr_media & IFM_FDX) == 0)
2670 ifr->ifr_media &= ~IFM_ETH_FMASK;
2671 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2672 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2673 /* We can do both TXPAUSE and RXPAUSE. */
2674 ifr->ifr_media |=
2675 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2676 }
2677 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2678 }
2679 WM_BOTH_UNLOCK(sc);
2680 #ifdef WM_MPSAFE
2681 s = splnet();
2682 #endif
2683 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2684 #ifdef WM_MPSAFE
2685 splx(s);
2686 #endif
2687 break;
2688 case SIOCINITIFADDR:
2689 WM_BOTH_LOCK(sc);
2690 if (ifa->ifa_addr->sa_family == AF_LINK) {
2691 sdl = satosdl(ifp->if_dl->ifa_addr);
2692 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2693 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2694 /* unicast address is first multicast entry */
2695 wm_set_filter(sc);
2696 error = 0;
2697 WM_BOTH_UNLOCK(sc);
2698 break;
2699 }
2700 WM_BOTH_UNLOCK(sc);
2701 /*FALLTHROUGH*/
2702 default:
2703 #ifdef WM_MPSAFE
2704 s = splnet();
2705 #endif
2706 /* It may call wm_start, so unlock here */
2707 error = ether_ioctl(ifp, cmd, data);
2708 #ifdef WM_MPSAFE
2709 splx(s);
2710 #endif
2711 if (error != ENETRESET)
2712 break;
2713
2714 error = 0;
2715
2716 if (cmd == SIOCSIFCAP) {
2717 error = (*ifp->if_init)(ifp);
2718 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2719 ;
2720 else if (ifp->if_flags & IFF_RUNNING) {
2721 /*
2722 * Multicast list has changed; set the hardware filter
2723 * accordingly.
2724 */
2725 WM_BOTH_LOCK(sc);
2726 wm_set_filter(sc);
2727 WM_BOTH_UNLOCK(sc);
2728 }
2729 break;
2730 }
2731
2732 /* Try to get more packets going. */
2733 ifp->if_start(ifp);
2734
2735 #ifndef WM_MPSAFE
2736 splx(s);
2737 #endif
2738 return error;
2739 }
2740
2741 /* MAC address related */
2742
2743 static int
2744 wm_check_alt_mac_addr(struct wm_softc *sc)
2745 {
2746 uint16_t myea[ETHER_ADDR_LEN / 2];
2747 uint16_t offset = NVM_OFF_MACADDR;
2748
2749 /* Try to read alternative MAC address pointer */
2750 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2751 return -1;
2752
2753 /* Check pointer */
2754 if (offset == 0xffff)
2755 return -1;
2756
2757	/*
2758	 * Check whether the alternative MAC address is valid.  Some
2759	 * cards have a non-0xffff pointer but don't actually use an
2760	 * alternative MAC address.
2761	 *
2762	 * A valid unicast address has bit 0 of the first octet clear.
2763	 */
2764 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2765 if (((myea[0] & 0xff) & 0x01) == 0)
2766 return 0; /* found! */
2767
2768 /* not found */
2769 return -1;
2770 }
2771
2772 static int
2773 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2774 {
2775 uint16_t myea[ETHER_ADDR_LEN / 2];
2776 uint16_t offset = NVM_OFF_MACADDR;
2777 int do_invert = 0;
2778
2779 switch (sc->sc_type) {
2780 case WM_T_82580:
2781 case WM_T_I350:
2782 case WM_T_I354:
2783 switch (sc->sc_funcid) {
2784 case 0:
2785 /* default value (== NVM_OFF_MACADDR) */
2786 break;
2787 case 1:
2788 offset = NVM_OFF_LAN1;
2789 break;
2790 case 2:
2791 offset = NVM_OFF_LAN2;
2792 break;
2793 case 3:
2794 offset = NVM_OFF_LAN3;
2795 break;
2796 default:
2797 goto bad;
2798 /* NOTREACHED */
2799 break;
2800 }
2801 break;
2802 case WM_T_82571:
2803 case WM_T_82575:
2804 case WM_T_82576:
2805 case WM_T_80003:
2806 case WM_T_I210:
2807 case WM_T_I211:
2808 if (wm_check_alt_mac_addr(sc) != 0) {
2809 /* reset the offset to LAN0 */
2810 offset = NVM_OFF_MACADDR;
2811 if ((sc->sc_funcid & 0x01) == 1)
2812 do_invert = 1;
2813 goto do_read;
2814 }
2815 switch (sc->sc_funcid) {
2816 case 0:
2817 /*
2818 * The offset is the value in NVM_OFF_ALT_MAC_ADDR_PTR
2819 * itself.
2820 */
2821 break;
2822 case 1:
2823 offset += NVM_OFF_MACADDR_LAN1;
2824 break;
2825 case 2:
2826 offset += NVM_OFF_MACADDR_LAN2;
2827 break;
2828 case 3:
2829 offset += NVM_OFF_MACADDR_LAN3;
2830 break;
2831 default:
2832 goto bad;
2833 /* NOTREACHED */
2834 break;
2835 }
2836 break;
2837 default:
2838 if ((sc->sc_funcid & 0x01) == 1)
2839 do_invert = 1;
2840 break;
2841 }
2842
2843 do_read:
2844 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2845 myea) != 0) {
2846 goto bad;
2847 }
2848
2849 enaddr[0] = myea[0] & 0xff;
2850 enaddr[1] = myea[0] >> 8;
2851 enaddr[2] = myea[1] & 0xff;
2852 enaddr[3] = myea[1] >> 8;
2853 enaddr[4] = myea[2] & 0xff;
2854 enaddr[5] = myea[2] >> 8;
2855
2856 /*
2857 * Toggle the LSB of the MAC address on the second port
2858 * of some dual port cards.
2859 */
2860 if (do_invert != 0)
2861 enaddr[5] ^= 1;
2862
2863 return 0;
2864
2865 bad:
2866 return -1;
2867 }
2868
2869 /*
2870 * wm_set_ral:
2871 *
2872 *	Set an entry in the receive address list.
2873 */
2874 static void
2875 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2876 {
2877 uint32_t ral_lo, ral_hi;
2878
2879 if (enaddr != NULL) {
2880 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2881 (enaddr[3] << 24);
2882 ral_hi = enaddr[4] | (enaddr[5] << 8);
2883 ral_hi |= RAL_AV;
2884 } else {
2885 ral_lo = 0;
2886 ral_hi = 0;
2887 }
2888
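	/*
	 * For example, 00:11:22:33:44:55 yields ral_lo 0x33221100 and
	 * ral_hi 0x5544 | RAL_AV, where RAL_AV marks the entry valid.
	 */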
2889 if (sc->sc_type >= WM_T_82544) {
2890 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2891 ral_lo);
2892 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2893 ral_hi);
2894 } else {
2895 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2896 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2897 }
2898 }
2899
2900 /*
2901 * wm_mchash:
2902 *
2903 *	Compute the hash of the multicast address for the 4096-bit
2904 *	(1024-bit on ICH/PCH) multicast filter.
2905 */
2906 static uint32_t
2907 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2908 {
2909 static const int lo_shift[4] = { 4, 3, 2, 0 };
2910 static const int hi_shift[4] = { 4, 5, 6, 8 };
2911 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2912 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
2913 uint32_t hash;
2914
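	/*
	 * The shift tables select a 12-bit (10-bit on ICH/PCH) window of
	 * the high-order address bits; sc_mchash_type picks the window,
	 * which should correspond to the multicast offset (MO) field
	 * programmed into RCTL.
	 */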
2915 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2916 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2917 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
2918 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
2919 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
2920 return (hash & 0x3ff);
2921 }
2922 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2923 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2924
2925 return (hash & 0xfff);
2926 }
2927
2928 /*
2929 * wm_set_filter:
2930 *
2931 * Set up the receive filter.
2932 */
2933 static void
2934 wm_set_filter(struct wm_softc *sc)
2935 {
2936 struct ethercom *ec = &sc->sc_ethercom;
2937 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2938 struct ether_multi *enm;
2939 struct ether_multistep step;
2940 bus_addr_t mta_reg;
2941 uint32_t hash, reg, bit;
2942 int i, size;
2943
2944 if (sc->sc_type >= WM_T_82544)
2945 mta_reg = WMREG_CORDOVA_MTA;
2946 else
2947 mta_reg = WMREG_MTA;
2948
2949 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2950
2951 if (ifp->if_flags & IFF_BROADCAST)
2952 sc->sc_rctl |= RCTL_BAM;
2953 if (ifp->if_flags & IFF_PROMISC) {
2954 sc->sc_rctl |= RCTL_UPE;
2955 goto allmulti;
2956 }
2957
2958 /*
2959 * Set the station address in the first RAL slot, and
2960 * clear the remaining slots.
2961 */
2962 if (sc->sc_type == WM_T_ICH8)
2963		size = WM_RAL_TABSIZE_ICH8 - 1;
2964 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
2965 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
2966 || (sc->sc_type == WM_T_PCH_LPT))
2967 size = WM_RAL_TABSIZE_ICH8;
2968 else if (sc->sc_type == WM_T_82575)
2969 size = WM_RAL_TABSIZE_82575;
2970 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
2971 size = WM_RAL_TABSIZE_82576;
2972 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2973 size = WM_RAL_TABSIZE_I350;
2974 else
2975 size = WM_RAL_TABSIZE;
2976 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
2977 for (i = 1; i < size; i++)
2978 wm_set_ral(sc, NULL, i);
2979
2980 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2981 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2982 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
2983 size = WM_ICH8_MC_TABSIZE;
2984 else
2985 size = WM_MC_TABSIZE;
2986 /* Clear out the multicast table. */
2987 for (i = 0; i < size; i++)
2988 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2989
2990 ETHER_FIRST_MULTI(step, ec, enm);
2991 while (enm != NULL) {
2992 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2993 /*
2994 * We must listen to a range of multicast addresses.
2995 * For now, just accept all multicasts, rather than
2996 * trying to set only those filter bits needed to match
2997 * the range. (At this time, the only use of address
2998 * ranges is for IP multicast routing, for which the
2999 * range is big enough to require all bits set.)
3000 */
3001 goto allmulti;
3002 }
3003
3004 hash = wm_mchash(sc, enm->enm_addrlo);
3005
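		/*
		 * The upper bits of the hash select a 32-bit word of the
		 * multicast table; the low five bits select a bit in it.
		 */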
3006 reg = (hash >> 5);
3007 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3008 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3009 || (sc->sc_type == WM_T_PCH2)
3010 || (sc->sc_type == WM_T_PCH_LPT))
3011 reg &= 0x1f;
3012 else
3013 reg &= 0x7f;
3014 bit = hash & 0x1f;
3015
3016 hash = CSR_READ(sc, mta_reg + (reg << 2));
3017 hash |= 1U << bit;
3018
3019 /* XXX Hardware bug?? */
3020 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3021 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3022 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3023 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3024 } else
3025 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3026
3027 ETHER_NEXT_MULTI(step, enm);
3028 }
3029
3030 ifp->if_flags &= ~IFF_ALLMULTI;
3031 goto setit;
3032
3033 allmulti:
3034 ifp->if_flags |= IFF_ALLMULTI;
3035 sc->sc_rctl |= RCTL_MPE;
3036
3037 setit:
3038 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3039 }
3040
3041 /* Reset and init related */
3042
3043 static void
3044 wm_set_vlan(struct wm_softc *sc)
3045 {
3046 /* Deal with VLAN enables. */
3047 if (VLAN_ATTACHED(&sc->sc_ethercom))
3048 sc->sc_ctrl |= CTRL_VME;
3049 else
3050 sc->sc_ctrl &= ~CTRL_VME;
3051
3052 /* Write the control registers. */
3053 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3054 }
3055
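/*
 * wm_set_pcie_completion_timeout:
 *
 *	If the PCIe completion timeout in GCR is still at its default
 *	of zero, program a 10ms timeout directly (pre-version-2
 *	capability) or a 16ms timeout via the Device Control 2
 *	register, then disable completion timeout resend.
 */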
3056 static void
3057 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3058 {
3059 uint32_t gcr;
3060 pcireg_t ctrl2;
3061
3062 gcr = CSR_READ(sc, WMREG_GCR);
3063
3064 /* Only take action if timeout value is defaulted to 0 */
3065 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3066 goto out;
3067
3068 if ((gcr & GCR_CAP_VER2) == 0) {
3069 gcr |= GCR_CMPL_TMOUT_10MS;
3070 goto out;
3071 }
3072
3073 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3074 sc->sc_pcixe_capoff + PCIE_DCSR2);
3075 ctrl2 |= WM_PCIE_DCSR2_16MS;
3076 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3077 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3078
3079 out:
3080 /* Disable completion timeout resend */
3081 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3082
3083 CSR_WRITE(sc, WMREG_GCR, gcr);
3084 }
3085
3086 void
3087 wm_get_auto_rd_done(struct wm_softc *sc)
3088 {
3089 int i;
3090
3091 /* wait for eeprom to reload */
3092 switch (sc->sc_type) {
3093 case WM_T_82571:
3094 case WM_T_82572:
3095 case WM_T_82573:
3096 case WM_T_82574:
3097 case WM_T_82583:
3098 case WM_T_82575:
3099 case WM_T_82576:
3100 case WM_T_82580:
3101 case WM_T_I350:
3102 case WM_T_I354:
3103 case WM_T_I210:
3104 case WM_T_I211:
3105 case WM_T_80003:
3106 case WM_T_ICH8:
3107 case WM_T_ICH9:
3108 for (i = 0; i < 10; i++) {
3109 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3110 break;
3111 delay(1000);
3112 }
3113 if (i == 10) {
3114 log(LOG_ERR, "%s: auto read from eeprom failed to "
3115 "complete\n", device_xname(sc->sc_dev));
3116 }
3117 break;
3118 default:
3119 break;
3120 }
3121 }
3122
3123 void
3124 wm_lan_init_done(struct wm_softc *sc)
3125 {
3126 uint32_t reg = 0;
3127 int i;
3128
3129	/* Wait for the LAN init (triggered by the NVM reload) to complete */
3130 switch (sc->sc_type) {
3131 case WM_T_ICH10:
3132 case WM_T_PCH:
3133 case WM_T_PCH2:
3134 case WM_T_PCH_LPT:
3135 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3136 reg = CSR_READ(sc, WMREG_STATUS);
3137 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3138 break;
3139 delay(100);
3140 }
3141 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3142 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3143 "complete\n", device_xname(sc->sc_dev), __func__);
3144 }
3145 break;
3146 default:
3147 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3148 __func__);
3149 break;
3150 }
3151
3152 reg &= ~STATUS_LAN_INIT_DONE;
3153 CSR_WRITE(sc, WMREG_STATUS, reg);
3154 }
3155
3156 void
3157 wm_get_cfg_done(struct wm_softc *sc)
3158 {
3159 int mask;
3160 uint32_t reg;
3161 int i;
3162
3163	/* Wait for the post-reset configuration (loaded from the NVM) to complete */
3164 switch (sc->sc_type) {
3165 case WM_T_82542_2_0:
3166 case WM_T_82542_2_1:
3167 /* null */
3168 break;
3169 case WM_T_82543:
3170 case WM_T_82544:
3171 case WM_T_82540:
3172 case WM_T_82545:
3173 case WM_T_82545_3:
3174 case WM_T_82546:
3175 case WM_T_82546_3:
3176 case WM_T_82541:
3177 case WM_T_82541_2:
3178 case WM_T_82547:
3179 case WM_T_82547_2:
3180 case WM_T_82573:
3181 case WM_T_82574:
3182 case WM_T_82583:
3183 /* generic */
3184 delay(10*1000);
3185 break;
3186 case WM_T_80003:
3187 case WM_T_82571:
3188 case WM_T_82572:
3189 case WM_T_82575:
3190 case WM_T_82576:
3191 case WM_T_82580:
3192 case WM_T_I350:
3193 case WM_T_I354:
3194 case WM_T_I210:
3195 case WM_T_I211:
3196 if (sc->sc_type == WM_T_82571) {
3197 /* Only 82571 shares port 0 */
3198 mask = EEMNGCTL_CFGDONE_0;
3199 } else
3200 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3201 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3202 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3203 break;
3204 delay(1000);
3205 }
3206 if (i >= WM_PHY_CFG_TIMEOUT) {
3207 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3208 device_xname(sc->sc_dev), __func__));
3209 }
3210 break;
3211 case WM_T_ICH8:
3212 case WM_T_ICH9:
3213 case WM_T_ICH10:
3214 case WM_T_PCH:
3215 case WM_T_PCH2:
3216 case WM_T_PCH_LPT:
3217 delay(10*1000);
3218 if (sc->sc_type >= WM_T_ICH10)
3219 wm_lan_init_done(sc);
3220 else
3221 wm_get_auto_rd_done(sc);
3222
3223 reg = CSR_READ(sc, WMREG_STATUS);
3224 if ((reg & STATUS_PHYRA) != 0)
3225 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3226 break;
3227 default:
3228 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3229 __func__);
3230 break;
3231 }
3232 }
3233
3234 /*
3235 * wm_reset:
3236 *
3237 * Reset the i82542 chip.
3238 */
3239 static void
3240 wm_reset(struct wm_softc *sc)
3241 {
3242 int phy_reset = 0;
3243 int error = 0;
3244 uint32_t reg, mask;
3245
3246	/*
3247	 * Allocate on-chip memory according to the MTU size; the PBA
3248	 * register sets the receive share of the packet buffer and must
3249	 * be written before the chip is reset.
3250	 */
3251 switch (sc->sc_type) {
3252 case WM_T_82547:
3253 case WM_T_82547_2:
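		/*
		 * On the 82547 the packet buffer above the receive
		 * allocation serves as the Tx FIFO for the stall
		 * work-around; record its offset and size here.
		 */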
3254 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3255 PBA_22K : PBA_30K;
3256 sc->sc_txfifo_head = 0;
3257 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3258 sc->sc_txfifo_size =
3259 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3260 sc->sc_txfifo_stall = 0;
3261 break;
3262 case WM_T_82571:
3263 case WM_T_82572:
3264 case WM_T_82575: /* XXX need special handing for jumbo frames */
3265 case WM_T_I350:
3266 case WM_T_I354:
3267 case WM_T_80003:
3268 sc->sc_pba = PBA_32K;
3269 break;
3270 case WM_T_82580:
3271 sc->sc_pba = PBA_35K;
3272 break;
3273 case WM_T_I210:
3274 case WM_T_I211:
3275 sc->sc_pba = PBA_34K;
3276 break;
3277 case WM_T_82576:
3278 sc->sc_pba = PBA_64K;
3279 break;
3280 case WM_T_82573:
3281 sc->sc_pba = PBA_12K;
3282 break;
3283 case WM_T_82574:
3284 case WM_T_82583:
3285 sc->sc_pba = PBA_20K;
3286 break;
3287 case WM_T_ICH8:
3288 sc->sc_pba = PBA_8K;
3289 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3290 break;
3291 case WM_T_ICH9:
3292 case WM_T_ICH10:
3293 sc->sc_pba = PBA_10K;
3294 break;
3295 case WM_T_PCH:
3296 case WM_T_PCH2:
3297 case WM_T_PCH_LPT:
3298 sc->sc_pba = PBA_26K;
3299 break;
3300 default:
3301 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3302 PBA_40K : PBA_48K;
3303 break;
3304 }
3305 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3306
3307 /* Prevent the PCI-E bus from sticking */
3308 if (sc->sc_flags & WM_F_PCIE) {
3309 int timeout = 800;
3310
3311 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3312 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3313
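		/* Wait up to 80ms for in-flight master transactions to drain. */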
3314 while (timeout--) {
3315 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3316 == 0)
3317 break;
3318 delay(100);
3319 }
3320 }
3321
3322 /* Set the completion timeout for interface */
3323 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3324 || (sc->sc_type == WM_T_82580)
3325 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3326 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3327 wm_set_pcie_completion_timeout(sc);
3328
3329 /* Clear interrupt */
3330 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3331
3332 /* Stop the transmit and receive processes. */
3333 CSR_WRITE(sc, WMREG_RCTL, 0);
3334 sc->sc_rctl &= ~RCTL_EN;
3335 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3336 CSR_WRITE_FLUSH(sc);
3337
3338 /* XXX set_tbi_sbp_82543() */
3339
3340 delay(10*1000);
3341
3342 /* Must acquire the MDIO ownership before MAC reset */
3343 switch (sc->sc_type) {
3344 case WM_T_82573:
3345 case WM_T_82574:
3346 case WM_T_82583:
3347 error = wm_get_hw_semaphore_82573(sc);
3348 break;
3349 default:
3350 break;
3351 }
3352
3353 /*
3354 * 82541 Errata 29? & 82547 Errata 28?
3355 * See also the description about PHY_RST bit in CTRL register
3356 * in 8254x_GBe_SDM.pdf.
3357 */
3358 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3359 CSR_WRITE(sc, WMREG_CTRL,
3360 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3361 CSR_WRITE_FLUSH(sc);
3362 delay(5000);
3363 }
3364
3365 switch (sc->sc_type) {
3366 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3367 case WM_T_82541:
3368 case WM_T_82541_2:
3369 case WM_T_82547:
3370 case WM_T_82547_2:
3371 /*
3372 * On some chipsets, a reset through a memory-mapped write
3373 * cycle can cause the chip to reset before completing the
3374		 * write cycle. This causes major headaches that can be
3375 * avoided by issuing the reset via indirect register writes
3376 * through I/O space.
3377 *
3378 * So, if we successfully mapped the I/O BAR at attach time,
3379 * use that. Otherwise, try our luck with a memory-mapped
3380 * reset.
3381 */
3382 if (sc->sc_flags & WM_F_IOH_VALID)
3383 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3384 else
3385 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3386 break;
3387 case WM_T_82545_3:
3388 case WM_T_82546_3:
3389 /* Use the shadow control register on these chips. */
3390 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3391 break;
3392 case WM_T_80003:
3393 mask = swfwphysem[sc->sc_funcid];
3394 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3395 wm_get_swfw_semaphore(sc, mask);
3396 CSR_WRITE(sc, WMREG_CTRL, reg);
3397 wm_put_swfw_semaphore(sc, mask);
3398 break;
3399 case WM_T_ICH8:
3400 case WM_T_ICH9:
3401 case WM_T_ICH10:
3402 case WM_T_PCH:
3403 case WM_T_PCH2:
3404 case WM_T_PCH_LPT:
3405 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3406 if (wm_check_reset_block(sc) == 0) {
3407 /*
3408 * Gate automatic PHY configuration by hardware on
3409 * non-managed 82579
3410 */
3411 if ((sc->sc_type == WM_T_PCH2)
3412 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3413 != 0))
3414 wm_gate_hw_phy_config_ich8lan(sc, 1);
3415
3417 reg |= CTRL_PHY_RESET;
3418 phy_reset = 1;
3419 }
3420 wm_get_swfwhw_semaphore(sc);
3421 CSR_WRITE(sc, WMREG_CTRL, reg);
3422 /* Don't insert a completion barrier when reset */
3423 delay(20*1000);
3424 wm_put_swfwhw_semaphore(sc);
3425 break;
3426 case WM_T_82542_2_0:
3427 case WM_T_82542_2_1:
3428 case WM_T_82543:
3429 case WM_T_82540:
3430 case WM_T_82545:
3431 case WM_T_82546:
3432 case WM_T_82571:
3433 case WM_T_82572:
3434 case WM_T_82573:
3435 case WM_T_82574:
3436 case WM_T_82575:
3437 case WM_T_82576:
3438 case WM_T_82580:
3439 case WM_T_82583:
3440 case WM_T_I350:
3441 case WM_T_I354:
3442 case WM_T_I210:
3443 case WM_T_I211:
3444 default:
3445 /* Everything else can safely use the documented method. */
3446 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3447 break;
3448 }
3449
3450 /* Must release the MDIO ownership after MAC reset */
3451 switch (sc->sc_type) {
3452 case WM_T_82573:
3453 case WM_T_82574:
3454 case WM_T_82583:
3455 if (error == 0)
3456 wm_put_hw_semaphore_82573(sc);
3457 break;
3458 default:
3459 break;
3460 }
3461
3462 if (phy_reset != 0)
3463 wm_get_cfg_done(sc);
3464
3465 /* reload EEPROM */
3466 switch (sc->sc_type) {
3467 case WM_T_82542_2_0:
3468 case WM_T_82542_2_1:
3469 case WM_T_82543:
3470 case WM_T_82544:
3471 delay(10);
3472 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3473 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3474 CSR_WRITE_FLUSH(sc);
3475 delay(2000);
3476 break;
3477 case WM_T_82540:
3478 case WM_T_82545:
3479 case WM_T_82545_3:
3480 case WM_T_82546:
3481 case WM_T_82546_3:
3482 delay(5*1000);
3483 /* XXX Disable HW ARPs on ASF enabled adapters */
3484 break;
3485 case WM_T_82541:
3486 case WM_T_82541_2:
3487 case WM_T_82547:
3488 case WM_T_82547_2:
3489 delay(20000);
3490 /* XXX Disable HW ARPs on ASF enabled adapters */
3491 break;
3492 case WM_T_82571:
3493 case WM_T_82572:
3494 case WM_T_82573:
3495 case WM_T_82574:
3496 case WM_T_82583:
3497 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3498 delay(10);
3499 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3500 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3501 CSR_WRITE_FLUSH(sc);
3502 }
3503 /* check EECD_EE_AUTORD */
3504 wm_get_auto_rd_done(sc);
3505		/*
3506		 * PHY configuration from the NVM starts just after
3507		 * EECD_AUTO_RD is set.
3508		 */
3509 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3510 || (sc->sc_type == WM_T_82583))
3511 delay(25*1000);
3512 break;
3513 case WM_T_82575:
3514 case WM_T_82576:
3515 case WM_T_82580:
3516 case WM_T_I350:
3517 case WM_T_I354:
3518 case WM_T_I210:
3519 case WM_T_I211:
3520 case WM_T_80003:
3521 /* check EECD_EE_AUTORD */
3522 wm_get_auto_rd_done(sc);
3523 break;
3524 case WM_T_ICH8:
3525 case WM_T_ICH9:
3526 case WM_T_ICH10:
3527 case WM_T_PCH:
3528 case WM_T_PCH2:
3529 case WM_T_PCH_LPT:
3530 break;
3531 default:
3532 panic("%s: unknown type\n", __func__);
3533 }
3534
3535 /* Check whether EEPROM is present or not */
3536 switch (sc->sc_type) {
3537 case WM_T_82575:
3538 case WM_T_82576:
3539 #if 0 /* XXX */
3540 case WM_T_82580:
3541 #endif
3542 case WM_T_I350:
3543 case WM_T_I354:
3544 case WM_T_ICH8:
3545 case WM_T_ICH9:
3546 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3547 /* Not found */
3548 sc->sc_flags |= WM_F_EEPROM_INVALID;
3549 if ((sc->sc_type == WM_T_82575)
3550 || (sc->sc_type == WM_T_82576)
3551 || (sc->sc_type == WM_T_82580)
3552 || (sc->sc_type == WM_T_I350)
3553 || (sc->sc_type == WM_T_I354))
3554 wm_reset_init_script_82575(sc);
3555 }
3556 break;
3557 default:
3558 break;
3559 }
3560
3561 if ((sc->sc_type == WM_T_82580)
3562 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3563 /* clear global device reset status bit */
3564 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3565 }
3566
3567 /* Clear any pending interrupt events. */
3568 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3569 reg = CSR_READ(sc, WMREG_ICR);
3570
3571 /* reload sc_ctrl */
3572 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3573
3574 if (sc->sc_type == WM_T_I350)
3575 wm_set_eee_i350(sc);
3576
3577 /* dummy read from WUC */
3578 if (sc->sc_type == WM_T_PCH)
3579 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3580 /*
3581 * For PCH, this write will make sure that any noise will be detected
3582 * as a CRC error and be dropped rather than show up as a bad packet
3583 * to the DMA engine
3584 */
3585 if (sc->sc_type == WM_T_PCH)
3586 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3587
3588 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3589 CSR_WRITE(sc, WMREG_WUC, 0);
3590
3591 /* XXX need special handling for 82580 */
3592 }
3593
3594 /*
3595 * wm_add_rxbuf:
3596 *
3597  *	Add a receive buffer to the indicated descriptor.
3598 */
3599 static int
3600 wm_add_rxbuf(struct wm_softc *sc, int idx)
3601 {
3602 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3603 struct mbuf *m;
3604 int error;
3605
3606 KASSERT(WM_RX_LOCKED(sc));
3607
3608 MGETHDR(m, M_DONTWAIT, MT_DATA);
3609 if (m == NULL)
3610 return ENOBUFS;
3611
3612 MCLGET(m, M_DONTWAIT);
3613 if ((m->m_flags & M_EXT) == 0) {
3614 m_freem(m);
3615 return ENOBUFS;
3616 }
3617
3618 if (rxs->rxs_mbuf != NULL)
3619 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3620
3621 rxs->rxs_mbuf = m;
3622
3623 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3624 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3625 BUS_DMA_READ|BUS_DMA_NOWAIT);
3626 if (error) {
3627 /* XXX XXX XXX */
3628 aprint_error_dev(sc->sc_dev,
3629 "unable to load rx DMA map %d, error = %d\n",
3630 idx, error);
3631 panic("wm_add_rxbuf");
3632 }
3633
3634 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3635 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3636
3637 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3638 if ((sc->sc_rctl & RCTL_EN) != 0)
3639 WM_INIT_RXDESC(sc, idx);
3640 } else
3641 WM_INIT_RXDESC(sc, idx);
3642
3643 return 0;
3644 }
3645
3646 /*
3647 * wm_rxdrain:
3648 *
3649 * Drain the receive queue.
3650 */
3651 static void
3652 wm_rxdrain(struct wm_softc *sc)
3653 {
3654 struct wm_rxsoft *rxs;
3655 int i;
3656
3657 KASSERT(WM_RX_LOCKED(sc));
3658
3659 for (i = 0; i < WM_NRXDESC; i++) {
3660 rxs = &sc->sc_rxsoft[i];
3661 if (rxs->rxs_mbuf != NULL) {
3662 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3663 m_freem(rxs->rxs_mbuf);
3664 rxs->rxs_mbuf = NULL;
3665 }
3666 }
3667 }
3668
3669 /*
3670 * wm_init: [ifnet interface function]
3671 *
3672 * Initialize the interface.
3673 */
3674 static int
3675 wm_init(struct ifnet *ifp)
3676 {
3677 struct wm_softc *sc = ifp->if_softc;
3678 int ret;
3679
3680 WM_BOTH_LOCK(sc);
3681 ret = wm_init_locked(ifp);
3682 WM_BOTH_UNLOCK(sc);
3683
3684 return ret;
3685 }
3686
3687 static int
3688 wm_init_locked(struct ifnet *ifp)
3689 {
3690 struct wm_softc *sc = ifp->if_softc;
3691 struct wm_rxsoft *rxs;
3692 int i, j, trynum, error = 0;
3693 uint32_t reg;
3694
3695 KASSERT(WM_BOTH_LOCKED(sc));
3696 /*
3697 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3698 	 * There is a small but measurable benefit to avoiding the adjustment
3699 * of the descriptor so that the headers are aligned, for normal mtu,
3700 * on such platforms. One possibility is that the DMA itself is
3701 * slightly more efficient if the front of the entire packet (instead
3702 * of the front of the headers) is aligned.
3703 *
3704 * Note we must always set align_tweak to 0 if we are using
3705 * jumbo frames.
3706 */
3707 #ifdef __NO_STRICT_ALIGNMENT
3708 sc->sc_align_tweak = 0;
3709 #else
3710 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3711 sc->sc_align_tweak = 0;
3712 else
3713 sc->sc_align_tweak = 2;
3714 #endif /* __NO_STRICT_ALIGNMENT */
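	/*
	 * A worked example of the tweak above (generic, not chip-specific):
	 * with sc_align_tweak = 2, the 14-byte Ethernet header starts 2
	 * bytes into the receive buffer, so the IP header that follows
	 * lands on a 4-byte boundary:
	 *
	 *	buf + 0:  2 bytes of padding (align_tweak)
	 *	buf + 2:  14-byte Ethernet header
	 *	buf + 16: IP header, now 4-byte aligned
	 *
	 * The (MCLBYTES - 2) test above disables the tweak whenever a
	 * maximum-sized frame would no longer fit in one cluster with the
	 * 2-byte offset applied.
	 */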
3715
3716 /* Cancel any pending I/O. */
3717 wm_stop_locked(ifp, 0);
3718
3719 /* update statistics before reset */
3720 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3721 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3722
3723 /* Reset the chip to a known state. */
3724 wm_reset(sc);
3725
3726 switch (sc->sc_type) {
3727 case WM_T_82571:
3728 case WM_T_82572:
3729 case WM_T_82573:
3730 case WM_T_82574:
3731 case WM_T_82583:
3732 case WM_T_80003:
3733 case WM_T_ICH8:
3734 case WM_T_ICH9:
3735 case WM_T_ICH10:
3736 case WM_T_PCH:
3737 case WM_T_PCH2:
3738 case WM_T_PCH_LPT:
3739 if (wm_check_mng_mode(sc) != 0)
3740 wm_get_hw_control(sc);
3741 break;
3742 default:
3743 break;
3744 }
3745
3746 /* Reset the PHY. */
3747 if (sc->sc_flags & WM_F_HAS_MII)
3748 wm_gmii_reset(sc);
3749
3750 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3751 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3752 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3753 || (sc->sc_type == WM_T_PCH_LPT))
3754 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3755
3756 /* Initialize the transmit descriptor ring. */
3757 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3758 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3759 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3760 sc->sc_txfree = WM_NTXDESC(sc);
3761 sc->sc_txnext = 0;
3762
3763 if (sc->sc_type < WM_T_82543) {
3764 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
3765 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
3766 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3767 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3768 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3769 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3770 } else {
3771 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
3772 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
3773 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3774 CSR_WRITE(sc, WMREG_TDH, 0);
3775 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3776 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3777
3778 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3779 /*
3780 * Don't write TDT before TCTL.EN is set.
3781 			 * See the documentation.
3782 */
3783 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3784 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3785 | TXDCTL_WTHRESH(0));
3786 else {
3787 CSR_WRITE(sc, WMREG_TDT, 0);
3788 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3789 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3790 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3791 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3792 }
3793 }
3794 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3795 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3796
3797 /* Initialize the transmit job descriptors. */
3798 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3799 sc->sc_txsoft[i].txs_mbuf = NULL;
3800 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3801 sc->sc_txsnext = 0;
3802 sc->sc_txsdirty = 0;
3803
3804 /*
3805 * Initialize the receive descriptor and receive job
3806 * descriptor rings.
3807 */
3808 if (sc->sc_type < WM_T_82543) {
3809 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3810 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3811 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3812 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3813 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3814 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3815
3816 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3817 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3818 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3819 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3820 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3821 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3822 } else {
3823 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3824 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3825 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3826 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3827 CSR_WRITE(sc, WMREG_EITR(0), 450);
3828 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
3829 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
3830 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3831 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
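			/*
			 * A sketch of the arithmetic above, assuming
			 * SRRCTL_BSIZEPKT_SHIFT is 10 (i.e. the buffer-size
			 * field is in 1 KB units): with MCLBYTES = 2048,
			 * 2048 >> 10 = 2, programming 2 KB receive buffers.
			 * The panic above fires when MCLBYTES is not a
			 * multiple of that unit.
			 */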
3832 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3833 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3834 | RXDCTL_WTHRESH(1));
3835 } else {
3836 CSR_WRITE(sc, WMREG_RDH, 0);
3837 CSR_WRITE(sc, WMREG_RDT, 0);
3838 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3839 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3840 }
3841 }
3842 for (i = 0; i < WM_NRXDESC; i++) {
3843 rxs = &sc->sc_rxsoft[i];
3844 if (rxs->rxs_mbuf == NULL) {
3845 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3846 log(LOG_ERR, "%s: unable to allocate or map "
3847 "rx buffer %d, error = %d\n",
3848 device_xname(sc->sc_dev), i, error);
3849 /*
3850 * XXX Should attempt to run with fewer receive
3851 * XXX buffers instead of just failing.
3852 */
3853 wm_rxdrain(sc);
3854 goto out;
3855 }
3856 } else {
3857 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3858 WM_INIT_RXDESC(sc, i);
3859 /*
3860 			 * For 82575 and newer devices, the RX descriptors
3861 			 * must be initialized after the setting of RCTL.EN in
3862 			 * wm_set_filter().
3863 */
3864 }
3865 }
3866 sc->sc_rxptr = 0;
3867 sc->sc_rxdiscard = 0;
3868 WM_RXCHAIN_RESET(sc);
3869
3870 /*
3871 * Clear out the VLAN table -- we don't use it (yet).
3872 */
3873 CSR_WRITE(sc, WMREG_VET, 0);
3874 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3875 trynum = 10; /* Due to hw errata */
3876 else
3877 trynum = 1;
3878 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3879 for (j = 0; j < trynum; j++)
3880 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3881
3882 /*
3883 * Set up flow-control parameters.
3884 *
3885 * XXX Values could probably stand some tuning.
3886 */
3887 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3888 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
3889 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
3890 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3891 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3892 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3893 }
3894
3895 sc->sc_fcrtl = FCRTL_DFLT;
3896 if (sc->sc_type < WM_T_82543) {
3897 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3898 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3899 } else {
3900 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3901 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3902 }
3903
3904 if (sc->sc_type == WM_T_80003)
3905 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3906 else
3907 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3908
3909 /* Writes the control register. */
3910 wm_set_vlan(sc);
3911
3912 if (sc->sc_flags & WM_F_HAS_MII) {
3913 int val;
3914
3915 switch (sc->sc_type) {
3916 case WM_T_80003:
3917 case WM_T_ICH8:
3918 case WM_T_ICH9:
3919 case WM_T_ICH10:
3920 case WM_T_PCH:
3921 case WM_T_PCH2:
3922 case WM_T_PCH_LPT:
3923 /*
3924 			 * Set the MAC to wait the maximum time between each
3925 			 * iteration and increase the max iterations when
3926 			 * polling the PHY; this fixes erroneous timeouts at
3927 * 10Mbps.
3928 */
3929 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3930 0xFFFF);
3931 val = wm_kmrn_readreg(sc,
3932 KUMCTRLSTA_OFFSET_INB_PARAM);
3933 val |= 0x3F;
3934 wm_kmrn_writereg(sc,
3935 KUMCTRLSTA_OFFSET_INB_PARAM, val);
3936 break;
3937 default:
3938 break;
3939 }
3940
3941 if (sc->sc_type == WM_T_80003) {
3942 val = CSR_READ(sc, WMREG_CTRL_EXT);
3943 val &= ~CTRL_EXT_LINK_MODE_MASK;
3944 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3945
3946 			/* Bypass RX and TX FIFOs */
3947 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3948 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
3949 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3950 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3951 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3952 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3953 }
3954 }
3955 #if 0
3956 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3957 #endif
3958
3959 /* Set up checksum offload parameters. */
3960 reg = CSR_READ(sc, WMREG_RXCSUM);
3961 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3962 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3963 reg |= RXCSUM_IPOFL;
3964 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3965 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3966 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3967 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3968 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3969
3970 /* Set up the interrupt registers. */
3971 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3972 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3973 ICR_RXO | ICR_RXT0;
3974 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3975
3976 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3977 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3978 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3979 reg = CSR_READ(sc, WMREG_KABGTXD);
3980 reg |= KABGTXD_BGSQLBIAS;
3981 CSR_WRITE(sc, WMREG_KABGTXD, reg);
3982 }
3983
3984 /* Set up the inter-packet gap. */
3985 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3986
3987 if (sc->sc_type >= WM_T_82543) {
3988 /*
3989 * Set up the interrupt throttling register (units of 256ns)
3990 * Note that a footnote in Intel's documentation says this
3991 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3992 * or 10Mbit mode. Empirically, it appears to be the case
3993 * that that is also true for the 1024ns units of the other
3994 * interrupt-related timer registers -- so, really, we ought
3995 * to divide this value by 4 when the link speed is low.
3996 *
3997 * XXX implement this division at link speed change!
3998 */
3999
4000 /*
4001 * For N interrupts/sec, set this value to:
4002 * 1000000000 / (N * 256). Note that we set the
4003 * absolute and packet timer values to this value
4004 * divided by 4 to get "simple timer" behavior.
4005 */
4006
4007 sc->sc_itr = 1500; /* 2604 ints/sec */
4008 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
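		/*
		 * Checking the arithmetic in the comment above:
		 * 1000000000 / (1500 * 256) is ~2604, matching the
		 * "2604 ints/sec" annotation on sc_itr.  The TIDV/TADV and
		 * RDTR/RADV values of 375 programmed earlier are 1500 / 4,
		 * per the "divided by 4" rule described above.
		 */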
4009 }
4010
4011 /* Set the VLAN ethernetype. */
4012 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4013
4014 /*
4015 * Set up the transmit control register; we start out with
4016 	 * a collision distance suitable for FDX, but update it when
4017 * we resolve the media type.
4018 */
4019 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4020 | TCTL_CT(TX_COLLISION_THRESHOLD)
4021 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4022 if (sc->sc_type >= WM_T_82571)
4023 sc->sc_tctl |= TCTL_MULR;
4024 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4025
4026 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4027 		/* Write TDT after TCTL.EN is set. See the documentation. */
4028 CSR_WRITE(sc, WMREG_TDT, 0);
4029 }
4030
4031 if (sc->sc_type == WM_T_80003) {
4032 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4033 reg &= ~TCTL_EXT_GCEX_MASK;
4034 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4035 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4036 }
4037
4038 /* Set the media. */
4039 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4040 goto out;
4041
4042 /* Configure for OS presence */
4043 wm_init_manageability(sc);
4044
4045 /*
4046 * Set up the receive control register; we actually program
4047 * the register when we set the receive filter. Use multicast
4048 * address offset type 0.
4049 *
4050 * Only the i82544 has the ability to strip the incoming
4051 * CRC, so we don't enable that feature.
4052 */
4053 sc->sc_mchash_type = 0;
4054 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4055 | RCTL_MO(sc->sc_mchash_type);
4056
4057 /*
4058 * The I350 has a bug where it always strips the CRC whether
4059 * asked to or not. So ask for stripped CRC here and cope in rxeof
4060 */
4061 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4062 || (sc->sc_type == WM_T_I210))
4063 sc->sc_rctl |= RCTL_SECRC;
4064
4065 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4066 && (ifp->if_mtu > ETHERMTU)) {
4067 sc->sc_rctl |= RCTL_LPE;
4068 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4069 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4070 }
4071
4072 if (MCLBYTES == 2048) {
4073 sc->sc_rctl |= RCTL_2k;
4074 } else {
4075 if (sc->sc_type >= WM_T_82543) {
4076 switch (MCLBYTES) {
4077 case 4096:
4078 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4079 break;
4080 case 8192:
4081 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4082 break;
4083 case 16384:
4084 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4085 break;
4086 default:
4087 panic("wm_init: MCLBYTES %d unsupported",
4088 MCLBYTES);
4089 break;
4090 }
4091 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4092 }
4093
4094 /* Set the receive filter. */
4095 wm_set_filter(sc);
4096
4097 /* Enable ECC */
4098 switch (sc->sc_type) {
4099 case WM_T_82571:
4100 reg = CSR_READ(sc, WMREG_PBA_ECC);
4101 reg |= PBA_ECC_CORR_EN;
4102 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4103 break;
4104 case WM_T_PCH_LPT:
4105 reg = CSR_READ(sc, WMREG_PBECCSTS);
4106 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4107 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4108
4109 reg = CSR_READ(sc, WMREG_CTRL);
4110 reg |= CTRL_MEHE;
4111 CSR_WRITE(sc, WMREG_CTRL, reg);
4112 break;
4113 default:
4114 break;
4115 }
4116
4117 	/* On 82575 and later, set RDT only if RX is enabled */
4118 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4119 for (i = 0; i < WM_NRXDESC; i++)
4120 WM_INIT_RXDESC(sc, i);
4121
4122 sc->sc_stopping = false;
4123
4124 /* Start the one second link check clock. */
4125 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4126
4127 /* ...all done! */
4128 ifp->if_flags |= IFF_RUNNING;
4129 ifp->if_flags &= ~IFF_OACTIVE;
4130
4131 out:
4132 sc->sc_if_flags = ifp->if_flags;
4133 if (error)
4134 log(LOG_ERR, "%s: interface not running\n",
4135 device_xname(sc->sc_dev));
4136 return error;
4137 }
4138
4139 /*
4140 * wm_stop: [ifnet interface function]
4141 *
4142 * Stop transmission on the interface.
4143 */
4144 static void
4145 wm_stop(struct ifnet *ifp, int disable)
4146 {
4147 struct wm_softc *sc = ifp->if_softc;
4148
4149 WM_BOTH_LOCK(sc);
4150 wm_stop_locked(ifp, disable);
4151 WM_BOTH_UNLOCK(sc);
4152 }
4153
4154 static void
4155 wm_stop_locked(struct ifnet *ifp, int disable)
4156 {
4157 struct wm_softc *sc = ifp->if_softc;
4158 struct wm_txsoft *txs;
4159 int i;
4160
4161 KASSERT(WM_BOTH_LOCKED(sc));
4162
4163 sc->sc_stopping = true;
4164
4165 /* Stop the one second clock. */
4166 callout_stop(&sc->sc_tick_ch);
4167
4168 /* Stop the 82547 Tx FIFO stall check timer. */
4169 if (sc->sc_type == WM_T_82547)
4170 callout_stop(&sc->sc_txfifo_ch);
4171
4172 if (sc->sc_flags & WM_F_HAS_MII) {
4173 /* Down the MII. */
4174 mii_down(&sc->sc_mii);
4175 } else {
4176 #if 0
4177 /* Should we clear PHY's status properly? */
4178 wm_reset(sc);
4179 #endif
4180 }
4181
4182 /* Stop the transmit and receive processes. */
4183 CSR_WRITE(sc, WMREG_TCTL, 0);
4184 CSR_WRITE(sc, WMREG_RCTL, 0);
4185 sc->sc_rctl &= ~RCTL_EN;
4186
4187 /*
4188 * Clear the interrupt mask to ensure the device cannot assert its
4189 * interrupt line.
4190 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4191 * any currently pending or shared interrupt.
4192 */
4193 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4194 sc->sc_icr = 0;
4195
4196 /* Release any queued transmit buffers. */
4197 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4198 txs = &sc->sc_txsoft[i];
4199 if (txs->txs_mbuf != NULL) {
4200 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4201 m_freem(txs->txs_mbuf);
4202 txs->txs_mbuf = NULL;
4203 }
4204 }
4205
4206 /* Mark the interface as down and cancel the watchdog timer. */
4207 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4208 ifp->if_timer = 0;
4209
4210 if (disable)
4211 wm_rxdrain(sc);
4212
4213 #if 0 /* notyet */
4214 if (sc->sc_type >= WM_T_82544)
4215 CSR_WRITE(sc, WMREG_WUC, 0);
4216 #endif
4217 }
4218
4219 /*
4220 * wm_tx_offload:
4221 *
4222 * Set up TCP/IP checksumming parameters for the
4223 * specified packet.
4224 */
4225 static int
4226 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4227 uint8_t *fieldsp)
4228 {
4229 struct mbuf *m0 = txs->txs_mbuf;
4230 struct livengood_tcpip_ctxdesc *t;
4231 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4232 uint32_t ipcse;
4233 struct ether_header *eh;
4234 int offset, iphl;
4235 uint8_t fields;
4236
4237 /*
4238 * XXX It would be nice if the mbuf pkthdr had offset
4239 * fields for the protocol headers.
4240 */
4241
4242 eh = mtod(m0, struct ether_header *);
4243 switch (htons(eh->ether_type)) {
4244 case ETHERTYPE_IP:
4245 case ETHERTYPE_IPV6:
4246 offset = ETHER_HDR_LEN;
4247 break;
4248
4249 case ETHERTYPE_VLAN:
4250 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4251 break;
4252
4253 default:
4254 /*
4255 * Don't support this protocol or encapsulation.
4256 */
4257 *fieldsp = 0;
4258 *cmdp = 0;
4259 return 0;
4260 }
4261
4262 if ((m0->m_pkthdr.csum_flags &
4263 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4264 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4265 } else {
4266 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4267 }
4268 ipcse = offset + iphl - 1;
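	/*
	 * For example, an untagged IPv4 frame with no IP options has
	 * offset = 14 and iphl = 20, so ipcse = 33, the offset of the
	 * last byte covered by the IP checksum.
	 */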
4269
4270 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4271 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4272 seg = 0;
4273 fields = 0;
4274
4275 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4276 int hlen = offset + iphl;
4277 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4278
4279 if (__predict_false(m0->m_len <
4280 (hlen + sizeof(struct tcphdr)))) {
4281 /*
4282 * TCP/IP headers are not in the first mbuf; we need
4283 * to do this the slow and painful way. Let's just
4284 * hope this doesn't happen very often.
4285 */
4286 struct tcphdr th;
4287
4288 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4289
4290 m_copydata(m0, hlen, sizeof(th), &th);
4291 if (v4) {
4292 struct ip ip;
4293
4294 m_copydata(m0, offset, sizeof(ip), &ip);
4295 ip.ip_len = 0;
4296 m_copyback(m0,
4297 offset + offsetof(struct ip, ip_len),
4298 sizeof(ip.ip_len), &ip.ip_len);
4299 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4300 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4301 } else {
4302 struct ip6_hdr ip6;
4303
4304 m_copydata(m0, offset, sizeof(ip6), &ip6);
4305 ip6.ip6_plen = 0;
4306 m_copyback(m0,
4307 offset + offsetof(struct ip6_hdr, ip6_plen),
4308 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4309 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4310 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4311 }
4312 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4313 sizeof(th.th_sum), &th.th_sum);
4314
4315 hlen += th.th_off << 2;
4316 } else {
4317 /*
4318 * TCP/IP headers are in the first mbuf; we can do
4319 * this the easy way.
4320 */
4321 struct tcphdr *th;
4322
4323 if (v4) {
4324 struct ip *ip =
4325 (void *)(mtod(m0, char *) + offset);
4326 th = (void *)(mtod(m0, char *) + hlen);
4327
4328 ip->ip_len = 0;
4329 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4330 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4331 } else {
4332 struct ip6_hdr *ip6 =
4333 (void *)(mtod(m0, char *) + offset);
4334 th = (void *)(mtod(m0, char *) + hlen);
4335
4336 ip6->ip6_plen = 0;
4337 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4338 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4339 }
4340 hlen += th->th_off << 2;
4341 }
4342
4343 if (v4) {
4344 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4345 cmdlen |= WTX_TCPIP_CMD_IP;
4346 } else {
4347 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4348 ipcse = 0;
4349 }
4350 cmd |= WTX_TCPIP_CMD_TSE;
4351 cmdlen |= WTX_TCPIP_CMD_TSE |
4352 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4353 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4354 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4355 }
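	/*
	 * Worked example of the TSO context above (hypothetical frame:
	 * untagged IPv4/TCP, no IP or TCP options): offset = 14 and
	 * iphl = 20, plus a 20-byte TCP header, give hlen = 54.  With a
	 * 1460-byte MSS the context then carries
	 *
	 *	seg = WTX_TCPIP_SEG_HDRLEN(54) | WTX_TCPIP_SEG_MSS(1460);
	 *
	 * and the length added to cmdlen is the TCP payload length,
	 * m0->m_pkthdr.len - 54.
	 */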
4356
4357 /*
4358 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4359 * offload feature, if we load the context descriptor, we
4360 * MUST provide valid values for IPCSS and TUCSS fields.
4361 */
4362
4363 ipcs = WTX_TCPIP_IPCSS(offset) |
4364 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4365 WTX_TCPIP_IPCSE(ipcse);
4366 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
4367 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
4368 fields |= WTX_IXSM;
4369 }
4370
4371 offset += iphl;
4372
4373 if (m0->m_pkthdr.csum_flags &
4374 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
4375 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4376 fields |= WTX_TXSM;
4377 tucs = WTX_TCPIP_TUCSS(offset) |
4378 WTX_TCPIP_TUCSO(offset +
4379 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
4380 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4381 } else if ((m0->m_pkthdr.csum_flags &
4382 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
4383 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4384 fields |= WTX_TXSM;
4385 tucs = WTX_TCPIP_TUCSS(offset) |
4386 WTX_TCPIP_TUCSO(offset +
4387 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
4388 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4389 } else {
4390 /* Just initialize it to a valid TCP context. */
4391 tucs = WTX_TCPIP_TUCSS(offset) |
4392 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
4393 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4394 }
4395
4396 /* Fill in the context descriptor. */
4397 t = (struct livengood_tcpip_ctxdesc *)
4398 &sc->sc_txdescs[sc->sc_txnext];
4399 t->tcpip_ipcs = htole32(ipcs);
4400 t->tcpip_tucs = htole32(tucs);
4401 t->tcpip_cmdlen = htole32(cmdlen);
4402 t->tcpip_seg = htole32(seg);
4403 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4404
4405 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4406 txs->txs_ndesc++;
4407
4408 *cmdp = cmd;
4409 *fieldsp = fields;
4410
4411 return 0;
4412 }
4413
4414 static void
4415 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
4416 {
4417 struct mbuf *m;
4418 int i;
4419
4420 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
4421 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
4422 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
4423 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
4424 m->m_data, m->m_len, m->m_flags);
4425 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
4426 i, i == 1 ? "" : "s");
4427 }
4428
4429 /*
4430 * wm_82547_txfifo_stall:
4431 *
4432 * Callout used to wait for the 82547 Tx FIFO to drain,
4433 * reset the FIFO pointers, and restart packet transmission.
4434 */
4435 static void
4436 wm_82547_txfifo_stall(void *arg)
4437 {
4438 struct wm_softc *sc = arg;
4439 #ifndef WM_MPSAFE
4440 int s;
4441
4442 s = splnet();
4443 #endif
4444 WM_TX_LOCK(sc);
4445
4446 if (sc->sc_stopping)
4447 goto out;
4448
4449 if (sc->sc_txfifo_stall) {
4450 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
4451 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
4452 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
4453 /*
4454 * Packets have drained. Stop transmitter, reset
4455 * FIFO pointers, restart transmitter, and kick
4456 * the packet queue.
4457 */
4458 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
4459 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
4460 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
4461 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
4462 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
4463 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
4464 CSR_WRITE(sc, WMREG_TCTL, tctl);
4465 CSR_WRITE_FLUSH(sc);
4466
4467 sc->sc_txfifo_head = 0;
4468 sc->sc_txfifo_stall = 0;
4469 wm_start_locked(&sc->sc_ethercom.ec_if);
4470 } else {
4471 /*
4472 * Still waiting for packets to drain; try again in
4473 * another tick.
4474 */
4475 callout_schedule(&sc->sc_txfifo_ch, 1);
4476 }
4477 }
4478
4479 out:
4480 WM_TX_UNLOCK(sc);
4481 #ifndef WM_MPSAFE
4482 splx(s);
4483 #endif
4484 }
4485
4486 /*
4487 * wm_82547_txfifo_bugchk:
4488 *
4489 * Check for bug condition in the 82547 Tx FIFO. We need to
4490 * prevent enqueueing a packet that would wrap around the end
4491  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
4492 *
4493 * We do this by checking the amount of space before the end
4494 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
4495 * the Tx FIFO, wait for all remaining packets to drain, reset
4496 * the internal FIFO pointers to the beginning, and restart
4497 * transmission on the interface.
4498 */
4499 #define WM_FIFO_HDR 0x10
4500 #define WM_82547_PAD_LEN 0x3e0
4501 static int
4502 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
4503 {
4504 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
4505 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
4506
4507 /* Just return if already stalled. */
4508 if (sc->sc_txfifo_stall)
4509 return 1;
4510
4511 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4512 /* Stall only occurs in half-duplex mode. */
4513 goto send_packet;
4514 }
4515
4516 if (len >= WM_82547_PAD_LEN + space) {
4517 sc->sc_txfifo_stall = 1;
4518 callout_schedule(&sc->sc_txfifo_ch, 1);
4519 return 1;
4520 }
4521
4522 send_packet:
4523 sc->sc_txfifo_head += len;
4524 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
4525 sc->sc_txfifo_head -= sc->sc_txfifo_size;
4526
4527 return 0;
4528 }
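/*
 * Worked example for the check above (hypothetical numbers): a
 * 1500-byte packet gives len = roundup(1500 + 0x10, 0x10) = 1520.
 * If only `space' bytes remain before the end of the FIFO and
 * len >= WM_82547_PAD_LEN + space, we stall until the FIFO drains;
 * otherwise the head pointer simply advances by len, wrapping
 * modulo sc_txfifo_size.
 */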
4529
4530 /*
4531 * wm_start: [ifnet interface function]
4532 *
4533 * Start packet transmission on the interface.
4534 */
4535 static void
4536 wm_start(struct ifnet *ifp)
4537 {
4538 struct wm_softc *sc = ifp->if_softc;
4539
4540 WM_TX_LOCK(sc);
4541 if (!sc->sc_stopping)
4542 wm_start_locked(ifp);
4543 WM_TX_UNLOCK(sc);
4544 }
4545
4546 static void
4547 wm_start_locked(struct ifnet *ifp)
4548 {
4549 struct wm_softc *sc = ifp->if_softc;
4550 struct mbuf *m0;
4551 struct m_tag *mtag;
4552 struct wm_txsoft *txs;
4553 bus_dmamap_t dmamap;
4554 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
4555 bus_addr_t curaddr;
4556 bus_size_t seglen, curlen;
4557 uint32_t cksumcmd;
4558 uint8_t cksumfields;
4559
4560 KASSERT(WM_TX_LOCKED(sc));
4561
4562 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4563 return;
4564
4565 /* Remember the previous number of free descriptors. */
4566 ofree = sc->sc_txfree;
4567
4568 /*
4569 * Loop through the send queue, setting up transmit descriptors
4570 * until we drain the queue, or use up all available transmit
4571 * descriptors.
4572 */
4573 for (;;) {
4574 m0 = NULL;
4575
4576 /* Get a work queue entry. */
4577 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
4578 wm_txintr(sc);
4579 if (sc->sc_txsfree == 0) {
4580 DPRINTF(WM_DEBUG_TX,
4581 ("%s: TX: no free job descriptors\n",
4582 device_xname(sc->sc_dev)));
4583 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
4584 break;
4585 }
4586 }
4587
4588 /* Grab a packet off the queue. */
4589 IFQ_DEQUEUE(&ifp->if_snd, m0);
4590 if (m0 == NULL)
4591 break;
4592
4593 DPRINTF(WM_DEBUG_TX,
4594 ("%s: TX: have packet to transmit: %p\n",
4595 device_xname(sc->sc_dev), m0));
4596
4597 txs = &sc->sc_txsoft[sc->sc_txsnext];
4598 dmamap = txs->txs_dmamap;
4599
4600 use_tso = (m0->m_pkthdr.csum_flags &
4601 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
4602
4603 /*
4604 * So says the Linux driver:
4605 * The controller does a simple calculation to make sure
4606 * there is enough room in the FIFO before initiating the
4607 * DMA for each buffer. The calc is:
4608 * 4 = ceil(buffer len / MSS)
4609 * To make sure we don't overrun the FIFO, adjust the max
4610 * buffer len if the MSS drops.
4611 */
4612 dmamap->dm_maxsegsz =
4613 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
4614 ? m0->m_pkthdr.segsz << 2
4615 : WTX_MAX_LEN;
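		/*
		 * Example of the clamp above (hypothetical MSS, assuming the
		 * result is below WTX_MAX_LEN): with m0->m_pkthdr.segsz =
		 * 1460, each DMA segment is limited to 1460 << 2 = 5840
		 * bytes, i.e. exactly 4 MSS-sized chunks -- matching the
		 * "4 = ceil(buffer len / MSS)" FIFO check quoted from the
		 * Linux driver above.
		 */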
4616
4617 /*
4618 * Load the DMA map. If this fails, the packet either
4619 * didn't fit in the allotted number of segments, or we
4620 * were short on resources. For the too-many-segments
4621 * case, we simply report an error and drop the packet,
4622 * since we can't sanely copy a jumbo packet to a single
4623 * buffer.
4624 */
4625 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
4626 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
4627 if (error) {
4628 if (error == EFBIG) {
4629 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4630 log(LOG_ERR, "%s: Tx packet consumes too many "
4631 "DMA segments, dropping...\n",
4632 device_xname(sc->sc_dev));
4633 wm_dump_mbuf_chain(sc, m0);
4634 m_freem(m0);
4635 continue;
4636 }
4637 /* Short on resources, just stop for now. */
4638 DPRINTF(WM_DEBUG_TX,
4639 ("%s: TX: dmamap load failed: %d\n",
4640 device_xname(sc->sc_dev), error));
4641 break;
4642 }
4643
4644 segs_needed = dmamap->dm_nsegs;
4645 if (use_tso) {
4646 /* For sentinel descriptor; see below. */
4647 segs_needed++;
4648 }
4649
4650 /*
4651 * Ensure we have enough descriptors free to describe
4652 * the packet. Note, we always reserve one descriptor
4653 * at the end of the ring due to the semantics of the
4654 * TDT register, plus one more in the event we need
4655 * to load offload context.
4656 */
4657 if (segs_needed > sc->sc_txfree - 2) {
4658 /*
4659 * Not enough free descriptors to transmit this
4660 * packet. We haven't committed anything yet,
4661 * so just unload the DMA map, put the packet
4662 			 * back on the queue, and punt. Notify the upper
4663 * layer that there are no more slots left.
4664 */
4665 DPRINTF(WM_DEBUG_TX,
4666 ("%s: TX: need %d (%d) descriptors, have %d\n",
4667 device_xname(sc->sc_dev), dmamap->dm_nsegs,
4668 segs_needed, sc->sc_txfree - 1));
4669 ifp->if_flags |= IFF_OACTIVE;
4670 bus_dmamap_unload(sc->sc_dmat, dmamap);
4671 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
4672 break;
4673 }
4674
4675 /*
4676 * Check for 82547 Tx FIFO bug. We need to do this
4677 * once we know we can transmit the packet, since we
4678 * do some internal FIFO space accounting here.
4679 */
4680 if (sc->sc_type == WM_T_82547 &&
4681 wm_82547_txfifo_bugchk(sc, m0)) {
4682 DPRINTF(WM_DEBUG_TX,
4683 ("%s: TX: 82547 Tx FIFO bug detected\n",
4684 device_xname(sc->sc_dev)));
4685 ifp->if_flags |= IFF_OACTIVE;
4686 bus_dmamap_unload(sc->sc_dmat, dmamap);
4687 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
4688 break;
4689 }
4690
4691 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
4692
4693 DPRINTF(WM_DEBUG_TX,
4694 ("%s: TX: packet has %d (%d) DMA segments\n",
4695 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
4696
4697 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
4698
4699 /*
4700 * Store a pointer to the packet so that we can free it
4701 * later.
4702 *
4703 * Initially, we consider the number of descriptors the
4704 		 * packet uses to be the number of DMA segments. This may be
4705 * incremented by 1 if we do checksum offload (a descriptor
4706 * is used to set the checksum context).
4707 */
4708 txs->txs_mbuf = m0;
4709 txs->txs_firstdesc = sc->sc_txnext;
4710 txs->txs_ndesc = segs_needed;
4711
4712 /* Set up offload parameters for this packet. */
4713 if (m0->m_pkthdr.csum_flags &
4714 (M_CSUM_TSOv4|M_CSUM_TSOv6|
4715 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
4716 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
4717 if (wm_tx_offload(sc, txs, &cksumcmd,
4718 &cksumfields) != 0) {
4719 /* Error message already displayed. */
4720 bus_dmamap_unload(sc->sc_dmat, dmamap);
4721 continue;
4722 }
4723 } else {
4724 cksumcmd = 0;
4725 cksumfields = 0;
4726 }
4727
4728 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
4729
4730 /* Sync the DMA map. */
4731 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
4732 BUS_DMASYNC_PREWRITE);
4733
4734 /* Initialize the transmit descriptor. */
4735 for (nexttx = sc->sc_txnext, seg = 0;
4736 seg < dmamap->dm_nsegs; seg++) {
4737 for (seglen = dmamap->dm_segs[seg].ds_len,
4738 curaddr = dmamap->dm_segs[seg].ds_addr;
4739 seglen != 0;
4740 curaddr += curlen, seglen -= curlen,
4741 nexttx = WM_NEXTTX(sc, nexttx)) {
4742 curlen = seglen;
4743
4744 /*
4745 * So says the Linux driver:
4746 * Work around for premature descriptor
4747 * write-backs in TSO mode. Append a
4748 * 4-byte sentinel descriptor.
4749 */
4750 if (use_tso &&
4751 seg == dmamap->dm_nsegs - 1 &&
4752 curlen > 8)
4753 curlen -= 4;
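				/*
				 * Shortening curlen by 4 leaves 4 bytes of
				 * this final segment unconsumed, so the
				 * enclosing loop emits one extra 4-byte
				 * descriptor -- the sentinel accounted for
				 * in segs_needed above.
				 */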
4754
4755 wm_set_dma_addr(
4756 &sc->sc_txdescs[nexttx].wtx_addr,
4757 curaddr);
4758 sc->sc_txdescs[nexttx].wtx_cmdlen =
4759 htole32(cksumcmd | curlen);
4760 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
4761 0;
4762 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
4763 cksumfields;
4764 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
4765 lasttx = nexttx;
4766
4767 DPRINTF(WM_DEBUG_TX,
4768 ("%s: TX: desc %d: low %#" PRIx64 ", "
4769 "len %#04zx\n",
4770 device_xname(sc->sc_dev), nexttx,
4771 (uint64_t)curaddr, curlen));
4772 }
4773 }
4774
4775 KASSERT(lasttx != -1);
4776
4777 /*
4778 * Set up the command byte on the last descriptor of
4779 * the packet. If we're in the interrupt delay window,
4780 * delay the interrupt.
4781 */
4782 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4783 htole32(WTX_CMD_EOP | WTX_CMD_RS);
4784
4785 /*
4786 * If VLANs are enabled and the packet has a VLAN tag, set
4787 * up the descriptor to encapsulate the packet for us.
4788 *
4789 * This is only valid on the last descriptor of the packet.
4790 */
4791 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4792 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4793 htole32(WTX_CMD_VLE);
4794 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
4795 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
4796 }
4797
4798 txs->txs_lastdesc = lasttx;
4799
4800 DPRINTF(WM_DEBUG_TX,
4801 ("%s: TX: desc %d: cmdlen 0x%08x\n",
4802 device_xname(sc->sc_dev),
4803 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
4804
4805 /* Sync the descriptors we're using. */
4806 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
4807 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4808
4809 /* Give the packet to the chip. */
4810 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
4811
4812 DPRINTF(WM_DEBUG_TX,
4813 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
4814
4815 DPRINTF(WM_DEBUG_TX,
4816 ("%s: TX: finished transmitting packet, job %d\n",
4817 device_xname(sc->sc_dev), sc->sc_txsnext));
4818
4819 /* Advance the tx pointer. */
4820 sc->sc_txfree -= txs->txs_ndesc;
4821 sc->sc_txnext = nexttx;
4822
4823 sc->sc_txsfree--;
4824 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
4825
4826 /* Pass the packet to any BPF listeners. */
4827 bpf_mtap(ifp, m0);
4828 }
4829
4830 if (m0 != NULL) {
4831 ifp->if_flags |= IFF_OACTIVE;
4832 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4833 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
4834 m_freem(m0);
4835 }
4836
4837 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
4838 /* No more slots; notify upper layer. */
4839 ifp->if_flags |= IFF_OACTIVE;
4840 }
4841
4842 if (sc->sc_txfree != ofree) {
4843 /* Set a watchdog timer in case the chip flakes out. */
4844 ifp->if_timer = 5;
4845 }
4846 }
4847
4848 /*
4849 * wm_nq_tx_offload:
4850 *
4851 * Set up TCP/IP checksumming parameters for the
4852 * specified packet, for NEWQUEUE devices
4853 */
4854 static int
4855 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
4856 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
4857 {
4858 struct mbuf *m0 = txs->txs_mbuf;
4859 struct m_tag *mtag;
4860 uint32_t vl_len, mssidx, cmdc;
4861 struct ether_header *eh;
4862 int offset, iphl;
4863
4864 /*
4865 * XXX It would be nice if the mbuf pkthdr had offset
4866 * fields for the protocol headers.
4867 */
4868 *cmdlenp = 0;
4869 *fieldsp = 0;
4870
4871 eh = mtod(m0, struct ether_header *);
4872 switch (htons(eh->ether_type)) {
4873 case ETHERTYPE_IP:
4874 case ETHERTYPE_IPV6:
4875 offset = ETHER_HDR_LEN;
4876 break;
4877
4878 case ETHERTYPE_VLAN:
4879 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4880 break;
4881
4882 default:
4883 /* Don't support this protocol or encapsulation. */
4884 *do_csum = false;
4885 return 0;
4886 }
4887 *do_csum = true;
4888 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
4889 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
4890
4891 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
4892 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
4893
4894 if ((m0->m_pkthdr.csum_flags &
4895 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
4896 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4897 } else {
4898 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4899 }
4900 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
4901 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
4902
4903 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4904 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
4905 << NQTXC_VLLEN_VLAN_SHIFT);
4906 *cmdlenp |= NQTX_CMD_VLE;
4907 }
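	/*
	 * At this point vl_len packs the MAC header length, the IP header
	 * length, and (if present) the VLAN tag into a single word.  For
	 * example, an untagged IPv4 frame with no IP options yields
	 *
	 *	vl_len = (14 << NQTXC_VLLEN_MACLEN_SHIFT) |
	 *	    (20 << NQTXC_VLLEN_IPLEN_SHIFT);
	 */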
4908
4909 mssidx = 0;
4910
4911 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4912 int hlen = offset + iphl;
4913 int tcp_hlen;
4914 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4915
4916 if (__predict_false(m0->m_len <
4917 (hlen + sizeof(struct tcphdr)))) {
4918 /*
4919 * TCP/IP headers are not in the first mbuf; we need
4920 * to do this the slow and painful way. Let's just
4921 * hope this doesn't happen very often.
4922 */
4923 struct tcphdr th;
4924
4925 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4926
4927 m_copydata(m0, hlen, sizeof(th), &th);
4928 if (v4) {
4929 struct ip ip;
4930
4931 m_copydata(m0, offset, sizeof(ip), &ip);
4932 ip.ip_len = 0;
4933 m_copyback(m0,
4934 offset + offsetof(struct ip, ip_len),
4935 sizeof(ip.ip_len), &ip.ip_len);
4936 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4937 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4938 } else {
4939 struct ip6_hdr ip6;
4940
4941 m_copydata(m0, offset, sizeof(ip6), &ip6);
4942 ip6.ip6_plen = 0;
4943 m_copyback(m0,
4944 offset + offsetof(struct ip6_hdr, ip6_plen),
4945 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4946 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4947 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4948 }
4949 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4950 sizeof(th.th_sum), &th.th_sum);
4951
4952 tcp_hlen = th.th_off << 2;
4953 } else {
4954 /*
4955 * TCP/IP headers are in the first mbuf; we can do
4956 * this the easy way.
4957 */
4958 struct tcphdr *th;
4959
4960 if (v4) {
4961 struct ip *ip =
4962 (void *)(mtod(m0, char *) + offset);
4963 th = (void *)(mtod(m0, char *) + hlen);
4964
4965 ip->ip_len = 0;
4966 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4967 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4968 } else {
4969 struct ip6_hdr *ip6 =
4970 (void *)(mtod(m0, char *) + offset);
4971 th = (void *)(mtod(m0, char *) + hlen);
4972
4973 ip6->ip6_plen = 0;
4974 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4975 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4976 }
4977 tcp_hlen = th->th_off << 2;
4978 }
4979 hlen += tcp_hlen;
4980 *cmdlenp |= NQTX_CMD_TSE;
4981
4982 if (v4) {
4983 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4984 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
4985 } else {
4986 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4987 *fieldsp |= NQTXD_FIELDS_TUXSM;
4988 }
4989 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
4990 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
4991 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
4992 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
4993 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
4994 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
4995 } else {
4996 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
4997 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
4998 }
4999
5000 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
5001 *fieldsp |= NQTXD_FIELDS_IXSM;
5002 cmdc |= NQTXC_CMD_IP4;
5003 }
5004
5005 if (m0->m_pkthdr.csum_flags &
5006 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5007 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5008 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5009 cmdc |= NQTXC_CMD_TCP;
5010 } else {
5011 cmdc |= NQTXC_CMD_UDP;
5012 }
5013 cmdc |= NQTXC_CMD_IP4;
5014 *fieldsp |= NQTXD_FIELDS_TUXSM;
5015 }
5016 if (m0->m_pkthdr.csum_flags &
5017 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5018 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5019 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5020 cmdc |= NQTXC_CMD_TCP;
5021 } else {
5022 cmdc |= NQTXC_CMD_UDP;
5023 }
5024 cmdc |= NQTXC_CMD_IP6;
5025 *fieldsp |= NQTXD_FIELDS_TUXSM;
5026 }
5027
5028 /* Fill in the context descriptor. */
5029 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5030 htole32(vl_len);
5031 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5032 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5033 htole32(cmdc);
5034 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5035 htole32(mssidx);
5036 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5037 DPRINTF(WM_DEBUG_TX,
5038 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5039 sc->sc_txnext, 0, vl_len));
5040 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5041 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5042 txs->txs_ndesc++;
5043 return 0;
5044 }
5045
5046 /*
5047 * wm_nq_start: [ifnet interface function]
5048 *
5049 * Start packet transmission on the interface for NEWQUEUE devices
5050 */
5051 static void
5052 wm_nq_start(struct ifnet *ifp)
5053 {
5054 struct wm_softc *sc = ifp->if_softc;
5055
5056 WM_TX_LOCK(sc);
5057 if (!sc->sc_stopping)
5058 wm_nq_start_locked(ifp);
5059 WM_TX_UNLOCK(sc);
5060 }
5061
5062 static void
5063 wm_nq_start_locked(struct ifnet *ifp)
5064 {
5065 struct wm_softc *sc = ifp->if_softc;
5066 struct mbuf *m0;
5067 struct m_tag *mtag;
5068 struct wm_txsoft *txs;
5069 bus_dmamap_t dmamap;
5070 int error, nexttx, lasttx = -1, seg, segs_needed;
5071 bool do_csum, sent;
5072
5073 KASSERT(WM_TX_LOCKED(sc));
5074
5075 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5076 return;
5077
5078 sent = false;
5079
5080 /*
5081 * Loop through the send queue, setting up transmit descriptors
5082 * until we drain the queue, or use up all available transmit
5083 * descriptors.
5084 */
5085 for (;;) {
5086 m0 = NULL;
5087
5088 /* Get a work queue entry. */
5089 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5090 wm_txintr(sc);
5091 if (sc->sc_txsfree == 0) {
5092 DPRINTF(WM_DEBUG_TX,
5093 ("%s: TX: no free job descriptors\n",
5094 device_xname(sc->sc_dev)));
5095 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5096 break;
5097 }
5098 }
5099
5100 /* Grab a packet off the queue. */
5101 IFQ_DEQUEUE(&ifp->if_snd, m0);
5102 if (m0 == NULL)
5103 break;
5104
5105 DPRINTF(WM_DEBUG_TX,
5106 ("%s: TX: have packet to transmit: %p\n",
5107 device_xname(sc->sc_dev), m0));
5108
5109 txs = &sc->sc_txsoft[sc->sc_txsnext];
5110 dmamap = txs->txs_dmamap;
5111
5112 /*
5113 * Load the DMA map. If this fails, the packet either
5114 * didn't fit in the allotted number of segments, or we
5115 * were short on resources. For the too-many-segments
5116 * case, we simply report an error and drop the packet,
5117 * since we can't sanely copy a jumbo packet to a single
5118 * buffer.
5119 */
5120 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5121 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5122 if (error) {
5123 if (error == EFBIG) {
5124 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5125 log(LOG_ERR, "%s: Tx packet consumes too many "
5126 "DMA segments, dropping...\n",
5127 device_xname(sc->sc_dev));
5128 wm_dump_mbuf_chain(sc, m0);
5129 m_freem(m0);
5130 continue;
5131 }
5132 /* Short on resources, just stop for now. */
5133 DPRINTF(WM_DEBUG_TX,
5134 ("%s: TX: dmamap load failed: %d\n",
5135 device_xname(sc->sc_dev), error));
5136 break;
5137 }
5138
5139 segs_needed = dmamap->dm_nsegs;
5140
5141 /*
5142 * Ensure we have enough descriptors free to describe
5143 * the packet. Note, we always reserve one descriptor
5144 * at the end of the ring due to the semantics of the
5145 * TDT register, plus one more in the event we need
5146 * to load offload context.
5147 */
5148 if (segs_needed > sc->sc_txfree - 2) {
5149 /*
5150 * Not enough free descriptors to transmit this
5151 * packet. We haven't committed anything yet,
5152 * so just unload the DMA map, put the packet
5153 			 * back on the queue, and punt. Notify the upper
5154 * layer that there are no more slots left.
5155 */
5156 DPRINTF(WM_DEBUG_TX,
5157 ("%s: TX: need %d (%d) descriptors, have %d\n",
5158 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5159 segs_needed, sc->sc_txfree - 1));
5160 ifp->if_flags |= IFF_OACTIVE;
5161 bus_dmamap_unload(sc->sc_dmat, dmamap);
5162 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5163 break;
5164 }
5165
5166 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5167
5168 DPRINTF(WM_DEBUG_TX,
5169 ("%s: TX: packet has %d (%d) DMA segments\n",
5170 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5171
5172 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5173
5174 /*
5175 * Store a pointer to the packet so that we can free it
5176 * later.
5177 *
5178 * Initially, we consider the number of descriptors the
5179 		 * packet uses to be the number of DMA segments. This may be
5180 * incremented by 1 if we do checksum offload (a descriptor
5181 * is used to set the checksum context).
5182 */
5183 txs->txs_mbuf = m0;
5184 txs->txs_firstdesc = sc->sc_txnext;
5185 txs->txs_ndesc = segs_needed;
5186
5187 /* Set up offload parameters for this packet. */
5188 uint32_t cmdlen, fields, dcmdlen;
5189 if (m0->m_pkthdr.csum_flags &
5190 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5191 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5192 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5193 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5194 &do_csum) != 0) {
5195 /* Error message already displayed. */
5196 bus_dmamap_unload(sc->sc_dmat, dmamap);
5197 continue;
5198 }
5199 } else {
5200 do_csum = false;
5201 cmdlen = 0;
5202 fields = 0;
5203 }
5204
5205 /* Sync the DMA map. */
5206 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5207 BUS_DMASYNC_PREWRITE);
5208
5209 /* Initialize the first transmit descriptor. */
5210 nexttx = sc->sc_txnext;
5211 if (!do_csum) {
5212 /* setup a legacy descriptor */
5213 wm_set_dma_addr(
5214 &sc->sc_txdescs[nexttx].wtx_addr,
5215 dmamap->dm_segs[0].ds_addr);
5216 sc->sc_txdescs[nexttx].wtx_cmdlen =
5217 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5218 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5219 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5220 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5221 NULL) {
5222 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5223 htole32(WTX_CMD_VLE);
5224 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5225 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5226 } else {
5227 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5228 }
5229 dcmdlen = 0;
5230 } else {
5231 /* setup an advanced data descriptor */
5232 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5233 htole64(dmamap->dm_segs[0].ds_addr);
5234 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5235 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5236 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
5237 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5238 htole32(fields);
5239 DPRINTF(WM_DEBUG_TX,
5240 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5241 device_xname(sc->sc_dev), nexttx,
5242 (uint64_t)dmamap->dm_segs[0].ds_addr));
5243 DPRINTF(WM_DEBUG_TX,
5244 ("\t 0x%08x%08x\n", fields,
5245 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5246 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5247 }
5248
5249 lasttx = nexttx;
5250 nexttx = WM_NEXTTX(sc, nexttx);
5251 /*
5252 		 * Fill in the next descriptors. Legacy or advanced format
5253 		 * is the same here.
5254 */
5255 for (seg = 1; seg < dmamap->dm_nsegs;
5256 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5257 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5258 htole64(dmamap->dm_segs[seg].ds_addr);
5259 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5260 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5261 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5262 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5263 lasttx = nexttx;
5264
5265 DPRINTF(WM_DEBUG_TX,
5266 ("%s: TX: desc %d: %#" PRIx64 ", "
5267 "len %#04zx\n",
5268 device_xname(sc->sc_dev), nexttx,
5269 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5270 dmamap->dm_segs[seg].ds_len));
5271 }
5272
5273 KASSERT(lasttx != -1);
5274
5275 /*
5276 * Set up the command byte on the last descriptor of
5277 * the packet. If we're in the interrupt delay window,
5278 * delay the interrupt.
5279 */
5280 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5281 (NQTX_CMD_EOP | NQTX_CMD_RS));
5282 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5283 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5284
5285 txs->txs_lastdesc = lasttx;
5286
5287 DPRINTF(WM_DEBUG_TX,
5288 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5289 device_xname(sc->sc_dev),
5290 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5291
5292 /* Sync the descriptors we're using. */
5293 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5294 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5295
5296 /* Give the packet to the chip. */
5297 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5298 sent = true;
5299
5300 DPRINTF(WM_DEBUG_TX,
5301 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5302
5303 DPRINTF(WM_DEBUG_TX,
5304 ("%s: TX: finished transmitting packet, job %d\n",
5305 device_xname(sc->sc_dev), sc->sc_txsnext));
5306
5307 /* Advance the tx pointer. */
5308 sc->sc_txfree -= txs->txs_ndesc;
5309 sc->sc_txnext = nexttx;
5310
5311 sc->sc_txsfree--;
5312 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5313
5314 /* Pass the packet to any BPF listeners. */
5315 bpf_mtap(ifp, m0);
5316 }
5317
5318 if (m0 != NULL) {
5319 ifp->if_flags |= IFF_OACTIVE;
5320 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5321 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5322 m_freem(m0);
5323 }
5324
5325 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5326 /* No more slots; notify upper layer. */
5327 ifp->if_flags |= IFF_OACTIVE;
5328 }
5329
5330 if (sent) {
5331 /* Set a watchdog timer in case the chip flakes out. */
5332 ifp->if_timer = 5;
5333 }
5334 }
5335
5336 /* Interrupt */
5337
5338 /*
5339 * wm_txintr:
5340 *
5341 * Helper; handle transmit interrupts.
5342 */
5343 static void
5344 wm_txintr(struct wm_softc *sc)
5345 {
5346 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5347 struct wm_txsoft *txs;
5348 uint8_t status;
5349 int i;
5350
5351 if (sc->sc_stopping)
5352 return;
5353
5354 ifp->if_flags &= ~IFF_OACTIVE;
5355
5356 /*
5357 * Go through the Tx list and free mbufs for those
5358 * frames which have been transmitted.
5359 */
5360 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5361 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
5362 txs = &sc->sc_txsoft[i];
5363
5364 DPRINTF(WM_DEBUG_TX,
5365 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
5366
5367 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
5368 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5369
5370 status =
5371 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
5372 if ((status & WTX_ST_DD) == 0) {
5373 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
5374 BUS_DMASYNC_PREREAD);
5375 break;
5376 }
5377
5378 DPRINTF(WM_DEBUG_TX,
5379 ("%s: TX: job %d done: descs %d..%d\n",
5380 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
5381 txs->txs_lastdesc));
5382
5383 /*
5384 * XXX We should probably be using the statistics
5385 * XXX registers, but I don't know if they exist
5386 * XXX on chips before the i82544.
5387 */
5388
5389 #ifdef WM_EVENT_COUNTERS
5390 if (status & WTX_ST_TU)
5391 WM_EVCNT_INCR(&sc->sc_ev_tu);
5392 #endif /* WM_EVENT_COUNTERS */
5393
5394 if (status & (WTX_ST_EC|WTX_ST_LC)) {
5395 ifp->if_oerrors++;
5396 if (status & WTX_ST_LC)
5397 log(LOG_WARNING, "%s: late collision\n",
5398 device_xname(sc->sc_dev));
5399 else if (status & WTX_ST_EC) {
5400 ifp->if_collisions += 16;
5401 log(LOG_WARNING, "%s: excessive collisions\n",
5402 device_xname(sc->sc_dev));
5403 }
5404 } else
5405 ifp->if_opackets++;
5406
5407 sc->sc_txfree += txs->txs_ndesc;
5408 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
5409 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5410 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5411 m_freem(txs->txs_mbuf);
5412 txs->txs_mbuf = NULL;
5413 }
5414
5415 /* Update the dirty transmit buffer pointer. */
5416 sc->sc_txsdirty = i;
5417 DPRINTF(WM_DEBUG_TX,
5418 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
5419
5420 /*
5421 * If there are no more pending transmissions, cancel the watchdog
5422 * timer.
5423 */
5424 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
5425 ifp->if_timer = 0;
5426 }
5427
5428 /*
5429 * wm_rxintr:
5430 *
5431 * Helper; handle receive interrupts.
5432 */
5433 static void
5434 wm_rxintr(struct wm_softc *sc)
5435 {
5436 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5437 struct wm_rxsoft *rxs;
5438 struct mbuf *m;
5439 int i, len;
5440 uint8_t status, errors;
5441 uint16_t vlantag;
5442
5443 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
5444 rxs = &sc->sc_rxsoft[i];
5445
5446 DPRINTF(WM_DEBUG_RX,
5447 ("%s: RX: checking descriptor %d\n",
5448 device_xname(sc->sc_dev), i));
5449
5450 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5451
5452 status = sc->sc_rxdescs[i].wrx_status;
5453 errors = sc->sc_rxdescs[i].wrx_errors;
5454 len = le16toh(sc->sc_rxdescs[i].wrx_len);
5455 vlantag = sc->sc_rxdescs[i].wrx_special;
5456
5457 if ((status & WRX_ST_DD) == 0) {
5458 /* We have processed all of the receive descriptors. */
5459 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
5460 break;
5461 }
5462
5463 if (__predict_false(sc->sc_rxdiscard)) {
5464 DPRINTF(WM_DEBUG_RX,
5465 ("%s: RX: discarding contents of descriptor %d\n",
5466 device_xname(sc->sc_dev), i));
5467 WM_INIT_RXDESC(sc, i);
5468 if (status & WRX_ST_EOP) {
5469 /* Reset our state. */
5470 DPRINTF(WM_DEBUG_RX,
5471 ("%s: RX: resetting rxdiscard -> 0\n",
5472 device_xname(sc->sc_dev)));
5473 sc->sc_rxdiscard = 0;
5474 }
5475 continue;
5476 }
5477
5478 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5479 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
5480
5481 m = rxs->rxs_mbuf;
5482
5483 /*
5484 * Add a new receive buffer to the ring, unless of
5485 * course the length is zero. Treat the latter as a
5486 * failed mapping.
5487 */
5488 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
5489 /*
5490 * Failed, throw away what we've done so
5491 * far, and discard the rest of the packet.
5492 */
5493 ifp->if_ierrors++;
5494 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5495 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5496 WM_INIT_RXDESC(sc, i);
5497 if ((status & WRX_ST_EOP) == 0)
5498 sc->sc_rxdiscard = 1;
5499 if (sc->sc_rxhead != NULL)
5500 m_freem(sc->sc_rxhead);
5501 WM_RXCHAIN_RESET(sc);
5502 DPRINTF(WM_DEBUG_RX,
5503 ("%s: RX: Rx buffer allocation failed, "
5504 "dropping packet%s\n", device_xname(sc->sc_dev),
5505 sc->sc_rxdiscard ? " (discard)" : ""));
5506 continue;
5507 }
5508
5509 m->m_len = len;
5510 sc->sc_rxlen += len;
5511 DPRINTF(WM_DEBUG_RX,
5512 ("%s: RX: buffer at %p len %d\n",
5513 device_xname(sc->sc_dev), m->m_data, len));
5514
5515 /* If this is not the end of the packet, keep looking. */
5516 if ((status & WRX_ST_EOP) == 0) {
5517 WM_RXCHAIN_LINK(sc, m);
5518 DPRINTF(WM_DEBUG_RX,
5519 ("%s: RX: not yet EOP, rxlen -> %d\n",
5520 device_xname(sc->sc_dev), sc->sc_rxlen));
5521 continue;
5522 }
5523
		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS except on the I350,
		 * I354 and I21[01] (not all chips can be configured
		 * to strip it), so we need to trim it.  We may also
		 * need to adjust the length of the previous mbuf in
		 * the chain if the current mbuf is too short.
		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
		 * register is always set on the I350, so the hardware
		 * strips the FCS there and we must not trim it again.
		 */
5534 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
5535 && (sc->sc_type != WM_T_I210)
5536 && (sc->sc_type != WM_T_I211)) {
5537 if (m->m_len < ETHER_CRC_LEN) {
5538 sc->sc_rxtail->m_len
5539 -= (ETHER_CRC_LEN - m->m_len);
5540 m->m_len = 0;
5541 } else
5542 m->m_len -= ETHER_CRC_LEN;
5543 len = sc->sc_rxlen - ETHER_CRC_LEN;
5544 } else
5545 len = sc->sc_rxlen;
5546
5547 WM_RXCHAIN_LINK(sc, m);
5548
5549 *sc->sc_rxtailp = NULL;
5550 m = sc->sc_rxhead;
5551
5552 WM_RXCHAIN_RESET(sc);
5553
5554 DPRINTF(WM_DEBUG_RX,
5555 ("%s: RX: have entire packet, len -> %d\n",
5556 device_xname(sc->sc_dev), len));
5557
5558 /* If an error occurred, update stats and drop the packet. */
5559 if (errors &
5560 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
5561 if (errors & WRX_ER_SE)
5562 log(LOG_WARNING, "%s: symbol error\n",
5563 device_xname(sc->sc_dev));
5564 else if (errors & WRX_ER_SEQ)
5565 log(LOG_WARNING, "%s: receive sequence error\n",
5566 device_xname(sc->sc_dev));
5567 else if (errors & WRX_ER_CE)
5568 log(LOG_WARNING, "%s: CRC error\n",
5569 device_xname(sc->sc_dev));
5570 m_freem(m);
5571 continue;
5572 }
5573
5574 /* No errors. Receive the packet. */
5575 m->m_pkthdr.rcvif = ifp;
5576 m->m_pkthdr.len = len;
5577
5578 /*
5579 * If VLANs are enabled, VLAN packets have been unwrapped
5580 * for us. Associate the tag with the packet.
5581 */
		/* XXX Should check for i350 and i354 */
5583 if ((status & WRX_ST_VP) != 0) {
5584 VLAN_INPUT_TAG(ifp, m,
5585 le16toh(vlantag),
5586 continue);
5587 }
5588
5589 /* Set up checksum info for this packet. */
5590 if ((status & WRX_ST_IXSM) == 0) {
5591 if (status & WRX_ST_IPCS) {
5592 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
5593 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
5594 if (errors & WRX_ER_IPE)
5595 m->m_pkthdr.csum_flags |=
5596 M_CSUM_IPv4_BAD;
5597 }
5598 if (status & WRX_ST_TCPCS) {
5599 /*
5600 * Note: we don't know if this was TCP or UDP,
5601 * so we just set both bits, and expect the
5602 * upper layers to deal.
5603 */
5604 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
5605 m->m_pkthdr.csum_flags |=
5606 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
5607 M_CSUM_TCPv6 | M_CSUM_UDPv6;
5608 if (errors & WRX_ER_TCPE)
5609 m->m_pkthdr.csum_flags |=
5610 M_CSUM_TCP_UDP_BAD;
5611 }
5612 }
5613
5614 ifp->if_ipackets++;
5615
5616 WM_RX_UNLOCK(sc);
5617
5618 /* Pass this up to any BPF listeners. */
5619 bpf_mtap(ifp, m);
5620
5621 /* Pass it on. */
5622 (*ifp->if_input)(ifp, m);
5623
5624 WM_RX_LOCK(sc);
5625
5626 if (sc->sc_stopping)
5627 break;
5628 }
5629
5630 /* Update the receive pointer. */
5631 sc->sc_rxptr = i;
5632
5633 DPRINTF(WM_DEBUG_RX,
5634 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
5635 }
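
/*
 * Note on the bus_dma(9) sync discipline in wm_rxintr() above:
 * descriptors are synced POSTREAD|POSTWRITE before the CPU inspects
 * them and PREREAD once they are handed back to the chip, and packet
 * buffers are synced POSTREAD before being passed up the stack.
 */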
5636
5637 /*
5638 * wm_linkintr_gmii:
5639 *
5640 * Helper; handle link interrupts for GMII.
5641 */
5642 static void
5643 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
5644 {
5645
5646 KASSERT(WM_TX_LOCKED(sc));
5647
5648 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5649 __func__));
5650
5651 if (icr & ICR_LSC) {
5652 DPRINTF(WM_DEBUG_LINK,
5653 ("%s: LINK: LSC -> mii_pollstat\n",
5654 device_xname(sc->sc_dev)));
5655 mii_pollstat(&sc->sc_mii);
5656 if (sc->sc_type == WM_T_82543) {
5657 int miistatus, active;
5658
			/*
			 * With the 82543, we need to force the MAC's
			 * speed and duplex settings to match the PHY's
			 * configuration.
			 */
5664 miistatus = sc->sc_mii.mii_media_status;
5665
5666 if (miistatus & IFM_ACTIVE) {
5667 active = sc->sc_mii.mii_media_active;
5668 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5669 switch (IFM_SUBTYPE(active)) {
5670 case IFM_10_T:
5671 sc->sc_ctrl |= CTRL_SPEED_10;
5672 break;
5673 case IFM_100_TX:
5674 sc->sc_ctrl |= CTRL_SPEED_100;
5675 break;
5676 case IFM_1000_T:
5677 sc->sc_ctrl |= CTRL_SPEED_1000;
5678 break;
5679 default:
5680 /*
5681 * fiber?
					 * Should not enter here.
5683 */
5684 printf("unknown media (%x)\n",
5685 active);
5686 break;
5687 }
5688 if (active & IFM_FDX)
5689 sc->sc_ctrl |= CTRL_FD;
5690 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5691 }
5692 } else if ((sc->sc_type == WM_T_ICH8)
5693 && (sc->sc_phytype == WMPHY_IGP_3)) {
5694 wm_kmrn_lock_loss_workaround_ich8lan(sc);
5695 } else if (sc->sc_type == WM_T_PCH) {
5696 wm_k1_gig_workaround_hv(sc,
5697 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
5698 }
5699
5700 if ((sc->sc_phytype == WMPHY_82578)
5701 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
5702 == IFM_1000_T)) {
5703
5704 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
5705 delay(200*1000); /* XXX too big */
5706
5707 /* Link stall fix for link up */
5708 wm_gmii_hv_writereg(sc->sc_dev, 1,
5709 HV_MUX_DATA_CTRL,
5710 HV_MUX_DATA_CTRL_GEN_TO_MAC
5711 | HV_MUX_DATA_CTRL_FORCE_SPEED);
5712 wm_gmii_hv_writereg(sc->sc_dev, 1,
5713 HV_MUX_DATA_CTRL,
5714 HV_MUX_DATA_CTRL_GEN_TO_MAC);
5715 }
5716 }
5717 } else if (icr & ICR_RXSEQ) {
5718 DPRINTF(WM_DEBUG_LINK,
5719 ("%s: LINK Receive sequence error\n",
5720 device_xname(sc->sc_dev)));
5721 }
5722 }
5723
5724 /*
5725 * wm_linkintr_tbi:
5726 *
5727 * Helper; handle link interrupts for TBI mode.
5728 */
5729 static void
5730 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
5731 {
5732 uint32_t status;
5733
5734 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5735 __func__));
5736
5737 status = CSR_READ(sc, WMREG_STATUS);
5738 if (icr & ICR_LSC) {
5739 if (status & STATUS_LU) {
5740 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
5741 device_xname(sc->sc_dev),
5742 (status & STATUS_FD) ? "FDX" : "HDX"));
5743 /*
			 * NOTE: the hardware updates TFCE and RFCE in
			 * CTRL automatically, so we refresh sc->sc_ctrl
			 * from the register.
5746 */
5747
5748 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5749 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5750 sc->sc_fcrtl &= ~FCRTL_XONE;
5751 if (status & STATUS_FD)
5752 sc->sc_tctl |=
5753 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5754 else
5755 sc->sc_tctl |=
5756 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5757 if (sc->sc_ctrl & CTRL_TFCE)
5758 sc->sc_fcrtl |= FCRTL_XONE;
5759 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5760 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5761 WMREG_OLD_FCRTL : WMREG_FCRTL,
5762 sc->sc_fcrtl);
5763 sc->sc_tbi_linkup = 1;
5764 } else {
5765 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
5766 device_xname(sc->sc_dev)));
5767 sc->sc_tbi_linkup = 0;
5768 }
5769 wm_tbi_set_linkled(sc);
5770 } else if (icr & ICR_RXSEQ) {
5771 DPRINTF(WM_DEBUG_LINK,
5772 ("%s: LINK: Receive sequence error\n",
5773 device_xname(sc->sc_dev)));
5774 }
5775 }
5776
5777 /*
5778 * wm_linkintr:
5779 *
5780 * Helper; handle link interrupts.
5781 */
5782 static void
5783 wm_linkintr(struct wm_softc *sc, uint32_t icr)
5784 {
5785
5786 if (sc->sc_flags & WM_F_HAS_MII)
5787 wm_linkintr_gmii(sc, icr);
5788 else
5789 wm_linkintr_tbi(sc, icr);
5790 }
5791
5792 /*
5793 * wm_intr:
5794 *
5795 * Interrupt service routine.
5796 */
5797 static int
5798 wm_intr(void *arg)
5799 {
5800 struct wm_softc *sc = arg;
5801 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5802 uint32_t icr;
5803 int handled = 0;
5804
5805 while (1 /* CONSTCOND */) {
5806 icr = CSR_READ(sc, WMREG_ICR);
5807 if ((icr & sc->sc_icr) == 0)
5808 break;
5809 rnd_add_uint32(&sc->rnd_source, icr);
5810
5811 WM_RX_LOCK(sc);
5812
5813 if (sc->sc_stopping) {
5814 WM_RX_UNLOCK(sc);
5815 break;
5816 }
5817
5818 handled = 1;
5819
5820 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
5821 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
5822 DPRINTF(WM_DEBUG_RX,
5823 ("%s: RX: got Rx intr 0x%08x\n",
5824 device_xname(sc->sc_dev),
5825 icr & (ICR_RXDMT0|ICR_RXT0)));
5826 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
5827 }
5828 #endif
5829 wm_rxintr(sc);
5830
5831 WM_RX_UNLOCK(sc);
5832 WM_TX_LOCK(sc);
5833
5834 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
5835 if (icr & ICR_TXDW) {
5836 DPRINTF(WM_DEBUG_TX,
5837 ("%s: TX: got TXDW interrupt\n",
5838 device_xname(sc->sc_dev)));
5839 WM_EVCNT_INCR(&sc->sc_ev_txdw);
5840 }
5841 #endif
5842 wm_txintr(sc);
5843
5844 if (icr & (ICR_LSC|ICR_RXSEQ)) {
5845 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
5846 wm_linkintr(sc, icr);
5847 }
5848
5849 WM_TX_UNLOCK(sc);
5850
5851 if (icr & ICR_RXO) {
5852 #if defined(WM_DEBUG)
5853 log(LOG_WARNING, "%s: Receive overrun\n",
5854 device_xname(sc->sc_dev));
5855 #endif /* defined(WM_DEBUG) */
5856 }
5857 }
5858
5859 if (handled) {
5860 /* Try to get more packets going. */
5861 ifp->if_start(ifp);
5862 }
5863
5864 return handled;
5865 }
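
#if 0	/* Illustrative sketch, not compiled */
/*
 * How wm_intr() is typically registered at attach time with
 * pci_intr_establish(9).  The "pc" and "ih" parameters stand in for
 * the attach-time pci_chipset_tag_t and pci_intr_handle_t values;
 * this is a sketch, not the driver's actual attach code.
 */
static void
wm_intr_establish_sketch(struct wm_softc *sc, pci_chipset_tag_t pc,
    pci_intr_handle_t ih)
{

	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL)
		aprint_error_dev(sc->sc_dev,
		    "unable to establish interrupt\n");
}
#endif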
5866
5867 /*
5868 * Media related.
5869 * GMII, SGMII, TBI (and SERDES)
5870 */
5871
5872 /* GMII related */
5873
5874 /*
5875 * wm_gmii_reset:
5876 *
5877 * Reset the PHY.
5878 */
5879 static void
5880 wm_gmii_reset(struct wm_softc *sc)
5881 {
5882 uint32_t reg;
5883 int rv;
5884
5885 /* get phy semaphore */
5886 switch (sc->sc_type) {
5887 case WM_T_82571:
5888 case WM_T_82572:
5889 case WM_T_82573:
5890 case WM_T_82574:
5891 case WM_T_82583:
5892 /* XXX should get sw semaphore, too */
5893 rv = wm_get_swsm_semaphore(sc);
5894 break;
5895 case WM_T_82575:
5896 case WM_T_82576:
5897 case WM_T_82580:
5898 case WM_T_I350:
5899 case WM_T_I354:
5900 case WM_T_I210:
5901 case WM_T_I211:
5902 case WM_T_80003:
5903 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5904 break;
5905 case WM_T_ICH8:
5906 case WM_T_ICH9:
5907 case WM_T_ICH10:
5908 case WM_T_PCH:
5909 case WM_T_PCH2:
5910 case WM_T_PCH_LPT:
5911 rv = wm_get_swfwhw_semaphore(sc);
5912 break;
5913 default:
		/* Nothing to do */
5915 rv = 0;
5916 break;
5917 }
5918 if (rv != 0) {
5919 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5920 __func__);
5921 return;
5922 }
5923
5924 switch (sc->sc_type) {
5925 case WM_T_82542_2_0:
5926 case WM_T_82542_2_1:
5927 /* null */
5928 break;
5929 case WM_T_82543:
		/*
		 * With the 82543, we need to force the MAC's speed and
		 * duplex settings to match the PHY's configuration.
		 * In addition, we need to toggle the PHY's hardware
		 * reset pin to bring it out of reset.
		 */
5936 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5937 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5938
5939 /* The PHY reset pin is active-low. */
5940 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5941 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
5942 CTRL_EXT_SWDPIN(4));
5943 reg |= CTRL_EXT_SWDPIO(4);
5944
5945 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5946 CSR_WRITE_FLUSH(sc);
5947 delay(10*1000);
5948
5949 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
5950 CSR_WRITE_FLUSH(sc);
5951 delay(150);
5952 #if 0
5953 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
5954 #endif
5955 delay(20*1000); /* XXX extra delay to get PHY ID? */
5956 break;
5957 case WM_T_82544: /* reset 10000us */
5958 case WM_T_82540:
5959 case WM_T_82545:
5960 case WM_T_82545_3:
5961 case WM_T_82546:
5962 case WM_T_82546_3:
5963 case WM_T_82541:
5964 case WM_T_82541_2:
5965 case WM_T_82547:
5966 case WM_T_82547_2:
5967 case WM_T_82571: /* reset 100us */
5968 case WM_T_82572:
5969 case WM_T_82573:
5970 case WM_T_82574:
5971 case WM_T_82575:
5972 case WM_T_82576:
5973 case WM_T_82580:
5974 case WM_T_I350:
5975 case WM_T_I354:
5976 case WM_T_I210:
5977 case WM_T_I211:
5978 case WM_T_82583:
5979 case WM_T_80003:
5980 /* generic reset */
5981 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5982 CSR_WRITE_FLUSH(sc);
5983 delay(20000);
5984 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5985 CSR_WRITE_FLUSH(sc);
5986 delay(20000);
5987
5988 if ((sc->sc_type == WM_T_82541)
5989 || (sc->sc_type == WM_T_82541_2)
5990 || (sc->sc_type == WM_T_82547)
5991 || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for the IGP PHY are done in igp_reset() */
5993 /* XXX add code to set LED after phy reset */
5994 }
5995 break;
5996 case WM_T_ICH8:
5997 case WM_T_ICH9:
5998 case WM_T_ICH10:
5999 case WM_T_PCH:
6000 case WM_T_PCH2:
6001 case WM_T_PCH_LPT:
6002 /* generic reset */
6003 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6004 CSR_WRITE_FLUSH(sc);
6005 delay(100);
6006 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6007 CSR_WRITE_FLUSH(sc);
6008 delay(150);
6009 break;
6010 default:
6011 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6012 __func__);
6013 break;
6014 }
6015
6016 /* release PHY semaphore */
6017 switch (sc->sc_type) {
6018 case WM_T_82571:
6019 case WM_T_82572:
6020 case WM_T_82573:
6021 case WM_T_82574:
6022 case WM_T_82583:
6023 /* XXX should put sw semaphore, too */
6024 wm_put_swsm_semaphore(sc);
6025 break;
6026 case WM_T_82575:
6027 case WM_T_82576:
6028 case WM_T_82580:
6029 case WM_T_I350:
6030 case WM_T_I354:
6031 case WM_T_I210:
6032 case WM_T_I211:
6033 case WM_T_80003:
6034 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6035 break;
6036 case WM_T_ICH8:
6037 case WM_T_ICH9:
6038 case WM_T_ICH10:
6039 case WM_T_PCH:
6040 case WM_T_PCH2:
6041 case WM_T_PCH_LPT:
6042 wm_put_swfwhw_semaphore(sc);
6043 break;
	default:
		/* Nothing to do */
		break;
6048 }
6049
6050 /* get_cfg_done */
6051 wm_get_cfg_done(sc);
6052
6053 /* extra setup */
6054 switch (sc->sc_type) {
6055 case WM_T_82542_2_0:
6056 case WM_T_82542_2_1:
6057 case WM_T_82543:
6058 case WM_T_82544:
6059 case WM_T_82540:
6060 case WM_T_82545:
6061 case WM_T_82545_3:
6062 case WM_T_82546:
6063 case WM_T_82546_3:
6064 case WM_T_82541_2:
6065 case WM_T_82547_2:
6066 case WM_T_82571:
6067 case WM_T_82572:
6068 case WM_T_82573:
6069 case WM_T_82574:
6070 case WM_T_82575:
6071 case WM_T_82576:
6072 case WM_T_82580:
6073 case WM_T_I350:
6074 case WM_T_I354:
6075 case WM_T_I210:
6076 case WM_T_I211:
6077 case WM_T_82583:
6078 case WM_T_80003:
6079 /* null */
6080 break;
6081 case WM_T_82541:
6082 case WM_T_82547:
		/* XXX Configure the activity LED after PHY reset */
6084 break;
6085 case WM_T_ICH8:
6086 case WM_T_ICH9:
6087 case WM_T_ICH10:
6088 case WM_T_PCH:
6089 case WM_T_PCH2:
6090 case WM_T_PCH_LPT:
		/* Allow time for h/w to get to a quiescent state after reset */
6092 delay(10*1000);
6093
6094 if (sc->sc_type == WM_T_PCH)
6095 wm_hv_phy_workaround_ich8lan(sc);
6096
6097 if (sc->sc_type == WM_T_PCH2)
6098 wm_lv_phy_workaround_ich8lan(sc);
6099
6100 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6101 /*
6102 * dummy read to clear the phy wakeup bit after lcd
6103 * reset
6104 */
6105 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6106 }
6107
6108 /*
		 * XXX Configure the LCD with the extended configuration region
6110 * in NVM
6111 */
6112
6113 /* Configure the LCD with the OEM bits in NVM */
6114 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6115 || (sc->sc_type == WM_T_PCH_LPT)) {
6116 /*
6117 * Disable LPLU.
6118 * XXX It seems that 82567 has LPLU, too.
6119 */
6120 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6122 reg |= HV_OEM_BITS_ANEGNOW;
6123 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6124 }
6125 break;
6126 default:
6127 panic("%s: unknown type\n", __func__);
6128 break;
6129 }
6130 }
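
#if 0	/* Illustrative sketch, not compiled */
/*
 * The PHY semaphore discipline used in wm_gmii_reset() above and in
 * the MII accessors below: wm_get_swfw_semaphore() returns 0 on
 * success, and every successful acquisition is paired with a
 * wm_put_swfw_semaphore() on the same semaphore.
 */
static void
wm_phy_sem_sketch(struct wm_softc *sc)
{
	int sem = swfwphysem[sc->sc_funcid];

	if (wm_get_swfw_semaphore(sc, sem) == 0) {
		/* ... access the PHY here ... */
		wm_put_swfw_semaphore(sc, sem);
	}
}
#endif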
6131
6132 /*
6133 * wm_get_phy_id_82575:
6134 *
6135 * Return PHY ID. Return -1 if it failed.
6136 */
6137 static int
6138 wm_get_phy_id_82575(struct wm_softc *sc)
6139 {
6140 uint32_t reg;
6141 int phyid = -1;
6142
6143 /* XXX */
6144 if ((sc->sc_flags & WM_F_SGMII) == 0)
6145 return -1;
6146
6147 if (wm_sgmii_uses_mdio(sc)) {
6148 switch (sc->sc_type) {
6149 case WM_T_82575:
6150 case WM_T_82576:
6151 reg = CSR_READ(sc, WMREG_MDIC);
6152 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6153 break;
6154 case WM_T_82580:
6155 case WM_T_I350:
6156 case WM_T_I354:
6157 case WM_T_I210:
6158 case WM_T_I211:
6159 reg = CSR_READ(sc, WMREG_MDICNFG);
6160 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6161 break;
6162 default:
6163 return -1;
6164 }
6165 }
6166
6167 return phyid;
6168 }
6169
6170
6171 /*
6172 * wm_gmii_mediainit:
6173 *
6174 * Initialize media for use on 1000BASE-T devices.
6175 */
6176 static void
6177 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6178 {
6179 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6180 struct mii_data *mii = &sc->sc_mii;
6181 uint32_t reg;
6182
6183 /* We have GMII. */
6184 sc->sc_flags |= WM_F_HAS_MII;
6185
6186 if (sc->sc_type == WM_T_80003)
6187 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6188 else
6189 sc->sc_tipg = TIPG_1000T_DFLT;
6190
6191 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
6192 if ((sc->sc_type == WM_T_82580)
6193 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6194 || (sc->sc_type == WM_T_I211)) {
6195 reg = CSR_READ(sc, WMREG_PHPM);
6196 reg &= ~PHPM_GO_LINK_D;
6197 CSR_WRITE(sc, WMREG_PHPM, reg);
6198 }
6199
6200 /*
6201 * Let the chip set speed/duplex on its own based on
6202 * signals from the PHY.
6203 * XXXbouyer - I'm not sure this is right for the 80003,
6204 * the em driver only sets CTRL_SLU here - but it seems to work.
6205 */
6206 sc->sc_ctrl |= CTRL_SLU;
6207 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6208
6209 /* Initialize our media structures and probe the GMII. */
6210 mii->mii_ifp = ifp;
6211
6212 /*
6213 * Determine the PHY access method.
6214 *
6215 * For SGMII, use SGMII specific method.
6216 *
6217 * For some devices, we can determine the PHY access method
6218 * from sc_type.
6219 *
	 * For ICH8 variants, it's difficult to determine the PHY access
	 * method by sc_type, so use the PCI product ID for some devices.
	 * For other ICH8 variants, try igp's method first; if the PHY
	 * can't be detected, fall back to bm's method.
6224 */
6225 switch (prodid) {
6226 case PCI_PRODUCT_INTEL_PCH_M_LM:
6227 case PCI_PRODUCT_INTEL_PCH_M_LC:
6228 /* 82577 */
6229 sc->sc_phytype = WMPHY_82577;
6230 mii->mii_readreg = wm_gmii_hv_readreg;
6231 mii->mii_writereg = wm_gmii_hv_writereg;
6232 break;
6233 case PCI_PRODUCT_INTEL_PCH_D_DM:
6234 case PCI_PRODUCT_INTEL_PCH_D_DC:
6235 /* 82578 */
6236 sc->sc_phytype = WMPHY_82578;
6237 mii->mii_readreg = wm_gmii_hv_readreg;
6238 mii->mii_writereg = wm_gmii_hv_writereg;
6239 break;
6240 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6241 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6242 /* 82579 */
6243 sc->sc_phytype = WMPHY_82579;
6244 mii->mii_readreg = wm_gmii_hv_readreg;
6245 mii->mii_writereg = wm_gmii_hv_writereg;
6246 break;
6247 case PCI_PRODUCT_INTEL_I217_LM:
6248 case PCI_PRODUCT_INTEL_I217_V:
6249 case PCI_PRODUCT_INTEL_I218_LM:
6250 case PCI_PRODUCT_INTEL_I218_V:
6251 /* I21[78] */
6252 mii->mii_readreg = wm_gmii_hv_readreg;
6253 mii->mii_writereg = wm_gmii_hv_writereg;
6254 break;
6255 case PCI_PRODUCT_INTEL_82801I_BM:
6256 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6257 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6258 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6259 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6260 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6261 /* 82567 */
6262 sc->sc_phytype = WMPHY_BM;
6263 mii->mii_readreg = wm_gmii_bm_readreg;
6264 mii->mii_writereg = wm_gmii_bm_writereg;
6265 break;
6266 default:
6267 if (((sc->sc_flags & WM_F_SGMII) != 0)
		    && !wm_sgmii_uses_mdio(sc)) {
6269 mii->mii_readreg = wm_sgmii_readreg;
6270 mii->mii_writereg = wm_sgmii_writereg;
6271 } else if (sc->sc_type >= WM_T_80003) {
6272 mii->mii_readreg = wm_gmii_i80003_readreg;
6273 mii->mii_writereg = wm_gmii_i80003_writereg;
6274 } else if (sc->sc_type >= WM_T_I210) {
6275 mii->mii_readreg = wm_gmii_i82544_readreg;
6276 mii->mii_writereg = wm_gmii_i82544_writereg;
6277 } else if (sc->sc_type >= WM_T_82580) {
6278 sc->sc_phytype = WMPHY_82580;
6279 mii->mii_readreg = wm_gmii_82580_readreg;
6280 mii->mii_writereg = wm_gmii_82580_writereg;
6281 } else if (sc->sc_type >= WM_T_82544) {
6282 mii->mii_readreg = wm_gmii_i82544_readreg;
6283 mii->mii_writereg = wm_gmii_i82544_writereg;
6284 } else {
6285 mii->mii_readreg = wm_gmii_i82543_readreg;
6286 mii->mii_writereg = wm_gmii_i82543_writereg;
6287 }
6288 break;
6289 }
6290 mii->mii_statchg = wm_gmii_statchg;
6291
6292 wm_gmii_reset(sc);
6293
6294 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6295 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6296 wm_gmii_mediastatus);
6297
6298 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6299 || (sc->sc_type == WM_T_82580)
6300 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6301 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6302 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6303 /* Attach only one port */
6304 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6305 MII_OFFSET_ANY, MIIF_DOPAUSE);
6306 } else {
6307 int i, id;
6308 uint32_t ctrl_ext;
6309
6310 id = wm_get_phy_id_82575(sc);
6311 if (id != -1) {
6312 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6313 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6314 }
6315 if ((id == -1)
6316 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6317 /* Power on sgmii phy if it is disabled */
6318 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6319 CSR_WRITE(sc, WMREG_CTRL_EXT,
6320 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6321 CSR_WRITE_FLUSH(sc);
6322 delay(300*1000); /* XXX too long */
6323
				/* Try PHY addresses 1 through 7 */
6325 for (i = 1; i < 8; i++)
6326 mii_attach(sc->sc_dev, &sc->sc_mii,
6327 0xffffffff, i, MII_OFFSET_ANY,
6328 MIIF_DOPAUSE);
6329
				/* Restore previous SFP cage power state */
6331 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6332 }
6333 }
6334 } else {
6335 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6336 MII_OFFSET_ANY, MIIF_DOPAUSE);
6337 }
6338
6339 /*
	 * If the MAC is PCH2 or PCH_LPT and no MII PHY was detected, call
	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
6342 */
6343 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6344 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6345 wm_set_mdio_slow_mode_hv(sc);
6346 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6347 MII_OFFSET_ANY, MIIF_DOPAUSE);
6348 }
6349
6350 /*
6351 * (For ICH8 variants)
6352 * If PHY detection failed, use BM's r/w function and retry.
6353 */
6354 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6355 /* if failed, retry with *_bm_* */
6356 mii->mii_readreg = wm_gmii_bm_readreg;
6357 mii->mii_writereg = wm_gmii_bm_writereg;
6358
6359 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6360 MII_OFFSET_ANY, MIIF_DOPAUSE);
6361 }
6362
6363 if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
6365 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6366 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6367 sc->sc_phytype = WMPHY_NONE;
6368 } else {
6369 /*
6370 * PHY Found!
6371 * Check PHY type.
6372 */
6373 uint32_t model;
6374 struct mii_softc *child;
6375
6376 child = LIST_FIRST(&mii->mii_phys);
6377 if (device_is_a(child->mii_dev, "igphy")) {
6378 struct igphy_softc *isc = (struct igphy_softc *)child;
6379
6380 model = isc->sc_mii.mii_mpd_model;
6381 if (model == MII_MODEL_yyINTEL_I82566)
6382 sc->sc_phytype = WMPHY_IGP_3;
6383 }
6384
6385 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6386 }
6387 }
6388
6389 /*
6390 * wm_gmii_mediastatus: [ifmedia interface function]
6391 *
6392 * Get the current interface media status on a 1000BASE-T device.
6393 */
6394 static void
6395 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6396 {
6397 struct wm_softc *sc = ifp->if_softc;
6398
6399 ether_mediastatus(ifp, ifmr);
6400 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6401 | sc->sc_flowflags;
6402 }
6403
6404 /*
6405 * wm_gmii_mediachange: [ifmedia interface function]
6406 *
6407 * Set hardware to newly-selected media on a 1000BASE-T device.
6408 */
6409 static int
6410 wm_gmii_mediachange(struct ifnet *ifp)
6411 {
6412 struct wm_softc *sc = ifp->if_softc;
6413 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6414 int rc;
6415
6416 if ((ifp->if_flags & IFF_UP) == 0)
6417 return 0;
6418
6419 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6420 sc->sc_ctrl |= CTRL_SLU;
6421 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6422 || (sc->sc_type > WM_T_82543)) {
6423 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6424 } else {
6425 sc->sc_ctrl &= ~CTRL_ASDE;
6426 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6427 if (ife->ifm_media & IFM_FDX)
6428 sc->sc_ctrl |= CTRL_FD;
6429 switch (IFM_SUBTYPE(ife->ifm_media)) {
6430 case IFM_10_T:
6431 sc->sc_ctrl |= CTRL_SPEED_10;
6432 break;
6433 case IFM_100_TX:
6434 sc->sc_ctrl |= CTRL_SPEED_100;
6435 break;
6436 case IFM_1000_T:
6437 sc->sc_ctrl |= CTRL_SPEED_1000;
6438 break;
6439 default:
6440 panic("wm_gmii_mediachange: bad media 0x%x",
6441 ife->ifm_media);
6442 }
6443 }
6444 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6445 if (sc->sc_type <= WM_T_82543)
6446 wm_gmii_reset(sc);
6447
6448 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6449 return 0;
6450 return rc;
6451 }
6452
6453 #define MDI_IO CTRL_SWDPIN(2)
6454 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6455 #define MDI_CLK CTRL_SWDPIN(3)
6456
/*
 * wm_i82543_mii_sendbits:
 *
 *	Clock out the low "nbits" bits of "data", most significant bit
 *	first, on the bit-banged MDIO interface defined by the MDI_*
 *	pins above.
 */
static void
6458 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6459 {
6460 uint32_t i, v;
6461
6462 v = CSR_READ(sc, WMREG_CTRL);
6463 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6464 v |= MDI_DIR | CTRL_SWDPIO(3);
6465
6466 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6467 if (data & i)
6468 v |= MDI_IO;
6469 else
6470 v &= ~MDI_IO;
6471 CSR_WRITE(sc, WMREG_CTRL, v);
6472 CSR_WRITE_FLUSH(sc);
6473 delay(10);
6474 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6475 CSR_WRITE_FLUSH(sc);
6476 delay(10);
6477 CSR_WRITE(sc, WMREG_CTRL, v);
6478 CSR_WRITE_FLUSH(sc);
6479 delay(10);
6480 }
6481 }
6482
/*
 * wm_i82543_mii_recvbits:
 *
 *	Clock in 16 bits of data, most significant bit first, from the
 *	bit-banged MDIO interface, with turnaround and idle clock
 *	cycles around the data.
 */
static uint32_t
6484 wm_i82543_mii_recvbits(struct wm_softc *sc)
6485 {
6486 uint32_t v, i, data = 0;
6487
6488 v = CSR_READ(sc, WMREG_CTRL);
6489 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6490 v |= CTRL_SWDPIO(3);
6491
6492 CSR_WRITE(sc, WMREG_CTRL, v);
6493 CSR_WRITE_FLUSH(sc);
6494 delay(10);
6495 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6496 CSR_WRITE_FLUSH(sc);
6497 delay(10);
6498 CSR_WRITE(sc, WMREG_CTRL, v);
6499 CSR_WRITE_FLUSH(sc);
6500 delay(10);
6501
6502 for (i = 0; i < 16; i++) {
6503 data <<= 1;
6504 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6505 CSR_WRITE_FLUSH(sc);
6506 delay(10);
6507 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6508 data |= 1;
6509 CSR_WRITE(sc, WMREG_CTRL, v);
6510 CSR_WRITE_FLUSH(sc);
6511 delay(10);
6512 }
6513
6514 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6515 CSR_WRITE_FLUSH(sc);
6516 delay(10);
6517 CSR_WRITE(sc, WMREG_CTRL, v);
6518 CSR_WRITE_FLUSH(sc);
6519 delay(10);
6520
6521 return data;
6522 }
6523
6524 #undef MDI_IO
6525 #undef MDI_DIR
6526 #undef MDI_CLK
6527
6528 /*
6529 * wm_gmii_i82543_readreg: [mii interface function]
6530 *
 *	Read a PHY register on the GMII (i82543 version): clock out a
 *	32-bit preamble of ones and a 14-bit start/opcode/PHY/register
 *	header, then clock the 16 data bits back in.
6532 */
6533 static int
6534 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6535 {
6536 struct wm_softc *sc = device_private(self);
6537 int rv;
6538
6539 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6540 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
6541 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6542 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
6543
6544 DPRINTF(WM_DEBUG_GMII,
6545 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6546 device_xname(sc->sc_dev), phy, reg, rv));
6547
6548 return rv;
6549 }
6550
6551 /*
6552 * wm_gmii_i82543_writereg: [mii interface function]
6553 *
6554 * Write a PHY register on the GMII (i82543 version).
6555 */
6556 static void
6557 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6558 {
6559 struct wm_softc *sc = device_private(self);
6560
6561 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6562 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6563 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6564 (MII_COMMAND_START << 30), 32);
6565 }
6566
6567 /*
6568 * wm_gmii_i82544_readreg: [mii interface function]
6569 *
6570 * Read a PHY register on the GMII.
6571 */
6572 static int
6573 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6574 {
6575 struct wm_softc *sc = device_private(self);
6576 uint32_t mdic = 0;
6577 int i, rv;
6578
6579 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6580 MDIC_REGADD(reg));
6581
6582 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6583 mdic = CSR_READ(sc, WMREG_MDIC);
6584 if (mdic & MDIC_READY)
6585 break;
6586 delay(50);
6587 }
6588
6589 if ((mdic & MDIC_READY) == 0) {
6590 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6591 device_xname(sc->sc_dev), phy, reg);
6592 rv = 0;
6593 } else if (mdic & MDIC_E) {
6594 #if 0 /* This is normal if no PHY is present. */
6595 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6596 device_xname(sc->sc_dev), phy, reg);
6597 #endif
6598 rv = 0;
6599 } else {
6600 rv = MDIC_DATA(mdic);
		/* An all-ones pattern usually means no PHY responded */
		if (rv == 0xffff)
			rv = 0;
6603 }
6604
6605 return rv;
6606 }
6607
6608 /*
6609 * wm_gmii_i82544_writereg: [mii interface function]
6610 *
6611 * Write a PHY register on the GMII.
6612 */
6613 static void
6614 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6615 {
6616 struct wm_softc *sc = device_private(self);
6617 uint32_t mdic = 0;
6618 int i;
6619
6620 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6621 MDIC_REGADD(reg) | MDIC_DATA(val));
6622
6623 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6624 mdic = CSR_READ(sc, WMREG_MDIC);
6625 if (mdic & MDIC_READY)
6626 break;
6627 delay(50);
6628 }
6629
6630 if ((mdic & MDIC_READY) == 0)
6631 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6632 device_xname(sc->sc_dev), phy, reg);
6633 else if (mdic & MDIC_E)
6634 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6635 device_xname(sc->sc_dev), phy, reg);
6636 }
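
#if 0	/* Illustrative sketch, not compiled */
/*
 * Example of a read-modify-write through the two MDIC accessors
 * above.  MII_BMCR and BMCR_LOOP are the standard definitions from
 * <dev/mii/mii.h>; "self" and "phy" are hypothetical arguments.
 */
static void
wm_mdic_rmw_sketch(device_t self, int phy)
{
	int bmcr;

	bmcr = wm_gmii_i82544_readreg(self, phy, MII_BMCR);
	wm_gmii_i82544_writereg(self, phy, MII_BMCR, bmcr | BMCR_LOOP);
}
#endif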
6637
6638 /*
6639 * wm_gmii_i80003_readreg: [mii interface function]
6640 *
 *	Read a PHY register on the kumeran bus.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6644 */
6645 static int
6646 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6647 {
6648 struct wm_softc *sc = device_private(self);
6649 int sem;
6650 int rv;
6651
6652 if (phy != 1) /* only one PHY on kumeran bus */
6653 return 0;
6654
6655 sem = swfwphysem[sc->sc_funcid];
6656 if (wm_get_swfw_semaphore(sc, sem)) {
6657 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6658 __func__);
6659 return 0;
6660 }
6661
6662 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6663 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6664 reg >> GG82563_PAGE_SHIFT);
6665 } else {
6666 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6667 reg >> GG82563_PAGE_SHIFT);
6668 }
	/* Wait an extra 200us to work around an MDIC ready bit bug */
6670 delay(200);
6671 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6672 delay(200);
6673
6674 wm_put_swfw_semaphore(sc, sem);
6675 return rv;
6676 }
6677
6678 /*
6679 * wm_gmii_i80003_writereg: [mii interface function]
6680 *
 *	Write a PHY register on the kumeran bus.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6684 */
6685 static void
6686 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6687 {
6688 struct wm_softc *sc = device_private(self);
6689 int sem;
6690
6691 if (phy != 1) /* only one PHY on kumeran bus */
6692 return;
6693
6694 sem = swfwphysem[sc->sc_funcid];
6695 if (wm_get_swfw_semaphore(sc, sem)) {
6696 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6697 __func__);
6698 return;
6699 }
6700
6701 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6702 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6703 reg >> GG82563_PAGE_SHIFT);
6704 } else {
6705 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6706 reg >> GG82563_PAGE_SHIFT);
6707 }
	/* Wait an extra 200us to work around an MDIC ready bit bug */
6709 delay(200);
6710 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6711 delay(200);
6712
6713 wm_put_swfw_semaphore(sc, sem);
6714 }
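
/*
 * In the two GG82563 accessors above, "reg" encodes both the page and
 * the in-page register offset.  For example, register 16 on page 2
 * would be passed as
 *
 *	(2 << GG82563_PAGE_SHIFT) | 16
 *
 * and split back into the page-select write and the in-page access.
 */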
6715
6716 /*
6717 * wm_gmii_bm_readreg: [mii interface function]
6718 *
 *	Read a PHY register on the BM PHY (82567).
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6722 */
6723 static int
6724 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6725 {
6726 struct wm_softc *sc = device_private(self);
6727 int sem;
6728 int rv;
6729
6730 sem = swfwphysem[sc->sc_funcid];
6731 if (wm_get_swfw_semaphore(sc, sem)) {
6732 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6733 __func__);
6734 return 0;
6735 }
6736
6737 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6738 if (phy == 1)
6739 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6740 reg);
6741 else
6742 wm_gmii_i82544_writereg(self, phy,
6743 GG82563_PHY_PAGE_SELECT,
6744 reg >> GG82563_PAGE_SHIFT);
6745 }
6746
6747 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6748 wm_put_swfw_semaphore(sc, sem);
6749 return rv;
6750 }
6751
6752 /*
6753 * wm_gmii_bm_writereg: [mii interface function]
6754 *
 *	Write a PHY register on the BM PHY (82567).
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6758 */
6759 static void
6760 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6761 {
6762 struct wm_softc *sc = device_private(self);
6763 int sem;
6764
6765 sem = swfwphysem[sc->sc_funcid];
6766 if (wm_get_swfw_semaphore(sc, sem)) {
6767 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6768 __func__);
6769 return;
6770 }
6771
6772 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6773 if (phy == 1)
6774 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6775 reg);
6776 else
6777 wm_gmii_i82544_writereg(self, phy,
6778 GG82563_PHY_PAGE_SELECT,
6779 reg >> GG82563_PAGE_SHIFT);
6780 }
6781
6782 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6783 wm_put_swfw_semaphore(sc, sem);
6784 }
6785
/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read ("rd" != 0) or write a BM PHY wakeup register (page 800).
 *	Access goes through the wakeup-control register on page 769:
 *	enable wakeup register access, select page 800, write the
 *	register address, transfer the data, then restore the page 769
 *	enable state.
 */
static void
6787 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6788 {
6789 struct wm_softc *sc = device_private(self);
6790 uint16_t regnum = BM_PHY_REG_NUM(offset);
6791 uint16_t wuce;
6792
6793 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6794 if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
6796 }
6797
6798 /* Set page 769 */
6799 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6800 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6801
6802 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6803
6804 wuce &= ~BM_WUC_HOST_WU_BIT;
6805 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6806 wuce | BM_WUC_ENABLE_BIT);
6807
6808 /* Select page 800 */
6809 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6810 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6811
6812 /* Write page 800 */
6813 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6814
6815 if (rd)
6816 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6817 else
6818 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6819
6820 /* Set page 769 */
6821 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6822 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6823
6824 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6825 }
6826
6827 /*
6828 * wm_gmii_hv_readreg: [mii interface function]
6829 *
 *	Read a PHY register on the HV (PCH and newer) PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6833 */
6834 static int
6835 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6836 {
6837 struct wm_softc *sc = device_private(self);
6838 uint16_t page = BM_PHY_REG_PAGE(reg);
6839 uint16_t regnum = BM_PHY_REG_NUM(reg);
6840 uint16_t val;
6841 int rv;
6842
6843 if (wm_get_swfwhw_semaphore(sc)) {
6844 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6845 __func__);
6846 return 0;
6847 }
6848
	/* XXX Work around MDIO access failure while cable is disconnected */
6850 if (sc->sc_phytype == WMPHY_82577) {
6851 /* XXX must write */
6852 }
6853
6854 /* Page 800 works differently than the rest so it has its own func */
6855 if (page == BM_WUC_PAGE) {
6856 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
6857 return val;
6858 }
6859
6860 /*
	 * Pages below 768 work differently from the rest, so they would
	 * need their own function (not implemented; just complain)
6863 */
6864 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6865 printf("gmii_hv_readreg!!!\n");
6866 return 0;
6867 }
6868
6869 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6870 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6871 page << BME1000_PAGE_SHIFT);
6872 }
6873
6874 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6875 wm_put_swfwhw_semaphore(sc);
6876 return rv;
6877 }
6878
6879 /*
6880 * wm_gmii_hv_writereg: [mii interface function]
6881 *
 *	Write a PHY register on the HV (PCH and newer) PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6885 */
6886 static void
6887 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6888 {
6889 struct wm_softc *sc = device_private(self);
6890 uint16_t page = BM_PHY_REG_PAGE(reg);
6891 uint16_t regnum = BM_PHY_REG_NUM(reg);
6892
6893 if (wm_get_swfwhw_semaphore(sc)) {
6894 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6895 __func__);
6896 return;
6897 }
6898
	/* XXX Work around MDIO access failure while cable is disconnected */
6900
6901 /* Page 800 works differently than the rest so it has its own func */
6902 if (page == BM_WUC_PAGE) {
6903 uint16_t tmp;
6904
6905 tmp = val;
6906 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
6907 return;
6908 }
6909
6910 /*
	 * Pages below 768 work differently from the rest, so they would
	 * need their own function (not implemented; just complain)
6913 */
6914 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6915 printf("gmii_hv_writereg!!!\n");
6916 return;
6917 }
6918
6919 /*
	 * XXX Work around MDIO accesses being disabled after entering
	 * IEEE Power Down (whenever bit 11 of the PHY control register
	 * is set)
6922 */
6923
6924 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6925 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6926 page << BME1000_PAGE_SHIFT);
6927 }
6928
6929 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6930 wm_put_swfwhw_semaphore(sc);
6931 }
6932
6933 /*
6934 * wm_gmii_82580_readreg: [mii interface function]
6935 *
6936 * Read a PHY register on the 82580 and I350.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6939 */
6940 static int
6941 wm_gmii_82580_readreg(device_t self, int phy, int reg)
6942 {
6943 struct wm_softc *sc = device_private(self);
6944 int sem;
6945 int rv;
6946
6947 sem = swfwphysem[sc->sc_funcid];
6948 if (wm_get_swfw_semaphore(sc, sem)) {
6949 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6950 __func__);
6951 return 0;
6952 }
6953
6954 rv = wm_gmii_i82544_readreg(self, phy, reg);
6955
6956 wm_put_swfw_semaphore(sc, sem);
6957 return rv;
6958 }
6959
6960 /*
6961 * wm_gmii_82580_writereg: [mii interface function]
6962 *
6963 * Write a PHY register on the 82580 and I350.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6966 */
6967 static void
6968 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
6969 {
6970 struct wm_softc *sc = device_private(self);
6971 int sem;
6972
6973 sem = swfwphysem[sc->sc_funcid];
6974 if (wm_get_swfw_semaphore(sc, sem)) {
6975 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6976 __func__);
6977 return;
6978 }
6979
6980 wm_gmii_i82544_writereg(self, phy, reg, val);
6981
6982 wm_put_swfw_semaphore(sc, sem);
6983 }
6984
6985 /*
6986 * wm_gmii_statchg: [mii interface function]
6987 *
6988 * Callback from MII layer when media changes.
6989 */
6990 static void
6991 wm_gmii_statchg(struct ifnet *ifp)
6992 {
6993 struct wm_softc *sc = ifp->if_softc;
6994 struct mii_data *mii = &sc->sc_mii;
6995
6996 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6997 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6998 sc->sc_fcrtl &= ~FCRTL_XONE;
6999
7000 /*
7001 * Get flow control negotiation result.
7002 */
7003 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7004 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7005 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7006 mii->mii_media_active &= ~IFM_ETH_FMASK;
7007 }
7008
7009 if (sc->sc_flowflags & IFM_FLOW) {
7010 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7011 sc->sc_ctrl |= CTRL_TFCE;
7012 sc->sc_fcrtl |= FCRTL_XONE;
7013 }
7014 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7015 sc->sc_ctrl |= CTRL_RFCE;
7016 }
7017
7018 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7019 DPRINTF(WM_DEBUG_LINK,
7020 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7021 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7022 } else {
7023 DPRINTF(WM_DEBUG_LINK,
7024 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7025 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7026 }
7027
7028 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7029 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7030 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7031 : WMREG_FCRTL, sc->sc_fcrtl);
7032 if (sc->sc_type == WM_T_80003) {
7033 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7034 case IFM_1000_T:
7035 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7036 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7037 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7038 break;
7039 default:
7040 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7041 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7042 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7043 break;
7044 }
7045 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7046 }
7047 }
7048
7049 /*
7050 * wm_kmrn_readreg:
7051 *
7052 * Read a kumeran register
7053 */
7054 static int
7055 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7056 {
7057 int rv;
7058
	if (sc->sc_flags & WM_F_LOCK_SWFW) {
7060 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7061 aprint_error_dev(sc->sc_dev,
7062 "%s: failed to get semaphore\n", __func__);
7063 return 0;
7064 }
	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
7066 if (wm_get_swfwhw_semaphore(sc)) {
7067 aprint_error_dev(sc->sc_dev,
7068 "%s: failed to get semaphore\n", __func__);
7069 return 0;
7070 }
7071 }
7072
7073 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7074 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7075 KUMCTRLSTA_REN);
7076 CSR_WRITE_FLUSH(sc);
7077 delay(2);
7078
7079 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7080
	if (sc->sc_flags & WM_F_LOCK_SWFW)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
		wm_put_swfwhw_semaphore(sc);
7085
7086 return rv;
7087 }
7088
7089 /*
7090 * wm_kmrn_writereg:
7091 *
7092 * Write a kumeran register
7093 */
7094 static void
7095 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7096 {
7097
	if (sc->sc_flags & WM_F_LOCK_SWFW) {
7099 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7100 aprint_error_dev(sc->sc_dev,
7101 "%s: failed to get semaphore\n", __func__);
7102 return;
7103 }
	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
7105 if (wm_get_swfwhw_semaphore(sc)) {
7106 aprint_error_dev(sc->sc_dev,
7107 "%s: failed to get semaphore\n", __func__);
7108 return;
7109 }
7110 }
7111
7112 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7113 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7114 (val & KUMCTRLSTA_MASK));
7115
	if (sc->sc_flags & WM_F_LOCK_SWFW)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
		wm_put_swfwhw_semaphore(sc);
7120 }
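
#if 0	/* Illustrative sketch, not compiled */
/*
 * Example of a kumeran register read-modify-write using the two
 * helpers above.  "reg" is one of the KUMCTRLSTA_OFFSET_* values
 * (wm_gmii_statchg() writes KUMCTRLSTA_OFFSET_HD_CTRL, for instance)
 * and "setbits" is a hypothetical mask.
 */
static void
wm_kmrn_rmw_sketch(struct wm_softc *sc, int reg, int setbits)
{
	int val;

	val = wm_kmrn_readreg(sc, reg);
	wm_kmrn_writereg(sc, reg, val | setbits);
}
#endif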
7121
7122 /* SGMII related */
7123
7124 /*
7125 * wm_sgmii_uses_mdio
7126 *
7127 * Check whether the transaction is to the internal PHY or the external
7128 * MDIO interface. Return true if it's MDIO.
7129 */
7130 static bool
7131 wm_sgmii_uses_mdio(struct wm_softc *sc)
7132 {
7133 uint32_t reg;
7134 bool ismdio = false;
7135
7136 switch (sc->sc_type) {
7137 case WM_T_82575:
7138 case WM_T_82576:
7139 reg = CSR_READ(sc, WMREG_MDIC);
7140 ismdio = ((reg & MDIC_DEST) != 0);
7141 break;
7142 case WM_T_82580:
7143 case WM_T_I350:
7144 case WM_T_I354:
7145 case WM_T_I210:
7146 case WM_T_I211:
7147 reg = CSR_READ(sc, WMREG_MDICNFG);
7148 ismdio = ((reg & MDICNFG_DEST) != 0);
7149 break;
7150 default:
7151 break;
7152 }
7153
7154 return ismdio;
7155 }
7156
7157 /*
7158 * wm_sgmii_readreg: [mii interface function]
7159 *
 *	Read a PHY register on the SGMII.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
7163 */
7164 static int
7165 wm_sgmii_readreg(device_t self, int phy, int reg)
7166 {
7167 struct wm_softc *sc = device_private(self);
7168 uint32_t i2ccmd;
7169 int i, rv;
7170
7171 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7172 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7173 __func__);
7174 return 0;
7175 }
7176
7177 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7178 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7179 | I2CCMD_OPCODE_READ;
7180 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7181
7182 /* Poll the ready bit */
7183 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7184 delay(50);
7185 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7186 if (i2ccmd & I2CCMD_READY)
7187 break;
7188 }
7189 if ((i2ccmd & I2CCMD_READY) == 0)
7190 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7191 if ((i2ccmd & I2CCMD_ERROR) != 0)
7192 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7193
	/* The 16-bit data in I2CCMD is byte-swapped; restore host order */
	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7195
7196 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7197 return rv;
7198 }
7199
7200 /*
7201 * wm_sgmii_writereg: [mii interface function]
7202 *
7203 * Write a PHY register on the SGMII.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
7206 */
7207 static void
7208 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7209 {
7210 struct wm_softc *sc = device_private(self);
7211 uint32_t i2ccmd;
7212 int i;
7213
7214 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7215 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7216 __func__);
7217 return;
7218 }
7219
7220 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7221 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7222 | I2CCMD_OPCODE_WRITE;
7223 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7224
7225 /* Poll the ready bit */
7226 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7227 delay(50);
7228 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7229 if (i2ccmd & I2CCMD_READY)
7230 break;
7231 }
7232 if ((i2ccmd & I2CCMD_READY) == 0)
7233 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7234 if ((i2ccmd & I2CCMD_ERROR) != 0)
7235 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7236
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7238 }
7239
7240 /* TBI related */
7241
/*
 * wm_check_for_link:
 *
 *	Helper; check the link state on TBI devices and force link up,
 *	or fall back to autonegotiation, according to the
 *	SWDPIN/LU/RXCW table in the function body.
 *	XXX Currently TBI only.
 */
static int
7244 wm_check_for_link(struct wm_softc *sc)
7245 {
7246 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7247 uint32_t rxcw;
7248 uint32_t ctrl;
7249 uint32_t status;
7250 uint32_t sig;
7251
7252 if (sc->sc_mediatype & WMP_F_SERDES) {
7253 sc->sc_tbi_linkup = 1;
7254 return 0;
7255 }
7256
7257 rxcw = CSR_READ(sc, WMREG_RXCW);
7258 ctrl = CSR_READ(sc, WMREG_CTRL);
7259 status = CSR_READ(sc, WMREG_STATUS);
7260
7261 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7262
7263 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7264 device_xname(sc->sc_dev), __func__,
7265 ((ctrl & CTRL_SWDPIN(1)) == sig),
7266 ((status & STATUS_LU) != 0),
7267 ((rxcw & RXCW_C) != 0)
7268 ));
7269
7270 /*
7271 * SWDPIN LU RXCW
7272 * 0 0 0
7273 * 0 0 1 (should not happen)
7274 * 0 1 0 (should not happen)
7275 * 0 1 1 (should not happen)
7276 * 1 0 0 Disable autonego and force linkup
7277 * 1 0 1 got /C/ but not linkup yet
7278 * 1 1 0 (linkup)
7279 * 1 1 1 If IFM_AUTO, back to autonego
7280 *
7281 */
7282 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7283 && ((status & STATUS_LU) == 0)
7284 && ((rxcw & RXCW_C) == 0)) {
7285 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7286 __func__));
7287 sc->sc_tbi_linkup = 0;
7288 /* Disable auto-negotiation in the TXCW register */
7289 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7290
7291 /*
7292 * Force link-up and also force full-duplex.
7293 *
		 * NOTE: the hardware updates TFCE and RFCE in CTRL
		 * automatically, so we base sc->sc_ctrl on the value
		 * just read from the register.
7296 */
7297 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7298 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7299 } else if (((status & STATUS_LU) != 0)
7300 && ((rxcw & RXCW_C) != 0)
7301 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7302 sc->sc_tbi_linkup = 1;
7303 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7304 __func__));
7305 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7306 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7307 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7308 && ((rxcw & RXCW_C) != 0)) {
7309 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7310 } else {
7311 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7312 status));
7313 }
7314
7315 return 0;
7316 }
7317
7318 /*
7319 * wm_tbi_mediainit:
7320 *
7321 * Initialize media for use on 1000BASE-X devices.
7322 */
7323 static void
7324 wm_tbi_mediainit(struct wm_softc *sc)
7325 {
7326 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7327 const char *sep = "";
7328
7329 if (sc->sc_type < WM_T_82543)
7330 sc->sc_tipg = TIPG_WM_DFLT;
7331 else
7332 sc->sc_tipg = TIPG_LG_DFLT;
7333
7334 sc->sc_tbi_anegticks = 5;
7335
7336 /* Initialize our media structures */
7337 sc->sc_mii.mii_ifp = ifp;
7338
7339 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7340 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
7341 wm_tbi_mediastatus);
7342
7343 /*
7344 * SWD Pins:
7345 *
7346 * 0 = Link LED (output)
7347 * 1 = Loss Of Signal (input)
7348 */
7349 sc->sc_ctrl |= CTRL_SWDPIO(0);
7350 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
7351 if (sc->sc_mediatype & WMP_F_SERDES)
7352 sc->sc_ctrl &= ~CTRL_LRST;
7353
7354 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7355
7356 #define ADD(ss, mm, dd) \
7357 do { \
7358 aprint_normal("%s%s", sep, ss); \
7359 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
7360 sep = ", "; \
7361 } while (/*CONSTCOND*/0)
7362
7363 aprint_normal_dev(sc->sc_dev, "");
7364
7365 /* Only 82545 is LX */
7366 if (sc->sc_type == WM_T_82545) {
7367 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
7368 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
7369 } else {
7370 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
7371 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
7372 }
7373 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
7374 aprint_normal("\n");
7375
7376 #undef ADD
7377
7378 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
7379 }
7380
7381 /*
7382 * wm_tbi_mediastatus: [ifmedia interface function]
7383 *
7384 * Get the current interface media status on a 1000BASE-X device.
7385 */
7386 static void
7387 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7388 {
7389 struct wm_softc *sc = ifp->if_softc;
7390 uint32_t ctrl, status;
7391
7392 ifmr->ifm_status = IFM_AVALID;
7393 ifmr->ifm_active = IFM_ETHER;
7394
7395 status = CSR_READ(sc, WMREG_STATUS);
7396 if ((status & STATUS_LU) == 0) {
7397 ifmr->ifm_active |= IFM_NONE;
7398 return;
7399 }
7400
7401 ifmr->ifm_status |= IFM_ACTIVE;
7402 /* Only 82545 is LX */
7403 if (sc->sc_type == WM_T_82545)
7404 ifmr->ifm_active |= IFM_1000_LX;
7405 else
7406 ifmr->ifm_active |= IFM_1000_SX;
7407 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
7408 ifmr->ifm_active |= IFM_FDX;
7409 else
7410 ifmr->ifm_active |= IFM_HDX;
7411 ctrl = CSR_READ(sc, WMREG_CTRL);
7412 if (ctrl & CTRL_RFCE)
7413 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
7414 if (ctrl & CTRL_TFCE)
7415 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
7416 }
7417
7418 /*
7419 * wm_tbi_mediachange: [ifmedia interface function]
7420 *
7421 * Set hardware to newly-selected media on a 1000BASE-X device.
7422 */
7423 static int
7424 wm_tbi_mediachange(struct ifnet *ifp)
7425 {
7426 struct wm_softc *sc = ifp->if_softc;
7427 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7428 uint32_t status;
7429 int i;
7430
7431 if (sc->sc_mediatype & WMP_F_SERDES)
7432 return 0;
7433
7434 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
7435 || (sc->sc_type >= WM_T_82575))
7436 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
7437
7438 /* XXX power_up_serdes_link_82575() */
7439
7440 sc->sc_ctrl &= ~CTRL_LRST;
7441 sc->sc_txcw = TXCW_ANE;
7442 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7443 sc->sc_txcw |= TXCW_FD | TXCW_HD;
7444 else if (ife->ifm_media & IFM_FDX)
7445 sc->sc_txcw |= TXCW_FD;
7446 else
7447 sc->sc_txcw |= TXCW_HD;
7448
7449 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
7450 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
7451
7452 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
7453 device_xname(sc->sc_dev), sc->sc_txcw));
7454 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7455 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7456 CSR_WRITE_FLUSH(sc);
7457 delay(1000);
7458
7459 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
7460 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
7461
7462 /*
	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be
	 * set if the optics detect a signal, 0 if they don't.
7465 */
7466 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
7467 /* Have signal; wait for the link to come up. */
7468 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
7469 delay(10000);
7470 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
7471 break;
7472 }
7473
7474 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
7475 device_xname(sc->sc_dev),i));
7476
7477 status = CSR_READ(sc, WMREG_STATUS);
7478 DPRINTF(WM_DEBUG_LINK,
7479 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
7480 device_xname(sc->sc_dev),status, STATUS_LU));
7481 if (status & STATUS_LU) {
7482 /* Link is up. */
7483 DPRINTF(WM_DEBUG_LINK,
7484 ("%s: LINK: set media -> link up %s\n",
7485 device_xname(sc->sc_dev),
7486 (status & STATUS_FD) ? "FDX" : "HDX"));
7487
7488 /*
			 * NOTE: the hardware updates TFCE and RFCE in
			 * CTRL automatically, so we refresh sc->sc_ctrl
			 * from the register.
7491 */
7492 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7493 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7494 sc->sc_fcrtl &= ~FCRTL_XONE;
7495 if (status & STATUS_FD)
7496 sc->sc_tctl |=
7497 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7498 else
7499 sc->sc_tctl |=
7500 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7501 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
7502 sc->sc_fcrtl |= FCRTL_XONE;
7503 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7504 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7505 WMREG_OLD_FCRTL : WMREG_FCRTL,
7506 sc->sc_fcrtl);
7507 sc->sc_tbi_linkup = 1;
7508 } else {
7509 if (i == WM_LINKUP_TIMEOUT)
7510 wm_check_for_link(sc);
7511 /* Link is down. */
7512 DPRINTF(WM_DEBUG_LINK,
7513 ("%s: LINK: set media -> link down\n",
7514 device_xname(sc->sc_dev)));
7515 sc->sc_tbi_linkup = 0;
7516 }
7517 } else {
7518 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
7519 device_xname(sc->sc_dev)));
7520 sc->sc_tbi_linkup = 0;
7521 }
7522
7523 wm_tbi_set_linkled(sc);
7524
7525 return 0;
7526 }
7527
7528 /*
7529 * wm_tbi_set_linkled:
7530 *
7531 * Update the link LED on 1000BASE-X devices.
7532 */
7533 static void
7534 wm_tbi_set_linkled(struct wm_softc *sc)
7535 {
7536
7537 if (sc->sc_tbi_linkup)
7538 sc->sc_ctrl |= CTRL_SWDPIN(0);
7539 else
7540 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7541
7542 /* 82540 or newer devices are active low */
7543 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7544
7545 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7546 }
7547
7548 /*
7549 * wm_tbi_check_link:
7550 *
7551 * Check the link on 1000BASE-X devices.
7552 */
7553 static void
7554 wm_tbi_check_link(struct wm_softc *sc)
7555 {
7556 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7557 uint32_t status;
7558
7559 KASSERT(WM_TX_LOCKED(sc));
7560
7561 if (sc->sc_mediatype & WMP_F_SERDES) {
7562 sc->sc_tbi_linkup = 1;
7563 return;
7564 }
7565
7566 status = CSR_READ(sc, WMREG_STATUS);
7567
7568 /* XXX is this needed? */
7569 (void)CSR_READ(sc, WMREG_RXCW);
7570 (void)CSR_READ(sc, WMREG_CTRL);
7571
7572 /* set link status */
7573 if ((status & STATUS_LU) == 0) {
7574 DPRINTF(WM_DEBUG_LINK,
7575 ("%s: LINK: checklink -> down\n",
7576 device_xname(sc->sc_dev)));
7577 sc->sc_tbi_linkup = 0;
7578 } else if (sc->sc_tbi_linkup == 0) {
7579 DPRINTF(WM_DEBUG_LINK,
7580 ("%s: LINK: checklink -> up %s\n",
7581 device_xname(sc->sc_dev),
7582 (status & STATUS_FD) ? "FDX" : "HDX"));
7583 sc->sc_tbi_linkup = 1;
7584 }
7585
7586 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
7587 && ((status & STATUS_LU) == 0)) {
7588 sc->sc_tbi_linkup = 0;
7589 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7590 /* If the timer expired, retry autonegotiation */
7591 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
7592 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
7593 sc->sc_tbi_ticks = 0;
7594 /*
7595 * Reset the link, and let autonegotiation do
7596 * its thing
7597 */
7598 sc->sc_ctrl |= CTRL_LRST;
7599 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7600 CSR_WRITE_FLUSH(sc);
7601 delay(1000);
7602 sc->sc_ctrl &= ~CTRL_LRST;
7603 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7604 CSR_WRITE_FLUSH(sc);
7605 delay(1000);
7606 CSR_WRITE(sc, WMREG_TXCW,
7607 sc->sc_txcw & ~TXCW_ANE);
7608 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7609 }
7610 }
7611 }
7612
7613 wm_tbi_set_linkled(sc);
7614 }
7615
7616 /* SFP related */
7617
7618 static int
7619 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
7620 {
7621 uint32_t i2ccmd;
7622 int i;
7623
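	/*
	 * Kick off a single-byte read through the MAC's I2C interface,
	 * then poll I2CCMD below until the hardware reports completion
	 * (up to I2CCMD_PHY_TIMEOUT iterations of 50us each).
	 */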
7624 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
7625 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7626
7627 /* Poll the ready bit */
7628 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7629 delay(50);
7630 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7631 if (i2ccmd & I2CCMD_READY)
7632 break;
7633 }
7634 if ((i2ccmd & I2CCMD_READY) == 0)
7635 return -1;
7636 if ((i2ccmd & I2CCMD_ERROR) != 0)
7637 return -1;
7638
7639 *data = i2ccmd & 0x00ff;
7640
7641 return 0;
7642 }
7643
7644 static uint32_t
7645 wm_sfp_get_media_type(struct wm_softc *sc)
7646 {
7647 uint32_t ctrl_ext;
7648 uint8_t val = 0;
7649 int timeout = 3;
7650 uint32_t mediatype = WMP_F_UNKNOWN;
7651 int rv = -1;
7652
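	/*
	 * Enable the MAC's I2C interface so we can read the SFP module's
	 * ID EEPROM; the original CTRL_EXT setting is restored at "out".
	 */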
7653 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7654 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
7655 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
7656 CSR_WRITE_FLUSH(sc);
7657
7658 /* Read SFP module data */
7659 while (timeout) {
7660 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
7661 if (rv == 0)
7662 break;
7663 delay(100*1000); /* XXX too big */
7664 timeout--;
7665 }
7666 if (rv != 0)
7667 goto out;
7668 switch (val) {
7669 case SFF_SFP_ID_SFF:
7670 aprint_normal_dev(sc->sc_dev,
7671 "Module/Connector soldered to board\n");
7672 break;
7673 case SFF_SFP_ID_SFP:
7674 aprint_normal_dev(sc->sc_dev, "SFP\n");
7675 break;
7676 case SFF_SFP_ID_UNKNOWN:
7677 goto out;
7678 default:
7679 break;
7680 }
7681
7682 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
7683 if (rv != 0) {
7684 goto out;
7685 }
7686
7687 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
7688 mediatype = WMP_F_SERDES;
7689 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
7690 sc->sc_flags |= WM_F_SGMII;
7691 mediatype = WMP_F_COPPER;
7692 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
7693 sc->sc_flags |= WM_F_SGMII;
7694 mediatype = WMP_F_SERDES;
7695 }
7696
7697 out:
7698 /* Restore I2C interface setting */
7699 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7700
7701 return mediatype;
7702 }

7703 /*
7704 * NVM related.
7705 * Microwire, SPI (w/wo EERD) and Flash.
7706 */
7707
7708 /* Both spi and uwire */
7709
7710 /*
7711 * wm_eeprom_sendbits:
7712 *
7713 * Send a series of bits to the EEPROM.
7714 */
7715 static void
7716 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
7717 {
7718 uint32_t reg;
7719 int x;
7720
7721 reg = CSR_READ(sc, WMREG_EECD);
7722
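	/*
	 * Shift the bits out MSB first: present each bit on DI, then
	 * pulse SK high and low, with 2us of delay around each edge.
	 */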
7723 for (x = nbits; x > 0; x--) {
7724 if (bits & (1U << (x - 1)))
7725 reg |= EECD_DI;
7726 else
7727 reg &= ~EECD_DI;
7728 CSR_WRITE(sc, WMREG_EECD, reg);
7729 CSR_WRITE_FLUSH(sc);
7730 delay(2);
7731 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7732 CSR_WRITE_FLUSH(sc);
7733 delay(2);
7734 CSR_WRITE(sc, WMREG_EECD, reg);
7735 CSR_WRITE_FLUSH(sc);
7736 delay(2);
7737 }
7738 }
7739
7740 /*
7741 * wm_eeprom_recvbits:
7742 *
7743 * Receive a series of bits from the EEPROM.
7744 */
7745 static void
7746 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
7747 {
7748 uint32_t reg, val;
7749 int x;
7750
7751 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
7752
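	/*
	 * Shift the bits in MSB first: raise SK, sample DO, then lower
	 * SK again, with 2us of delay around each clock edge.
	 */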
7753 val = 0;
7754 for (x = nbits; x > 0; x--) {
7755 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7756 CSR_WRITE_FLUSH(sc);
7757 delay(2);
7758 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
7759 val |= (1U << (x - 1));
7760 CSR_WRITE(sc, WMREG_EECD, reg);
7761 CSR_WRITE_FLUSH(sc);
7762 delay(2);
7763 }
7764 *valp = val;
7765 }
7766
7767 /* Microwire */
7768
7769 /*
7770 * wm_nvm_read_uwire:
7771 *
7772 * Read a word from the EEPROM using the MicroWire protocol.
7773 */
7774 static int
7775 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7776 {
7777 uint32_t reg, val;
7778 int i;
7779
7780 for (i = 0; i < wordcnt; i++) {
7781 /* Clear SK and DI. */
7782 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
7783 CSR_WRITE(sc, WMREG_EECD, reg);
7784
7785 /*
7786 		 * XXX: workaround for a bug in qemu-0.12.x and prior,
7787 		 * and in Xen.
7788 		 *
7789 		 * We use this workaround only for 82540 because qemu's
7790 		 * e1000 acts like an 82540.
7791 */
7792 if (sc->sc_type == WM_T_82540) {
7793 reg |= EECD_SK;
7794 CSR_WRITE(sc, WMREG_EECD, reg);
7795 reg &= ~EECD_SK;
7796 CSR_WRITE(sc, WMREG_EECD, reg);
7797 CSR_WRITE_FLUSH(sc);
7798 delay(2);
7799 }
7800 /* XXX: end of workaround */
7801
7802 /* Set CHIP SELECT. */
7803 reg |= EECD_CS;
7804 CSR_WRITE(sc, WMREG_EECD, reg);
7805 CSR_WRITE_FLUSH(sc);
7806 delay(2);
7807
7808 /* Shift in the READ command. */
7809 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
7810
7811 /* Shift in address. */
7812 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
7813
7814 /* Shift out the data. */
7815 wm_eeprom_recvbits(sc, &val, 16);
7816 data[i] = val & 0xffff;
7817
7818 /* Clear CHIP SELECT. */
7819 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
7820 CSR_WRITE(sc, WMREG_EECD, reg);
7821 CSR_WRITE_FLUSH(sc);
7822 delay(2);
7823 }
7824
7825 return 0;
7826 }
7827
7828 /* SPI */
7829
7830 /*
7831 * Set SPI and FLASH related information from the EECD register.
7832 * For 82541 and 82547, the word size is taken from EEPROM.
7833 */
7834 static int
7835 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
7836 {
7837 int size;
7838 uint32_t reg;
7839 uint16_t data;
7840
7841 reg = CSR_READ(sc, WMREG_EECD);
7842 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
7843
7844 /* Read the size of NVM from EECD by default */
7845 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
7846 switch (sc->sc_type) {
7847 case WM_T_82541:
7848 case WM_T_82541_2:
7849 case WM_T_82547:
7850 case WM_T_82547_2:
7851 /* Set dummy value to access EEPROM */
7852 sc->sc_nvm_wordsize = 64;
7853 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
7854 reg = data;
7855 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
7856 if (size == 0)
7857 size = 6; /* 64 word size */
7858 else
7859 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
7860 break;
7861 case WM_T_80003:
7862 case WM_T_82571:
7863 case WM_T_82572:
7864 case WM_T_82573: /* SPI case */
7865 case WM_T_82574: /* SPI case */
7866 case WM_T_82583: /* SPI case */
7867 size += NVM_WORD_SIZE_BASE_SHIFT;
7868 if (size > 14)
7869 size = 14;
7870 break;
7871 case WM_T_82575:
7872 case WM_T_82576:
7873 case WM_T_82580:
7874 case WM_T_I350:
7875 case WM_T_I354:
7876 case WM_T_I210:
7877 case WM_T_I211:
7878 size += NVM_WORD_SIZE_BASE_SHIFT;
7879 if (size > 15)
7880 size = 15;
7881 break;
7882 default:
7883 aprint_error_dev(sc->sc_dev,
7884 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
7885 return -1;
7886 break;
7887 }
7888
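	/* EECD_EE_SIZE_EX is an exponent; convert it to a word count. */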
7889 sc->sc_nvm_wordsize = 1 << size;
7890
7891 return 0;
7892 }
7893
7894 /*
7895 * wm_nvm_ready_spi:
7896 *
7897 * Wait for a SPI EEPROM to be ready for commands.
7898 */
7899 static int
7900 wm_nvm_ready_spi(struct wm_softc *sc)
7901 {
7902 uint32_t val;
7903 int usec;
7904
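	/*
	 * Poll the SPI status register until the part reports ready,
	 * bounded by SPI_MAX_RETRIES microseconds of accumulated delay.
	 */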
7905 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
7906 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
7907 wm_eeprom_recvbits(sc, &val, 8);
7908 if ((val & SPI_SR_RDY) == 0)
7909 break;
7910 }
7911 if (usec >= SPI_MAX_RETRIES) {
7912 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
7913 return 1;
7914 }
7915 return 0;
7916 }
7917
7918 /*
7919 * wm_nvm_read_spi:
7920 *
7921  * Read a word from the EEPROM using the SPI protocol.
7922 */
7923 static int
7924 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7925 {
7926 uint32_t reg, val;
7927 int i;
7928 uint8_t opc;
7929
7930 /* Clear SK and CS. */
7931 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
7932 CSR_WRITE(sc, WMREG_EECD, reg);
7933 CSR_WRITE_FLUSH(sc);
7934 delay(2);
7935
7936 if (wm_nvm_ready_spi(sc))
7937 return 1;
7938
7939 /* Toggle CS to flush commands. */
7940 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
7941 CSR_WRITE_FLUSH(sc);
7942 delay(2);
7943 CSR_WRITE(sc, WMREG_EECD, reg);
7944 CSR_WRITE_FLUSH(sc);
7945 delay(2);
7946
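	/*
	 * Small parts with 8-bit addressing use the A8 bit of the READ
	 * opcode as the ninth address bit for words at 128 and above.
	 */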
7947 opc = SPI_OPC_READ;
7948 if (sc->sc_nvm_addrbits == 8 && word >= 128)
7949 opc |= SPI_OPC_A8;
7950
7951 wm_eeprom_sendbits(sc, opc, 8);
7952 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
7953
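	/* Each word comes back MSB first; swap the bytes to LSB-first. */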
7954 for (i = 0; i < wordcnt; i++) {
7955 wm_eeprom_recvbits(sc, &val, 16);
7956 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
7957 }
7958
7959 /* Raise CS and clear SK. */
7960 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
7961 CSR_WRITE(sc, WMREG_EECD, reg);
7962 CSR_WRITE_FLUSH(sc);
7963 delay(2);
7964
7965 return 0;
7966 }
7967
7968 /* Using with EERD */
7969
7970 static int
7971 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
7972 {
7973 uint32_t attempts = 100000;
7974 uint32_t i, reg = 0;
7975 int32_t done = -1;
7976
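	/* Poll for DONE, up to ~500ms (100000 iterations of 5us). */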
7977 for (i = 0; i < attempts; i++) {
7978 reg = CSR_READ(sc, rw);
7979
7980 if (reg & EERD_DONE) {
7981 done = 0;
7982 break;
7983 }
7984 delay(5);
7985 }
7986
7987 return done;
7988 }
7989
7990 static int
7991 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
7992 uint16_t *data)
7993 {
7994 int i, eerd = 0;
7995 int error = 0;
7996
7997 for (i = 0; i < wordcnt; i++) {
7998 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
7999
8000 CSR_WRITE(sc, WMREG_EERD, eerd);
8001 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
8002 if (error != 0)
8003 break;
8004
8005 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
8006 }
8007
8008 return error;
8009 }
8010
8011 /* Flash */
8012
8013 static int
8014 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
8015 {
8016 uint32_t eecd;
8017 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
8018 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
8019 uint8_t sig_byte = 0;
8020
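	/*
	 * On ICH8/ICH9 the EECD register may report the valid bank
	 * directly; everywhere else (or if EECD is inconclusive), probe
	 * each bank's signature byte.
	 */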
8021 switch (sc->sc_type) {
8022 case WM_T_ICH8:
8023 case WM_T_ICH9:
8024 eecd = CSR_READ(sc, WMREG_EECD);
8025 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
8026 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
8027 return 0;
8028 }
8029 /* FALLTHROUGH */
8030 default:
8031 /* Default to 0 */
8032 *bank = 0;
8033
8034 /* Check bank 0 */
8035 wm_read_ich8_byte(sc, act_offset, &sig_byte);
8036 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8037 *bank = 0;
8038 return 0;
8039 }
8040
8041 /* Check bank 1 */
8042 wm_read_ich8_byte(sc, act_offset + bank1_offset,
8043 &sig_byte);
8044 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8045 *bank = 1;
8046 return 0;
8047 }
8048 }
8049
8050 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
8051 device_xname(sc->sc_dev)));
8052 return -1;
8053 }
8054
8055 /******************************************************************************
8056 * This function does initial flash setup so that a new read/write/erase cycle
8057 * can be started.
8058 *
8059 * sc - The pointer to the hw structure
8060 ****************************************************************************/
8061 static int32_t
8062 wm_ich8_cycle_init(struct wm_softc *sc)
8063 {
8064 uint16_t hsfsts;
8065 int32_t error = 1;
8066 int32_t i = 0;
8067
8068 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8069
8070 	/* Check that the Flash Descriptor is valid in the HW status */
8071 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
8072 return error;
8073 }
8074
8075 	/* Clear FCERR and DAEL in the HW status by writing 1s */
8077 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
8078
8079 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8080
8081 	/*
8082 	 * Either we should have a hardware SPI cycle-in-progress bit to
8083 	 * check against before starting a new cycle, or the FDONE bit
8084 	 * should be changed in the hardware so that it is 1 after hardware
8085 	 * reset, which could then be used to tell whether a cycle is in
8086 	 * progress or has been completed.  We should also have some software
8087 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit so
8088 	 * that accesses to those bits by two threads are serialized, or some
8089 	 * way to keep two threads from starting a cycle at the same time.
8090 	 */
8091
8092 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8093 /*
8094 * There is no cycle running at present, so we can start a
8095 * cycle
8096 */
8097
8098 /* Begin by setting Flash Cycle Done. */
8099 hsfsts |= HSFSTS_DONE;
8100 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8101 error = 0;
8102 } else {
8103 /*
8104 		 * Otherwise poll for some time so the current cycle has a
8105 * chance to end before giving up.
8106 */
8107 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
8108 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8109 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8110 error = 0;
8111 break;
8112 }
8113 delay(1);
8114 }
8115 if (error == 0) {
8116 /*
8117 			 * The previous cycle ended before we gave up;
8118 			 * now set the Flash Cycle Done bit.
8119 */
8120 hsfsts |= HSFSTS_DONE;
8121 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8122 }
8123 }
8124 return error;
8125 }
8126
8127 /******************************************************************************
8128 * This function starts a flash cycle and waits for its completion
8129 *
8130 * sc - The pointer to the hw structure
8131 ****************************************************************************/
8132 static int32_t
8133 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
8134 {
8135 uint16_t hsflctl;
8136 uint16_t hsfsts;
8137 int32_t error = 1;
8138 uint32_t i = 0;
8139
8140 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
8141 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8142 hsflctl |= HSFCTL_GO;
8143 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8144
8145 /* Wait till FDONE bit is set to 1 */
8146 do {
8147 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8148 if (hsfsts & HSFSTS_DONE)
8149 break;
8150 delay(1);
8151 i++;
8152 } while (i < timeout);
8153 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
8154 error = 0;
8155
8156 return error;
8157 }
8158
8159 /******************************************************************************
8160 * Reads a byte or word from the NVM using the ICH8 flash access registers.
8161 *
8162 * sc - The pointer to the hw structure
8163 * index - The index of the byte or word to read.
8164 * size - Size of data to read, 1=byte 2=word
8165 * data - Pointer to the word to store the value read.
8166 *****************************************************************************/
8167 static int32_t
8168 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
8169 uint32_t size, uint16_t *data)
8170 {
8171 uint16_t hsfsts;
8172 uint16_t hsflctl;
8173 uint32_t flash_linear_address;
8174 uint32_t flash_data = 0;
8175 int32_t error = 1;
8176 int32_t count = 0;
8177
8178 	if (size < 1 || size > 2 || data == NULL ||
8179 index > ICH_FLASH_LINEAR_ADDR_MASK)
8180 return error;
8181
8182 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
8183 sc->sc_ich8_flash_base;
8184
8185 do {
8186 delay(1);
8187 /* Steps */
8188 error = wm_ich8_cycle_init(sc);
8189 if (error)
8190 break;
8191
8192 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8193 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
8194 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
8195 & HSFCTL_BCOUNT_MASK;
8196 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
8197 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8198
8199 /*
8200 * Write the last 24 bits of index into Flash Linear address
8201 * field in Flash Address
8202 */
8203 /* TODO: TBD maybe check the index against the size of flash */
8204
8205 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
8206
8207 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
8208
8209 		/*
8210 		 * If FCERR is set, clear it and retry the whole sequence a
8211 		 * few more times; otherwise read the data out of the Flash
8212 		 * Data0 register, least significant byte first.
8213 		 */
8215 if (error == 0) {
8216 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
8217 if (size == 1)
8218 *data = (uint8_t)(flash_data & 0x000000FF);
8219 else if (size == 2)
8220 *data = (uint16_t)(flash_data & 0x0000FFFF);
8221 break;
8222 } else {
8223 /*
8224 * If we've gotten here, then things are probably
8225 * completely hosed, but if the error condition is
8226 * detected, it won't hurt to give it another try...
8227 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
8228 */
8229 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8230 if (hsfsts & HSFSTS_ERR) {
8231 /* Repeat for some time before giving up. */
8232 continue;
8233 } else if ((hsfsts & HSFSTS_DONE) == 0)
8234 break;
8235 }
8236 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8237
8238 return error;
8239 }
8240
8241 /******************************************************************************
8242 * Reads a single byte from the NVM using the ICH8 flash access registers.
8243 *
8244 * sc - pointer to wm_hw structure
8245 * index - The index of the byte to read.
8246 * data - Pointer to a byte to store the value read.
8247 *****************************************************************************/
8248 static int32_t
8249 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
8250 {
8251 int32_t status;
8252 uint16_t word = 0;
8253
8254 status = wm_read_ich8_data(sc, index, 1, &word);
8255 if (status == 0)
8256 *data = (uint8_t)word;
8257 else
8258 *data = 0;
8259
8260 return status;
8261 }
8262
8263 /******************************************************************************
8264 * Reads a word from the NVM using the ICH8 flash access registers.
8265 *
8266 * sc - pointer to wm_hw structure
8267 * index - The starting byte index of the word to read.
8268 * data - Pointer to a word to store the value read.
8269 *****************************************************************************/
8270 static int32_t
8271 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8272 {
8273 int32_t status;
8274
8275 status = wm_read_ich8_data(sc, index, 2, data);
8276 return status;
8277 }
8278
8279 /******************************************************************************
8280 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
8281 * register.
8282 *
8283 * sc - Struct containing variables accessed by shared code
8284 * offset - offset of word in the EEPROM to read
8285 * data - word read from the EEPROM
8286 * words - number of words to read
8287 *****************************************************************************/
8288 static int
8289 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
8290 {
8291 int32_t error = 0;
8292 uint32_t flash_bank = 0;
8293 uint32_t act_offset = 0;
8294 uint32_t bank_offset = 0;
8295 uint16_t word = 0;
8296 uint16_t i = 0;
8297
8298 /*
8299 * We need to know which is the valid flash bank. In the event
8300 * that we didn't allocate eeprom_shadow_ram, we may not be
8301 * managing flash_bank. So it cannot be trusted and needs
8302 * to be updated with each read.
8303 */
8304 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
8305 if (error) {
8306 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
8307 device_xname(sc->sc_dev)));
8308 flash_bank = 0;
8309 }
8310
8311 /*
8312 	 * Compute the byte offset of the selected bank; the bank size is
8313 	 * counted in words, hence the * 2.
8314 */
8315 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
8316
8317 error = wm_get_swfwhw_semaphore(sc);
8318 if (error) {
8319 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8320 __func__);
8321 return error;
8322 }
8323
8324 for (i = 0; i < words; i++) {
8325 /* The NVM part needs a byte offset, hence * 2 */
8326 act_offset = bank_offset + ((offset + i) * 2);
8327 error = wm_read_ich8_word(sc, act_offset, &word);
8328 if (error) {
8329 aprint_error_dev(sc->sc_dev,
8330 "%s: failed to read NVM\n", __func__);
8331 break;
8332 }
8333 data[i] = word;
8334 }
8335
8336 wm_put_swfwhw_semaphore(sc);
8337 return error;
8338 }
8339
8340 /* Lock, detecting NVM type, validate checksum and read */
8341
8342 /*
8343 * wm_nvm_acquire:
8344 *
8345 * Perform the EEPROM handshake required on some chips.
8346 */
8347 static int
8348 wm_nvm_acquire(struct wm_softc *sc)
8349 {
8350 uint32_t reg;
8351 int x;
8352 int ret = 0;
8353
8354 	/* Always succeeds: flash-type NVM needs no handshake */
8355 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8356 return 0;
8357
8358 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8359 ret = wm_get_swfwhw_semaphore(sc);
8360 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
8361 /* This will also do wm_get_swsm_semaphore() if needed */
8362 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
8363 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
8364 ret = wm_get_swsm_semaphore(sc);
8365 }
8366
8367 if (ret) {
8368 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8369 __func__);
8370 return 1;
8371 }
8372
8373 if (sc->sc_flags & WM_F_LOCK_EECD) {
8374 reg = CSR_READ(sc, WMREG_EECD);
8375
8376 /* Request EEPROM access. */
8377 reg |= EECD_EE_REQ;
8378 CSR_WRITE(sc, WMREG_EECD, reg);
8379
8380 /* ..and wait for it to be granted. */
8381 for (x = 0; x < 1000; x++) {
8382 reg = CSR_READ(sc, WMREG_EECD);
8383 if (reg & EECD_EE_GNT)
8384 break;
8385 delay(5);
8386 }
8387 if ((reg & EECD_EE_GNT) == 0) {
8388 aprint_error_dev(sc->sc_dev,
8389 "could not acquire EEPROM GNT\n");
8390 reg &= ~EECD_EE_REQ;
8391 CSR_WRITE(sc, WMREG_EECD, reg);
8392 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8393 wm_put_swfwhw_semaphore(sc);
8394 if (sc->sc_flags & WM_F_LOCK_SWFW)
8395 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8396 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8397 wm_put_swsm_semaphore(sc);
8398 return 1;
8399 }
8400 }
8401
8402 return 0;
8403 }
8404
8405 /*
8406 * wm_nvm_release:
8407 *
8408 * Release the EEPROM mutex.
8409 */
8410 static void
8411 wm_nvm_release(struct wm_softc *sc)
8412 {
8413 uint32_t reg;
8414
8415 	/* Nothing to release for flash-type NVM */
8416 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8417 return;
8418
8419 if (sc->sc_flags & WM_F_LOCK_EECD) {
8420 reg = CSR_READ(sc, WMREG_EECD);
8421 reg &= ~EECD_EE_REQ;
8422 CSR_WRITE(sc, WMREG_EECD, reg);
8423 }
8424
8425 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8426 wm_put_swfwhw_semaphore(sc);
8427 if (sc->sc_flags & WM_F_LOCK_SWFW)
8428 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8429 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8430 wm_put_swsm_semaphore(sc);
8431 }
8432
8433 static int
8434 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
8435 {
8436 uint32_t eecd = 0;
8437
8438 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
8439 || sc->sc_type == WM_T_82583) {
8440 eecd = CSR_READ(sc, WMREG_EECD);
8441
8442 /* Isolate bits 15 & 16 */
8443 eecd = ((eecd >> 15) & 0x03);
8444
8445 /* If both bits are set, device is Flash type */
8446 if (eecd == 0x03)
8447 return 0;
8448 }
8449 return 1;
8450 }
8451
8452 /*
8453 * wm_nvm_validate_checksum
8454 *
8455 * The checksum is defined as the sum of the first 64 (16 bit) words.
8456 */
8457 static int
8458 wm_nvm_validate_checksum(struct wm_softc *sc)
8459 {
8460 uint16_t checksum;
8461 uint16_t eeprom_data;
8462 #ifdef WM_DEBUG
8463 uint16_t csum_wordaddr, valid_checksum;
8464 #endif
8465 int i;
8466
8467 checksum = 0;
8468
8469 /* Don't check for I211 */
8470 if (sc->sc_type == WM_T_I211)
8471 return 0;
8472
8473 #ifdef WM_DEBUG
8474 if (sc->sc_type == WM_T_PCH_LPT) {
8475 csum_wordaddr = NVM_OFF_COMPAT;
8476 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
8477 } else {
8478 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
8479 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
8480 }
8481
8482 /* Dump EEPROM image for debug */
8483 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8484 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8485 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
8486 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
8487 if ((eeprom_data & valid_checksum) == 0) {
8488 DPRINTF(WM_DEBUG_NVM,
8489 ("%s: NVM need to be updated (%04x != %04x)\n",
8490 device_xname(sc->sc_dev), eeprom_data,
8491 valid_checksum));
8492 }
8493 }
8494
8495 if ((wm_debug & WM_DEBUG_NVM) != 0) {
8496 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
8497 for (i = 0; i < NVM_SIZE; i++) {
8498 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8499 printf("XXXX ");
8500 else
8501 printf("%04hx ", eeprom_data);
8502 if (i % 8 == 7)
8503 printf("\n");
8504 }
8505 }
8506
8507 #endif /* WM_DEBUG */
8508
8509 for (i = 0; i < NVM_SIZE; i++) {
8510 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8511 return 1;
8512 checksum += eeprom_data;
8513 }
8514
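	/* A mismatch is only reported under WM_DEBUG, not treated as fatal. */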
8515 if (checksum != (uint16_t) NVM_CHECKSUM) {
8516 #ifdef WM_DEBUG
8517 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
8518 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
8519 #endif
8520 }
8521
8522 return 0;
8523 }
8524
8525 /*
8526 * wm_nvm_read:
8527 *
8528 * Read data from the serial EEPROM.
8529 */
8530 static int
8531 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8532 {
8533 int rv;
8534
8535 if (sc->sc_flags & WM_F_EEPROM_INVALID)
8536 return 1;
8537
8538 if (wm_nvm_acquire(sc))
8539 return 1;
8540
8541 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8542 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8543 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
8544 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
8545 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
8546 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
8547 else if (sc->sc_flags & WM_F_EEPROM_SPI)
8548 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
8549 else
8550 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
8551
8552 wm_nvm_release(sc);
8553 return rv;
8554 }
8555
8556 /*
8557 * Hardware semaphores.
8558  * Very complex...
8559 */
8560
8561 static int
8562 wm_get_swsm_semaphore(struct wm_softc *sc)
8563 {
8564 int32_t timeout;
8565 uint32_t swsm;
8566
8567 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8568 /* Get the SW semaphore. */
8569 timeout = sc->sc_nvm_wordsize + 1;
8570 while (timeout) {
8571 swsm = CSR_READ(sc, WMREG_SWSM);
8572
8573 if ((swsm & SWSM_SMBI) == 0)
8574 break;
8575
8576 delay(50);
8577 timeout--;
8578 }
8579
8580 if (timeout == 0) {
8581 aprint_error_dev(sc->sc_dev,
8582 "could not acquire SWSM SMBI\n");
8583 return 1;
8584 }
8585 }
8586
8587 /* Get the FW semaphore. */
8588 timeout = sc->sc_nvm_wordsize + 1;
8589 while (timeout) {
8590 swsm = CSR_READ(sc, WMREG_SWSM);
8591 swsm |= SWSM_SWESMBI;
8592 CSR_WRITE(sc, WMREG_SWSM, swsm);
8593 /* If we managed to set the bit we got the semaphore. */
8594 swsm = CSR_READ(sc, WMREG_SWSM);
8595 if (swsm & SWSM_SWESMBI)
8596 break;
8597
8598 delay(50);
8599 timeout--;
8600 }
8601
8602 if (timeout == 0) {
8603 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
8604 /* Release semaphores */
8605 wm_put_swsm_semaphore(sc);
8606 return 1;
8607 }
8608 return 0;
8609 }
8610
8611 static void
8612 wm_put_swsm_semaphore(struct wm_softc *sc)
8613 {
8614 uint32_t swsm;
8615
8616 swsm = CSR_READ(sc, WMREG_SWSM);
8617 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
8618 CSR_WRITE(sc, WMREG_SWSM, swsm);
8619 }
8620
8621 static int
8622 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8623 {
8624 uint32_t swfw_sync;
8625 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
8626 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
8627 int timeout = 200;
8628
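	/* Retry for up to ~1 second (200 iterations of 5ms). */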
8629 for (timeout = 0; timeout < 200; timeout++) {
8630 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8631 if (wm_get_swsm_semaphore(sc)) {
8632 aprint_error_dev(sc->sc_dev,
8633 "%s: failed to get semaphore\n",
8634 __func__);
8635 return 1;
8636 }
8637 }
8638 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8639 if ((swfw_sync & (swmask | fwmask)) == 0) {
8640 swfw_sync |= swmask;
8641 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8642 if (sc->sc_flags & WM_F_LOCK_SWSM)
8643 wm_put_swsm_semaphore(sc);
8644 return 0;
8645 }
8646 if (sc->sc_flags & WM_F_LOCK_SWSM)
8647 wm_put_swsm_semaphore(sc);
8648 delay(5000);
8649 }
8650 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
8651 device_xname(sc->sc_dev), mask, swfw_sync);
8652 return 1;
8653 }
8654
8655 static void
8656 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8657 {
8658 uint32_t swfw_sync;
8659
8660 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8661 while (wm_get_swsm_semaphore(sc) != 0)
8662 continue;
8663 }
8664 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8665 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
8666 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8667 if (sc->sc_flags & WM_F_LOCK_SWSM)
8668 wm_put_swsm_semaphore(sc);
8669 }
8670
8671 static int
8672 wm_get_swfwhw_semaphore(struct wm_softc *sc)
8673 {
8674 uint32_t ext_ctrl;
8675 int timeout = 200;
8676
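	/* Set SWFLAG and read it back; retry for up to ~1 second. */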
8677 for (timeout = 0; timeout < 200; timeout++) {
8678 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8679 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
8680 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8681
8682 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8683 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
8684 return 0;
8685 delay(5000);
8686 }
8687 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
8688 device_xname(sc->sc_dev), ext_ctrl);
8689 return 1;
8690 }
8691
8692 static void
8693 wm_put_swfwhw_semaphore(struct wm_softc *sc)
8694 {
8695 uint32_t ext_ctrl;
8696 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8697 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
8698 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8699 }
8700
8701 static int
8702 wm_get_hw_semaphore_82573(struct wm_softc *sc)
8703 {
8704 int i = 0;
8705 uint32_t reg;
8706
8707 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8708 do {
8709 CSR_WRITE(sc, WMREG_EXTCNFCTR,
8710 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
8711 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8712 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
8713 break;
8714 delay(2*1000);
8715 i++;
8716 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
8717
8718 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
8719 wm_put_hw_semaphore_82573(sc);
8720 log(LOG_ERR, "%s: Driver can't access the PHY\n",
8721 device_xname(sc->sc_dev));
8722 return -1;
8723 }
8724
8725 return 0;
8726 }
8727
8728 static void
8729 wm_put_hw_semaphore_82573(struct wm_softc *sc)
8730 {
8731 uint32_t reg;
8732
8733 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8734 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
8735 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8736 }
8737
8738 /*
8739 * Management mode and power management related subroutines.
8740 * BMC, AMT, suspend/resume and EEE.
8741 */
8742
8743 static int
8744 wm_check_mng_mode(struct wm_softc *sc)
8745 {
8746 int rv;
8747
8748 switch (sc->sc_type) {
8749 case WM_T_ICH8:
8750 case WM_T_ICH9:
8751 case WM_T_ICH10:
8752 case WM_T_PCH:
8753 case WM_T_PCH2:
8754 case WM_T_PCH_LPT:
8755 rv = wm_check_mng_mode_ich8lan(sc);
8756 break;
8757 case WM_T_82574:
8758 case WM_T_82583:
8759 rv = wm_check_mng_mode_82574(sc);
8760 break;
8761 case WM_T_82571:
8762 case WM_T_82572:
8763 case WM_T_82573:
8764 case WM_T_80003:
8765 rv = wm_check_mng_mode_generic(sc);
8766 break;
8767 default:
8768 		/* nothing to do */
8769 rv = 0;
8770 break;
8771 }
8772
8773 return rv;
8774 }
8775
8776 static int
8777 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
8778 {
8779 uint32_t fwsm;
8780
8781 fwsm = CSR_READ(sc, WMREG_FWSM);
8782
8783 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
8784 return 1;
8785
8786 return 0;
8787 }
8788
8789 static int
8790 wm_check_mng_mode_82574(struct wm_softc *sc)
8791 {
8792 uint16_t data;
8793
8794 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
8795
8796 if ((data & NVM_CFG2_MNGM_MASK) != 0)
8797 return 1;
8798
8799 return 0;
8800 }
8801
8802 static int
8803 wm_check_mng_mode_generic(struct wm_softc *sc)
8804 {
8805 uint32_t fwsm;
8806
8807 fwsm = CSR_READ(sc, WMREG_FWSM);
8808
8809 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
8810 return 1;
8811
8812 return 0;
8813 }
8814
8815 static int
8816 wm_enable_mng_pass_thru(struct wm_softc *sc)
8817 {
8818 uint32_t manc, fwsm, factps;
8819
8820 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
8821 return 0;
8822
8823 manc = CSR_READ(sc, WMREG_MANC);
8824
8825 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
8826 device_xname(sc->sc_dev), manc));
8827 if ((manc & MANC_RECV_TCO_EN) == 0)
8828 return 0;
8829
8830 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
8831 fwsm = CSR_READ(sc, WMREG_FWSM);
8832 factps = CSR_READ(sc, WMREG_FACTPS);
8833 if (((factps & FACTPS_MNGCG) == 0)
8834 && ((fwsm & FWSM_MODE_MASK)
8835 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
8836 return 1;
8837 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
8838 uint16_t data;
8839
8840 factps = CSR_READ(sc, WMREG_FACTPS);
8841 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
8842 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
8843 device_xname(sc->sc_dev), factps, data));
8844 if (((factps & FACTPS_MNGCG) == 0)
8845 && ((data & NVM_CFG2_MNGM_MASK)
8846 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
8847 return 1;
8848 } else if (((manc & MANC_SMBUS_EN) != 0)
8849 && ((manc & MANC_ASF_EN) == 0))
8850 return 1;
8851
8852 return 0;
8853 }
8854
8855 static int
8856 wm_check_reset_block(struct wm_softc *sc)
8857 {
8858 uint32_t reg;
8859
8860 switch (sc->sc_type) {
8861 case WM_T_ICH8:
8862 case WM_T_ICH9:
8863 case WM_T_ICH10:
8864 case WM_T_PCH:
8865 case WM_T_PCH2:
8866 case WM_T_PCH_LPT:
8867 reg = CSR_READ(sc, WMREG_FWSM);
8868 if ((reg & FWSM_RSPCIPHY) != 0)
8869 return 0;
8870 else
8871 return -1;
8872 break;
8873 case WM_T_82571:
8874 case WM_T_82572:
8875 case WM_T_82573:
8876 case WM_T_82574:
8877 case WM_T_82583:
8878 case WM_T_80003:
8879 reg = CSR_READ(sc, WMREG_MANC);
8880 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
8881 return -1;
8882 else
8883 return 0;
8884 break;
8885 default:
8886 /* no problem */
8887 break;
8888 }
8889
8890 return 0;
8891 }
8892
8893 static void
8894 wm_get_hw_control(struct wm_softc *sc)
8895 {
8896 uint32_t reg;
8897
8898 switch (sc->sc_type) {
8899 case WM_T_82573:
8900 reg = CSR_READ(sc, WMREG_SWSM);
8901 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
8902 break;
8903 case WM_T_82571:
8904 case WM_T_82572:
8905 case WM_T_82574:
8906 case WM_T_82583:
8907 case WM_T_80003:
8908 case WM_T_ICH8:
8909 case WM_T_ICH9:
8910 case WM_T_ICH10:
8911 case WM_T_PCH:
8912 case WM_T_PCH2:
8913 case WM_T_PCH_LPT:
8914 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8915 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
8916 break;
8917 default:
8918 break;
8919 }
8920 }
8921
8922 static void
8923 wm_release_hw_control(struct wm_softc *sc)
8924 {
8925 uint32_t reg;
8926
8927 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
8928 return;
8929
8930 if (sc->sc_type == WM_T_82573) {
8931 reg = CSR_READ(sc, WMREG_SWSM);
8932 reg &= ~SWSM_DRV_LOAD;
8933 		CSR_WRITE(sc, WMREG_SWSM, reg);
8934 } else {
8935 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8936 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
8937 }
8938 }
8939
8940 static void
8941 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
8942 {
8943 uint32_t reg;
8944
8945 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8946
8947 if (on != 0)
8948 reg |= EXTCNFCTR_GATE_PHY_CFG;
8949 else
8950 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
8951
8952 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8953 }
8954
8955 static void
8956 wm_smbustopci(struct wm_softc *sc)
8957 {
8958 uint32_t fwsm;
8959
8960 fwsm = CSR_READ(sc, WMREG_FWSM);
8961 if (((fwsm & FWSM_FW_VALID) == 0)
8962 && ((wm_check_reset_block(sc) == 0))) {
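		/*
		 * Toggle the LANPHYPC pin via the CTRL override bits to
		 * switch the PHY's interface from SMBus back to PCIe.
		 */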
8963 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8964 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8965 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8966 CSR_WRITE_FLUSH(sc);
8967 delay(10);
8968 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8969 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8970 CSR_WRITE_FLUSH(sc);
8971 delay(50*1000);
8972
8973 /*
8974 * Gate automatic PHY configuration by hardware on non-managed
8975 * 82579
8976 */
8977 if (sc->sc_type == WM_T_PCH2)
8978 wm_gate_hw_phy_config_ich8lan(sc, 1);
8979 }
8980 }
8981
8982 static void
8983 wm_init_manageability(struct wm_softc *sc)
8984 {
8985
8986 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8987 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8988 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8989
8990 /* Disable hardware interception of ARP */
8991 manc &= ~MANC_ARP_EN;
8992
8993 /* Enable receiving management packets to the host */
8994 if (sc->sc_type >= WM_T_82571) {
8995 manc |= MANC_EN_MNG2HOST;
8996 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
8997 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
8998
8999 }
9000
9001 CSR_WRITE(sc, WMREG_MANC, manc);
9002 }
9003 }
9004
9005 static void
9006 wm_release_manageability(struct wm_softc *sc)
9007 {
9008
9009 if (sc->sc_flags & WM_F_HAS_MANAGE) {
9010 uint32_t manc = CSR_READ(sc, WMREG_MANC);
9011
9012 manc |= MANC_ARP_EN;
9013 if (sc->sc_type >= WM_T_82571)
9014 manc &= ~MANC_EN_MNG2HOST;
9015
9016 CSR_WRITE(sc, WMREG_MANC, manc);
9017 }
9018 }
9019
9020 static void
9021 wm_get_wakeup(struct wm_softc *sc)
9022 {
9023
9024 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
9025 switch (sc->sc_type) {
9026 case WM_T_82573:
9027 case WM_T_82583:
9028 sc->sc_flags |= WM_F_HAS_AMT;
9029 /* FALLTHROUGH */
9030 case WM_T_80003:
9031 case WM_T_82541:
9032 case WM_T_82547:
9033 case WM_T_82571:
9034 case WM_T_82572:
9035 case WM_T_82574:
9036 case WM_T_82575:
9037 case WM_T_82576:
9038 case WM_T_82580:
9039 case WM_T_I350:
9040 case WM_T_I354:
9041 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
9042 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
9043 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9044 break;
9045 case WM_T_ICH8:
9046 case WM_T_ICH9:
9047 case WM_T_ICH10:
9048 case WM_T_PCH:
9049 case WM_T_PCH2:
9050 case WM_T_PCH_LPT:
9051 sc->sc_flags |= WM_F_HAS_AMT;
9052 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9053 break;
9054 default:
9055 break;
9056 }
9057
9058 /* 1: HAS_MANAGE */
9059 if (wm_enable_mng_pass_thru(sc) != 0)
9060 sc->sc_flags |= WM_F_HAS_MANAGE;
9061
9062 #ifdef WM_DEBUG
9063 printf("\n");
9064 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
9065 printf("HAS_AMT,");
9066 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
9067 printf("ARC_SUBSYS_VALID,");
9068 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
9069 printf("ASF_FIRMWARE_PRES,");
9070 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
9071 printf("HAS_MANAGE,");
9072 printf("\n");
9073 #endif
9074 /*
9075 	 * Note that the WOL flags are set after the EEPROM-related reset
9076 	 * code has run.
9077 */
9078 }
9079
9080 #ifdef WM_WOL
9081 /* WOL in the newer chipset interfaces (pchlan) */
9082 static void
9083 wm_enable_phy_wakeup(struct wm_softc *sc)
9084 {
9085 #if 0
9086 uint16_t preg;
9087
9088 /* Copy MAC RARs to PHY RARs */
9089
9090 /* Copy MAC MTA to PHY MTA */
9091
9092 /* Configure PHY Rx Control register */
9093
9094 /* Enable PHY wakeup in MAC register */
9095
9096 /* Configure and enable PHY wakeup in PHY registers */
9097
9098 /* Activate PHY wakeup */
9099
9100 /* XXX */
9101 #endif
9102 }
9103
9104 /* Power down workaround on D3 */
9105 static void
9106 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
9107 {
9108 uint32_t reg;
9109 int i;
9110
9111 for (i = 0; i < 2; i++) {
9112 /* Disable link */
9113 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9114 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9115 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9116
9117 /*
9118 * Call gig speed drop workaround on Gig disable before
9119 * accessing any PHY registers
9120 */
9121 if (sc->sc_type == WM_T_ICH8)
9122 wm_gig_downshift_workaround_ich8lan(sc);
9123
9124 /* Write VR power-down enable */
9125 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9126 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9127 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
9128 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
9129
9130 /* Read it back and test */
9131 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9132 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9133 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
9134 break;
9135
9136 /* Issue PHY reset and repeat at most one more time */
9137 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9138 }
9139 }
9140
9141 static void
9142 wm_enable_wakeup(struct wm_softc *sc)
9143 {
9144 uint32_t reg, pmreg;
9145 pcireg_t pmode;
9146
9147 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
9148 &pmreg, NULL) == 0)
9149 return;
9150
9151 /* Advertise the wakeup capability */
9152 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
9153 | CTRL_SWDPIN(3));
9154 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
9155
9156 /* ICH workaround */
9157 switch (sc->sc_type) {
9158 case WM_T_ICH8:
9159 case WM_T_ICH9:
9160 case WM_T_ICH10:
9161 case WM_T_PCH:
9162 case WM_T_PCH2:
9163 case WM_T_PCH_LPT:
9164 /* Disable gig during WOL */
9165 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9166 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
9167 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9168 if (sc->sc_type == WM_T_PCH)
9169 wm_gmii_reset(sc);
9170
9171 /* Power down workaround */
9172 if (sc->sc_phytype == WMPHY_82577) {
9173 struct mii_softc *child;
9174
9175 /* Assume that the PHY is copper */
9176 child = LIST_FIRST(&sc->sc_mii.mii_phys);
9177 if (child->mii_mpd_rev <= 2)
9178 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
9179 (768 << 5) | 25, 0x0444); /* magic num */
9180 }
9181 break;
9182 default:
9183 break;
9184 }
9185
9186 /* Keep the laser running on fiber adapters */
9187 	if (((sc->sc_mediatype & WMP_F_FIBER) != 0)
9188 	    || ((sc->sc_mediatype & WMP_F_SERDES) != 0)) {
9189 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9190 reg |= CTRL_EXT_SWDPIN(3);
9191 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9192 }
9193
9194 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
9195 #if 0 /* for the multicast packet */
9196 reg |= WUFC_MC;
9197 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
9198 #endif
9199
9200 if (sc->sc_type == WM_T_PCH) {
9201 wm_enable_phy_wakeup(sc);
9202 } else {
9203 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
9204 CSR_WRITE(sc, WMREG_WUFC, reg);
9205 }
9206
9207 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9208 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9209 || (sc->sc_type == WM_T_PCH2))
9210 && (sc->sc_phytype == WMPHY_IGP_3))
9211 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
9212
9213 /* Request PME */
9214 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
9215 #if 0
9216 /* Disable WOL */
9217 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
9218 #else
9219 /* For WOL */
9220 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
9221 #endif
9222 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
9223 }
9224 #endif /* WM_WOL */
9225
9226 /* EEE */
9227
9228 static void
9229 wm_set_eee_i350(struct wm_softc *sc)
9230 {
9231 uint32_t ipcnfg, eeer;
9232
9233 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
9234 eeer = CSR_READ(sc, WMREG_EEER);
9235
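	/*
	 * When EEE is enabled, advertise it for 100M and 1G
	 * autonegotiation and enable LPI in both directions (plus flow
	 * control on LPI); otherwise clear all of those bits.
	 */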
9236 if ((sc->sc_flags & WM_F_EEE) != 0) {
9237 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9238 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
9239 | EEER_LPI_FC);
9240 } else {
9241 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9242 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
9243 | EEER_LPI_FC);
9244 }
9245
9246 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
9247 CSR_WRITE(sc, WMREG_EEER, eeer);
9248 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
9249 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
9250 }
9251
9252 /*
9253 * Workarounds (mainly PHY related).
9254 * Basically, PHY's workarounds are in the PHY drivers.
9255 */
9256
9257 /* Work-around for 82566 Kumeran PCS lock loss */
9258 static void
9259 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
9260 {
9261 int miistatus, active, i;
9262 int reg;
9263
9264 miistatus = sc->sc_mii.mii_media_status;
9265
9266 /* If the link is not up, do nothing */
9267 	if ((miistatus & IFM_ACTIVE) == 0)
9268 return;
9269
9270 active = sc->sc_mii.mii_media_active;
9271
9272 /* Nothing to do if the link is other than 1Gbps */
9273 if (IFM_SUBTYPE(active) != IFM_1000_T)
9274 return;
9275
9276 for (i = 0; i < 10; i++) {
9277 /* read twice */
9278 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9279 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9280 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
9281 goto out; /* GOOD! */
9282
9283 /* Reset the PHY */
9284 wm_gmii_reset(sc);
9285 delay(5*1000);
9286 }
9287
9288 /* Disable GigE link negotiation */
9289 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9290 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9291 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9292
9293 /*
9294 * Call gig speed drop workaround on Gig disable before accessing
9295 * any PHY registers.
9296 */
9297 wm_gig_downshift_workaround_ich8lan(sc);
9298
9299 out:
9300 return;
9301 }
9302
9303 /* WOL from S5 stops working */
9304 static void
9305 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
9306 {
9307 uint16_t kmrn_reg;
9308
9309 /* Only for igp3 */
9310 if (sc->sc_phytype == WMPHY_IGP_3) {
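		/*
		 * Pulse the Kumeran near-end loopback bit: write the
		 * diagnostic register with the bit set, then clear it
		 * and write it back.
		 */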
9311 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
9312 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
9313 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9314 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
9315 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9316 }
9317 }
9318
9319 /*
9320 * Workaround for pch's PHYs
9321 * XXX should be moved to new PHY driver?
9322 */
9323 static void
9324 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
9325 {
9326 if (sc->sc_phytype == WMPHY_82577)
9327 wm_set_mdio_slow_mode_hv(sc);
9328
9329 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
9330
9331 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
9332
9333 /* 82578 */
9334 if (sc->sc_phytype == WMPHY_82578) {
9335 /* PCH rev. < 3 */
9336 if (sc->sc_rev < 3) {
9337 /* XXX 6 bit shift? Why? Is it page2? */
9338 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
9339 0x66c0);
9340 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
9341 0xffff);
9342 }
9343
9344 /* XXX phy rev. < 2 */
9345 }
9346
9347 /* Select page 0 */
9348
9349 /* XXX acquire semaphore */
9350 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
9351 /* XXX release semaphore */
9352
9353 /*
9354 * Configure the K1 Si workaround during phy reset assuming there is
9355 * link so that it disables K1 if link is in 1Gbps.
9356 */
9357 wm_k1_gig_workaround_hv(sc, 1);
9358 }
9359
9360 static void
9361 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
9362 {
9363
9364 wm_set_mdio_slow_mode_hv(sc);
9365 }
9366
9367 static void
9368 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
9369 {
9370 int k1_enable = sc->sc_nvm_k1_enabled;
9371
9372 /* XXX acquire semaphore */
9373
9374 if (link) {
9375 k1_enable = 0;
9376
9377 /* Link stall fix for link up */
9378 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
9379 } else {
9380 /* Link stall fix for link down */
9381 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
9382 }
9383
9384 wm_configure_k1_ich8lan(sc, k1_enable);
9385
9386 /* XXX release semaphore */
9387 }
9388
9389 static void
9390 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
9391 {
9392 uint32_t reg;
9393
9394 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
9395 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
9396 reg | HV_KMRN_MDIO_SLOW);
9397 }
9398
9399 static void
9400 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
9401 {
9402 uint32_t ctrl, ctrl_ext, tmp;
9403 uint16_t kmrn_reg;
9404
9405 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
9406
9407 if (k1_enable)
9408 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
9409 else
9410 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
9411
9412 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
9413
9414 delay(20);
9415
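	/*
	 * Momentarily force the MAC speed setting (CTRL_FRCSPD with
	 * CTRL_EXT_SPD_BYPS) so the K1 change takes effect, then
	 * restore the original CTRL and CTRL_EXT values.
	 */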
9416 ctrl = CSR_READ(sc, WMREG_CTRL);
9417 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9418
9419 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
9420 tmp |= CTRL_FRCSPD;
9421
9422 CSR_WRITE(sc, WMREG_CTRL, tmp);
9423 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
9424 CSR_WRITE_FLUSH(sc);
9425 delay(20);
9426
9427 CSR_WRITE(sc, WMREG_CTRL, ctrl);
9428 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9429 CSR_WRITE_FLUSH(sc);
9430 delay(20);
9431 }
9432
9433 /* special case - for 82575 - need to do manual init ... */
9434 static void
9435 wm_reset_init_script_82575(struct wm_softc *sc)
9436 {
9437 /*
9438 	 * Remark: this is untested code - we have no board without EEPROM.
9439 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
9440 */
9441
9442 /* SerDes configuration via SERDESCTRL */
9443 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
9444 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
9445 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
9446 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
9447
9448 /* CCM configuration via CCMCTL register */
9449 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
9450 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
9451
9452 /* PCIe lanes configuration */
9453 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
9454 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
9455 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
9456 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
9457
9458 /* PCIe PLL Configuration */
9459 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
9460 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
9461 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
9462 }
9463