/*	$NetBSD: if_wm.c,v 1.206 2010/04/05 07:20:28 joerg Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.206 2010/04/05 07:20:28 joerg Exp $");

#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
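
/*
 * Example of the mask-based ring arithmetic above (a sketch): with
 * WM_NTXDESC(sc) == 4096 the mask is 0xfff, so WM_NEXTTX(sc, 4095)
 * wraps to 0.  This only works because the descriptor and queue
 * lengths above are powers of two.
 */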

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
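
/*
 * Worked example for the sizing comment above (assuming a 9014-byte
 * jumbo frame): howmany(9014, MCLBYTES) == 5 buffers per packet, and
 * 256 / 5 gives the "room for 50 jumbo packets" figure.
 */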

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
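
/*
 * Note: the Rx chain is kept as a singly-linked mbuf list with a
 * pointer to the tail pointer (sc_rxtailp), so WM_RXCHAIN_LINK()
 * appends a buffer in O(1) without walking the chain.
 */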

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
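
/*
 * Example (a sketch): in a 256-entry ring, WM_CDTXSYNC(sc, 254, 4, ops)
 * issues two bus_dmamap_sync() calls, one covering descriptors 254-255
 * and a second covering descriptors 0-1.
 */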

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(device_t);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82578LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82578LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
#if 0
	/*
	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
	 * disabled for now ...
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
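
/*
 * The helper above splits a bus address into the two little-endian
 * 32-bit words the wiseman descriptor format expects; on 32-bit
 * platforms the high word is always zero.
 */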

static void
wm_set_spiaddrbits(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, sc->sc_rev);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	wm_get_wakeup(sc);

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: unable to find I/O BAR\n");
		else {
			/*
			 * Apparently the i8254x doesn't respond when
			 * the I/O BAR is 0, which looks as if it has
			 * not been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
				    0, &sc->sc_iot, &sc->sc_ioh,
				    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)) {
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
			/* ICH* and PCH have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
			    NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
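				/*
				 * (MMRBC is encoded as 512 << n, i.e.
				 * 512, 1024, 2048 or 4096 bytes, which
				 * is what the aprint below prints.)
				 */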
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcixe_capoff + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
	    &sc->sc_cd_rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
	    sc->sc_cd_rseg, sc->sc_cd_size,
	    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
	    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/*
	 * Get some information about the EEPROM.
	 */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Microwire */
		sc->sc_ee_addrbits = 6;
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		/* Microwire */
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			/* SPI */
			wm_set_spiaddrbits(sc);
		} else
			/* Microwire */
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82571:
	case WM_T_82572:
		/* SPI */
		wm_set_spiaddrbits(sc);
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (wm_is_onboard_nvm_eeprom(sc) == 0)
			sc->sc_flags |= WM_F_EEPROM_FLASH;
		else {
			/* SPI */
			wm_set_spiaddrbits(sc);
		}
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_80003:
		/* SPI */
		wm_set_spiaddrbits(sc);
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		/* FLASH */
		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (reg & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
		break;
	default:
		break;
	}
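
	/*
	 * A note on the GFPREG arithmetic above (an interpretation, not
	 * from the original sources): the low and high halves of GFPREG
	 * give the first and last ICH_FLASH_SECTOR_SIZE-byte sectors of
	 * the gigabit flash region; the difference is converted to bytes,
	 * and the division by 2 * sizeof(uint16_t) both halves the region
	 * into its two banks and converts bytes to 16-bit words.
	 */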

	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */
	/*
	 * Validate the EEPROM checksum.  If the checksum fails, flag
	 * this for later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc)) {
		/*
		 * Retry the validation, because some PCI-e parts fail
		 * the first check due to the link being in sleep state.
		 */
		if (wm_validate_eeprom_checksum(sc))
			sc->sc_flags |= WM_F_EEPROM_INVALID;
	}

	/* Set device properties (macflags) */
	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose_dev(sc->sc_dev,
		    "%u word (%d address bits) %s EEPROM\n",
		    1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, if not first found
	 * in device properties.
	 */
	ea = prop_dictionary_get(dict, "mac-address");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error_dev(sc->sc_dev,
			    "unable to read Ethernet address\n");
			return;
		}
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
		if (sc->sc_funcid == 1)
			enaddr[5] ^= 1;
	}
1632
1633 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1634 ether_sprintf(enaddr));
1635
1636 /*
1637 * Read the config info from the EEPROM, and set up various
1638 * bits in the control registers based on their contents.
1639 */
1640 pn = prop_dictionary_get(dict, "i82543-cfg1");
1641 if (pn != NULL) {
1642 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1643 cfg1 = (uint16_t) prop_number_integer_value(pn);
1644 } else {
1645 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1646 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1647 return;
1648 }
1649 }
1650
1651 pn = prop_dictionary_get(dict, "i82543-cfg2");
1652 if (pn != NULL) {
1653 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1654 cfg2 = (uint16_t) prop_number_integer_value(pn);
1655 } else {
1656 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1657 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1658 return;
1659 }
1660 }
1661
1662 /* check for WM_F_WOL */
1663 switch (sc->sc_type) {
1664 case WM_T_82542_2_0:
1665 case WM_T_82542_2_1:
1666 case WM_T_82543:
1667 /* dummy? */
1668 eeprom_data = 0;
1669 apme_mask = EEPROM_CFG3_APME;
1670 break;
1671 case WM_T_82544:
1672 apme_mask = EEPROM_CFG2_82544_APM_EN;
1673 eeprom_data = cfg2;
1674 break;
1675 case WM_T_82546:
1676 case WM_T_82546_3:
1677 case WM_T_82571:
1678 case WM_T_82572:
1679 case WM_T_82573:
1680 case WM_T_82574:
1681 case WM_T_82583:
1682 case WM_T_80003:
1683 default:
1684 apme_mask = EEPROM_CFG3_APME;
1685 wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1686 : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1687 break;
1688 case WM_T_82575:
1689 case WM_T_82576:
1690 case WM_T_82580:
1691 case WM_T_82580ER:
1692 case WM_T_ICH8:
1693 case WM_T_ICH9:
1694 case WM_T_ICH10:
1695 case WM_T_PCH:
1696 apme_mask = WUC_APME;
1697 eeprom_data = CSR_READ(sc, WMREG_WUC);
1698 break;
1699 }
1700
1701 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
1702 if ((eeprom_data & apme_mask) != 0)
1703 sc->sc_flags |= WM_F_WOL;
1704 #ifdef WM_DEBUG
1705 if ((sc->sc_flags & WM_F_WOL) != 0)
1706 printf("WOL\n");
1707 #endif
1708
1709 /*
1710 * XXX need special handling for some multiple port cards
1711 	 * to disable a particular port.
1712 */
1713
1714 if (sc->sc_type >= WM_T_82544) {
1715 pn = prop_dictionary_get(dict, "i82543-swdpin");
1716 if (pn != NULL) {
1717 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1718 swdpin = (uint16_t) prop_number_integer_value(pn);
1719 } else {
1720 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1721 aprint_error_dev(sc->sc_dev,
1722 "unable to read SWDPIN\n");
1723 return;
1724 }
1725 }
1726 }
1727
1728 if (cfg1 & EEPROM_CFG1_ILOS)
1729 sc->sc_ctrl |= CTRL_ILOS;
1730 if (sc->sc_type >= WM_T_82544) {
1731 sc->sc_ctrl |=
1732 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1733 CTRL_SWDPIO_SHIFT;
1734 sc->sc_ctrl |=
1735 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1736 CTRL_SWDPINS_SHIFT;
1737 } else {
1738 sc->sc_ctrl |=
1739 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1740 CTRL_SWDPIO_SHIFT;
1741 }
1742
1743 #if 0
1744 if (sc->sc_type >= WM_T_82544) {
1745 if (cfg1 & EEPROM_CFG1_IPS0)
1746 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1747 if (cfg1 & EEPROM_CFG1_IPS1)
1748 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1749 sc->sc_ctrl_ext |=
1750 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1751 CTRL_EXT_SWDPIO_SHIFT;
1752 sc->sc_ctrl_ext |=
1753 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1754 CTRL_EXT_SWDPINS_SHIFT;
1755 } else {
1756 sc->sc_ctrl_ext |=
1757 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1758 CTRL_EXT_SWDPIO_SHIFT;
1759 }
1760 #endif
1761
1762 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1763 #if 0
1764 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1765 #endif
1766
1767 /*
1768 * Set up some register offsets that are different between
1769 * the i82542 and the i82543 and later chips.
1770 */
1771 if (sc->sc_type < WM_T_82543) {
1772 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1773 sc->sc_tdt_reg = WMREG_OLD_TDT;
1774 } else {
1775 sc->sc_rdt_reg = WMREG_RDT;
1776 sc->sc_tdt_reg = WMREG_TDT;
1777 }
1778
1779 if (sc->sc_type == WM_T_PCH) {
1780 uint16_t val;
1781
1782 /* Save the NVM K1 bit setting */
1783 wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1784
1785 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1786 sc->sc_nvm_k1_enabled = 1;
1787 else
1788 sc->sc_nvm_k1_enabled = 0;
1789 }
1790
1791 /*
1792 	 * Determine whether we're in TBI, GMII or SGMII mode, and
1793 	 * initialize the media structures accordingly.
1794 */
1795 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1796 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1797 || sc->sc_type == WM_T_82573
1798 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1799 /* STATUS_TBIMODE reserved/reused, can't rely on it */
1800 wm_gmii_mediainit(sc, wmp->wmp_product);
1801 } else if (sc->sc_type < WM_T_82543 ||
1802 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1803 if (wmp->wmp_flags & WMP_F_1000T)
1804 aprint_error_dev(sc->sc_dev,
1805 "WARNING: TBIMODE set on 1000BASE-T product!\n");
1806 wm_tbi_mediainit(sc);
1807 } else {
1808 switch (sc->sc_type) {
1809 case WM_T_82575:
1810 case WM_T_82576:
1811 case WM_T_82580:
1812 case WM_T_82580ER:
1813 reg = CSR_READ(sc, WMREG_CTRL_EXT);
1814 switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1815 case CTRL_EXT_LINK_MODE_SGMII:
1816 aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1817 sc->sc_flags |= WM_F_SGMII;
1818 CSR_WRITE(sc, WMREG_CTRL_EXT,
1819 reg | CTRL_EXT_I2C_ENA);
1820 wm_gmii_mediainit(sc, wmp->wmp_product);
1821 break;
1822 case CTRL_EXT_LINK_MODE_1000KX:
1823 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1824 aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1825 CSR_WRITE(sc, WMREG_CTRL_EXT,
1826 reg | CTRL_EXT_I2C_ENA);
1827 panic("not supported yet\n");
1828 break;
1829 case CTRL_EXT_LINK_MODE_GMII:
1830 default:
1831 CSR_WRITE(sc, WMREG_CTRL_EXT,
1832 reg & ~CTRL_EXT_I2C_ENA);
1833 wm_gmii_mediainit(sc, wmp->wmp_product);
1834 break;
1835 }
1836 break;
1837 default:
1838 if (wmp->wmp_flags & WMP_F_1000X)
1839 aprint_error_dev(sc->sc_dev,
1840 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1841 wm_gmii_mediainit(sc, wmp->wmp_product);
1842 }
1843 }
1844
1845 ifp = &sc->sc_ethercom.ec_if;
1846 xname = device_xname(sc->sc_dev);
1847 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1848 ifp->if_softc = sc;
1849 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1850 ifp->if_ioctl = wm_ioctl;
1851 ifp->if_start = wm_start;
1852 ifp->if_watchdog = wm_watchdog;
1853 ifp->if_init = wm_init;
1854 ifp->if_stop = wm_stop;
1855 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1856 IFQ_SET_READY(&ifp->if_snd);
1857
1858 /* Check for jumbo frame */
1859 switch (sc->sc_type) {
1860 case WM_T_82573:
1861 /* XXX limited to 9234 if ASPM is disabled */
1862 wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1863 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1864 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1865 break;
1866 case WM_T_82571:
1867 case WM_T_82572:
1868 case WM_T_82574:
1869 case WM_T_82575:
1870 case WM_T_82576:
1871 case WM_T_82580:
1872 case WM_T_82580ER:
1873 case WM_T_80003:
1874 case WM_T_ICH9:
1875 case WM_T_ICH10:
1876 /* XXX limited to 9234 */
1877 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1878 break;
1879 case WM_T_PCH:
1880 /* XXX limited to 4096 */
1881 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1882 break;
1883 case WM_T_82542_2_0:
1884 case WM_T_82542_2_1:
1885 case WM_T_82583:
1886 case WM_T_ICH8:
1887 		/* No support for jumbo frames */
1888 break;
1889 default:
1890 /* ETHER_MAX_LEN_JUMBO */
1891 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1892 break;
1893 }
1894
1895 /*
1896 	 * If we're an i82543 or greater, we can support VLANs.
1897 */
1898 if (sc->sc_type >= WM_T_82543)
1899 sc->sc_ethercom.ec_capabilities |=
1900 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1901
1902 /*
1903 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but
1904 	 * only on i82543 and later.
1905 */
1906 if (sc->sc_type >= WM_T_82543) {
1907 ifp->if_capabilities |=
1908 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1909 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1910 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1911 IFCAP_CSUM_TCPv6_Tx |
1912 IFCAP_CSUM_UDPv6_Tx;
1913 }
1914
1915 /*
1916 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1917 *
1918 * 82541GI (8086:1076) ... no
1919 * 82572EI (8086:10b9) ... yes
1920 */
1921 if (sc->sc_type >= WM_T_82571) {
1922 ifp->if_capabilities |=
1923 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1924 }
1925
1926 /*
1927 	 * If we're an i82544 or greater (except the i82547), we can do
1928 * TCP segmentation offload.
1929 */
1930 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1931 ifp->if_capabilities |= IFCAP_TSOv4;
1932 }
1933
1934 if (sc->sc_type >= WM_T_82571) {
1935 ifp->if_capabilities |= IFCAP_TSOv6;
1936 }
1937
1938 /*
1939 * Attach the interface.
1940 */
1941 if_attach(ifp);
1942 ether_ifattach(ifp, enaddr);
1943 #if NRND > 0
1944 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1945 #endif
1946
1947 #ifdef WM_EVENT_COUNTERS
1948 /* Attach event counters. */
1949 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1950 NULL, xname, "txsstall");
1951 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1952 NULL, xname, "txdstall");
1953 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1954 NULL, xname, "txfifo_stall");
1955 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1956 NULL, xname, "txdw");
1957 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1958 NULL, xname, "txqe");
1959 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1960 NULL, xname, "rxintr");
1961 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1962 NULL, xname, "linkintr");
1963
1964 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1965 NULL, xname, "rxipsum");
1966 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1967 NULL, xname, "rxtusum");
1968 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1969 NULL, xname, "txipsum");
1970 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1971 NULL, xname, "txtusum");
1972 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1973 NULL, xname, "txtusum6");
1974
1975 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1976 NULL, xname, "txtso");
1977 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1978 NULL, xname, "txtso6");
1979 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1980 NULL, xname, "txtsopain");
1981
1982 for (i = 0; i < WM_NTXSEGS; i++) {
1983 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1984 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1985 NULL, xname, wm_txseg_evcnt_names[i]);
1986 }
1987
1988 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1989 NULL, xname, "txdrop");
1990
1991 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1992 NULL, xname, "tu");
1993
1994 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1995 NULL, xname, "tx_xoff");
1996 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1997 NULL, xname, "tx_xon");
1998 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1999 NULL, xname, "rx_xoff");
2000 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2001 NULL, xname, "rx_xon");
2002 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2003 NULL, xname, "rx_macctl");
2004 #endif /* WM_EVENT_COUNTERS */
2005
2006 if (pmf_device_register(self, wm_suspend, wm_resume))
2007 pmf_class_network_register(self, ifp);
2008 else
2009 aprint_error_dev(self, "couldn't establish power handler\n");
2010
2011 return;
2012
2013 /*
2014 * Free any resources we've allocated during the failed attach
2015 * attempt. Do this in reverse order and fall through.
2016 */
2017 fail_5:
2018 for (i = 0; i < WM_NRXDESC; i++) {
2019 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2020 bus_dmamap_destroy(sc->sc_dmat,
2021 sc->sc_rxsoft[i].rxs_dmamap);
2022 }
2023 fail_4:
2024 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2025 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2026 bus_dmamap_destroy(sc->sc_dmat,
2027 sc->sc_txsoft[i].txs_dmamap);
2028 }
2029 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2030 fail_3:
2031 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2032 fail_2:
2033 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2034 sc->sc_cd_size);
2035 fail_1:
2036 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2037 fail_0:
2038 return;
2039 }
2040
2041 static int
2042 wm_detach(device_t self, int flags __unused)
2043 {
2044 struct wm_softc *sc = device_private(self);
2045 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2046 int i, s;
2047
2048 s = splnet();
2049 	/* Stop the interface; callouts are stopped inside it. */
2050 wm_stop(ifp, 1);
2051 splx(s);
2052
2053 pmf_device_deregister(self);
2054
2055 /* Tell the firmware about the release */
2056 wm_release_manageability(sc);
2057
2058 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2059
2060 /* Delete all remaining media. */
2061 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2062
2063 ether_ifdetach(ifp);
2064 if_detach(ifp);
2065
2066
2067 /* Unload RX dmamaps and free mbufs */
2068 wm_rxdrain(sc);
2069
2070 	/* Free DMA maps; same as the failure path at the end of wm_attach() */
2071 for (i = 0; i < WM_NRXDESC; i++) {
2072 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2073 bus_dmamap_destroy(sc->sc_dmat,
2074 sc->sc_rxsoft[i].rxs_dmamap);
2075 }
2076 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2077 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2078 bus_dmamap_destroy(sc->sc_dmat,
2079 sc->sc_txsoft[i].txs_dmamap);
2080 }
2081 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2082 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2083 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2084 sc->sc_cd_size);
2085 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2086
2087 /* Disestablish the interrupt handler */
2088 if (sc->sc_ih != NULL) {
2089 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2090 sc->sc_ih = NULL;
2091 }
2092
2093 	/* Unmap the registers */
2094 if (sc->sc_ss) {
2095 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2096 sc->sc_ss = 0;
2097 }
2098
2099 wm_release_hw_control(sc);
2100
2101 return 0;
2102 }
2103
2104 /*
2105 * wm_tx_offload:
2106 *
2107 * Set up TCP/IP checksumming parameters for the
2108 * specified packet.
2109 */
2110 static int
2111 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2112 uint8_t *fieldsp)
2113 {
2114 struct mbuf *m0 = txs->txs_mbuf;
2115 struct livengood_tcpip_ctxdesc *t;
2116 uint32_t ipcs, tucs, cmd, cmdlen, seg;
2117 uint32_t ipcse;
2118 struct ether_header *eh;
2119 int offset, iphl;
2120 uint8_t fields;
2121
2122 /*
2123 * XXX It would be nice if the mbuf pkthdr had offset
2124 * fields for the protocol headers.
2125 */
2126
2127 eh = mtod(m0, struct ether_header *);
2128 switch (htons(eh->ether_type)) {
2129 case ETHERTYPE_IP:
2130 case ETHERTYPE_IPV6:
2131 offset = ETHER_HDR_LEN;
2132 break;
2133
2134 case ETHERTYPE_VLAN:
2135 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2136 break;
2137
2138 default:
2139 /*
2140 * Don't support this protocol or encapsulation.
2141 */
2142 *fieldsp = 0;
2143 *cmdp = 0;
2144 return 0;
2145 }
2146
2147 if ((m0->m_pkthdr.csum_flags &
2148 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2149 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2150 } else {
2151 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2152 }
2153 ipcse = offset + iphl - 1;
2154
2155 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2156 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2157 seg = 0;
2158 fields = 0;
2159
2160 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2161 int hlen = offset + iphl;
2162 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2163
2164 if (__predict_false(m0->m_len <
2165 (hlen + sizeof(struct tcphdr)))) {
2166 /*
2167 * TCP/IP headers are not in the first mbuf; we need
2168 * to do this the slow and painful way. Let's just
2169 * hope this doesn't happen very often.
2170 */
2171 struct tcphdr th;
2172
2173 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2174
2175 m_copydata(m0, hlen, sizeof(th), &th);
2176 if (v4) {
2177 struct ip ip;
2178
2179 m_copydata(m0, offset, sizeof(ip), &ip);
2180 ip.ip_len = 0;
2181 m_copyback(m0,
2182 offset + offsetof(struct ip, ip_len),
2183 sizeof(ip.ip_len), &ip.ip_len);
2184 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2185 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2186 } else {
2187 struct ip6_hdr ip6;
2188
2189 m_copydata(m0, offset, sizeof(ip6), &ip6);
2190 ip6.ip6_plen = 0;
2191 m_copyback(m0,
2192 offset + offsetof(struct ip6_hdr, ip6_plen),
2193 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2194 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2195 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2196 }
2197 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2198 sizeof(th.th_sum), &th.th_sum);
2199
2200 hlen += th.th_off << 2;
2201 } else {
2202 /*
2203 * TCP/IP headers are in the first mbuf; we can do
2204 * this the easy way.
2205 */
2206 struct tcphdr *th;
2207
2208 if (v4) {
2209 struct ip *ip =
2210 (void *)(mtod(m0, char *) + offset);
2211 th = (void *)(mtod(m0, char *) + hlen);
2212
2213 ip->ip_len = 0;
2214 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2215 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2216 } else {
2217 struct ip6_hdr *ip6 =
2218 (void *)(mtod(m0, char *) + offset);
2219 th = (void *)(mtod(m0, char *) + hlen);
2220
2221 ip6->ip6_plen = 0;
2222 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2223 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2224 }
2225 hlen += th->th_off << 2;
2226 }
2227
2228 if (v4) {
2229 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2230 cmdlen |= WTX_TCPIP_CMD_IP;
2231 } else {
2232 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2233 ipcse = 0;
2234 }
2235 cmd |= WTX_TCPIP_CMD_TSE;
2236 cmdlen |= WTX_TCPIP_CMD_TSE |
2237 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2238 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2239 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
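		/*
		 * E.g. for a large TSO send over IPv4 with standard 20-byte
		 * IP and TCP headers: hlen = 14 + 20 + 20 = 54, the payload
		 * length packed into cmdlen is m0->m_pkthdr.len - 54, and
		 * with segsz = 1460 the chip cuts the payload into
		 * MSS-sized frames, regenerating the headers for each.
		 */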
2240 }
2241
2242 /*
2243 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2244 * offload feature, if we load the context descriptor, we
2245 * MUST provide valid values for IPCSS and TUCSS fields.
2246 */
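	/*
	 * For a plain IPv4/TCP frame the context works out to, e.g.:
	 * IPCSS = 14 (IP header starts after the Ethernet header),
	 * IPCSO = 14 + 10 = 24 (offsetof(struct ip, ip_sum)),
	 * IPCSE = 14 + 20 - 1 = 33 (last byte covered by the IP sum),
	 * TUCSS = 34 and TUCSO = 34 + 16 = 50 (offsetof th_sum),
	 * with TUCSE = 0 meaning "to the end of the packet".
	 */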
2247
2248 ipcs = WTX_TCPIP_IPCSS(offset) |
2249 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2250 WTX_TCPIP_IPCSE(ipcse);
2251 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2252 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2253 fields |= WTX_IXSM;
2254 }
2255
2256 offset += iphl;
2257
2258 if (m0->m_pkthdr.csum_flags &
2259 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2260 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2261 fields |= WTX_TXSM;
2262 tucs = WTX_TCPIP_TUCSS(offset) |
2263 WTX_TCPIP_TUCSO(offset +
2264 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2265 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2266 } else if ((m0->m_pkthdr.csum_flags &
2267 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2268 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2269 fields |= WTX_TXSM;
2270 tucs = WTX_TCPIP_TUCSS(offset) |
2271 WTX_TCPIP_TUCSO(offset +
2272 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2273 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2274 } else {
2275 /* Just initialize it to a valid TCP context. */
2276 tucs = WTX_TCPIP_TUCSS(offset) |
2277 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2278 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2279 }
2280
2281 /* Fill in the context descriptor. */
2282 t = (struct livengood_tcpip_ctxdesc *)
2283 &sc->sc_txdescs[sc->sc_txnext];
2284 t->tcpip_ipcs = htole32(ipcs);
2285 t->tcpip_tucs = htole32(tucs);
2286 t->tcpip_cmdlen = htole32(cmdlen);
2287 t->tcpip_seg = htole32(seg);
2288 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2289
2290 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2291 txs->txs_ndesc++;
2292
2293 *cmdp = cmd;
2294 *fieldsp = fields;
2295
2296 return 0;
2297 }
2298
2299 static void
2300 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2301 {
2302 struct mbuf *m;
2303 int i;
2304
2305 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2306 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2307 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2308 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2309 m->m_data, m->m_len, m->m_flags);
2310 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2311 i, i == 1 ? "" : "s");
2312 }
2313
2314 /*
2315 * wm_82547_txfifo_stall:
2316 *
2317 * Callout used to wait for the 82547 Tx FIFO to drain,
2318 * reset the FIFO pointers, and restart packet transmission.
2319 */
2320 static void
2321 wm_82547_txfifo_stall(void *arg)
2322 {
2323 struct wm_softc *sc = arg;
2324 int s;
2325
2326 s = splnet();
2327
2328 if (sc->sc_txfifo_stall) {
2329 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2330 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2331 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2332 /*
2333 * Packets have drained. Stop transmitter, reset
2334 * FIFO pointers, restart transmitter, and kick
2335 * the packet queue.
2336 */
2337 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2338 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2339 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2340 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2341 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2342 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2343 CSR_WRITE(sc, WMREG_TCTL, tctl);
2344 CSR_WRITE_FLUSH(sc);
2345
2346 sc->sc_txfifo_head = 0;
2347 sc->sc_txfifo_stall = 0;
2348 wm_start(&sc->sc_ethercom.ec_if);
2349 } else {
2350 /*
2351 * Still waiting for packets to drain; try again in
2352 * another tick.
2353 */
2354 callout_schedule(&sc->sc_txfifo_ch, 1);
2355 }
2356 }
2357
2358 splx(s);
2359 }
2360
2361 /*
2362 * wm_82547_txfifo_bugchk:
2363 *
2364  *	Check for a bug condition in the 82547 Tx FIFO. We need to
2365  *	prevent enqueueing a packet that would wrap around the end
2366  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
2367 *
2368 * We do this by checking the amount of space before the end
2369 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2370 * the Tx FIFO, wait for all remaining packets to drain, reset
2371 * the internal FIFO pointers to the beginning, and restart
2372 * transmission on the interface.
2373 */
2374 #define WM_FIFO_HDR 0x10
2375 #define WM_82547_PAD_LEN 0x3e0
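/*
 * Each packet occupies its length plus a 16-byte FIFO header,
 * rounded up to a 16-byte boundary; e.g. a 1514-byte frame
 * accounts for roundup(1514 + 0x10, 0x10) = 1536 bytes of
 * FIFO space in the check below.
 */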
2376 static int
2377 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2378 {
2379 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2380 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2381
2382 /* Just return if already stalled. */
2383 if (sc->sc_txfifo_stall)
2384 return 1;
2385
2386 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2387 /* Stall only occurs in half-duplex mode. */
2388 goto send_packet;
2389 }
2390
2391 if (len >= WM_82547_PAD_LEN + space) {
2392 sc->sc_txfifo_stall = 1;
2393 callout_schedule(&sc->sc_txfifo_ch, 1);
2394 return 1;
2395 }
2396
2397 send_packet:
2398 sc->sc_txfifo_head += len;
2399 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2400 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2401
2402 return 0;
2403 }
2404
2405 /*
2406 * wm_start: [ifnet interface function]
2407 *
2408 * Start packet transmission on the interface.
2409 */
2410 static void
2411 wm_start(struct ifnet *ifp)
2412 {
2413 struct wm_softc *sc = ifp->if_softc;
2414 struct mbuf *m0;
2415 struct m_tag *mtag;
2416 struct wm_txsoft *txs;
2417 bus_dmamap_t dmamap;
2418 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2419 bus_addr_t curaddr;
2420 bus_size_t seglen, curlen;
2421 uint32_t cksumcmd;
2422 uint8_t cksumfields;
2423
2424 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2425 return;
2426
2427 /*
2428 * Remember the previous number of free descriptors.
2429 */
2430 ofree = sc->sc_txfree;
2431
2432 /*
2433 * Loop through the send queue, setting up transmit descriptors
2434 * until we drain the queue, or use up all available transmit
2435 * descriptors.
2436 */
2437 for (;;) {
2438 /* Grab a packet off the queue. */
2439 IFQ_POLL(&ifp->if_snd, m0);
2440 if (m0 == NULL)
2441 break;
2442
2443 DPRINTF(WM_DEBUG_TX,
2444 ("%s: TX: have packet to transmit: %p\n",
2445 device_xname(sc->sc_dev), m0));
2446
2447 /* Get a work queue entry. */
2448 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2449 wm_txintr(sc);
2450 if (sc->sc_txsfree == 0) {
2451 DPRINTF(WM_DEBUG_TX,
2452 ("%s: TX: no free job descriptors\n",
2453 device_xname(sc->sc_dev)));
2454 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2455 break;
2456 }
2457 }
2458
2459 txs = &sc->sc_txsoft[sc->sc_txsnext];
2460 dmamap = txs->txs_dmamap;
2461
2462 use_tso = (m0->m_pkthdr.csum_flags &
2463 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2464
2465 /*
2466 * So says the Linux driver:
2467 * The controller does a simple calculation to make sure
2468 * there is enough room in the FIFO before initiating the
2469 * DMA for each buffer. The calc is:
2470 * 4 = ceil(buffer len / MSS)
2471 * To make sure we don't overrun the FIFO, adjust the max
2472 * buffer len if the MSS drops.
2473 */
2474 dmamap->dm_maxsegsz =
2475 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2476 ? m0->m_pkthdr.segsz << 2
2477 : WTX_MAX_LEN;
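		/*
		 * E.g. with an MSS of 1460 each DMA segment is capped at
		 * 1460 << 2 = 5840 bytes, keeping the controller's
		 * assumption of at most 4 MSS-sized chunks per buffer
		 * (per the Linux-derived note above) valid; non-TSO
		 * packets keep the full WTX_MAX_LEN.
		 */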
2478
2479 /*
2480 * Load the DMA map. If this fails, the packet either
2481 * didn't fit in the allotted number of segments, or we
2482 * were short on resources. For the too-many-segments
2483 * case, we simply report an error and drop the packet,
2484 * since we can't sanely copy a jumbo packet to a single
2485 * buffer.
2486 */
2487 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2488 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2489 if (error) {
2490 if (error == EFBIG) {
2491 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2492 log(LOG_ERR, "%s: Tx packet consumes too many "
2493 "DMA segments, dropping...\n",
2494 device_xname(sc->sc_dev));
2495 IFQ_DEQUEUE(&ifp->if_snd, m0);
2496 wm_dump_mbuf_chain(sc, m0);
2497 m_freem(m0);
2498 continue;
2499 }
2500 /*
2501 * Short on resources, just stop for now.
2502 */
2503 DPRINTF(WM_DEBUG_TX,
2504 ("%s: TX: dmamap load failed: %d\n",
2505 device_xname(sc->sc_dev), error));
2506 break;
2507 }
2508
2509 segs_needed = dmamap->dm_nsegs;
2510 if (use_tso) {
2511 /* For sentinel descriptor; see below. */
2512 segs_needed++;
2513 }
2514
2515 /*
2516 * Ensure we have enough descriptors free to describe
2517 * the packet. Note, we always reserve one descriptor
2518 * at the end of the ring due to the semantics of the
2519 * TDT register, plus one more in the event we need
2520 * to load offload context.
2521 */
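		/*
		 * E.g. a TSO packet mapped to 8 DMA segments needs
		 * segs_needed = 9 (sentinel included), so we proceed
		 * only if sc_txfree >= 11: the 9 data/sentinel
		 * descriptors plus the two reserved slots described
		 * above.
		 */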
2522 if (segs_needed > sc->sc_txfree - 2) {
2523 /*
2524 * Not enough free descriptors to transmit this
2525 * packet. We haven't committed anything yet,
2526 * so just unload the DMA map, put the packet
2527 			 * back on the queue, and punt. Notify the upper
2528 * layer that there are no more slots left.
2529 */
2530 DPRINTF(WM_DEBUG_TX,
2531 ("%s: TX: need %d (%d) descriptors, have %d\n",
2532 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2533 segs_needed, sc->sc_txfree - 1));
2534 ifp->if_flags |= IFF_OACTIVE;
2535 bus_dmamap_unload(sc->sc_dmat, dmamap);
2536 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2537 break;
2538 }
2539
2540 /*
2541 * Check for 82547 Tx FIFO bug. We need to do this
2542 * once we know we can transmit the packet, since we
2543 * do some internal FIFO space accounting here.
2544 */
2545 if (sc->sc_type == WM_T_82547 &&
2546 wm_82547_txfifo_bugchk(sc, m0)) {
2547 DPRINTF(WM_DEBUG_TX,
2548 ("%s: TX: 82547 Tx FIFO bug detected\n",
2549 device_xname(sc->sc_dev)));
2550 ifp->if_flags |= IFF_OACTIVE;
2551 bus_dmamap_unload(sc->sc_dmat, dmamap);
2552 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2553 break;
2554 }
2555
2556 IFQ_DEQUEUE(&ifp->if_snd, m0);
2557
2558 /*
2559 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2560 */
2561
2562 DPRINTF(WM_DEBUG_TX,
2563 ("%s: TX: packet has %d (%d) DMA segments\n",
2564 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2565
2566 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2567
2568 /*
2569 * Store a pointer to the packet so that we can free it
2570 * later.
2571 *
2572 * Initially, we consider the number of descriptors the
2573 		 * packet uses to be the number of DMA segments. This may be
2574 * incremented by 1 if we do checksum offload (a descriptor
2575 * is used to set the checksum context).
2576 */
2577 txs->txs_mbuf = m0;
2578 txs->txs_firstdesc = sc->sc_txnext;
2579 txs->txs_ndesc = segs_needed;
2580
2581 /* Set up offload parameters for this packet. */
2582 if (m0->m_pkthdr.csum_flags &
2583 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2584 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2585 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2586 if (wm_tx_offload(sc, txs, &cksumcmd,
2587 &cksumfields) != 0) {
2588 /* Error message already displayed. */
2589 bus_dmamap_unload(sc->sc_dmat, dmamap);
2590 continue;
2591 }
2592 } else {
2593 cksumcmd = 0;
2594 cksumfields = 0;
2595 }
2596
2597 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2598
2599 /* Sync the DMA map. */
2600 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2601 BUS_DMASYNC_PREWRITE);
2602
2603 /*
2604 * Initialize the transmit descriptor.
2605 */
2606 for (nexttx = sc->sc_txnext, seg = 0;
2607 seg < dmamap->dm_nsegs; seg++) {
2608 for (seglen = dmamap->dm_segs[seg].ds_len,
2609 curaddr = dmamap->dm_segs[seg].ds_addr;
2610 seglen != 0;
2611 curaddr += curlen, seglen -= curlen,
2612 nexttx = WM_NEXTTX(sc, nexttx)) {
2613 curlen = seglen;
2614
2615 /*
2616 * So says the Linux driver:
2617 * Work around for premature descriptor
2618 * write-backs in TSO mode. Append a
2619 * 4-byte sentinel descriptor.
2620 */
2621 if (use_tso &&
2622 seg == dmamap->dm_nsegs - 1 &&
2623 curlen > 8)
2624 curlen -= 4;
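				/*
				 * Shortening curlen here does not drop
				 * data: the remaining 4 bytes stay in
				 * seglen, so the loop runs one extra
				 * iteration and emits them as the sentinel
				 * descriptor (which is why segs_needed was
				 * bumped above).
				 */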
2625
2626 wm_set_dma_addr(
2627 &sc->sc_txdescs[nexttx].wtx_addr,
2628 curaddr);
2629 sc->sc_txdescs[nexttx].wtx_cmdlen =
2630 htole32(cksumcmd | curlen);
2631 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2632 0;
2633 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2634 cksumfields;
2635 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2636 lasttx = nexttx;
2637
2638 DPRINTF(WM_DEBUG_TX,
2639 ("%s: TX: desc %d: low 0x%08lx, "
2640 "len 0x%04x\n",
2641 device_xname(sc->sc_dev), nexttx,
2642 curaddr & 0xffffffffUL, (unsigned)curlen));
2643 }
2644 }
2645
2646 KASSERT(lasttx != -1);
2647
2648 /*
2649 * Set up the command byte on the last descriptor of
2650 * the packet. If we're in the interrupt delay window,
2651 * delay the interrupt.
2652 */
2653 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2654 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2655
2656 /*
2657 * If VLANs are enabled and the packet has a VLAN tag, set
2658 * up the descriptor to encapsulate the packet for us.
2659 *
2660 * This is only valid on the last descriptor of the packet.
2661 */
2662 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2663 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2664 htole32(WTX_CMD_VLE);
2665 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2666 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2667 }
2668
2669 txs->txs_lastdesc = lasttx;
2670
2671 DPRINTF(WM_DEBUG_TX,
2672 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2673 device_xname(sc->sc_dev),
2674 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2675
2676 /* Sync the descriptors we're using. */
2677 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2678 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2679
2680 /* Give the packet to the chip. */
2681 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2682
2683 DPRINTF(WM_DEBUG_TX,
2684 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2685
2686 DPRINTF(WM_DEBUG_TX,
2687 ("%s: TX: finished transmitting packet, job %d\n",
2688 device_xname(sc->sc_dev), sc->sc_txsnext));
2689
2690 /* Advance the tx pointer. */
2691 sc->sc_txfree -= txs->txs_ndesc;
2692 sc->sc_txnext = nexttx;
2693
2694 sc->sc_txsfree--;
2695 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2696
2697 /* Pass the packet to any BPF listeners. */
2698 bpf_mtap(ifp, m0);
2699 }
2700
2701 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2702 /* No more slots; notify upper layer. */
2703 ifp->if_flags |= IFF_OACTIVE;
2704 }
2705
2706 if (sc->sc_txfree != ofree) {
2707 /* Set a watchdog timer in case the chip flakes out. */
2708 ifp->if_timer = 5;
2709 }
2710 }
2711
2712 /*
2713 * wm_watchdog: [ifnet interface function]
2714 *
2715 * Watchdog timer handler.
2716 */
2717 static void
2718 wm_watchdog(struct ifnet *ifp)
2719 {
2720 struct wm_softc *sc = ifp->if_softc;
2721
2722 /*
2723 * Since we're using delayed interrupts, sweep up
2724 * before we report an error.
2725 */
2726 wm_txintr(sc);
2727
2728 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2729 log(LOG_ERR,
2730 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2731 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2732 sc->sc_txnext);
2733 ifp->if_oerrors++;
2734
2735 /* Reset the interface. */
2736 (void) wm_init(ifp);
2737 }
2738
2739 /* Try to get more packets going. */
2740 wm_start(ifp);
2741 }
2742
2743 /*
2744 * wm_ioctl: [ifnet interface function]
2745 *
2746 * Handle control requests from the operator.
2747 */
2748 static int
2749 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2750 {
2751 struct wm_softc *sc = ifp->if_softc;
2752 struct ifreq *ifr = (struct ifreq *) data;
2753 struct ifaddr *ifa = (struct ifaddr *)data;
2754 struct sockaddr_dl *sdl;
2755 int diff, s, error;
2756
2757 s = splnet();
2758
2759 switch (cmd) {
2760 case SIOCSIFFLAGS:
2761 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
2762 break;
2763 if (ifp->if_flags & IFF_UP) {
2764 diff = (ifp->if_flags ^ sc->sc_if_flags)
2765 & (IFF_PROMISC | IFF_ALLMULTI);
2766 if ((diff & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2767 /*
2768 				 * If the difference between the last and
2769 				 * new flags is only IFF_PROMISC or
2770 				 * IFF_ALLMULTI, just set the multicast filter
2771 				 * (don't reset, to prevent link down).
2772 */
2773 wm_set_filter(sc);
2774 } else {
2775 /*
2776 * Reset the interface to pick up changes in
2777 * any other flags that affect the hardware
2778 * state.
2779 */
2780 wm_init(ifp);
2781 }
2782 } else {
2783 if (ifp->if_flags & IFF_RUNNING)
2784 wm_stop(ifp, 1);
2785 }
2786 sc->sc_if_flags = ifp->if_flags;
2787 error = 0;
2788 break;
2789 case SIOCSIFMEDIA:
2790 case SIOCGIFMEDIA:
2791 /* Flow control requires full-duplex mode. */
2792 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2793 (ifr->ifr_media & IFM_FDX) == 0)
2794 ifr->ifr_media &= ~IFM_ETH_FMASK;
2795 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2796 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2797 /* We can do both TXPAUSE and RXPAUSE. */
2798 ifr->ifr_media |=
2799 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2800 }
2801 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2802 }
2803 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2804 break;
2805 case SIOCINITIFADDR:
2806 if (ifa->ifa_addr->sa_family == AF_LINK) {
2807 sdl = satosdl(ifp->if_dl->ifa_addr);
2808 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2809 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2810 /* unicast address is first multicast entry */
2811 wm_set_filter(sc);
2812 error = 0;
2813 break;
2814 }
2815 /* Fall through for rest */
2816 default:
2817 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2818 break;
2819
2820 error = 0;
2821
2822 if (cmd == SIOCSIFCAP)
2823 error = (*ifp->if_init)(ifp);
2824 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2825 ;
2826 else if (ifp->if_flags & IFF_RUNNING) {
2827 /*
2828 * Multicast list has changed; set the hardware filter
2829 * accordingly.
2830 */
2831 wm_set_filter(sc);
2832 }
2833 break;
2834 }
2835
2836 /* Try to get more packets going. */
2837 wm_start(ifp);
2838
2839 splx(s);
2840 return error;
2841 }
2842
2843 /*
2844 * wm_intr:
2845 *
2846 * Interrupt service routine.
2847 */
2848 static int
2849 wm_intr(void *arg)
2850 {
2851 struct wm_softc *sc = arg;
2852 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2853 uint32_t icr;
2854 int handled = 0;
2855
2856 while (1 /* CONSTCOND */) {
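		/*
		 * Note: ICR is clear-on-read on these chips, so each read
		 * below acknowledges the causes we are about to service;
		 * we keep looping until no interesting bits remain set.
		 */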
2857 icr = CSR_READ(sc, WMREG_ICR);
2858 if ((icr & sc->sc_icr) == 0)
2859 break;
2860 #if 0 /*NRND > 0*/
2861 if (RND_ENABLED(&sc->rnd_source))
2862 rnd_add_uint32(&sc->rnd_source, icr);
2863 #endif
2864
2865 handled = 1;
2866
2867 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2868 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2869 DPRINTF(WM_DEBUG_RX,
2870 ("%s: RX: got Rx intr 0x%08x\n",
2871 device_xname(sc->sc_dev),
2872 icr & (ICR_RXDMT0|ICR_RXT0)));
2873 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2874 }
2875 #endif
2876 wm_rxintr(sc);
2877
2878 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2879 if (icr & ICR_TXDW) {
2880 DPRINTF(WM_DEBUG_TX,
2881 ("%s: TX: got TXDW interrupt\n",
2882 device_xname(sc->sc_dev)));
2883 WM_EVCNT_INCR(&sc->sc_ev_txdw);
2884 }
2885 #endif
2886 wm_txintr(sc);
2887
2888 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2889 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2890 wm_linkintr(sc, icr);
2891 }
2892
2893 if (icr & ICR_RXO) {
2894 #if defined(WM_DEBUG)
2895 log(LOG_WARNING, "%s: Receive overrun\n",
2896 device_xname(sc->sc_dev));
2897 #endif /* defined(WM_DEBUG) */
2898 }
2899 }
2900
2901 if (handled) {
2902 /* Try to get more packets going. */
2903 wm_start(ifp);
2904 }
2905
2906 return handled;
2907 }
2908
2909 /*
2910 * wm_txintr:
2911 *
2912 * Helper; handle transmit interrupts.
2913 */
2914 static void
2915 wm_txintr(struct wm_softc *sc)
2916 {
2917 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2918 struct wm_txsoft *txs;
2919 uint8_t status;
2920 int i;
2921
2922 ifp->if_flags &= ~IFF_OACTIVE;
2923
2924 /*
2925 * Go through the Tx list and free mbufs for those
2926 * frames which have been transmitted.
2927 */
2928 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2929 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2930 txs = &sc->sc_txsoft[i];
2931
2932 DPRINTF(WM_DEBUG_TX,
2933 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2934
2935 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2936 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2937
2938 status =
2939 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2940 if ((status & WTX_ST_DD) == 0) {
2941 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2942 BUS_DMASYNC_PREREAD);
2943 break;
2944 }
2945
2946 DPRINTF(WM_DEBUG_TX,
2947 ("%s: TX: job %d done: descs %d..%d\n",
2948 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2949 txs->txs_lastdesc));
2950
2951 /*
2952 * XXX We should probably be using the statistics
2953 * XXX registers, but I don't know if they exist
2954 * XXX on chips before the i82544.
2955 */
2956
2957 #ifdef WM_EVENT_COUNTERS
2958 if (status & WTX_ST_TU)
2959 WM_EVCNT_INCR(&sc->sc_ev_tu);
2960 #endif /* WM_EVENT_COUNTERS */
2961
2962 if (status & (WTX_ST_EC|WTX_ST_LC)) {
2963 ifp->if_oerrors++;
2964 if (status & WTX_ST_LC)
2965 log(LOG_WARNING, "%s: late collision\n",
2966 device_xname(sc->sc_dev));
2967 else if (status & WTX_ST_EC) {
2968 ifp->if_collisions += 16;
2969 log(LOG_WARNING, "%s: excessive collisions\n",
2970 device_xname(sc->sc_dev));
2971 }
2972 } else
2973 ifp->if_opackets++;
2974
2975 sc->sc_txfree += txs->txs_ndesc;
2976 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2977 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2978 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2979 m_freem(txs->txs_mbuf);
2980 txs->txs_mbuf = NULL;
2981 }
2982
2983 /* Update the dirty transmit buffer pointer. */
2984 sc->sc_txsdirty = i;
2985 DPRINTF(WM_DEBUG_TX,
2986 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
2987
2988 /*
2989 * If there are no more pending transmissions, cancel the watchdog
2990 * timer.
2991 */
2992 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2993 ifp->if_timer = 0;
2994 }
2995
2996 /*
2997 * wm_rxintr:
2998 *
2999 * Helper; handle receive interrupts.
3000 */
3001 static void
3002 wm_rxintr(struct wm_softc *sc)
3003 {
3004 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3005 struct wm_rxsoft *rxs;
3006 struct mbuf *m;
3007 int i, len;
3008 uint8_t status, errors;
3009 uint16_t vlantag;
3010
3011 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3012 rxs = &sc->sc_rxsoft[i];
3013
3014 DPRINTF(WM_DEBUG_RX,
3015 ("%s: RX: checking descriptor %d\n",
3016 device_xname(sc->sc_dev), i));
3017
3018 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3019
3020 status = sc->sc_rxdescs[i].wrx_status;
3021 errors = sc->sc_rxdescs[i].wrx_errors;
3022 len = le16toh(sc->sc_rxdescs[i].wrx_len);
3023 vlantag = sc->sc_rxdescs[i].wrx_special;
3024
3025 if ((status & WRX_ST_DD) == 0) {
3026 /*
3027 * We have processed all of the receive descriptors.
3028 */
3029 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3030 break;
3031 }
3032
3033 if (__predict_false(sc->sc_rxdiscard)) {
3034 DPRINTF(WM_DEBUG_RX,
3035 ("%s: RX: discarding contents of descriptor %d\n",
3036 device_xname(sc->sc_dev), i));
3037 WM_INIT_RXDESC(sc, i);
3038 if (status & WRX_ST_EOP) {
3039 /* Reset our state. */
3040 DPRINTF(WM_DEBUG_RX,
3041 ("%s: RX: resetting rxdiscard -> 0\n",
3042 device_xname(sc->sc_dev)));
3043 sc->sc_rxdiscard = 0;
3044 }
3045 continue;
3046 }
3047
3048 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3049 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3050
3051 m = rxs->rxs_mbuf;
3052
3053 /*
3054 * Add a new receive buffer to the ring, unless of
3055 * course the length is zero. Treat the latter as a
3056 * failed mapping.
3057 */
3058 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3059 /*
3060 * Failed, throw away what we've done so
3061 * far, and discard the rest of the packet.
3062 */
3063 ifp->if_ierrors++;
3064 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3065 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3066 WM_INIT_RXDESC(sc, i);
3067 if ((status & WRX_ST_EOP) == 0)
3068 sc->sc_rxdiscard = 1;
3069 if (sc->sc_rxhead != NULL)
3070 m_freem(sc->sc_rxhead);
3071 WM_RXCHAIN_RESET(sc);
3072 DPRINTF(WM_DEBUG_RX,
3073 ("%s: RX: Rx buffer allocation failed, "
3074 "dropping packet%s\n", device_xname(sc->sc_dev),
3075 sc->sc_rxdiscard ? " (discard)" : ""));
3076 continue;
3077 }
3078
3079 m->m_len = len;
3080 sc->sc_rxlen += len;
3081 DPRINTF(WM_DEBUG_RX,
3082 ("%s: RX: buffer at %p len %d\n",
3083 device_xname(sc->sc_dev), m->m_data, len));
3084
3085 /*
3086 * If this is not the end of the packet, keep
3087 * looking.
3088 */
3089 if ((status & WRX_ST_EOP) == 0) {
3090 WM_RXCHAIN_LINK(sc, m);
3091 DPRINTF(WM_DEBUG_RX,
3092 ("%s: RX: not yet EOP, rxlen -> %d\n",
3093 device_xname(sc->sc_dev), sc->sc_rxlen));
3094 continue;
3095 }
3096
3097 /*
3098 * Okay, we have the entire packet now. The chip is
3099 * configured to include the FCS (not all chips can
3100 * be configured to strip it), so we need to trim it.
3101 * May need to adjust length of previous mbuf in the
3102 * chain if the current mbuf is too short.
3103 */
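		/*
		 * E.g. if only 2 of the 4 FCS bytes landed in this final
		 * mbuf, we zero its length and trim the other 2 bytes
		 * from the previous mbuf in the chain below.
		 */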
3104 if (m->m_len < ETHER_CRC_LEN) {
3105 sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
3106 m->m_len = 0;
3107 } else {
3108 m->m_len -= ETHER_CRC_LEN;
3109 }
3110 len = sc->sc_rxlen - ETHER_CRC_LEN;
3111
3112 WM_RXCHAIN_LINK(sc, m);
3113
3114 *sc->sc_rxtailp = NULL;
3115 m = sc->sc_rxhead;
3116
3117 WM_RXCHAIN_RESET(sc);
3118
3119 DPRINTF(WM_DEBUG_RX,
3120 ("%s: RX: have entire packet, len -> %d\n",
3121 device_xname(sc->sc_dev), len));
3122
3123 /*
3124 * If an error occurred, update stats and drop the packet.
3125 */
3126 if (errors &
3127 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3128 if (errors & WRX_ER_SE)
3129 log(LOG_WARNING, "%s: symbol error\n",
3130 device_xname(sc->sc_dev));
3131 else if (errors & WRX_ER_SEQ)
3132 log(LOG_WARNING, "%s: receive sequence error\n",
3133 device_xname(sc->sc_dev));
3134 else if (errors & WRX_ER_CE)
3135 log(LOG_WARNING, "%s: CRC error\n",
3136 device_xname(sc->sc_dev));
3137 m_freem(m);
3138 continue;
3139 }
3140
3141 /*
3142 * No errors. Receive the packet.
3143 */
3144 m->m_pkthdr.rcvif = ifp;
3145 m->m_pkthdr.len = len;
3146
3147 /*
3148 * If VLANs are enabled, VLAN packets have been unwrapped
3149 * for us. Associate the tag with the packet.
3150 */
3151 if ((status & WRX_ST_VP) != 0) {
3152 VLAN_INPUT_TAG(ifp, m,
3153 le16toh(vlantag),
3154 continue);
3155 }
3156
3157 /*
3158 * Set up checksum info for this packet.
3159 */
3160 if ((status & WRX_ST_IXSM) == 0) {
3161 if (status & WRX_ST_IPCS) {
3162 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3163 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3164 if (errors & WRX_ER_IPE)
3165 m->m_pkthdr.csum_flags |=
3166 M_CSUM_IPv4_BAD;
3167 }
3168 if (status & WRX_ST_TCPCS) {
3169 /*
3170 * Note: we don't know if this was TCP or UDP,
3171 * so we just set both bits, and expect the
3172 * upper layers to deal.
3173 */
3174 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3175 m->m_pkthdr.csum_flags |=
3176 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3177 M_CSUM_TCPv6 | M_CSUM_UDPv6;
3178 if (errors & WRX_ER_TCPE)
3179 m->m_pkthdr.csum_flags |=
3180 M_CSUM_TCP_UDP_BAD;
3181 }
3182 }
3183
3184 ifp->if_ipackets++;
3185
3186 /* Pass this up to any BPF listeners. */
3187 bpf_mtap(ifp, m);
3188
3189 /* Pass it on. */
3190 (*ifp->if_input)(ifp, m);
3191 }
3192
3193 /* Update the receive pointer. */
3194 sc->sc_rxptr = i;
3195
3196 DPRINTF(WM_DEBUG_RX,
3197 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3198 }
3199
3200 /*
3201 * wm_linkintr_gmii:
3202 *
3203 * Helper; handle link interrupts for GMII.
3204 */
3205 static void
3206 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3207 {
3208
3209 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3210 __func__));
3211
3212 if (icr & ICR_LSC) {
3213 DPRINTF(WM_DEBUG_LINK,
3214 ("%s: LINK: LSC -> mii_tick\n",
3215 device_xname(sc->sc_dev)));
3216 mii_tick(&sc->sc_mii);
3217 if (sc->sc_type == WM_T_82543) {
3218 int miistatus, active;
3219
3220 /*
3221 			 * With the 82543, we need to force the
3222 			 * MAC's speed and duplex settings to match
3223 			 * the PHY's configuration.
3224 */
3225 miistatus = sc->sc_mii.mii_media_status;
3226
3227 if (miistatus & IFM_ACTIVE) {
3228 active = sc->sc_mii.mii_media_active;
3229 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3230 switch (IFM_SUBTYPE(active)) {
3231 case IFM_10_T:
3232 sc->sc_ctrl |= CTRL_SPEED_10;
3233 break;
3234 case IFM_100_TX:
3235 sc->sc_ctrl |= CTRL_SPEED_100;
3236 break;
3237 case IFM_1000_T:
3238 sc->sc_ctrl |= CTRL_SPEED_1000;
3239 break;
3240 default:
3241 /*
3242 					 * Fiber?
3243 					 * Should not get here.
3244 */
3245 printf("unknown media (%x)\n",
3246 active);
3247 break;
3248 }
3249 if (active & IFM_FDX)
3250 sc->sc_ctrl |= CTRL_FD;
3251 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3252 }
3253 } else if ((sc->sc_type == WM_T_ICH8)
3254 && (sc->sc_phytype == WMPHY_IGP_3)) {
3255 wm_kmrn_lock_loss_workaround_ich8lan(sc);
3256 } else if (sc->sc_type == WM_T_PCH) {
3257 wm_k1_gig_workaround_hv(sc,
3258 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3259 }
3260
3261 if ((sc->sc_phytype == WMPHY_82578)
3262 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3263 == IFM_1000_T)) {
3264
3265 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3266 delay(200*1000); /* XXX too big */
3267
3268 /* Link stall fix for link up */
3269 wm_gmii_hv_writereg(sc->sc_dev, 1,
3270 HV_MUX_DATA_CTRL,
3271 HV_MUX_DATA_CTRL_GEN_TO_MAC
3272 | HV_MUX_DATA_CTRL_FORCE_SPEED);
3273 wm_gmii_hv_writereg(sc->sc_dev, 1,
3274 HV_MUX_DATA_CTRL,
3275 HV_MUX_DATA_CTRL_GEN_TO_MAC);
3276 }
3277 }
3278 } else if (icr & ICR_RXSEQ) {
3279 DPRINTF(WM_DEBUG_LINK,
3280 ("%s: LINK Receive sequence error\n",
3281 device_xname(sc->sc_dev)));
3282 }
3283 }
3284
3285 /*
3286 * wm_linkintr_tbi:
3287 *
3288 * Helper; handle link interrupts for TBI mode.
3289 */
3290 static void
3291 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3292 {
3293 uint32_t status;
3294
3295 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3296 __func__));
3297
3298 status = CSR_READ(sc, WMREG_STATUS);
3299 if (icr & ICR_LSC) {
3300 if (status & STATUS_LU) {
3301 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3302 device_xname(sc->sc_dev),
3303 (status & STATUS_FD) ? "FDX" : "HDX"));
3304 /*
3305 * NOTE: CTRL will update TFCE and RFCE automatically,
3306 * so we should update sc->sc_ctrl
3307 */
3308
3309 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3310 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3311 sc->sc_fcrtl &= ~FCRTL_XONE;
3312 if (status & STATUS_FD)
3313 sc->sc_tctl |=
3314 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3315 else
3316 sc->sc_tctl |=
3317 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3318 if (sc->sc_ctrl & CTRL_TFCE)
3319 sc->sc_fcrtl |= FCRTL_XONE;
3320 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3321 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3322 WMREG_OLD_FCRTL : WMREG_FCRTL,
3323 sc->sc_fcrtl);
3324 sc->sc_tbi_linkup = 1;
3325 } else {
3326 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3327 device_xname(sc->sc_dev)));
3328 sc->sc_tbi_linkup = 0;
3329 }
3330 wm_tbi_set_linkled(sc);
3331 } else if (icr & ICR_RXCFG) {
3332 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3333 device_xname(sc->sc_dev)));
3334 sc->sc_tbi_nrxcfg++;
3335 wm_check_for_link(sc);
3336 } else if (icr & ICR_RXSEQ) {
3337 DPRINTF(WM_DEBUG_LINK,
3338 ("%s: LINK: Receive sequence error\n",
3339 device_xname(sc->sc_dev)));
3340 }
3341 }
3342
3343 /*
3344 * wm_linkintr:
3345 *
3346 * Helper; handle link interrupts.
3347 */
3348 static void
3349 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3350 {
3351
3352 if (sc->sc_flags & WM_F_HAS_MII)
3353 wm_linkintr_gmii(sc, icr);
3354 else
3355 wm_linkintr_tbi(sc, icr);
3356 }
3357
3358 /*
3359 * wm_tick:
3360 *
3361 * One second timer, used to check link status, sweep up
3362 * completed transmit jobs, etc.
3363 */
3364 static void
3365 wm_tick(void *arg)
3366 {
3367 struct wm_softc *sc = arg;
3368 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3369 int s;
3370
3371 s = splnet();
3372
3373 if (sc->sc_type >= WM_T_82542_2_1) {
3374 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3375 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3376 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3377 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3378 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3379 }
3380
3381 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3382 	ifp->if_ierrors += 0ULL /* ensure quad_t */
3383 	    + CSR_READ(sc, WMREG_CRCERRS)
3384 + CSR_READ(sc, WMREG_ALGNERRC)
3385 + CSR_READ(sc, WMREG_SYMERRC)
3386 + CSR_READ(sc, WMREG_RXERRC)
3387 + CSR_READ(sc, WMREG_SEC)
3388 + CSR_READ(sc, WMREG_CEXTERR)
3389 + CSR_READ(sc, WMREG_RLEC);
3390 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
3391
3392 if (sc->sc_flags & WM_F_HAS_MII)
3393 mii_tick(&sc->sc_mii);
3394 else
3395 wm_tbi_check_link(sc);
3396
3397 splx(s);
3398
3399 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3400 }
3401
3402 /*
3403 * wm_reset:
3404 *
3405 * Reset the i82542 chip.
3406 */
3407 static void
3408 wm_reset(struct wm_softc *sc)
3409 {
3410 int phy_reset = 0;
3411 uint32_t reg, mask;
3412 int i;
3413
3414 /*
3415 * Allocate on-chip memory according to the MTU size.
3416 * The Packet Buffer Allocation register must be written
3417 * before the chip is reset.
3418 */
3419 switch (sc->sc_type) {
3420 case WM_T_82547:
3421 case WM_T_82547_2:
3422 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3423 PBA_22K : PBA_30K;
3424 sc->sc_txfifo_head = 0;
3425 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3426 sc->sc_txfifo_size =
3427 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3428 sc->sc_txfifo_stall = 0;
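		/*
		 * E.g. with a standard MTU the Rx side keeps 30 KB of the
		 * 40 KB packet buffer and the Tx FIFO gets the remaining
		 * 10 KB (assuming the PBA_* constants name KB sizes);
		 * that Tx region is what the 82547 txfifo_bugchk code
		 * accounts against.
		 */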
3429 break;
3430 case WM_T_82571:
3431 case WM_T_82572:
3432 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
3433 case WM_T_80003:
3434 sc->sc_pba = PBA_32K;
3435 break;
3436 case WM_T_82580:
3437 case WM_T_82580ER:
3438 sc->sc_pba = PBA_35K;
3439 break;
3440 case WM_T_82576:
3441 sc->sc_pba = PBA_64K;
3442 break;
3443 case WM_T_82573:
3444 sc->sc_pba = PBA_12K;
3445 break;
3446 case WM_T_82574:
3447 case WM_T_82583:
3448 sc->sc_pba = PBA_20K;
3449 break;
3450 case WM_T_ICH8:
3451 sc->sc_pba = PBA_8K;
3452 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3453 break;
3454 case WM_T_ICH9:
3455 case WM_T_ICH10:
3456 case WM_T_PCH:
3457 sc->sc_pba = PBA_10K;
3458 break;
3459 default:
3460 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3461 PBA_40K : PBA_48K;
3462 break;
3463 }
3464 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3465
3466 /* Prevent the PCI-E bus from sticking */
3467 if (sc->sc_flags & WM_F_PCIE) {
3468 int timeout = 800;
3469
3470 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3471 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3472
3473 while (timeout--) {
3474 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
3475 break;
3476 delay(100);
3477 }
3478 }
3479
3480 /* Set the completion timeout for interface */
3481 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
3482 wm_set_pcie_completion_timeout(sc);
3483
3484 /* Clear interrupt */
3485 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3486
3487 /* Stop the transmit and receive processes. */
3488 CSR_WRITE(sc, WMREG_RCTL, 0);
3489 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3490 sc->sc_rctl &= ~RCTL_EN;
3491
3492 /* XXX set_tbi_sbp_82543() */
3493
3494 delay(10*1000);
3495
3496 /* Must acquire the MDIO ownership before MAC reset */
3497 switch (sc->sc_type) {
3498 case WM_T_82573:
3499 case WM_T_82574:
3500 case WM_T_82583:
3501 i = 0;
3502 reg = CSR_READ(sc, WMREG_EXTCNFCTR)
3503 | EXTCNFCTR_MDIO_SW_OWNERSHIP;
3504 do {
3505 CSR_WRITE(sc, WMREG_EXTCNFCTR,
3506 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
3507 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
3508 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
3509 break;
3510 reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
3511 delay(2*1000);
3512 i++;
3513 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
3514 break;
3515 default:
3516 break;
3517 }
3518
3519 /*
3520 * 82541 Errata 29? & 82547 Errata 28?
3521 * See also the description about PHY_RST bit in CTRL register
3522 * in 8254x_GBe_SDM.pdf.
3523 */
3524 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3525 CSR_WRITE(sc, WMREG_CTRL,
3526 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3527 delay(5000);
3528 }
3529
3530 switch (sc->sc_type) {
3531 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3532 case WM_T_82541:
3533 case WM_T_82541_2:
3534 case WM_T_82547:
3535 case WM_T_82547_2:
3536 /*
3537 * On some chipsets, a reset through a memory-mapped write
3538 * cycle can cause the chip to reset before completing the
3539 		 * write cycle. This causes a major headache that can be
3540 * avoided by issuing the reset via indirect register writes
3541 * through I/O space.
3542 *
3543 * So, if we successfully mapped the I/O BAR at attach time,
3544 * use that. Otherwise, try our luck with a memory-mapped
3545 * reset.
3546 */
3547 if (sc->sc_flags & WM_F_IOH_VALID)
3548 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3549 else
3550 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3551 break;
3552 case WM_T_82545_3:
3553 case WM_T_82546_3:
3554 /* Use the shadow control register on these chips. */
3555 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3556 break;
3557 case WM_T_80003:
3558 mask = swfwphysem[sc->sc_funcid];
3559 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3560 wm_get_swfw_semaphore(sc, mask);
3561 CSR_WRITE(sc, WMREG_CTRL, reg);
3562 wm_put_swfw_semaphore(sc, mask);
3563 break;
3564 case WM_T_ICH8:
3565 case WM_T_ICH9:
3566 case WM_T_ICH10:
3567 case WM_T_PCH:
3568 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3569 if (wm_check_reset_block(sc) == 0) {
3570 if (sc->sc_type >= WM_T_PCH) {
3571 uint32_t status;
3572
3573 status = CSR_READ(sc, WMREG_STATUS);
3574 CSR_WRITE(sc, WMREG_STATUS,
3575 status & ~STATUS_PHYRA);
3576 }
3577
3578 reg |= CTRL_PHY_RESET;
3579 phy_reset = 1;
3580 }
3581 wm_get_swfwhw_semaphore(sc);
3582 CSR_WRITE(sc, WMREG_CTRL, reg);
3583 delay(20*1000);
3584 wm_put_swfwhw_semaphore(sc);
3585 break;
3586 case WM_T_82542_2_0:
3587 case WM_T_82542_2_1:
3588 case WM_T_82543:
3589 case WM_T_82540:
3590 case WM_T_82545:
3591 case WM_T_82546:
3592 case WM_T_82571:
3593 case WM_T_82572:
3594 case WM_T_82573:
3595 case WM_T_82574:
3596 case WM_T_82575:
3597 case WM_T_82576:
3598 case WM_T_82583:
3599 default:
3600 /* Everything else can safely use the documented method. */
3601 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3602 break;
3603 }
3604
3605 if (phy_reset != 0)
3606 wm_get_cfg_done(sc);
3607
3608 /* reload EEPROM */
3609 switch (sc->sc_type) {
3610 case WM_T_82542_2_0:
3611 case WM_T_82542_2_1:
3612 case WM_T_82543:
3613 case WM_T_82544:
3614 delay(10);
3615 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3616 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3617 delay(2000);
3618 break;
3619 case WM_T_82540:
3620 case WM_T_82545:
3621 case WM_T_82545_3:
3622 case WM_T_82546:
3623 case WM_T_82546_3:
3624 delay(5*1000);
3625 /* XXX Disable HW ARPs on ASF enabled adapters */
3626 break;
3627 case WM_T_82541:
3628 case WM_T_82541_2:
3629 case WM_T_82547:
3630 case WM_T_82547_2:
3631 delay(20000);
3632 /* XXX Disable HW ARPs on ASF enabled adapters */
3633 break;
3634 case WM_T_82571:
3635 case WM_T_82572:
3636 case WM_T_82573:
3637 case WM_T_82574:
3638 case WM_T_82583:
3639 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3640 delay(10);
3641 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3642 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3643 }
3644 /* check EECD_EE_AUTORD */
3645 wm_get_auto_rd_done(sc);
3646 /*
3647 * Phy configuration from NVM just starts after EECD_AUTO_RD
3648 * is set.
3649 */
3650 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3651 || (sc->sc_type == WM_T_82583))
3652 delay(25*1000);
3653 break;
3654 case WM_T_82575:
3655 case WM_T_82576:
3656 case WM_T_80003:
3657 case WM_T_ICH8:
3658 case WM_T_ICH9:
3659 /* check EECD_EE_AUTORD */
3660 wm_get_auto_rd_done(sc);
3661 break;
3662 case WM_T_ICH10:
3663 case WM_T_PCH:
3664 wm_lan_init_done(sc);
3665 break;
3666 default:
3667 panic("%s: unknown type\n", __func__);
3668 }
3669
3670 /* Check whether EEPROM is present or not */
3671 switch (sc->sc_type) {
3672 case WM_T_82575:
3673 case WM_T_82576:
3674 case WM_T_82580:
3675 case WM_T_ICH8:
3676 case WM_T_ICH9:
3677 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3678 /* Not found */
3679 sc->sc_flags |= WM_F_EEPROM_INVALID;
3680 if (sc->sc_type == WM_T_82575) /* 82575 only */
3681 wm_reset_init_script_82575(sc);
3682 }
3683 break;
3684 default:
3685 break;
3686 }
3687
3688 /* Clear any pending interrupt events. */
3689 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3690 reg = CSR_READ(sc, WMREG_ICR);
3691
3692 /* reload sc_ctrl */
3693 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3694
3695 /* dummy read from WUC */
3696 if (sc->sc_type == WM_T_PCH)
3697 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3698 /*
3699 	 * For PCH, this write ensures that any noise is detected as a CRC
3700 	 * error and dropped, rather than handed to the DMA engine as a bad
3701 	 * packet.
3702 */
3703 if (sc->sc_type == WM_T_PCH)
3704 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3705
3706 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3707 CSR_WRITE(sc, WMREG_WUC, 0);
3708
3709 /* XXX need special handling for 82580 */
3710 }
3711
3712 /*
3713 * wm_init: [ifnet interface function]
3714 *
3715 * Initialize the interface. Must be called at splnet().
3716 */
3717 static int
3718 wm_init(struct ifnet *ifp)
3719 {
3720 struct wm_softc *sc = ifp->if_softc;
3721 struct wm_rxsoft *rxs;
3722 int i, error = 0;
3723 uint32_t reg;
3724
3725 /*
3726 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3727 	 * There is a small but measurable benefit to avoiding the adjustment
3728 * of the descriptor so that the headers are aligned, for normal mtu,
3729 * on such platforms. One possibility is that the DMA itself is
3730 * slightly more efficient if the front of the entire packet (instead
3731 * of the front of the headers) is aligned.
3732 *
3733 * Note we must always set align_tweak to 0 if we are using
3734 * jumbo frames.
3735 */
3736 #ifdef __NO_STRICT_ALIGNMENT
3737 sc->sc_align_tweak = 0;
3738 #else
3739 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3740 sc->sc_align_tweak = 0;
3741 else
3742 sc->sc_align_tweak = 2;
3743 #endif /* __NO_STRICT_ALIGNMENT */
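	/*
	 * Worked example: an Ethernet header is 14 bytes, so a 2-byte
	 * tweak lands the IP header on a 4-byte boundary (2 + 14 == 16).
	 * Jumbo frames need the full cluster, so the tweak must stay 0.
	 */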
3744
3745 /* Cancel any pending I/O. */
3746 wm_stop(ifp, 0);
3747
3748 /* update statistics before reset */
3749 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3750 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3751
3752 /* Reset the chip to a known state. */
3753 wm_reset(sc);
3754
3755 switch (sc->sc_type) {
3756 case WM_T_82571:
3757 case WM_T_82572:
3758 case WM_T_82573:
3759 case WM_T_82574:
3760 case WM_T_82583:
3761 case WM_T_80003:
3762 case WM_T_ICH8:
3763 case WM_T_ICH9:
3764 case WM_T_ICH10:
3765 case WM_T_PCH:
3766 if (wm_check_mng_mode(sc) != 0)
3767 wm_get_hw_control(sc);
3768 break;
3769 default:
3770 break;
3771 }
3772
3773 /* Reset the PHY. */
3774 if (sc->sc_flags & WM_F_HAS_MII)
3775 wm_gmii_reset(sc);
3776
3777 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3778 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3779 if (sc->sc_type == WM_T_PCH)
3780 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3781
3782 /* Initialize the transmit descriptor ring. */
3783 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3784 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3785 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3786 sc->sc_txfree = WM_NTXDESC(sc);
3787 sc->sc_txnext = 0;
3788
3789 if (sc->sc_type < WM_T_82543) {
3790 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3791 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3792 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3793 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3794 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3795 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3796 } else {
3797 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3798 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3799 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3800 CSR_WRITE(sc, WMREG_TDH, 0);
3801 CSR_WRITE(sc, WMREG_TDT, 0);
3802 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3803 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3804
3805 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3806 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3807 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3808 | TXDCTL_WTHRESH(0));
3809 else {
3810 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3811 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3812 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3813 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3814 }
3815 }
3816 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3817 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3818
3819 /* Initialize the transmit job descriptors. */
3820 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3821 sc->sc_txsoft[i].txs_mbuf = NULL;
3822 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3823 sc->sc_txsnext = 0;
3824 sc->sc_txsdirty = 0;
3825
3826 /*
3827 * Initialize the receive descriptor and receive job
3828 * descriptor rings.
3829 */
3830 if (sc->sc_type < WM_T_82543) {
3831 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3832 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3833 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3834 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3835 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3836 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3837
3838 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3839 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3840 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3841 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3842 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3843 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3844 } else {
3845 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3846 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3847 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3848 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3849 CSR_WRITE(sc, WMREG_EITR(0), 450);
3850 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
3851 				panic("%s: MCLBYTES %d unsupported for 82575 "
				    "or higher\n", __func__, MCLBYTES);
3852 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3853 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
3854 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3855 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3856 | RXDCTL_WTHRESH(1));
3857 } else {
3858 CSR_WRITE(sc, WMREG_RDH, 0);
3859 CSR_WRITE(sc, WMREG_RDT, 0);
3860 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3861 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3862 }
3863 }
3864 for (i = 0; i < WM_NRXDESC; i++) {
3865 rxs = &sc->sc_rxsoft[i];
3866 if (rxs->rxs_mbuf == NULL) {
3867 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3868 log(LOG_ERR, "%s: unable to allocate or map rx "
3869 "buffer %d, error = %d\n",
3870 device_xname(sc->sc_dev), i, error);
3871 /*
3872 * XXX Should attempt to run with fewer receive
3873 * XXX buffers instead of just failing.
3874 */
3875 wm_rxdrain(sc);
3876 goto out;
3877 }
3878 } else {
3879 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3880 WM_INIT_RXDESC(sc, i);
3881 }
3882 }
3883 sc->sc_rxptr = 0;
3884 sc->sc_rxdiscard = 0;
3885 WM_RXCHAIN_RESET(sc);
3886
3887 /*
3888 * Clear out the VLAN table -- we don't use it (yet).
3889 */
3890 CSR_WRITE(sc, WMREG_VET, 0);
3891 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3892 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3893
3894 /*
3895 * Set up flow-control parameters.
3896 *
3897 * XXX Values could probably stand some tuning.
3898 */
3899 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3900 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
3901 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3902 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3903 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3904 }
3905
3906 sc->sc_fcrtl = FCRTL_DFLT;
3907 if (sc->sc_type < WM_T_82543) {
3908 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3909 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3910 } else {
3911 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3912 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3913 }
3914
3915 if (sc->sc_type == WM_T_80003)
3916 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3917 else
3918 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3919
3920 /* Deal with VLAN enables. */
3921 if (VLAN_ATTACHED(&sc->sc_ethercom))
3922 sc->sc_ctrl |= CTRL_VME;
3923 else
3924 sc->sc_ctrl &= ~CTRL_VME;
3925
3926 /* Write the control registers. */
3927 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3928
3929 if (sc->sc_flags & WM_F_HAS_MII) {
3930 int val;
3931
3932 switch (sc->sc_type) {
3933 case WM_T_80003:
3934 case WM_T_ICH8:
3935 case WM_T_ICH9:
3936 case WM_T_ICH10:
3937 case WM_T_PCH:
3938 /*
3939 * Set the mac to wait the maximum time between each
3940 * iteration and increase the max iterations when
3941 * polling the phy; this fixes erroneous timeouts at
3942 * 10Mbps.
3943 */
3944 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3945 0xFFFF);
3946 val = wm_kmrn_readreg(sc,
3947 KUMCTRLSTA_OFFSET_INB_PARAM);
3948 val |= 0x3F;
3949 wm_kmrn_writereg(sc,
3950 KUMCTRLSTA_OFFSET_INB_PARAM, val);
3951 break;
3952 default:
3953 break;
3954 }
3955
3956 if (sc->sc_type == WM_T_80003) {
3957 val = CSR_READ(sc, WMREG_CTRL_EXT);
3958 val &= ~CTRL_EXT_LINK_MODE_MASK;
3959 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3960
3961 /* Bypass RX and TX FIFO's */
3962 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3963 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
3964 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3965 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3966 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3967 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3968 }
3969 }
3970 #if 0
3971 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3972 #endif
3973
3974 /*
3975 * Set up checksum offload parameters.
3976 */
3977 reg = CSR_READ(sc, WMREG_RXCSUM);
3978 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3979 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3980 reg |= RXCSUM_IPOFL;
3981 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3982 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3983 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3984 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3985 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3986
3987 /* Reset TBI's RXCFG count */
3988 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
3989
3990 /*
3991 * Set up the interrupt registers.
3992 */
3993 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3994 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3995 ICR_RXO | ICR_RXT0;
3996 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3997 sc->sc_icr |= ICR_RXCFG;
3998 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3999
4000 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4001 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
4002 reg = CSR_READ(sc, WMREG_KABGTXD);
4003 reg |= KABGTXD_BGSQLBIAS;
4004 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4005 }
4006
4007 /* Set up the inter-packet gap. */
4008 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4009
4010 if (sc->sc_type >= WM_T_82543) {
4011 /*
4012 * Set up the interrupt throttling register (units of 256ns)
4013 * Note that a footnote in Intel's documentation says this
4014 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4015 		 * or 10Mbit mode.  Empirically, this also appears to be
4016 		 * true for the 1024ns units of the other
4017 * interrupt-related timer registers -- so, really, we ought
4018 * to divide this value by 4 when the link speed is low.
4019 *
4020 * XXX implement this division at link speed change!
4021 */
4022
4023 /*
4024 * For N interrupts/sec, set this value to:
4025 * 1000000000 / (N * 256). Note that we set the
4026 * absolute and packet timer values to this value
4027 * divided by 4 to get "simple timer" behavior.
4028 */
4029
4030 sc->sc_itr = 1500; /* 2604 ints/sec */
4031 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
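		/*
		 * Worked example: 1000000000 / (1500 * 256) ~= 2604
		 * interrupts/sec, as noted above; the TIDV/TADV values
		 * of 375 programmed earlier are exactly 1500 / 4.
		 */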
4032 }
4033
4034 /* Set the VLAN ethernetype. */
4035 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4036
4037 /*
4038 * Set up the transmit control register; we start out with
4039 	 * a collision distance suitable for FDX, but update it when
4040 * we resolve the media type.
4041 */
4042 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4043 | TCTL_CT(TX_COLLISION_THRESHOLD)
4044 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4045 if (sc->sc_type >= WM_T_82571)
4046 sc->sc_tctl |= TCTL_MULR;
4047 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4048
4049 if (sc->sc_type == WM_T_80003) {
4050 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4051 reg &= ~TCTL_EXT_GCEX_MASK;
4052 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4053 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4054 }
4055
4056 /* Set the media. */
4057 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4058 goto out;
4059
4060 /* Configure for OS presence */
4061 wm_init_manageability(sc);
4062
4063 /*
4064 * Set up the receive control register; we actually program
4065 * the register when we set the receive filter. Use multicast
4066 * address offset type 0.
4067 *
4068 * Only the i82544 has the ability to strip the incoming
4069 * CRC, so we don't enable that feature.
4070 */
4071 sc->sc_mchash_type = 0;
4072 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4073 | RCTL_MO(sc->sc_mchash_type);
4074
4075 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4076 && (ifp->if_mtu > ETHERMTU)) {
4077 sc->sc_rctl |= RCTL_LPE;
4078 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4079 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4080 }
4081
4082 if (MCLBYTES == 2048) {
4083 sc->sc_rctl |= RCTL_2k;
4084 } else {
4085 if (sc->sc_type >= WM_T_82543) {
4086 switch (MCLBYTES) {
4087 case 4096:
4088 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4089 break;
4090 case 8192:
4091 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4092 break;
4093 case 16384:
4094 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4095 break;
4096 default:
4097 panic("wm_init: MCLBYTES %d unsupported",
4098 MCLBYTES);
4099 break;
4100 }
4101 		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
4102 }
4103
4104 /* Set the receive filter. */
4105 wm_set_filter(sc);
4106
4107 /* On 575 and later set RDT only if RX enabled... */
4108 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4109 for (i = 0; i < WM_NRXDESC; i++)
4110 WM_INIT_RXDESC(sc, i);
4111
4112 /* Start the one second link check clock. */
4113 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4114
4115 /* ...all done! */
4116 ifp->if_flags |= IFF_RUNNING;
4117 ifp->if_flags &= ~IFF_OACTIVE;
4118
4119 out:
4120 if (error)
4121 log(LOG_ERR, "%s: interface not running\n",
4122 device_xname(sc->sc_dev));
4123 return error;
4124 }
4125
4126 /*
4127 * wm_rxdrain:
4128 *
4129 * Drain the receive queue.
4130 */
4131 static void
4132 wm_rxdrain(struct wm_softc *sc)
4133 {
4134 struct wm_rxsoft *rxs;
4135 int i;
4136
4137 for (i = 0; i < WM_NRXDESC; i++) {
4138 rxs = &sc->sc_rxsoft[i];
4139 if (rxs->rxs_mbuf != NULL) {
4140 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4141 m_freem(rxs->rxs_mbuf);
4142 rxs->rxs_mbuf = NULL;
4143 }
4144 }
4145 }
4146
4147 /*
4148 * wm_stop: [ifnet interface function]
4149 *
4150 * Stop transmission on the interface.
4151 */
4152 static void
4153 wm_stop(struct ifnet *ifp, int disable)
4154 {
4155 struct wm_softc *sc = ifp->if_softc;
4156 struct wm_txsoft *txs;
4157 int i;
4158
4159 /* Stop the one second clock. */
4160 callout_stop(&sc->sc_tick_ch);
4161
4162 /* Stop the 82547 Tx FIFO stall check timer. */
4163 if (sc->sc_type == WM_T_82547)
4164 callout_stop(&sc->sc_txfifo_ch);
4165
4166 if (sc->sc_flags & WM_F_HAS_MII) {
4167 /* Down the MII. */
4168 mii_down(&sc->sc_mii);
4169 } else {
4170 #if 0
4171 /* Should we clear PHY's status properly? */
4172 wm_reset(sc);
4173 #endif
4174 }
4175
4176 /* Stop the transmit and receive processes. */
4177 CSR_WRITE(sc, WMREG_TCTL, 0);
4178 CSR_WRITE(sc, WMREG_RCTL, 0);
4179 sc->sc_rctl &= ~RCTL_EN;
4180
4181 /*
4182 * Clear the interrupt mask to ensure the device cannot assert its
4183 * interrupt line.
4184 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4185 * any currently pending or shared interrupt.
4186 */
4187 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4188 sc->sc_icr = 0;
4189
4190 /* Release any queued transmit buffers. */
4191 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4192 txs = &sc->sc_txsoft[i];
4193 if (txs->txs_mbuf != NULL) {
4194 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4195 m_freem(txs->txs_mbuf);
4196 txs->txs_mbuf = NULL;
4197 }
4198 }
4199
4200 /* Mark the interface as down and cancel the watchdog timer. */
4201 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4202 ifp->if_timer = 0;
4203
4204 if (disable)
4205 wm_rxdrain(sc);
4206
4207 #if 0 /* notyet */
4208 if (sc->sc_type >= WM_T_82544)
4209 CSR_WRITE(sc, WMREG_WUC, 0);
4210 #endif
4211 }
4212
4213 void
4214 wm_get_auto_rd_done(struct wm_softc *sc)
4215 {
4216 int i;
4217
4218 /* wait for eeprom to reload */
4219 switch (sc->sc_type) {
4220 case WM_T_82571:
4221 case WM_T_82572:
4222 case WM_T_82573:
4223 case WM_T_82574:
4224 case WM_T_82583:
4225 case WM_T_82575:
4226 case WM_T_82576:
4227 case WM_T_80003:
4228 case WM_T_ICH8:
4229 case WM_T_ICH9:
4230 for (i = 0; i < 10; i++) {
4231 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4232 break;
4233 delay(1000);
4234 }
4235 if (i == 10) {
4236 log(LOG_ERR, "%s: auto read from eeprom failed to "
4237 "complete\n", device_xname(sc->sc_dev));
4238 }
4239 break;
4240 default:
4241 break;
4242 }
4243 }
4244
4245 void
4246 wm_lan_init_done(struct wm_softc *sc)
4247 {
4248 uint32_t reg = 0;
4249 int i;
4250
4251 /* wait for eeprom to reload */
4252 switch (sc->sc_type) {
4253 case WM_T_ICH10:
4254 case WM_T_PCH:
4255 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4256 reg = CSR_READ(sc, WMREG_STATUS);
4257 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4258 break;
4259 delay(100);
4260 }
4261 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4262 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4263 "complete\n", device_xname(sc->sc_dev), __func__);
4264 }
4265 break;
4266 default:
4267 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4268 __func__);
4269 break;
4270 }
4271
4272 reg &= ~STATUS_LAN_INIT_DONE;
4273 CSR_WRITE(sc, WMREG_STATUS, reg);
4274 }
4275
4276 void
4277 wm_get_cfg_done(struct wm_softc *sc)
4278 {
4279 int mask;
4280 uint32_t reg;
4281 int i;
4282
4283 /* wait for eeprom to reload */
4284 switch (sc->sc_type) {
4285 case WM_T_82542_2_0:
4286 case WM_T_82542_2_1:
4287 /* null */
4288 break;
4289 case WM_T_82543:
4290 case WM_T_82544:
4291 case WM_T_82540:
4292 case WM_T_82545:
4293 case WM_T_82545_3:
4294 case WM_T_82546:
4295 case WM_T_82546_3:
4296 case WM_T_82541:
4297 case WM_T_82541_2:
4298 case WM_T_82547:
4299 case WM_T_82547_2:
4300 case WM_T_82573:
4301 case WM_T_82574:
4302 case WM_T_82583:
4303 /* generic */
4304 delay(10*1000);
4305 break;
4306 case WM_T_80003:
4307 case WM_T_82571:
4308 case WM_T_82572:
4309 case WM_T_82575:
4310 case WM_T_82576:
4311 case WM_T_82580:
4312 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4313 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4314 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4315 break;
4316 delay(1000);
4317 }
4318 if (i >= WM_PHY_CFG_TIMEOUT) {
4319 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
4320 device_xname(sc->sc_dev), __func__));
4321 }
4322 break;
4323 case WM_T_ICH8:
4324 case WM_T_ICH9:
4325 case WM_T_ICH10:
4326 case WM_T_PCH:
4327 if (sc->sc_type >= WM_T_PCH) {
4328 reg = CSR_READ(sc, WMREG_STATUS);
4329 if ((reg & STATUS_PHYRA) != 0)
4330 CSR_WRITE(sc, WMREG_STATUS,
4331 reg & ~STATUS_PHYRA);
4332 }
4333 delay(10*1000);
4334 break;
4335 default:
4336 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4337 __func__);
4338 break;
4339 }
4340 }
4341
4342 /*
4343 * wm_acquire_eeprom:
4344 *
4345 * Perform the EEPROM handshake required on some chips.
4346 */
4347 static int
4348 wm_acquire_eeprom(struct wm_softc *sc)
4349 {
4350 uint32_t reg;
4351 int x;
4352 int ret = 0;
4353
4354 	/* Flash-based NVM requires no handshake; always succeeds. */
4355 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4356 return 0;
4357
4358 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
4359 ret = wm_get_swfwhw_semaphore(sc);
4360 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
4361 /* this will also do wm_get_swsm_semaphore() if needed */
4362 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
4363 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4364 ret = wm_get_swsm_semaphore(sc);
4365 }
4366
4367 if (ret) {
4368 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4369 __func__);
4370 return 1;
4371 }
4372
4373 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4374 reg = CSR_READ(sc, WMREG_EECD);
4375
4376 /* Request EEPROM access. */
4377 reg |= EECD_EE_REQ;
4378 CSR_WRITE(sc, WMREG_EECD, reg);
4379
4380 /* ..and wait for it to be granted. */
4381 for (x = 0; x < 1000; x++) {
4382 reg = CSR_READ(sc, WMREG_EECD);
4383 if (reg & EECD_EE_GNT)
4384 break;
4385 delay(5);
4386 }
4387 if ((reg & EECD_EE_GNT) == 0) {
4388 aprint_error_dev(sc->sc_dev,
4389 "could not acquire EEPROM GNT\n");
4390 reg &= ~EECD_EE_REQ;
4391 CSR_WRITE(sc, WMREG_EECD, reg);
4392 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4393 wm_put_swfwhw_semaphore(sc);
4394 if (sc->sc_flags & WM_F_SWFW_SYNC)
4395 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4396 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4397 wm_put_swsm_semaphore(sc);
4398 return 1;
4399 }
4400 }
4401
4402 return 0;
4403 }
4404
4405 /*
4406 * wm_release_eeprom:
4407 *
4408 * Release the EEPROM mutex.
4409 */
4410 static void
4411 wm_release_eeprom(struct wm_softc *sc)
4412 {
4413 uint32_t reg;
4414
4415 	/* Flash-based NVM requires no handshake; nothing to release. */
4416 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4417 return;
4418
4419 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4420 reg = CSR_READ(sc, WMREG_EECD);
4421 reg &= ~EECD_EE_REQ;
4422 CSR_WRITE(sc, WMREG_EECD, reg);
4423 }
4424
4425 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4426 wm_put_swfwhw_semaphore(sc);
4427 if (sc->sc_flags & WM_F_SWFW_SYNC)
4428 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4429 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4430 wm_put_swsm_semaphore(sc);
4431 }
4432
4433 /*
4434 * wm_eeprom_sendbits:
4435 *
4436 * Send a series of bits to the EEPROM.
4437 */
4438 static void
4439 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
4440 {
4441 uint32_t reg;
4442 int x;
4443
4444 reg = CSR_READ(sc, WMREG_EECD);
4445
4446 for (x = nbits; x > 0; x--) {
4447 if (bits & (1U << (x - 1)))
4448 reg |= EECD_DI;
4449 else
4450 reg &= ~EECD_DI;
4451 CSR_WRITE(sc, WMREG_EECD, reg);
4452 delay(2);
4453 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4454 delay(2);
4455 CSR_WRITE(sc, WMREG_EECD, reg);
4456 delay(2);
4457 }
4458 }
4459
4460 /*
4461 * wm_eeprom_recvbits:
4462 *
4463 * Receive a series of bits from the EEPROM.
4464 */
4465 static void
4466 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
4467 {
4468 uint32_t reg, val;
4469 int x;
4470
4471 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
4472
4473 val = 0;
4474 for (x = nbits; x > 0; x--) {
4475 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4476 delay(2);
4477 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
4478 val |= (1U << (x - 1));
4479 CSR_WRITE(sc, WMREG_EECD, reg);
4480 delay(2);
4481 }
4482 *valp = val;
4483 }
4484
4485 /*
4486 * wm_read_eeprom_uwire:
4487 *
4488 * Read a word from the EEPROM using the MicroWire protocol.
4489 */
4490 static int
4491 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4492 {
4493 uint32_t reg, val;
4494 int i;
4495
4496 for (i = 0; i < wordcnt; i++) {
4497 /* Clear SK and DI. */
4498 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
4499 CSR_WRITE(sc, WMREG_EECD, reg);
4500
4501 /* Set CHIP SELECT. */
4502 reg |= EECD_CS;
4503 CSR_WRITE(sc, WMREG_EECD, reg);
4504 delay(2);
4505
4506 /* Shift in the READ command. */
4507 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
4508
4509 /* Shift in address. */
4510 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
4511
4512 /* Shift out the data. */
4513 wm_eeprom_recvbits(sc, &val, 16);
4514 data[i] = val & 0xffff;
4515
4516 /* Clear CHIP SELECT. */
4517 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
4518 CSR_WRITE(sc, WMREG_EECD, reg);
4519 delay(2);
4520 }
4521
4522 return 0;
4523 }
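
/*
 * Worked example (MicroWire): reading word 3 from a part with 6
 * address bits clocks out the 3-bit READ opcode followed by the
 * address bits 000011, MSB first, then clocks in the 16 data bits.
 */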
4524
4525 /*
4526 * wm_spi_eeprom_ready:
4527 *
4528 * Wait for a SPI EEPROM to be ready for commands.
4529 */
4530 static int
4531 wm_spi_eeprom_ready(struct wm_softc *sc)
4532 {
4533 uint32_t val;
4534 int usec;
4535
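	/* Poll the status register in 5us steps until the busy bit
	 * (SPI_SR_RDY) clears. */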
4536 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
4537 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
4538 wm_eeprom_recvbits(sc, &val, 8);
4539 if ((val & SPI_SR_RDY) == 0)
4540 break;
4541 }
4542 if (usec >= SPI_MAX_RETRIES) {
4543 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
4544 return 1;
4545 }
4546 return 0;
4547 }
4548
4549 /*
4550 * wm_read_eeprom_spi:
4551 *
4552  *	Read a word from the EEPROM using the SPI protocol.
4553 */
4554 static int
4555 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4556 {
4557 uint32_t reg, val;
4558 int i;
4559 uint8_t opc;
4560
4561 /* Clear SK and CS. */
4562 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
4563 CSR_WRITE(sc, WMREG_EECD, reg);
4564 delay(2);
4565
4566 if (wm_spi_eeprom_ready(sc))
4567 return 1;
4568
4569 /* Toggle CS to flush commands. */
4570 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
4571 delay(2);
4572 CSR_WRITE(sc, WMREG_EECD, reg);
4573 delay(2);
4574
4575 opc = SPI_OPC_READ;
4576 if (sc->sc_ee_addrbits == 8 && word >= 128)
4577 opc |= SPI_OPC_A8;
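	/*
	 * Parts with only 8 address bits carry the 9th (high) address
	 * bit in the opcode; the address sent below is shifted left
	 * once because SPI parts are byte-addressed and we read 16-bit
	 * words.
	 */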
4578
4579 wm_eeprom_sendbits(sc, opc, 8);
4580 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
4581
4582 for (i = 0; i < wordcnt; i++) {
4583 wm_eeprom_recvbits(sc, &val, 16);
4584 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
4585 }
4586
4587 /* Raise CS and clear SK. */
4588 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
4589 CSR_WRITE(sc, WMREG_EECD, reg);
4590 delay(2);
4591
4592 return 0;
4593 }
4594
4595 #define EEPROM_CHECKSUM 0xBABA
4596 #define EEPROM_SIZE 0x0040
4597
4598 /*
4599 * wm_validate_eeprom_checksum
4600 *
4601  * The checksum is defined as the sum of the first 64 (16 bit) words,
 * which must equal EEPROM_CHECKSUM (0xBABA).
4602 */
4603 static int
4604 wm_validate_eeprom_checksum(struct wm_softc *sc)
4605 {
4606 uint16_t checksum;
4607 uint16_t eeprom_data;
4608 int i;
4609
4610 checksum = 0;
4611
4612 for (i = 0; i < EEPROM_SIZE; i++) {
4613 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
4614 return 1;
4615 checksum += eeprom_data;
4616 }
4617
4618 if (checksum != (uint16_t) EEPROM_CHECKSUM)
4619 return 1;
4620
4621 return 0;
4622 }
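
/*
 * Usage sketch (illustrative only, not taken from this file): attach
 * code would typically reject the NVM when validation fails, e.g.:
 *
 *	if (wm_validate_eeprom_checksum(sc))
 *		sc->sc_flags |= WM_F_EEPROM_INVALID;
 */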
4623
4624 /*
4625 * wm_read_eeprom:
4626 *
4627 * Read data from the serial EEPROM.
4628 */
4629 static int
4630 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4631 {
4632 int rv;
4633
4634 if (sc->sc_flags & WM_F_EEPROM_INVALID)
4635 return 1;
4636
4637 if (wm_acquire_eeprom(sc))
4638 return 1;
4639
4640 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4641 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4642 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
4643 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
4644 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
4645 else if (sc->sc_flags & WM_F_EEPROM_SPI)
4646 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
4647 else
4648 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
4649
4650 wm_release_eeprom(sc);
4651 return rv;
4652 }
4653
4654 static int
4655 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
4656 uint16_t *data)
4657 {
4658 int i, eerd = 0;
4659 int error = 0;
4660
4661 for (i = 0; i < wordcnt; i++) {
4662 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
4663
4664 CSR_WRITE(sc, WMREG_EERD, eerd);
4665 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
4666 if (error != 0)
4667 break;
4668
4669 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
4670 }
4671
4672 return error;
4673 }
4674
4675 static int
4676 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
4677 {
4678 uint32_t attempts = 100000;
4679 uint32_t i, reg = 0;
4680 int32_t done = -1;
4681
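	/* 100000 polls of 5us each: give up after roughly 500ms. */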
4682 for (i = 0; i < attempts; i++) {
4683 reg = CSR_READ(sc, rw);
4684
4685 if (reg & EERD_DONE) {
4686 done = 0;
4687 break;
4688 }
4689 delay(5);
4690 }
4691
4692 return done;
4693 }
4694
4695 /*
4696 * wm_add_rxbuf:
4697 *
4698  *	Add a receive buffer to the indicated descriptor.
4699 */
4700 static int
4701 wm_add_rxbuf(struct wm_softc *sc, int idx)
4702 {
4703 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4704 struct mbuf *m;
4705 int error;
4706
4707 MGETHDR(m, M_DONTWAIT, MT_DATA);
4708 if (m == NULL)
4709 return ENOBUFS;
4710
4711 MCLGET(m, M_DONTWAIT);
4712 if ((m->m_flags & M_EXT) == 0) {
4713 m_freem(m);
4714 return ENOBUFS;
4715 }
4716
4717 if (rxs->rxs_mbuf != NULL)
4718 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4719
4720 rxs->rxs_mbuf = m;
4721
4722 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4723 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4724 BUS_DMA_READ|BUS_DMA_NOWAIT);
4725 if (error) {
4726 /* XXX XXX XXX */
4727 aprint_error_dev(sc->sc_dev,
4728 "unable to load rx DMA map %d, error = %d\n",
4729 idx, error);
4730 panic("wm_add_rxbuf");
4731 }
4732
4733 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4734 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4735
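	/*
	 * On NEWQUEUE (82575 and later) chips the descriptor may only be
	 * handed to the hardware while the receiver is enabled; wm_init()
	 * runs a late WM_INIT_RXDESC pass to cover that case.
	 */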
4736 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4737 if ((sc->sc_rctl & RCTL_EN) != 0)
4738 WM_INIT_RXDESC(sc, idx);
4739 } else
4740 WM_INIT_RXDESC(sc, idx);
4741
4742 return 0;
4743 }
4744
4745 /*
4746 * wm_set_ral:
4747 *
4748  *	Set an entry in the receive address list.
4749 */
4750 static void
4751 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4752 {
4753 uint32_t ral_lo, ral_hi;
4754
4755 if (enaddr != NULL) {
4756 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
4757 (enaddr[3] << 24);
4758 ral_hi = enaddr[4] | (enaddr[5] << 8);
4759 ral_hi |= RAL_AV;
4760 } else {
4761 ral_lo = 0;
4762 ral_hi = 0;
4763 }
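
	/*
	 * Example: 00:a0:c9:12:34:56 packs as ral_lo = 0x12c9a000 and
	 * ral_hi = 0x5634 | RAL_AV.
	 */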
4764
4765 if (sc->sc_type >= WM_T_82544) {
4766 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
4767 ral_lo);
4768 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
4769 ral_hi);
4770 } else {
4771 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
4772 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
4773 }
4774 }
4775
4776 /*
4777 * wm_mchash:
4778 *
4779 * Compute the hash of the multicast address for the 4096-bit
4780 * multicast filter.
4781 */
4782 static uint32_t
4783 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4784 {
4785 static const int lo_shift[4] = { 4, 3, 2, 0 };
4786 static const int hi_shift[4] = { 4, 5, 6, 8 };
4787 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4788 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4789 uint32_t hash;
4790
4791 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4792 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
4793 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4794 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4795 return (hash & 0x3ff);
4796 }
4797 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4798 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4799
4800 return (hash & 0xfff);
4801 }
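
/*
 * Worked example: a 12-bit hash of 0x9a5 selects MTA register
 * 0x9a5 >> 5 == 0x4d, bit 0x9a5 & 0x1f == 5, in wm_set_filter()
 * below (4096 bits == 128 32-bit registers).
 */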
4802
4803 /*
4804 * wm_set_filter:
4805 *
4806 * Set up the receive filter.
4807 */
4808 static void
4809 wm_set_filter(struct wm_softc *sc)
4810 {
4811 struct ethercom *ec = &sc->sc_ethercom;
4812 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4813 struct ether_multi *enm;
4814 struct ether_multistep step;
4815 bus_addr_t mta_reg;
4816 uint32_t hash, reg, bit;
4817 int i, size;
4818
4819 if (sc->sc_type >= WM_T_82544)
4820 mta_reg = WMREG_CORDOVA_MTA;
4821 else
4822 mta_reg = WMREG_MTA;
4823
4824 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4825
4826 if (ifp->if_flags & IFF_BROADCAST)
4827 sc->sc_rctl |= RCTL_BAM;
4828 if (ifp->if_flags & IFF_PROMISC) {
4829 sc->sc_rctl |= RCTL_UPE;
4830 goto allmulti;
4831 }
4832
4833 /*
4834 * Set the station address in the first RAL slot, and
4835 * clear the remaining slots.
4836 */
4837 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4838 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4839 size = WM_ICH8_RAL_TABSIZE;
4840 else
4841 size = WM_RAL_TABSIZE;
4842 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4843 for (i = 1; i < size; i++)
4844 wm_set_ral(sc, NULL, i);
4845
4846 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4847 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4848 size = WM_ICH8_MC_TABSIZE;
4849 else
4850 size = WM_MC_TABSIZE;
4851 /* Clear out the multicast table. */
4852 for (i = 0; i < size; i++)
4853 CSR_WRITE(sc, mta_reg + (i << 2), 0);
4854
4855 ETHER_FIRST_MULTI(step, ec, enm);
4856 while (enm != NULL) {
4857 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4858 /*
4859 * We must listen to a range of multicast addresses.
4860 * For now, just accept all multicasts, rather than
4861 * trying to set only those filter bits needed to match
4862 * the range. (At this time, the only use of address
4863 * ranges is for IP multicast routing, for which the
4864 * range is big enough to require all bits set.)
4865 */
4866 goto allmulti;
4867 }
4868
4869 hash = wm_mchash(sc, enm->enm_addrlo);
4870
4871 reg = (hash >> 5);
4872 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4873 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4874 reg &= 0x1f;
4875 else
4876 reg &= 0x7f;
4877 bit = hash & 0x1f;
4878
4879 hash = CSR_READ(sc, mta_reg + (reg << 2));
4880 hash |= 1U << bit;
4881
4882 		/*
		 * XXX Hardware bug?  On the 82544, writing an odd-indexed
		 * MTA register can clobber the neighboring register, so
		 * preserve and rewrite the previous one.
		 */
4883 		if (sc->sc_type == WM_T_82544 && (reg & 1) == 1) {
4884 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4885 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4886 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4887 } else
4888 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4889
4890 ETHER_NEXT_MULTI(step, enm);
4891 }
4892
4893 ifp->if_flags &= ~IFF_ALLMULTI;
4894 goto setit;
4895
4896 allmulti:
4897 ifp->if_flags |= IFF_ALLMULTI;
4898 sc->sc_rctl |= RCTL_MPE;
4899
4900 setit:
4901 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4902 }
4903
4904 /*
4905 * wm_tbi_mediainit:
4906 *
4907 * Initialize media for use on 1000BASE-X devices.
4908 */
4909 static void
4910 wm_tbi_mediainit(struct wm_softc *sc)
4911 {
4912 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4913 const char *sep = "";
4914
4915 if (sc->sc_type < WM_T_82543)
4916 sc->sc_tipg = TIPG_WM_DFLT;
4917 else
4918 sc->sc_tipg = TIPG_LG_DFLT;
4919
4920 sc->sc_tbi_anegticks = 5;
4921
4922 /* Initialize our media structures */
4923 sc->sc_mii.mii_ifp = ifp;
4924
4925 sc->sc_ethercom.ec_mii = &sc->sc_mii;
4926 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
4927 wm_tbi_mediastatus);
4928
4929 /*
4930 * SWD Pins:
4931 *
4932 * 0 = Link LED (output)
4933 * 1 = Loss Of Signal (input)
4934 */
4935 sc->sc_ctrl |= CTRL_SWDPIO(0);
4936 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
4937
4938 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4939
4940 #define ADD(ss, mm, dd) \
4941 do { \
4942 aprint_normal("%s%s", sep, ss); \
4943 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
4944 sep = ", "; \
4945 } while (/*CONSTCOND*/0)
4946
4947 aprint_normal_dev(sc->sc_dev, "");
4948 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
4949 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
4950 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
4951 aprint_normal("\n");
4952
4953 #undef ADD
4954
4955 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
4956 }
4957
4958 /*
4959 * wm_tbi_mediastatus: [ifmedia interface function]
4960 *
4961 * Get the current interface media status on a 1000BASE-X device.
4962 */
4963 static void
4964 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4965 {
4966 struct wm_softc *sc = ifp->if_softc;
4967 uint32_t ctrl, status;
4968
4969 ifmr->ifm_status = IFM_AVALID;
4970 ifmr->ifm_active = IFM_ETHER;
4971
4972 status = CSR_READ(sc, WMREG_STATUS);
4973 if ((status & STATUS_LU) == 0) {
4974 ifmr->ifm_active |= IFM_NONE;
4975 return;
4976 }
4977
4978 ifmr->ifm_status |= IFM_ACTIVE;
4979 ifmr->ifm_active |= IFM_1000_SX;
4980 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4981 ifmr->ifm_active |= IFM_FDX;
4982 ctrl = CSR_READ(sc, WMREG_CTRL);
4983 if (ctrl & CTRL_RFCE)
4984 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4985 if (ctrl & CTRL_TFCE)
4986 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4987 }
4988
4989 /*
4990 * wm_tbi_mediachange: [ifmedia interface function]
4991 *
4992 * Set hardware to newly-selected media on a 1000BASE-X device.
4993 */
4994 static int
4995 wm_tbi_mediachange(struct ifnet *ifp)
4996 {
4997 struct wm_softc *sc = ifp->if_softc;
4998 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4999 uint32_t status;
5000 int i;
5001
5002 sc->sc_txcw = 0;
5003 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5004 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5005 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5006 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5007 sc->sc_txcw |= TXCW_ANE;
5008 } else {
5009 /*
5010 * If autonegotiation is turned off, force link up and turn on
5011 * full duplex
5012 */
5013 sc->sc_txcw &= ~TXCW_ANE;
5014 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5015 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5016 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5017 delay(1000);
5018 }
5019
5020 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
5021 device_xname(sc->sc_dev),sc->sc_txcw));
5022 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5023 delay(10000);
5024
5025 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
5026 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
5027
5028 /*
5029 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be 1 if the
5030 	 * optics detect a signal, and 0 if they don't.
5031 */
5032 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
5033 /* Have signal; wait for the link to come up. */
5034
5035 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5036 /*
5037 * Reset the link, and let autonegotiation do its thing
5038 */
5039 sc->sc_ctrl |= CTRL_LRST;
5040 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5041 delay(1000);
5042 sc->sc_ctrl &= ~CTRL_LRST;
5043 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5044 delay(1000);
5045 }
5046
5047 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
5048 delay(10000);
5049 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
5050 break;
5051 }
5052
5053 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
5054 device_xname(sc->sc_dev),i));
5055
5056 status = CSR_READ(sc, WMREG_STATUS);
5057 DPRINTF(WM_DEBUG_LINK,
5058 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
5059 device_xname(sc->sc_dev),status, STATUS_LU));
5060 if (status & STATUS_LU) {
5061 /* Link is up. */
5062 DPRINTF(WM_DEBUG_LINK,
5063 ("%s: LINK: set media -> link up %s\n",
5064 device_xname(sc->sc_dev),
5065 (status & STATUS_FD) ? "FDX" : "HDX"));
5066
5067 /*
5068 * NOTE: CTRL will update TFCE and RFCE automatically,
5069 * so we should update sc->sc_ctrl
5070 */
5071 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5072 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5073 sc->sc_fcrtl &= ~FCRTL_XONE;
5074 if (status & STATUS_FD)
5075 sc->sc_tctl |=
5076 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5077 else
5078 sc->sc_tctl |=
5079 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5080 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
5081 sc->sc_fcrtl |= FCRTL_XONE;
5082 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5083 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5084 WMREG_OLD_FCRTL : WMREG_FCRTL,
5085 sc->sc_fcrtl);
5086 sc->sc_tbi_linkup = 1;
5087 } else {
5088 if (i == WM_LINKUP_TIMEOUT)
5089 wm_check_for_link(sc);
5090 /* Link is down. */
5091 DPRINTF(WM_DEBUG_LINK,
5092 ("%s: LINK: set media -> link down\n",
5093 device_xname(sc->sc_dev)));
5094 sc->sc_tbi_linkup = 0;
5095 }
5096 } else {
5097 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
5098 device_xname(sc->sc_dev)));
5099 sc->sc_tbi_linkup = 0;
5100 }
5101
5102 wm_tbi_set_linkled(sc);
5103
5104 return 0;
5105 }
5106
5107 /*
5108 * wm_tbi_set_linkled:
5109 *
5110 * Update the link LED on 1000BASE-X devices.
5111 */
5112 static void
5113 wm_tbi_set_linkled(struct wm_softc *sc)
5114 {
5115
5116 if (sc->sc_tbi_linkup)
5117 sc->sc_ctrl |= CTRL_SWDPIN(0);
5118 else
5119 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
5120
5121 /* 82540 or newer devices are active low */
5122 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
5123
5124 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5125 }
5126
5127 /*
5128 * wm_tbi_check_link:
5129 *
5130 * Check the link on 1000BASE-X devices.
5131 */
5132 static void
5133 wm_tbi_check_link(struct wm_softc *sc)
5134 {
5135 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5136 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5137 uint32_t rxcw, ctrl, status;
5138
5139 status = CSR_READ(sc, WMREG_STATUS);
5140
5141 rxcw = CSR_READ(sc, WMREG_RXCW);
5142 ctrl = CSR_READ(sc, WMREG_CTRL);
5143
5144 /* set link status */
5145 if ((status & STATUS_LU) == 0) {
5146 DPRINTF(WM_DEBUG_LINK,
5147 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
5148 sc->sc_tbi_linkup = 0;
5149 } else if (sc->sc_tbi_linkup == 0) {
5150 DPRINTF(WM_DEBUG_LINK,
5151 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
5152 (status & STATUS_FD) ? "FDX" : "HDX"));
5153 sc->sc_tbi_linkup = 1;
5154 }
5155
5156 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
5157 && ((status & STATUS_LU) == 0)) {
5158 sc->sc_tbi_linkup = 0;
5159 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
5160 /* RXCFG storm! */
5161 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
5162 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
5163 wm_init(ifp);
5164 wm_start(ifp);
5165 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5166 /* If the timer expired, retry autonegotiation */
5167 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
5168 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
5169 sc->sc_tbi_ticks = 0;
5170 /*
5171 * Reset the link, and let autonegotiation do
5172 * its thing
5173 */
5174 sc->sc_ctrl |= CTRL_LRST;
5175 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5176 delay(1000);
5177 sc->sc_ctrl &= ~CTRL_LRST;
5178 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5179 delay(1000);
5180 CSR_WRITE(sc, WMREG_TXCW,
5181 sc->sc_txcw & ~TXCW_ANE);
5182 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5183 }
5184 }
5185 }
5186
5187 wm_tbi_set_linkled(sc);
5188 }
5189
5190 /*
5191 * wm_gmii_reset:
5192 *
5193 * Reset the PHY.
5194 */
5195 static void
5196 wm_gmii_reset(struct wm_softc *sc)
5197 {
5198 uint32_t reg;
5199 int rv;
5200
5201 /* get phy semaphore */
5202 switch (sc->sc_type) {
5203 case WM_T_82571:
5204 case WM_T_82572:
5205 case WM_T_82573:
5206 case WM_T_82574:
5207 case WM_T_82583:
5208 /* XXX should get sw semaphore, too */
5209 rv = wm_get_swsm_semaphore(sc);
5210 break;
5211 case WM_T_82575:
5212 case WM_T_82576:
5213 case WM_T_82580:
5214 case WM_T_82580ER:
5215 case WM_T_80003:
5216 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5217 break;
5218 case WM_T_ICH8:
5219 case WM_T_ICH9:
5220 case WM_T_ICH10:
5221 case WM_T_PCH:
5222 rv = wm_get_swfwhw_semaphore(sc);
5223 break;
5224 default:
5225 		/* nothing to do */
5226 rv = 0;
5227 break;
5228 }
5229 if (rv != 0) {
5230 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5231 __func__);
5232 return;
5233 }
5234
5235 switch (sc->sc_type) {
5236 case WM_T_82542_2_0:
5237 case WM_T_82542_2_1:
5238 /* null */
5239 break;
5240 case WM_T_82543:
5241 /*
5242 * With 82543, we need to force speed and duplex on the MAC
5243 * equal to what the PHY speed and duplex configuration is.
5244 * In addition, we need to perform a hardware reset on the PHY
5245 * to take it out of reset.
5246 */
5247 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5248 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5249
5250 /* The PHY reset pin is active-low. */
5251 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5252 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
5253 CTRL_EXT_SWDPIN(4));
5254 reg |= CTRL_EXT_SWDPIO(4);
5255
5256 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5257 delay(10*1000);
5258
5259 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
5260 delay(150);
5261 #if 0
5262 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
5263 #endif
5264 delay(20*1000); /* XXX extra delay to get PHY ID? */
5265 break;
5266 case WM_T_82544: /* reset 10000us */
5267 case WM_T_82540:
5268 case WM_T_82545:
5269 case WM_T_82545_3:
5270 case WM_T_82546:
5271 case WM_T_82546_3:
5272 case WM_T_82541:
5273 case WM_T_82541_2:
5274 case WM_T_82547:
5275 case WM_T_82547_2:
5276 case WM_T_82571: /* reset 100us */
5277 case WM_T_82572:
5278 case WM_T_82573:
5279 case WM_T_82574:
5280 case WM_T_82575:
5281 case WM_T_82576:
5282 case WM_T_82580:
5283 case WM_T_82580ER:
5284 case WM_T_82583:
5285 case WM_T_80003:
5286 /* generic reset */
5287 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5288 delay((sc->sc_type >= WM_T_82571) ? 100 : 10*1000);
5289 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5290 delay(150);
5291
5292 if ((sc->sc_type == WM_T_82541)
5293 || (sc->sc_type == WM_T_82541_2)
5294 || (sc->sc_type == WM_T_82547)
5295 || (sc->sc_type == WM_T_82547_2)) {
5296 /* workaround for igp are done in igp_reset() */
5297 /* XXX add code to set LED after phy reset */
5298 }
5299 break;
5300 case WM_T_ICH8:
5301 case WM_T_ICH9:
5302 case WM_T_ICH10:
5303 case WM_T_PCH:
5304 /* generic reset */
5305 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5306 delay(100);
5307 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5308 delay(150);
5309 break;
5310 default:
5311 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5312 __func__);
5313 break;
5314 }
5315
5316 /* release PHY semaphore */
5317 switch (sc->sc_type) {
5318 case WM_T_82571:
5319 case WM_T_82572:
5320 case WM_T_82573:
5321 case WM_T_82574:
5322 case WM_T_82583:
5323 		/* XXX should put sw semaphore, too */
5324 wm_put_swsm_semaphore(sc);
5325 break;
5326 case WM_T_82575:
5327 case WM_T_82576:
5328 case WM_T_82580:
5329 case WM_T_82580ER:
5330 case WM_T_80003:
5331 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5332 break;
5333 case WM_T_ICH8:
5334 case WM_T_ICH9:
5335 case WM_T_ICH10:
5336 case WM_T_PCH:
5337 wm_put_swfwhw_semaphore(sc);
5338 break;
5339 	default:
5340 		/* nothing to do */
5342 		break;
5343 }
5344
5345 /* get_cfg_done */
5346 wm_get_cfg_done(sc);
5347
5348 /* extra setup */
5349 switch (sc->sc_type) {
5350 case WM_T_82542_2_0:
5351 case WM_T_82542_2_1:
5352 case WM_T_82543:
5353 case WM_T_82544:
5354 case WM_T_82540:
5355 case WM_T_82545:
5356 case WM_T_82545_3:
5357 case WM_T_82546:
5358 case WM_T_82546_3:
5359 case WM_T_82541_2:
5360 case WM_T_82547_2:
5361 case WM_T_82571:
5362 case WM_T_82572:
5363 case WM_T_82573:
5364 case WM_T_82574:
5365 case WM_T_82575:
5366 case WM_T_82576:
5367 case WM_T_82580:
5368 case WM_T_82580ER:
5369 case WM_T_82583:
5370 case WM_T_80003:
5371 /* null */
5372 break;
5373 case WM_T_82541:
5374 case WM_T_82547:
5375 /* XXX Configure actively LED after PHY reset */
5376 break;
5377 case WM_T_ICH8:
5378 case WM_T_ICH9:
5379 case WM_T_ICH10:
5380 case WM_T_PCH:
5381 		/* Allow time for h/w to get to a quiescent state after reset */
5382 delay(10*1000);
5383
5384 if (sc->sc_type == WM_T_PCH) {
5385 wm_hv_phy_workaround_ich8lan(sc);
5386
5387 /*
5388 * dummy read to clear the phy wakeup bit after lcd
5389 * reset
5390 */
5391 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
5392 }
5393
5394 /*
5395 		 * XXX Configure the LCD with the extended configuration region
5396 * in NVM
5397 */
5398
5399 /* Configure the LCD with the OEM bits in NVM */
5400 if (sc->sc_type == WM_T_PCH) {
5401 /*
5402 * Disable LPLU.
5403 * XXX It seems that 82567 has LPLU, too.
5404 */
5405 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
5406 reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
5407 reg |= HV_OEM_BITS_ANEGNOW;
5408 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
5409 }
5410 break;
5411 default:
5412 panic("%s: unknown type\n", __func__);
5413 break;
5414 }
5415 }
5416
5417 /*
5418 * wm_gmii_mediainit:
5419 *
5420 * Initialize media for use on 1000BASE-T devices.
5421 */
5422 static void
5423 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
5424 {
5425 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5426
5427 /* We have MII. */
5428 sc->sc_flags |= WM_F_HAS_MII;
5429
5430 if (sc->sc_type == WM_T_80003)
5431 sc->sc_tipg = TIPG_1000T_80003_DFLT;
5432 else
5433 sc->sc_tipg = TIPG_1000T_DFLT;
5434
5435 /*
5436 * Let the chip set speed/duplex on its own based on
5437 * signals from the PHY.
5438 * XXXbouyer - I'm not sure this is right for the 80003,
5439 * the em driver only sets CTRL_SLU here - but it seems to work.
5440 */
5441 sc->sc_ctrl |= CTRL_SLU;
5442 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5443
5444 /* Initialize our media structures and probe the GMII. */
5445 sc->sc_mii.mii_ifp = ifp;
5446
5447 switch (prodid) {
5448 case PCI_PRODUCT_INTEL_PCH_M_LM:
5449 case PCI_PRODUCT_INTEL_PCH_M_LC:
5450 /* 82577 */
5451 sc->sc_phytype = WMPHY_82577;
5452 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5453 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5454 break;
5455 case PCI_PRODUCT_INTEL_PCH_D_DM:
5456 case PCI_PRODUCT_INTEL_PCH_D_DC:
5457 /* 82578 */
5458 sc->sc_phytype = WMPHY_82578;
5459 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5460 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5461 break;
5462 case PCI_PRODUCT_INTEL_82801I_BM:
5463 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
5464 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
5465 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
5466 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
5467 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
5468 /* 82567 */
5469 sc->sc_phytype = WMPHY_BM;
5470 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5471 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5472 break;
5473 default:
5474 if ((sc->sc_flags & WM_F_SGMII) != 0) {
5475 sc->sc_mii.mii_readreg = wm_sgmii_readreg;
5476 sc->sc_mii.mii_writereg = wm_sgmii_writereg;
5477 } else if (sc->sc_type >= WM_T_80003) {
5478 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
5479 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
5480 } else if (sc->sc_type >= WM_T_82544) {
5481 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
5482 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
5483 } else {
5484 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
5485 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
5486 }
5487 break;
5488 }
5489 sc->sc_mii.mii_statchg = wm_gmii_statchg;
5490
5491 wm_gmii_reset(sc);
5492
5493 sc->sc_ethercom.ec_mii = &sc->sc_mii;
5494 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
5495 wm_gmii_mediastatus);
5496
5497 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5498 MII_OFFSET_ANY, MIIF_DOPAUSE);
5499
5500 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5501 /* if failed, retry with *_bm_* */
5502 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5503 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5504
5505 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5506 MII_OFFSET_ANY, MIIF_DOPAUSE);
5507 }
5508 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5509 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
5510 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
5511 sc->sc_phytype = WMPHY_NONE;
5512 } else {
5513 /* Check PHY type */
5514 uint32_t model;
5515 struct mii_softc *child;
5516
5517 child = LIST_FIRST(&sc->sc_mii.mii_phys);
5518 if (device_is_a(child->mii_dev, "igphy")) {
5519 struct igphy_softc *isc = (struct igphy_softc *)child;
5520
5521 model = isc->sc_mii.mii_mpd_model;
5522 if (model == MII_MODEL_yyINTEL_I82566)
5523 sc->sc_phytype = WMPHY_IGP_3;
5524 }
5525
5526 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5527 }
5528 }
5529
5530 /*
5531 * wm_gmii_mediastatus: [ifmedia interface function]
5532 *
5533 * Get the current interface media status on a 1000BASE-T device.
5534 */
5535 static void
5536 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5537 {
5538 struct wm_softc *sc = ifp->if_softc;
5539
5540 ether_mediastatus(ifp, ifmr);
5541 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
5542 | sc->sc_flowflags;
5543 }
5544
5545 /*
5546 * wm_gmii_mediachange: [ifmedia interface function]
5547 *
5548 * Set hardware to newly-selected media on a 1000BASE-T device.
5549 */
5550 static int
5551 wm_gmii_mediachange(struct ifnet *ifp)
5552 {
5553 struct wm_softc *sc = ifp->if_softc;
5554 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5555 int rc;
5556
5557 if ((ifp->if_flags & IFF_UP) == 0)
5558 return 0;
5559
5560 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5561 sc->sc_ctrl |= CTRL_SLU;
5562 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
5563 || (sc->sc_type > WM_T_82543)) {
5564 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
5565 } else {
5566 sc->sc_ctrl &= ~CTRL_ASDE;
5567 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5568 if (ife->ifm_media & IFM_FDX)
5569 sc->sc_ctrl |= CTRL_FD;
5570 switch (IFM_SUBTYPE(ife->ifm_media)) {
5571 case IFM_10_T:
5572 sc->sc_ctrl |= CTRL_SPEED_10;
5573 break;
5574 case IFM_100_TX:
5575 sc->sc_ctrl |= CTRL_SPEED_100;
5576 break;
5577 case IFM_1000_T:
5578 sc->sc_ctrl |= CTRL_SPEED_1000;
5579 break;
5580 default:
5581 panic("wm_gmii_mediachange: bad media 0x%x",
5582 ife->ifm_media);
5583 }
5584 }
5585 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5586 if (sc->sc_type <= WM_T_82543)
5587 wm_gmii_reset(sc);
5588
5589 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
5590 return 0;
5591 return rc;
5592 }
5593
5594 #define MDI_IO CTRL_SWDPIN(2)
5595 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
5596 #define MDI_CLK CTRL_SWDPIN(3)
5597
5598 static void
5599 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
5600 {
5601 uint32_t i, v;
5602
5603 v = CSR_READ(sc, WMREG_CTRL);
5604 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5605 v |= MDI_DIR | CTRL_SWDPIO(3);
5606
5607 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
5608 if (data & i)
5609 v |= MDI_IO;
5610 else
5611 v &= ~MDI_IO;
5612 CSR_WRITE(sc, WMREG_CTRL, v);
5613 delay(10);
5614 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5615 delay(10);
5616 CSR_WRITE(sc, WMREG_CTRL, v);
5617 delay(10);
5618 }
5619 }
5620
5621 static uint32_t
5622 i82543_mii_recvbits(struct wm_softc *sc)
5623 {
5624 uint32_t v, i, data = 0;
5625
5626 v = CSR_READ(sc, WMREG_CTRL);
5627 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5628 v |= CTRL_SWDPIO(3);
5629
5630 CSR_WRITE(sc, WMREG_CTRL, v);
5631 delay(10);
5632 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5633 delay(10);
5634 CSR_WRITE(sc, WMREG_CTRL, v);
5635 delay(10);
5636
5637 for (i = 0; i < 16; i++) {
5638 data <<= 1;
5639 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5640 delay(10);
5641 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
5642 data |= 1;
5643 CSR_WRITE(sc, WMREG_CTRL, v);
5644 delay(10);
5645 }
5646
5647 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5648 delay(10);
5649 CSR_WRITE(sc, WMREG_CTRL, v);
5650 delay(10);
5651
5652 return data;
5653 }
5654
5655 #undef MDI_IO
5656 #undef MDI_DIR
5657 #undef MDI_CLK
5658
5659 /*
5660 * wm_gmii_i82543_readreg: [mii interface function]
5661 *
5662 * Read a PHY register on the GMII (i82543 version).
5663 */
5664 static int
5665 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
5666 {
5667 struct wm_softc *sc = device_private(self);
5668 int rv;
5669
5670 i82543_mii_sendbits(sc, 0xffffffffU, 32);
5671 i82543_mii_sendbits(sc, reg | (phy << 5) |
5672 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
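	/*
	 * The 14 bits just shifted out encode, MSB first: the 2-bit
	 * start pattern, the 2-bit read opcode, the 5-bit PHY address
	 * and the 5-bit register address; the PHY then drives the 16
	 * data bits collected below.
	 */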
5673 rv = i82543_mii_recvbits(sc) & 0xffff;
5674
5675 DPRINTF(WM_DEBUG_GMII,
5676 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
5677 device_xname(sc->sc_dev), phy, reg, rv));
5678
5679 return rv;
5680 }
5681
5682 /*
5683 * wm_gmii_i82543_writereg: [mii interface function]
5684 *
5685 * Write a PHY register on the GMII (i82543 version).
5686 */
5687 static void
5688 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
5689 {
5690 struct wm_softc *sc = device_private(self);
5691
5692 i82543_mii_sendbits(sc, 0xffffffffU, 32);
5693 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
5694 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
5695 (MII_COMMAND_START << 30), 32);
5696 }
5697
5698 /*
5699 * wm_gmii_i82544_readreg: [mii interface function]
5700 *
5701 * Read a PHY register on the GMII.
5702 */
5703 static int
5704 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
5705 {
5706 struct wm_softc *sc = device_private(self);
5707 uint32_t mdic = 0;
5708 int i, rv;
5709
5710 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
5711 MDIC_REGADD(reg));
5712
5713 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
5714 mdic = CSR_READ(sc, WMREG_MDIC);
5715 if (mdic & MDIC_READY)
5716 break;
5717 delay(50);
5718 }
5719
5720 if ((mdic & MDIC_READY) == 0) {
5721 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
5722 device_xname(sc->sc_dev), phy, reg);
5723 rv = 0;
5724 } else if (mdic & MDIC_E) {
5725 #if 0 /* This is normal if no PHY is present. */
5726 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
5727 device_xname(sc->sc_dev), phy, reg);
5728 #endif
5729 rv = 0;
5730 } else {
5731 rv = MDIC_DATA(mdic);
5732 if (rv == 0xffff)
5733 rv = 0;
5734 }
5735
5736 return rv;
5737 }
5738
5739 /*
5740 * wm_gmii_i82544_writereg: [mii interface function]
5741 *
5742 * Write a PHY register on the GMII.
5743 */
5744 static void
5745 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
5746 {
5747 struct wm_softc *sc = device_private(self);
5748 uint32_t mdic = 0;
5749 int i;
5750
5751 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
5752 MDIC_REGADD(reg) | MDIC_DATA(val));
5753
5754 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
5755 mdic = CSR_READ(sc, WMREG_MDIC);
5756 if (mdic & MDIC_READY)
5757 break;
5758 delay(50);
5759 }
5760
5761 if ((mdic & MDIC_READY) == 0)
5762 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
5763 device_xname(sc->sc_dev), phy, reg);
5764 else if (mdic & MDIC_E)
5765 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
5766 device_xname(sc->sc_dev), phy, reg);
5767 }
5768
5769 /*
5770 * wm_gmii_i80003_readreg: [mii interface function]
5771 *
 *	Read a PHY register on the Kumeran bus.
 *	This could be handled by the PHY layer if we didn't have to lock the
 *	resource ...
5775 */
5776 static int
5777 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
5778 {
5779 struct wm_softc *sc = device_private(self);
5780 int sem;
5781 int rv;
5782
	if (phy != 1) /* only one PHY on the Kumeran bus */
5784 return 0;
5785
5786 sem = swfwphysem[sc->sc_funcid];
5787 if (wm_get_swfw_semaphore(sc, sem)) {
5788 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5789 __func__);
5790 return 0;
5791 }
5792
5793 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5794 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5795 reg >> GG82563_PAGE_SHIFT);
5796 } else {
5797 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5798 reg >> GG82563_PAGE_SHIFT);
5799 }
	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
5801 delay(200);
5802 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5803 delay(200);
5804
5805 wm_put_swfw_semaphore(sc, sem);
5806 return rv;
5807 }
5808
5809 /*
5810 * wm_gmii_i80003_writereg: [mii interface function]
5811 *
 *	Write a PHY register on the Kumeran bus.
 *	This could be handled by the PHY layer if we didn't have to lock the
 *	resource ...
5815 */
5816 static void
5817 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
5818 {
5819 struct wm_softc *sc = device_private(self);
5820 int sem;
5821
	if (phy != 1) /* only one PHY on the Kumeran bus */
5823 return;
5824
5825 sem = swfwphysem[sc->sc_funcid];
5826 if (wm_get_swfw_semaphore(sc, sem)) {
5827 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5828 __func__);
5829 return;
5830 }
5831
5832 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5833 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5834 reg >> GG82563_PAGE_SHIFT);
5835 } else {
5836 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5837 reg >> GG82563_PAGE_SHIFT);
5838 }
	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
5840 delay(200);
5841 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5842 delay(200);
5843
5844 wm_put_swfw_semaphore(sc, sem);
5845 }
5846
5847 /*
5848 * wm_gmii_bm_readreg: [mii interface function]
5849 *
 *	Read a PHY register on the BM PHY.
 *	This could be handled by the PHY layer if we didn't have to lock the
 *	resource ...
5853 */
5854 static int
5855 wm_gmii_bm_readreg(device_t self, int phy, int reg)
5856 {
5857 struct wm_softc *sc = device_private(self);
5858 int sem;
5859 int rv;
5860
5861 sem = swfwphysem[sc->sc_funcid];
5862 if (wm_get_swfw_semaphore(sc, sem)) {
5863 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5864 __func__);
5865 return 0;
5866 }
5867
	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
		if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
		else
			wm_gmii_i82544_writereg(self, phy,
			    GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);
	}
5877
5878 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5879 wm_put_swfw_semaphore(sc, sem);
5880 return rv;
5881 }
5882
5883 /*
5884 * wm_gmii_bm_writereg: [mii interface function]
5885 *
 *	Write a PHY register on the BM PHY.
 *	This could be handled by the PHY layer if we didn't have to lock the
 *	resource ...
5889 */
5890 static void
5891 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
5892 {
5893 struct wm_softc *sc = device_private(self);
5894 int sem;
5895
5896 sem = swfwphysem[sc->sc_funcid];
5897 if (wm_get_swfw_semaphore(sc, sem)) {
5898 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5899 __func__);
5900 return;
5901 }
5902
	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
		if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
		else
			wm_gmii_i82544_writereg(self, phy,
			    GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);
	}
5912
5913 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5914 wm_put_swfw_semaphore(sc, sem);
5915 }
5916
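/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register on page 800 (BM_WUC_PAGE).
 *	The access is indirect: switch to page 769 and set the wakeup-control
 *	enable bit, switch to page 800 and transfer the data through the
 *	address/data opcode registers, then restore the original
 *	wakeup-control state on page 769.
 */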
5917 static void
5918 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
5919 {
5920 struct wm_softc *sc = device_private(self);
5921 uint16_t regnum = BM_PHY_REG_NUM(offset);
5922 uint16_t wuce;
5923
5924 /* XXX Gig must be disabled for MDIO accesses to page 800 */
5925 if (sc->sc_type == WM_T_PCH) {
		/* XXX the e1000 driver does nothing here... why? */
5927 }
5928
5929 /* Set page 769 */
5930 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5931 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
5932
5933 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
5934
5935 wuce &= ~BM_WUC_HOST_WU_BIT;
5936 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
5937 wuce | BM_WUC_ENABLE_BIT);
5938
5939 /* Select page 800 */
5940 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5941 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
5942
5943 /* Write page 800 */
5944 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
5945
5946 if (rd)
5947 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
5948 else
5949 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
5950
5951 /* Set page 769 */
5952 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5953 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
5954
5955 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
5956 }
5957
5958 /*
5959 * wm_gmii_hv_readreg: [mii interface function]
5960 *
 *	Read a PHY register on the HV (PCH) PHY.
 *	This could be handled by the PHY layer if we didn't have to lock the
 *	resource ...
5964 */
5965 static int
5966 wm_gmii_hv_readreg(device_t self, int phy, int reg)
5967 {
5968 struct wm_softc *sc = device_private(self);
5969 uint16_t page = BM_PHY_REG_PAGE(reg);
5970 uint16_t regnum = BM_PHY_REG_NUM(reg);
5971 uint16_t val;
5972 int rv;
5973
5974 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
5975 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5976 __func__);
5977 return 0;
5978 }
5979
5980 /* XXX Workaround failure in MDIO access while cable is disconnected */
5981 if (sc->sc_phytype == WMPHY_82577) {
5982 /* XXX must write */
5983 }
5984
	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return val;
	}

	/*
	 * Pages lower than 768 work differently than the rest, so they
	 * would need their own function; that is not handled here yet.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_readreg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return 0;
	}
5999
6000 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6001 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6002 page << BME1000_PAGE_SHIFT);
6003 }
6004
6005 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6006 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6007 return rv;
6008 }
6009
6010 /*
6011 * wm_gmii_hv_writereg: [mii interface function]
6012 *
 *	Write a PHY register on the HV (PCH) PHY.
 *	This could be handled by the PHY layer if we didn't have to lock the
 *	resource ...
6016 */
6017 static void
6018 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6019 {
6020 struct wm_softc *sc = device_private(self);
6021 uint16_t page = BM_PHY_REG_PAGE(reg);
6022 uint16_t regnum = BM_PHY_REG_NUM(reg);
6023
6024 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6025 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6026 __func__);
6027 return;
6028 }
6029
6030 /* XXX Workaround failure in MDIO access while cable is disconnected */
6031
	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		uint16_t tmp;

		tmp = val;
		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return;
	}

	/*
	 * Pages lower than 768 work differently than the rest, so they
	 * would need their own function; that is not handled here yet.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_writereg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return;
	}
6049
6050 /*
6051 * XXX Workaround MDIO accesses being disabled after entering IEEE
6052 * Power Down (whenever bit 11 of the PHY control register is set)
6053 */
6054
6055 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6056 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6057 page << BME1000_PAGE_SHIFT);
6058 }
6059
6060 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6061 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6062 }
6063
/*
 * wm_sgmii_readreg:	[mii interface function]
 *
 *	Read a PHY register on the SGMII, using the I2CCMD register.
 *	This could be handled by the PHY layer if we didn't have to lock the
 *	resource ...
 */
6071 static int
6072 wm_sgmii_readreg(device_t self, int phy, int reg)
6073 {
6074 struct wm_softc *sc = device_private(self);
6075 uint32_t i2ccmd;
6076 int i, rv;
6077
6078 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6079 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6080 __func__);
6081 return 0;
6082 }
6083
6084 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6085 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6086 | I2CCMD_OPCODE_READ;
6087 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6088
6089 /* Poll the ready bit */
6090 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6091 delay(50);
6092 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6093 if (i2ccmd & I2CCMD_READY)
6094 break;
6095 }
6096 if ((i2ccmd & I2CCMD_READY) == 0)
6097 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
6098 if ((i2ccmd & I2CCMD_ERROR) != 0)
6099 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6100
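	/* The 16-bit data comes back byte-swapped in I2CCMD; swap it back. */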
6101 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
6102
6103 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6104 return rv;
6105 }
6106
/*
 * wm_sgmii_writereg:	[mii interface function]
 *
 *	Write a PHY register on the SGMII, using the I2CCMD register.
 *	This could be handled by the PHY layer if we didn't have to lock the
 *	resource ...
 */
6114 static void
6115 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
6116 {
6117 struct wm_softc *sc = device_private(self);
6118 uint32_t i2ccmd;
6119 int i;
6120
6121 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6122 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6123 __func__);
6124 return;
6125 }
6126
6127 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6128 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6129 | I2CCMD_OPCODE_WRITE;
6130 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6131
6132 /* Poll the ready bit */
6133 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6134 delay(50);
6135 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6136 if (i2ccmd & I2CCMD_READY)
6137 break;
6138 }
6139 if ((i2ccmd & I2CCMD_READY) == 0)
6140 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
6141 if ((i2ccmd & I2CCMD_ERROR) != 0)
6142 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6143
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6145 }
6146
6147 /*
6148 * wm_gmii_statchg: [mii interface function]
6149 *
6150 * Callback from MII layer when media changes.
6151 */
6152 static void
6153 wm_gmii_statchg(device_t self)
6154 {
6155 struct wm_softc *sc = device_private(self);
6156 struct mii_data *mii = &sc->sc_mii;
6157
6158 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6159 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6160 sc->sc_fcrtl &= ~FCRTL_XONE;
6161
6162 /*
6163 * Get flow control negotiation result.
6164 */
6165 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
6166 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
6167 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
6168 mii->mii_media_active &= ~IFM_ETH_FMASK;
6169 }
6170
6171 if (sc->sc_flowflags & IFM_FLOW) {
6172 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
6173 sc->sc_ctrl |= CTRL_TFCE;
6174 sc->sc_fcrtl |= FCRTL_XONE;
6175 }
6176 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
6177 sc->sc_ctrl |= CTRL_RFCE;
6178 }
6179
6180 if (sc->sc_mii.mii_media_active & IFM_FDX) {
6181 DPRINTF(WM_DEBUG_LINK,
6182 ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
6183 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6184 } else {
6185 DPRINTF(WM_DEBUG_LINK,
6186 ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
6187 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6188 }
6189
6190 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6191 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6192 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
6193 : WMREG_FCRTL, sc->sc_fcrtl);
6194 if (sc->sc_type == WM_T_80003) {
6195 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
6196 case IFM_1000_T:
6197 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6198 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
6199 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6200 break;
6201 default:
6202 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6203 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
6204 sc->sc_tipg = TIPG_10_100_80003_DFLT;
6205 break;
6206 }
6207 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6208 }
6209 }
6210
6211 /*
6212 * wm_kmrn_readreg:
6213 *
 *	Read a Kumeran register
6215 */
6216 static int
6217 wm_kmrn_readreg(struct wm_softc *sc, int reg)
6218 {
6219 int rv;
6220
	if (sc->sc_flags & WM_F_SWFW_SYNC) {
6222 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6223 aprint_error_dev(sc->sc_dev,
6224 "%s: failed to get semaphore\n", __func__);
6225 return 0;
6226 }
	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
6228 if (wm_get_swfwhw_semaphore(sc)) {
6229 aprint_error_dev(sc->sc_dev,
6230 "%s: failed to get semaphore\n", __func__);
6231 return 0;
6232 }
6233 }
6234
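	/*
	 * Writing the register offset with the Read ENable bit set triggers
	 * the read; after a short delay the result is in the low 16 bits of
	 * KUMCTRLSTA.
	 */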
6235 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6236 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6237 KUMCTRLSTA_REN);
6238 delay(2);
6239
6240 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
6241
	if (sc->sc_flags & WM_F_SWFW_SYNC)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
6245 wm_put_swfwhw_semaphore(sc);
6246
6247 return rv;
6248 }
6249
6250 /*
6251 * wm_kmrn_writereg:
6252 *
 *	Write a Kumeran register
6254 */
6255 static void
6256 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
6257 {
6258
	if (sc->sc_flags & WM_F_SWFW_SYNC) {
6260 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6261 aprint_error_dev(sc->sc_dev,
6262 "%s: failed to get semaphore\n", __func__);
6263 return;
6264 }
	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
6266 if (wm_get_swfwhw_semaphore(sc)) {
6267 aprint_error_dev(sc->sc_dev,
6268 "%s: failed to get semaphore\n", __func__);
6269 return;
6270 }
6271 }
6272
6273 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6274 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6275 (val & KUMCTRLSTA_MASK));
6276
	if (sc->sc_flags & WM_F_SWFW_SYNC)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
6280 wm_put_swfwhw_semaphore(sc);
6281 }
6282
6283 static int
6284 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
6285 {
6286 uint32_t eecd = 0;
6287
6288 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
6289 || sc->sc_type == WM_T_82583) {
6290 eecd = CSR_READ(sc, WMREG_EECD);
6291
6292 /* Isolate bits 15 & 16 */
6293 eecd = ((eecd >> 15) & 0x03);
6294
6295 /* If both bits are set, device is Flash type */
6296 if (eecd == 0x03)
6297 return 0;
6298 }
6299 return 1;
6300 }
6301
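/*
 * wm_get_swsm_semaphore:
 *
 *	Acquire the software/firmware semaphore in SWSM.  SWESMBI is a
 *	set-and-test bit: write it and read it back; if it reads back set,
 *	software owns the semaphore, otherwise firmware holds it and we
 *	retry after a short delay.
 */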
6302 static int
6303 wm_get_swsm_semaphore(struct wm_softc *sc)
6304 {
6305 int32_t timeout;
6306 uint32_t swsm;
6307
6308 /* Get the FW semaphore. */
6309 timeout = 1000 + 1; /* XXX */
6310 while (timeout) {
6311 swsm = CSR_READ(sc, WMREG_SWSM);
6312 swsm |= SWSM_SWESMBI;
6313 CSR_WRITE(sc, WMREG_SWSM, swsm);
6314 /* if we managed to set the bit we got the semaphore. */
6315 swsm = CSR_READ(sc, WMREG_SWSM);
6316 if (swsm & SWSM_SWESMBI)
6317 break;
6318
6319 delay(50);
6320 timeout--;
6321 }
6322
6323 if (timeout == 0) {
		aprint_error_dev(sc->sc_dev, "could not acquire SWSM semaphore\n");
6325 /* Release semaphores */
6326 wm_put_swsm_semaphore(sc);
6327 return 1;
6328 }
6329 return 0;
6330 }
6331
6332 static void
6333 wm_put_swsm_semaphore(struct wm_softc *sc)
6334 {
6335 uint32_t swsm;
6336
6337 swsm = CSR_READ(sc, WMREG_SWSM);
6338 swsm &= ~(SWSM_SWESMBI);
6339 CSR_WRITE(sc, WMREG_SWSM, swsm);
6340 }
6341
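/*
 * wm_get_swfw_semaphore:
 *
 *	Claim a shared resource through SW_FW_SYNC, which holds one software
 *	and one firmware ownership bit per resource.  Access to SW_FW_SYNC
 *	itself is serialized with the SWSM semaphore where the hardware
 *	provides one; a resource is free when both of its bits are clear,
 *	and we claim it by setting the software bit.
 */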
6342 static int
6343 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6344 {
6345 uint32_t swfw_sync;
6346 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
6347 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
6349
6350 for (timeout = 0; timeout < 200; timeout++) {
6351 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6352 if (wm_get_swsm_semaphore(sc)) {
6353 aprint_error_dev(sc->sc_dev,
6354 "%s: failed to get semaphore\n",
6355 __func__);
6356 return 1;
6357 }
6358 }
6359 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6360 if ((swfw_sync & (swmask | fwmask)) == 0) {
6361 swfw_sync |= swmask;
6362 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6363 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6364 wm_put_swsm_semaphore(sc);
6365 return 0;
6366 }
6367 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6368 wm_put_swsm_semaphore(sc);
6369 delay(5000);
6370 }
6371 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
6372 device_xname(sc->sc_dev), mask, swfw_sync);
6373 return 1;
6374 }
6375
6376 static void
6377 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6378 {
6379 uint32_t swfw_sync;
6380
6381 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6382 while (wm_get_swsm_semaphore(sc) != 0)
6383 continue;
6384 }
6385 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6386 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
6387 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6388 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6389 wm_put_swsm_semaphore(sc);
6390 }
6391
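/*
 * wm_get_swfwhw_semaphore:
 *
 *	On ICH-family parts the software flag in EXTCNFCTR arbitrates
 *	between the driver and the firmware: set the flag, read the register
 *	back, and ownership is granted only if the flag sticks.
 */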
6392 static int
6393 wm_get_swfwhw_semaphore(struct wm_softc *sc)
6394 {
6395 uint32_t ext_ctrl;
	int timeout;
6397
6398 for (timeout = 0; timeout < 200; timeout++) {
6399 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6400 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
6401 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6402
6403 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6404 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
6405 return 0;
6406 delay(5000);
6407 }
6408 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
6409 device_xname(sc->sc_dev), ext_ctrl);
6410 return 1;
6411 }
6412
6413 static void
6414 wm_put_swfwhw_semaphore(struct wm_softc *sc)
6415 {
6416 uint32_t ext_ctrl;
6417 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6418 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
6419 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6420 }
6421
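/*
 * wm_valid_nvm_bank_detect_ich8lan:
 *
 *	The ICH8 flash keeps two copies (banks) of the NVM.  On older parts
 *	the valid bank is reported by the EECD_SEC1VAL bit; on ICH10 and PCH
 *	we read the high byte of each bank's signature word and look for the
 *	valid pattern (bits 7:6 == 10b).
 */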
6422 static int
6423 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
6424 {
6425 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
6426 uint8_t bank_high_byte;
6427 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
6428
6429 if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
6430 /* Value of bit 22 corresponds to the flash bank we're on. */
6431 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
6432 } else {
6433 wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
6434 if ((bank_high_byte & 0xc0) == 0x80)
6435 *bank = 0;
6436 else {
6437 wm_read_ich8_byte(sc, act_offset + bank1_offset,
6438 &bank_high_byte);
6439 if ((bank_high_byte & 0xc0) == 0x80)
6440 *bank = 1;
6441 else {
6442 aprint_error_dev(sc->sc_dev,
6443 "EEPROM not present\n");
6444 return -1;
6445 }
6446 }
6447 }
6448
6449 return 0;
6450 }
6451
6452 /******************************************************************************
6453 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
6454 * register.
6455 *
6456 * sc - Struct containing variables accessed by shared code
6457 * offset - offset of word in the EEPROM to read
6458 * data - word read from the EEPROM
6459 * words - number of words to read
6460 *****************************************************************************/
6461 static int
6462 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
6463 {
6464 int32_t error = 0;
6465 uint32_t flash_bank = 0;
6466 uint32_t act_offset = 0;
6467 uint32_t bank_offset = 0;
6468 uint16_t word = 0;
6469 uint16_t i = 0;
6470
6471 /* We need to know which is the valid flash bank. In the event
6472 * that we didn't allocate eeprom_shadow_ram, we may not be
6473 * managing flash_bank. So it cannot be trusted and needs
6474 * to be updated with each read.
6475 */
6476 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
6477 if (error) {
6478 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
6479 __func__);
6480 return error;
6481 }
6482
6483 /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
6484 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
6485
6486 error = wm_get_swfwhw_semaphore(sc);
6487 if (error) {
6488 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6489 __func__);
6490 return error;
6491 }
6492
6493 for (i = 0; i < words; i++) {
6494 /* The NVM part needs a byte offset, hence * 2 */
6495 act_offset = bank_offset + ((offset + i) * 2);
6496 error = wm_read_ich8_word(sc, act_offset, &word);
6497 if (error) {
6498 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
6499 __func__);
6500 break;
6501 }
6502 data[i] = word;
6503 }
6504
6505 wm_put_swfwhw_semaphore(sc);
6506 return error;
6507 }
6508
6509 /******************************************************************************
6510 * This function does initial flash setup so that a new read/write/erase cycle
6511 * can be started.
6512 *
6513 * sc - The pointer to the hw structure
6514 ****************************************************************************/
6515 static int32_t
6516 wm_ich8_cycle_init(struct wm_softc *sc)
6517 {
6518 uint16_t hsfsts;
6519 int32_t error = 1;
6520 int32_t i = 0;
6521
6522 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6523
	/* Maybe check the Flash Descriptor Valid bit in HW status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0)
		return error;
6528
	/* Clear FCERR and DAEL in HW status by writing 1s */
	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
6532
6533 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6534
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be set to 1 by a hardware reset so that it can be used to
	 * tell whether a cycle is in progress or has completed.  We should
	 * also have some software semaphore mechanism to guard FDONE or the
	 * cycle-in-progress bit, so that accesses from two threads are
	 * serialized and two threads don't start a cycle at the same time.
	 */
6545
6546 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6547 /*
6548 * There is no cycle running at present, so we can start a
6549 * cycle
6550 */
6551
6552 /* Begin by setting Flash Cycle Done. */
6553 hsfsts |= HSFSTS_DONE;
6554 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6555 error = 0;
6556 } else {
		/*
		 * Otherwise poll for a while so the current cycle has a
		 * chance to end before giving up.
		 */
6561 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
6562 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6563 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6564 error = 0;
6565 break;
6566 }
6567 delay(1);
6568 }
6569 if (error == 0) {
			/*
			 * The previous cycle ended before we gave up;
			 * now set the Flash Cycle Done bit.
			 */
6574 hsfsts |= HSFSTS_DONE;
6575 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6576 }
6577 }
6578 return error;
6579 }
6580
6581 /******************************************************************************
6582 * This function starts a flash cycle and waits for its completion
6583 *
6584 * sc - The pointer to the hw structure
6585 ****************************************************************************/
6586 static int32_t
6587 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
6588 {
6589 uint16_t hsflctl;
6590 uint16_t hsfsts;
6591 int32_t error = 1;
6592 uint32_t i = 0;
6593
6594 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
6595 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6596 hsflctl |= HSFCTL_GO;
6597 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6598
6599 /* wait till FDONE bit is set to 1 */
6600 do {
6601 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6602 if (hsfsts & HSFSTS_DONE)
6603 break;
6604 delay(1);
6605 i++;
6606 } while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
6608 error = 0;
6609
6610 return error;
6611 }
6612
6613 /******************************************************************************
6614 * Reads a byte or word from the NVM using the ICH8 flash access registers.
6615 *
6616 * sc - The pointer to the hw structure
6617 * index - The index of the byte or word to read.
6618 * size - Size of data to read, 1=byte 2=word
6619 * data - Pointer to the word to store the value read.
6620 *****************************************************************************/
6621 static int32_t
6622 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
6623 uint32_t size, uint16_t* data)
6624 {
6625 uint16_t hsfsts;
6626 uint16_t hsflctl;
6627 uint32_t flash_linear_address;
6628 uint32_t flash_data = 0;
6629 int32_t error = 1;
6630 int32_t count = 0;
6631
	if (size < 1 || size > 2 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
6634 return error;
6635
6636 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
6637 sc->sc_ich8_flash_base;
6638
6639 do {
6640 delay(1);
6641 /* Steps */
6642 error = wm_ich8_cycle_init(sc);
6643 if (error)
6644 break;
6645
6646 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6647 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
6648 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
6649 & HSFCTL_BCOUNT_MASK;
6650 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
6651 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6652
6653 /*
6654 * Write the last 24 bits of index into Flash Linear address
6655 * field in Flash Address
6656 */
6657 /* TODO: TBD maybe check the index against the size of flash */
6658
6659 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
6660
6661 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
6662
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * a few more times; otherwise read in (shift in) the
		 * Flash Data0 register, least significant byte first.
		 */
6669 if (error == 0) {
6670 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
6671 if (size == 1)
6672 *data = (uint8_t)(flash_data & 0x000000FF);
6673 else if (size == 2)
6674 *data = (uint16_t)(flash_data & 0x0000FFFF);
6675 break;
6676 } else {
6677 /*
6678 * If we've gotten here, then things are probably
6679 * completely hosed, but if the error condition is
6680 * detected, it won't hurt to give it another try...
6681 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
6682 */
6683 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6684 if (hsfsts & HSFSTS_ERR) {
6685 /* Repeat for some time before giving up. */
6686 continue;
6687 } else if ((hsfsts & HSFSTS_DONE) == 0)
6688 break;
6689 }
6690 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
6691
6692 return error;
6693 }
6694
6695 /******************************************************************************
6696 * Reads a single byte from the NVM using the ICH8 flash access registers.
6697 *
6698 * sc - pointer to wm_hw structure
6699 * index - The index of the byte to read.
6700 * data - Pointer to a byte to store the value read.
6701 *****************************************************************************/
6702 static int32_t
6703 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
6704 {
6705 int32_t status;
6706 uint16_t word = 0;
6707
6708 status = wm_read_ich8_data(sc, index, 1, &word);
6709 if (status == 0)
6710 *data = (uint8_t)word;
6711
6712 return status;
6713 }
6714
6715 /******************************************************************************
6716 * Reads a word from the NVM using the ICH8 flash access registers.
6717 *
6718 * sc - pointer to wm_hw structure
6719 * index - The starting byte index of the word to read.
6720 * data - Pointer to a word to store the value read.
6721 *****************************************************************************/
6722 static int32_t
6723 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
6724 {
6725 int32_t status;
6726
6727 status = wm_read_ich8_data(sc, index, 2, data);
6728 return status;
6729 }
6730
6731 static int
6732 wm_check_mng_mode(struct wm_softc *sc)
6733 {
6734 int rv;
6735
6736 switch (sc->sc_type) {
6737 case WM_T_ICH8:
6738 case WM_T_ICH9:
6739 case WM_T_ICH10:
6740 case WM_T_PCH:
6741 rv = wm_check_mng_mode_ich8lan(sc);
6742 break;
6743 case WM_T_82574:
6744 case WM_T_82583:
6745 rv = wm_check_mng_mode_82574(sc);
6746 break;
6747 case WM_T_82571:
6748 case WM_T_82572:
6749 case WM_T_82573:
6750 case WM_T_80003:
6751 rv = wm_check_mng_mode_generic(sc);
6752 break;
6753 default:
		/* nothing to do */
6755 rv = 0;
6756 break;
6757 }
6758
6759 return rv;
6760 }
6761
6762 static int
6763 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
6764 {
6765 uint32_t fwsm;
6766
6767 fwsm = CSR_READ(sc, WMREG_FWSM);
6768
6769 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
6770 return 1;
6771
6772 return 0;
6773 }
6774
6775 static int
6776 wm_check_mng_mode_82574(struct wm_softc *sc)
6777 {
6778 uint16_t data;
6779
6780 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
6781
6782 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
6783 return 1;
6784
6785 return 0;
6786 }
6787
6788 static int
6789 wm_check_mng_mode_generic(struct wm_softc *sc)
6790 {
6791 uint32_t fwsm;
6792
6793 fwsm = CSR_READ(sc, WMREG_FWSM);
6794
6795 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
6796 return 1;
6797
6798 return 0;
6799 }
6800
6801 static int
6802 wm_enable_mng_pass_thru(struct wm_softc *sc)
6803 {
6804 uint32_t manc, fwsm, factps;
6805
6806 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
6807 return 0;
6808
6809 manc = CSR_READ(sc, WMREG_MANC);
6810
6811 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
6812 device_xname(sc->sc_dev), manc));
6813 if (((manc & MANC_RECV_TCO_EN) == 0)
6814 || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
6815 return 0;
6816
6817 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
6818 fwsm = CSR_READ(sc, WMREG_FWSM);
6819 factps = CSR_READ(sc, WMREG_FACTPS);
6820 if (((factps & FACTPS_MNGCG) == 0)
6821 && ((fwsm & FWSM_MODE_MASK)
6822 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
6823 return 1;
6824 } else if (((manc & MANC_SMBUS_EN) != 0)
6825 && ((manc & MANC_ASF_EN) == 0))
6826 return 1;
6827
6828 return 0;
6829 }
6830
6831 static int
6832 wm_check_reset_block(struct wm_softc *sc)
6833 {
6834 uint32_t reg;
6835
6836 switch (sc->sc_type) {
6837 case WM_T_ICH8:
6838 case WM_T_ICH9:
6839 case WM_T_ICH10:
6840 case WM_T_PCH:
6841 reg = CSR_READ(sc, WMREG_FWSM);
6842 if ((reg & FWSM_RSPCIPHY) != 0)
6843 return 0;
6844 else
6845 return -1;
6846 break;
6847 case WM_T_82571:
6848 case WM_T_82572:
6849 case WM_T_82573:
6850 case WM_T_82574:
6851 case WM_T_82583:
6852 case WM_T_80003:
6853 reg = CSR_READ(sc, WMREG_MANC);
6854 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
6855 return -1;
6856 else
6857 return 0;
6858 break;
6859 default:
6860 /* no problem */
6861 break;
6862 }
6863
6864 return 0;
6865 }
6866
6867 static void
6868 wm_get_hw_control(struct wm_softc *sc)
6869 {
6870 uint32_t reg;
6871
6872 switch (sc->sc_type) {
6873 case WM_T_82573:
6874 reg = CSR_READ(sc, WMREG_SWSM);
6875 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
6876 break;
6877 case WM_T_82571:
6878 case WM_T_82572:
6879 case WM_T_82574:
6880 case WM_T_82583:
6881 case WM_T_80003:
6882 case WM_T_ICH8:
6883 case WM_T_ICH9:
6884 case WM_T_ICH10:
6885 case WM_T_PCH:
6886 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6887 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
6888 break;
6889 default:
6890 break;
6891 }
6892 }
6893
6894 static void
6895 wm_release_hw_control(struct wm_softc *sc)
6896 {
6897 uint32_t reg;
6898
6899 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
6900 return;
6901
6902 if (sc->sc_type == WM_T_82573) {
		reg = CSR_READ(sc, WMREG_SWSM);
		reg &= ~SWSM_DRV_LOAD;
		CSR_WRITE(sc, WMREG_SWSM, reg);
6906 } else {
6907 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6908 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
6909 }
6910 }
6911
6912 /* XXX Currently TBI only */
6913 static int
6914 wm_check_for_link(struct wm_softc *sc)
6915 {
6916 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6917 uint32_t rxcw;
6918 uint32_t ctrl;
6919 uint32_t status;
6920 uint32_t sig;
6921
6922 rxcw = CSR_READ(sc, WMREG_RXCW);
6923 ctrl = CSR_READ(sc, WMREG_CTRL);
6924 status = CSR_READ(sc, WMREG_STATUS);
6925
6926 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
6927
6928 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
6929 device_xname(sc->sc_dev), __func__,
6930 ((ctrl & CTRL_SWDPIN(1)) == sig),
6931 ((status & STATUS_LU) != 0),
6932 ((rxcw & RXCW_C) != 0)
6933 ));
6934
6935 /*
6936 * SWDPIN LU RXCW
6937 * 0 0 0
6938 * 0 0 1 (should not happen)
6939 * 0 1 0 (should not happen)
6940 * 0 1 1 (should not happen)
6941 * 1 0 0 Disable autonego and force linkup
6942 * 1 0 1 got /C/ but not linkup yet
6943 * 1 1 0 (linkup)
6944 * 1 1 1 If IFM_AUTO, back to autonego
6945 *
6946 */
6947 if (((ctrl & CTRL_SWDPIN(1)) == sig)
6948 && ((status & STATUS_LU) == 0)
6949 && ((rxcw & RXCW_C) == 0)) {
6950 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
6951 __func__));
6952 sc->sc_tbi_linkup = 0;
6953 /* Disable auto-negotiation in the TXCW register */
6954 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
6955
		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: the hardware may have updated TFCE and RFCE in CTRL
		 * automatically, so we should update sc->sc_ctrl.
		 */
6962 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
6963 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6964 } else if (((status & STATUS_LU) != 0)
6965 && ((rxcw & RXCW_C) != 0)
6966 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
6967 sc->sc_tbi_linkup = 1;
6968 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
6969 __func__));
6970 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6971 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
6972 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
6973 && ((rxcw & RXCW_C) != 0)) {
6974 DPRINTF(WM_DEBUG_LINK, ("/C/"));
6975 } else {
6976 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
6977 status));
6978 }
6979
6980 return 0;
6981 }
6982
6983 /* Work-around for 82566 Kumeran PCS lock loss */
6984 static void
6985 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
6986 {
6987 int miistatus, active, i;
6988 int reg;
6989
6990 miistatus = sc->sc_mii.mii_media_status;
6991
6992 /* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
6994 return;
6995
6996 active = sc->sc_mii.mii_media_active;
6997
6998 /* Nothing to do if the link is other than 1Gbps */
6999 if (IFM_SUBTYPE(active) != IFM_1000_T)
7000 return;
7001
7002 for (i = 0; i < 10; i++) {
7003 /* read twice */
7004 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7005 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
7007 goto out; /* GOOD! */
7008
7009 /* Reset the PHY */
7010 wm_gmii_reset(sc);
7011 delay(5*1000);
7012 }
7013
7014 /* Disable GigE link negotiation */
7015 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7016 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7017 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7018
7019 /*
7020 * Call gig speed drop workaround on Gig disable before accessing
7021 * any PHY registers.
7022 */
7023 wm_gig_downshift_workaround_ich8lan(sc);
7024
7025 out:
7026 return;
7027 }
7028
7029 /* WOL from S5 stops working */
7030 static void
7031 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
7032 {
7033 uint16_t kmrn_reg;
7034
7035 /* Only for igp3 */
7036 if (sc->sc_phytype == WMPHY_IGP_3) {
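		/*
		 * Toggle the Kumeran near-end loopback diagnostic bit
		 * (set it, then clear it); this pulse is the whole
		 * workaround.
		 */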
7037 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
7038 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
7039 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7040 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
7041 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7042 }
7043 }
7044
7045 #ifdef WM_WOL
7046 /* Power down workaround on D3 */
7047 static void
7048 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
7049 {
7050 uint32_t reg;
7051 int i;
7052
7053 for (i = 0; i < 2; i++) {
7054 /* Disable link */
7055 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7056 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7057 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7058
7059 /*
7060 * Call gig speed drop workaround on Gig disable before
7061 * accessing any PHY registers
7062 */
7063 if (sc->sc_type == WM_T_ICH8)
7064 wm_gig_downshift_workaround_ich8lan(sc);
7065
7066 /* Write VR power-down enable */
7067 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7068 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7069 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
7070 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
7071
7072 /* Read it back and test */
7073 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7074 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7075 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
7076 break;
7077
7078 /* Issue PHY reset and repeat at most one more time */
7079 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7080 }
7081 }
7082 #endif /* WM_WOL */
7083
7084 /*
 * Workaround for the PCH PHYs (82577/82578)
7086 * XXX should be moved to new PHY driver?
7087 */
7088 static void
7089 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
7090 {
7091
7092 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
7093
7094 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
7095
7096 /* 82578 */
7097 if (sc->sc_phytype == WMPHY_82578) {
7098 /* PCH rev. < 3 */
7099 if (sc->sc_rev < 3) {
7100 /* XXX 6 bit shift? Why? Is it page2? */
7101 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
7102 0x66c0);
7103 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
7104 0xffff);
7105 }
7106
7107 /* XXX phy rev. < 2 */
7108 }
7109
7110 /* Select page 0 */
7111
7112 /* XXX acquire semaphore */
7113 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
7114 /* XXX release semaphore */
7115
	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is link, so that K1 is disabled if the link is at 1Gbps.
	 */
7120 wm_k1_gig_workaround_hv(sc, 1);
7121 }
7122
7123 static void
7124 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
7125 {
7126 int k1_enable = sc->sc_nvm_k1_enabled;
7127
7128 /* XXX acquire semaphore */
7129
7130 if (link) {
7131 k1_enable = 0;
7132
7133 /* Link stall fix for link up */
7134 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
7135 } else {
7136 /* Link stall fix for link down */
7137 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
7138 }
7139
7140 wm_configure_k1_ich8lan(sc, k1_enable);
7141
7142 /* XXX release semaphore */
7143 }
7144
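/*
 * wm_configure_k1_ich8lan:
 *
 *	Program the Kumeran K1 (power saving) state: write the new setting
 *	to the K1 config register, then briefly force the MAC speed with
 *	speed-bypass enabled so that the change takes effect, and finally
 *	restore the original CTRL and CTRL_EXT values.
 */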
7145 static void
7146 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
7147 {
7148 uint32_t ctrl, ctrl_ext, tmp;
7149 uint16_t kmrn_reg;
7150
7151 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
7152
7153 if (k1_enable)
7154 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
7155 else
7156 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
7157
7158 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
7159
7160 delay(20);
7161
7162 ctrl = CSR_READ(sc, WMREG_CTRL);
7163 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7164
7165 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
7166 tmp |= CTRL_FRCSPD;
7167
7168 CSR_WRITE(sc, WMREG_CTRL, tmp);
7169 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
7170 delay(20);
7171
7172 CSR_WRITE(sc, WMREG_CTRL, ctrl);
7173 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7174 delay(20);
7175 }
7176
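/*
 * wm_set_pcie_completion_timeout:
 *
 *	If the PCIe completion timeout is still at its default of 0,
 *	program a real value: parts without the version-2 capability can
 *	only be set to 10ms through GCR; otherwise 16ms is set through the
 *	standard Device Control 2 register.  Completion timeout resend is
 *	disabled in either case.
 */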
7177 static void
7178 wm_set_pcie_completion_timeout(struct wm_softc *sc)
7179 {
7180 uint32_t gcr;
7181 pcireg_t ctrl2;
7182
7183 gcr = CSR_READ(sc, WMREG_GCR);
7184
7185 /* Only take action if timeout value is defaulted to 0 */
7186 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
7187 goto out;
7188
7189 if ((gcr & GCR_CAP_VER2) == 0) {
7190 gcr |= GCR_CMPL_TMOUT_10MS;
7191 goto out;
7192 }
7193
7194 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
7195 sc->sc_pcixe_capoff + PCI_PCIE_DCSR2);
7196 ctrl2 |= WM_PCI_PCIE_DCSR2_16MS;
7197 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
7198 sc->sc_pcixe_capoff + PCI_PCIE_DCSR2, ctrl2);
7199
7200 out:
7201 /* Disable completion timeout resend */
7202 gcr &= ~GCR_CMPL_TMOUT_RESEND;
7203
7204 CSR_WRITE(sc, WMREG_GCR, gcr);
7205 }
7206
/* Special case - the 82575 needs extra manual initialization ... */
7208 static void
7209 wm_reset_init_script_82575(struct wm_softc *sc)
7210 {
7211 /*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
7214 */
7215
7216 /* SerDes configuration via SERDESCTRL */
7217 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
7218 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
7219 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
7220 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
7221
7222 /* CCM configuration via CCMCTL register */
7223 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
7224 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
7225
7226 /* PCIe lanes configuration */
7227 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
7228 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
7229 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
7230 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
7231
7232 /* PCIe PLL Configuration */
7233 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
7234 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
7235 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
7236 }
7237
7238 static void
7239 wm_init_manageability(struct wm_softc *sc)
7240 {
7241
7242 if (sc->sc_flags & WM_F_HAS_MANAGE) {
7243 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
7244 uint32_t manc = CSR_READ(sc, WMREG_MANC);
7245
		/* disable hardware interception of ARP */
7247 manc &= ~MANC_ARP_EN;
7248
7249 /* enable receiving management packets to the host */
7250 if (sc->sc_type >= WM_T_82571) {
7251 manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
		}
7256
7257 CSR_WRITE(sc, WMREG_MANC, manc);
7258 }
7259 }
7260
7261 static void
7262 wm_release_manageability(struct wm_softc *sc)
7263 {
7264
7265 if (sc->sc_flags & WM_F_HAS_MANAGE) {
7266 uint32_t manc = CSR_READ(sc, WMREG_MANC);
7267
7268 if (sc->sc_type >= WM_T_82571)
7269 manc &= ~MANC_EN_MNG2HOST;
7270
7271 CSR_WRITE(sc, WMREG_MANC, manc);
7272 }
7273 }
7274
7275 static void
7276 wm_get_wakeup(struct wm_softc *sc)
7277 {
7278
7279 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
7280 switch (sc->sc_type) {
7281 case WM_T_82573:
7282 case WM_T_82583:
7283 sc->sc_flags |= WM_F_HAS_AMT;
7284 /* FALLTHROUGH */
7285 case WM_T_80003:
7286 case WM_T_82541:
7287 case WM_T_82547:
7288 case WM_T_82571:
7289 case WM_T_82572:
7290 case WM_T_82574:
7291 case WM_T_82575:
7292 case WM_T_82576:
7293 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
7294 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
7295 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
7296 break;
7297 case WM_T_ICH8:
7298 case WM_T_ICH9:
7299 case WM_T_ICH10:
7300 case WM_T_PCH:
7301 sc->sc_flags |= WM_F_HAS_AMT;
7302 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
7303 break;
7304 default:
7305 break;
7306 }
7307
7308 /* 1: HAS_MANAGE */
7309 if (wm_enable_mng_pass_thru(sc) != 0)
7310 sc->sc_flags |= WM_F_HAS_MANAGE;
7311
7312 #ifdef WM_DEBUG
7313 printf("\n");
7314 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
7315 printf("HAS_AMT,");
7316 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
7317 printf("ARC_SUBSYS_VALID,");
7318 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
7319 printf("ASF_FIRMWARE_PRES,");
7320 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
7321 printf("HAS_MANAGE,");
7322 printf("\n");
7323 #endif
7324 /*
	 * Note that the WOL flags are set after the EEPROM stuff has been
	 * reset.
7327 */
7328 }
7329
7330 #ifdef WM_WOL
7331 /* WOL in the newer chipset interfaces (pchlan) */
7332 static void
7333 wm_enable_phy_wakeup(struct wm_softc *sc)
7334 {
7335 #if 0
7336 uint16_t preg;
7337
7338 /* Copy MAC RARs to PHY RARs */
7339
7340 /* Copy MAC MTA to PHY MTA */
7341
7342 /* Configure PHY Rx Control register */
7343
7344 /* Enable PHY wakeup in MAC register */
7345
7346 /* Configure and enable PHY wakeup in PHY registers */
7347
7348 /* Activate PHY wakeup */
7349
7350 /* XXX */
7351 #endif
7352 }
7353
7354 static void
7355 wm_enable_wakeup(struct wm_softc *sc)
7356 {
7357 uint32_t reg, pmreg;
7358 pcireg_t pmode;
7359
7360 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
7361 &pmreg, NULL) == 0)
7362 return;
7363
7364 /* Advertise the wakeup capability */
7365 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
7366 | CTRL_SWDPIN(3));
7367 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
7368
7369 /* ICH workaround */
7370 switch (sc->sc_type) {
7371 case WM_T_ICH8:
7372 case WM_T_ICH9:
7373 case WM_T_ICH10:
7374 case WM_T_PCH:
7375 /* Disable gig during WOL */
7376 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7377 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
7378 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7379 if (sc->sc_type == WM_T_PCH)
7380 wm_gmii_reset(sc);
7381
7382 /* Power down workaround */
7383 if (sc->sc_phytype == WMPHY_82577) {
7384 struct mii_softc *child;
7385
7386 /* Assume that the PHY is copper */
7387 child = LIST_FIRST(&sc->sc_mii.mii_phys);
7388 if (child->mii_mpd_rev <= 2)
7389 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
7390 (768 << 5) | 25, 0x0444); /* magic num */
7391 }
7392 break;
7393 default:
7394 break;
7395 }
7396
7397 /* Keep the laser running on fiber adapters */
	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
	    || ((sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0)) {
7400 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7401 reg |= CTRL_EXT_SWDPIN(3);
7402 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7403 }
7404
7405 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
7406 #if 0 /* for the multicast packet */
7407 reg |= WUFC_MC;
7408 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
7409 #endif
7410
7411 if (sc->sc_type == WM_T_PCH) {
7412 wm_enable_phy_wakeup(sc);
7413 } else {
7414 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
7415 CSR_WRITE(sc, WMREG_WUFC, reg);
7416 }
7417
7418 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
7419 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
7420 && (sc->sc_phytype == WMPHY_IGP_3))
7421 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
7422
7423 /* Request PME */
7424 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
7425 #if 0
7426 /* Disable WOL */
7427 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
7428 #else
7429 /* For WOL */
7430 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
7431 #endif
7432 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
7433 }
7434 #endif /* WM_WOL */
7435
7436 static bool
7437 wm_suspend(device_t self, const pmf_qual_t *qual)
7438 {
7439 struct wm_softc *sc = device_private(self);
7440
7441 wm_release_manageability(sc);
7442 wm_release_hw_control(sc);
7443 #ifdef WM_WOL
7444 wm_enable_wakeup(sc);
7445 #endif
7446
7447 return true;
7448 }
7449
7450 static bool
7451 wm_resume(device_t self, const pmf_qual_t *qual)
7452 {
7453 struct wm_softc *sc = device_private(self);
7454
7455 wm_init_manageability(sc);
7456
7457 return true;
7458 }
7459