/*	$NetBSD: if_wm.c,v 1.208 2010/06/25 04:03:14 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.208 2010/06/25 04:03:14 msaitoh Exp $");

#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define WM_DEBUG_LINK	0x01
#define WM_DEBUG_TX	0x02
#define WM_DEBUG_RX	0x04
#define WM_DEBUG_GMII	0x08
#define WM_DEBUG_MANAGE	0x10
int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE;

#define DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define WM_NTXSEGS		256
#define WM_IFQUEUELEN		256
#define WM_TXQUEUELEN_MAX	64
#define WM_TXQUEUELEN_MAX_82547	16
#define WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define WM_NTXDESC_82542	256
#define WM_NTXDESC_82544	4096
#define WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
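
/*
 * Illustrative note (an editorial addition, not from the original source):
 * because the ring sizes above are powers of two, WM_NEXTTX() advances an
 * index with a mask instead of a modulo; e.g. with WM_NTXDESC(sc) == 4096,
 * WM_NEXTTX(sc, 4095) == (4095 + 1) & 4095 == 0, wrapping to the start.
 */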

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define WM_NRXDESC		256
#define WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
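
/*
 * Illustrative note (editorial addition): the same masking idiom wraps in
 * both directions; e.g. WM_PREVRX(0) == (0 - 1) & 255 == 255, the last
 * descriptor in the ring.
 */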

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
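
/*
 * Illustrative note (editorial addition): WM_CDTXOFF()/WM_CDRXOFF() yield
 * byte offsets into the control-data clump via offsetof(); e.g.
 * WM_CDRXOFF(0) == 0 and WM_CDTXOFF(0) == WM_NRXDESC *
 * sizeof(wiseman_rxdesc_t), since the Rx descriptors are laid out first.
 */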

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
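
/*
 * Editorial note: judging from how this table is used elsewhere in the
 * driver, it is indexed by sc_funcid to select the SWFW semaphore bit for
 * the PHY that belongs to this PCI function.
 */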

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segments */
	size_t sc_cd_size;		/* control data size */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define sc_txdescs	sc_control_data->wcd_txdescs
#define sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall; /* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define WM_RXCHAIN_RESET(sc) \
do { \
	(sc)->sc_rxtailp = &(sc)->sc_rxhead; \
	*(sc)->sc_rxtailp = NULL; \
	(sc)->sc_rxlen = 0; \
} while (/*CONSTCOND*/0)

#define WM_RXCHAIN_LINK(sc, m) \
do { \
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
	(sc)->sc_rxtailp = &(m)->m_next; \
} while (/*CONSTCOND*/0)
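
/*
 * Illustrative note (editorial addition): sc_rxtailp always points at the
 * pointer to be filled in next, which makes linking O(1): after
 * WM_RXCHAIN_RESET() it points at sc_rxhead, and each WM_RXCHAIN_LINK(sc, m)
 * stores m through it and then re-aims it at m->m_next.
 */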

#ifdef WM_EVENT_COUNTERS
#define WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define WM_EVCNT_INCR(ev)	/* nothing */
#define WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define CSR_READ(sc, reg) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define CSR_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define CSR_WRITE_FLUSH(sc) \
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define WM_CDTXADDR_HI(sc, x) \
	(sizeof(bus_addr_t) == 8 ? \
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define WM_CDRXADDR_HI(sc, x) \
	(sizeof(bus_addr_t) == 8 ? \
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
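
/*
 * Illustrative note (editorial addition): the _LO/_HI pairs split a bus
 * address for the 64-bit descriptor base registers; a hypothetical address
 * 0x123456000 yields LO 0x23456000 and HI 0x1.  On 32-bit bus_addr_t
 * configurations HI is a compile-time constant zero.
 */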

#define WM_CDTXSYNC(sc, x, n, ops) \
do { \
	int __x, __n; \
\
	__x = (x); \
	__n = (n); \
\
	/* If it will wrap around, sync to the end of the ring. */ \
	if ((__x + __n) > WM_NTXDESC(sc)) { \
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \
		    (WM_NTXDESC(sc) - __x), (ops)); \
		__n -= (WM_NTXDESC(sc) - __x); \
		__x = 0; \
	} \
\
	/* Now sync whatever is left. */ \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \
} while (/*CONSTCOND*/0)
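
/*
 * Illustrative note (editorial addition): a sync that would wrap is done in
 * two pieces; e.g. with 4096 descriptors, syncing 8 descriptors starting at
 * index 4092 syncs descriptors 4092-4095 first and then 0-3.
 */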

#define WM_CDRXSYNC(sc, x, ops) \
do { \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \
} while (/*CONSTCOND*/0)

#define WM_INIT_RXDESC(sc, x) \
do { \
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \
	struct mbuf *__m = __rxs->rxs_mbuf; \
\
	/* \
	 * Note: We scoot the packet forward 2 bytes in the buffer \
	 * so that the payload after the Ethernet header is aligned \
	 * to a 4-byte boundary. \
	 * \
	 * XXX BRAINDAMAGE ALERT! \
	 * The stupid chip uses the same size for every buffer, which \
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this \
	 * reason, we can't "scoot" packets longer than the standard \
	 * Ethernet MTU.  On strict-alignment platforms, if the total \
	 * size exceeds (2K - 2) we set align_tweak to 0 and let \
	 * the upper layer copy the headers. \
	 */ \
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak; \
\
	wm_set_dma_addr(&__rxd->wrx_addr, \
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0; \
	__rxd->wrx_cksum = 0; \
	__rxd->wrx_status = 0; \
	__rxd->wrx_errors = 0; \
	__rxd->wrx_special = 0; \
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \
} while (/*CONSTCOND*/0)

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(device_t);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t wmp_vendor;
	pci_product_id_t wmp_product;
	const char *wmp_name;
	wm_chip_type wmp_type;
	int wmp_flags;
#define WMP_F_1000X	0x01
#define WMP_F_1000T	0x02
#define WMP_F_SERDES	0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572, WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003, WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003, WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575, WMP_F_1000T },
#if 0
	/*
	 * Not sure whether this is WMP_F_1000X or WMP_F_SERDES; we do not
	 * have the hardware, so it is disabled for now ...
	 */
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576, WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER, WMP_F_1000T },
	{ 0, 0,
	  NULL,
	  0, 0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
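
/*
 * Illustrative note (editorial addition, based on the i8254x indirect I/O
 * scheme): offset 0 of the I/O BAR acts as the address window and offset 4
 * as the data window, so a register access is "write the register offset,
 * then read or write the value".
 */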

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}
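
/*
 * Illustrative note (editorial addition): the poll loop above allows the
 * hardware at most SCTL_CTL_POLL_TIMEOUT * 5 microseconds to assert
 * SCTL_CTL_READY before the warning is printed.
 */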

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

static void
wm_set_spiaddrbits(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}
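
/*
 * Illustrative note (editorial addition): EECD_EE_ABITS distinguishes large
 * SPI parts from small ones; when it is set the device expects 16-bit
 * addresses, otherwise 8-bit addresses, as the assignment above encodes.
 */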

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, sc->sc_rev);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	wm_get_wakeup(sc);

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: unable to find I/O BAR\n");
		else {
			/*
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)) {
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
			/* ICH* and PCH have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
			    NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
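				/*
				 * Illustrative note (editorial addition):
				 * MMRBC is a two-bit field whose byte count
				 * is 512 << value, so 0..3 map to 512, 1024,
				 * 2048 and 4096 bytes; that is what the
				 * "512 << bytecnt" below prints.
				 */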
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcixe_capoff + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
	    &sc->sc_cd_rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
	    sc->sc_cd_rseg, sc->sc_cd_size,
	    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
	    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/*
	 * Get some information about the EEPROM.
	 */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Microwire */
		sc->sc_ee_addrbits = 6;
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		/* Microwire */
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			/* SPI */
			wm_set_spiaddrbits(sc);
		} else
			/* Microwire */
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82571:
	case WM_T_82572:
		/* SPI */
		wm_set_spiaddrbits(sc);
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (wm_is_onboard_nvm_eeprom(sc) == 0)
			sc->sc_flags |= WM_F_EEPROM_FLASH;
		else {
			/* SPI */
			wm_set_spiaddrbits(sc);
		}
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_80003:
		/* SPI */
		wm_set_spiaddrbits(sc);
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		/* FLASH */
		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (reg & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
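		/*
		 * Illustrative note (editorial addition): GFPREG holds the
		 * flash base and limit in units of sectors; the arithmetic
		 * above converts the sector count to bytes and then to the
		 * number of 16-bit NVM words per bank (the region holds two
		 * banks, hence the extra factor of two in the divisor).
		 */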
		break;
	default:
		break;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */
	/*
	 * Validate the EEPROM checksum.  If the checksum fails, flag
	 * this for later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc)) {
		/*
		 * Check again, because some PCI-e parts fail the
		 * first check due to the link being in sleep state.
		 */
		if (wm_validate_eeprom_checksum(sc))
			sc->sc_flags |= WM_F_EEPROM_INVALID;
	}

	/* Set device properties (macflags) */
	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose_dev(sc->sc_dev,
		    "%u word (%d address bits) %s EEPROM\n",
		    1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, unless it was
	 * already found in the device properties.
	 */
	ea = prop_dictionary_get(dict, "mac-address");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_mac_addr(sc, enaddr) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to read Ethernet address\n");
			return;
		}
	}

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	pn = prop_dictionary_get(dict, "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
			return;
		}
	}

	pn = prop_dictionary_get(dict, "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
			return;
		}
	}

	/* check for WM_F_WOL */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
		/* dummy? */
		eeprom_data = 0;
		apme_mask = EEPROM_CFG3_APME;
		break;
	case WM_T_82544:
		apme_mask = EEPROM_CFG2_82544_APM_EN;
		eeprom_data = cfg2;
		break;
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	default:
		apme_mask = EEPROM_CFG3_APME;
		wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
		    : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		apme_mask = WUC_APME;
		eeprom_data = CSR_READ(sc, WMREG_WUC);
		break;
	}

	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
	if ((eeprom_data & apme_mask) != 0)
		sc->sc_flags |= WM_F_WOL;
#ifdef WM_DEBUG
	if ((sc->sc_flags & WM_F_WOL) != 0)
		printf("WOL\n");
#endif

1692 /*
1693 * XXX need special handling for some multiple port cards
1694 * to disable a particular port.
1695 */
1696
1697 if (sc->sc_type >= WM_T_82544) {
1698 pn = prop_dictionary_get(dict, "i82543-swdpin");
1699 if (pn != NULL) {
1700 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1701 swdpin = (uint16_t) prop_number_integer_value(pn);
1702 } else {
1703 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1704 aprint_error_dev(sc->sc_dev,
1705 "unable to read SWDPIN\n");
1706 return;
1707 }
1708 }
1709 }
1710
1711 if (cfg1 & EEPROM_CFG1_ILOS)
1712 sc->sc_ctrl |= CTRL_ILOS;
1713 if (sc->sc_type >= WM_T_82544) {
1714 sc->sc_ctrl |=
1715 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1716 CTRL_SWDPIO_SHIFT;
1717 sc->sc_ctrl |=
1718 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1719 CTRL_SWDPINS_SHIFT;
1720 } else {
1721 sc->sc_ctrl |=
1722 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1723 CTRL_SWDPIO_SHIFT;
1724 }
1725
1726 #if 0
1727 if (sc->sc_type >= WM_T_82544) {
1728 if (cfg1 & EEPROM_CFG1_IPS0)
1729 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1730 if (cfg1 & EEPROM_CFG1_IPS1)
1731 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1732 sc->sc_ctrl_ext |=
1733 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1734 CTRL_EXT_SWDPIO_SHIFT;
1735 sc->sc_ctrl_ext |=
1736 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1737 CTRL_EXT_SWDPINS_SHIFT;
1738 } else {
1739 sc->sc_ctrl_ext |=
1740 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1741 CTRL_EXT_SWDPIO_SHIFT;
1742 }
1743 #endif
1744
1745 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1746 #if 0
1747 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1748 #endif
1749
1750 /*
1751 * Set up some register offsets that are different between
1752 * the i82542 and the i82543 and later chips.
1753 */
1754 if (sc->sc_type < WM_T_82543) {
1755 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1756 sc->sc_tdt_reg = WMREG_OLD_TDT;
1757 } else {
1758 sc->sc_rdt_reg = WMREG_RDT;
1759 sc->sc_tdt_reg = WMREG_TDT;
1760 }
1761
1762 if (sc->sc_type == WM_T_PCH) {
1763 uint16_t val;
1764
1765 /* Save the NVM K1 bit setting */
1766 wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1767
1768 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1769 sc->sc_nvm_k1_enabled = 1;
1770 else
1771 sc->sc_nvm_k1_enabled = 0;
1772 }
1773
1774 /*
1775 * Determine if we're TBI,GMII or SGMII mode, and initialize the
1776 * media structures accordingly.
1777 */
1778 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1779 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1780 || sc->sc_type == WM_T_82573
1781 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1782 /* STATUS_TBIMODE reserved/reused, can't rely on it */
1783 wm_gmii_mediainit(sc, wmp->wmp_product);
1784 } else if (sc->sc_type < WM_T_82543 ||
1785 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1786 if (wmp->wmp_flags & WMP_F_1000T)
1787 aprint_error_dev(sc->sc_dev,
1788 "WARNING: TBIMODE set on 1000BASE-T product!\n");
1789 wm_tbi_mediainit(sc);
1790 } else {
1791 switch (sc->sc_type) {
1792 case WM_T_82575:
1793 case WM_T_82576:
1794 case WM_T_82580:
1795 case WM_T_82580ER:
1796 reg = CSR_READ(sc, WMREG_CTRL_EXT);
1797 switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1798 case CTRL_EXT_LINK_MODE_SGMII:
1799 aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1800 sc->sc_flags |= WM_F_SGMII;
1801 CSR_WRITE(sc, WMREG_CTRL_EXT,
1802 reg | CTRL_EXT_I2C_ENA);
1803 wm_gmii_mediainit(sc, wmp->wmp_product);
1804 break;
1805 case CTRL_EXT_LINK_MODE_1000KX:
1806 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1807 aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1808 CSR_WRITE(sc, WMREG_CTRL_EXT,
1809 reg | CTRL_EXT_I2C_ENA);
1810 panic("not supported yet\n");
1811 break;
1812 case CTRL_EXT_LINK_MODE_GMII:
1813 default:
1814 CSR_WRITE(sc, WMREG_CTRL_EXT,
1815 reg & ~CTRL_EXT_I2C_ENA);
1816 wm_gmii_mediainit(sc, wmp->wmp_product);
1817 break;
1818 }
1819 break;
1820 default:
1821 if (wmp->wmp_flags & WMP_F_1000X)
1822 aprint_error_dev(sc->sc_dev,
1823 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1824 wm_gmii_mediainit(sc, wmp->wmp_product);
1825 }
1826 }
1827
1828 ifp = &sc->sc_ethercom.ec_if;
1829 xname = device_xname(sc->sc_dev);
1830 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1831 ifp->if_softc = sc;
1832 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1833 ifp->if_ioctl = wm_ioctl;
1834 ifp->if_start = wm_start;
1835 ifp->if_watchdog = wm_watchdog;
1836 ifp->if_init = wm_init;
1837 ifp->if_stop = wm_stop;
1838 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1839 IFQ_SET_READY(&ifp->if_snd);
1840
1841 /* Check for jumbo frame */
1842 switch (sc->sc_type) {
1843 case WM_T_82573:
1844 /* XXX limited to 9234 if ASPM is disabled */
1845 wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1846 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1847 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1848 break;
1849 case WM_T_82571:
1850 case WM_T_82572:
1851 case WM_T_82574:
1852 case WM_T_82575:
1853 case WM_T_82576:
1854 case WM_T_82580:
1855 case WM_T_82580ER:
1856 case WM_T_80003:
1857 case WM_T_ICH9:
1858 case WM_T_ICH10:
1859 /* XXX limited to 9234 */
1860 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1861 break;
1862 case WM_T_PCH:
1863 /* XXX limited to 4096 */
1864 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1865 break;
1866 case WM_T_82542_2_0:
1867 case WM_T_82542_2_1:
1868 case WM_T_82583:
1869 case WM_T_ICH8:
1870 /* No support for jumbo frame */
1871 break;
1872 default:
1873 /* ETHER_MAX_LEN_JUMBO */
1874 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1875 break;
1876 }
1877
1878 /*
1879 * If we're an i82543 or greater, we can support VLANs.
1880 */
1881 if (sc->sc_type >= WM_T_82543)
1882 sc->sc_ethercom.ec_capabilities |=
1883 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1884
1885 /*
1886 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
1887 * on the i82543 and later.
1888 */
1889 if (sc->sc_type >= WM_T_82543) {
1890 ifp->if_capabilities |=
1891 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1892 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1893 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1894 IFCAP_CSUM_TCPv6_Tx |
1895 IFCAP_CSUM_UDPv6_Tx;
1896 }
1897
1898 /*
1899 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1900 *
1901 * 82541GI (8086:1076) ... no
1902 * 82572EI (8086:10b9) ... yes
1903 */
1904 if (sc->sc_type >= WM_T_82571) {
1905 ifp->if_capabilities |=
1906 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1907 }
1908
1909 /*
1910 * If we're an i82544 or greater (except i82547), we can do
1911 * TCP segmentation offload.
1912 */
1913 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1914 ifp->if_capabilities |= IFCAP_TSOv4;
1915 }
1916
1917 if (sc->sc_type >= WM_T_82571) {
1918 ifp->if_capabilities |= IFCAP_TSOv6;
1919 }
1920
1921 /*
1922 * Attach the interface.
1923 */
1924 if_attach(ifp);
1925 ether_ifattach(ifp, enaddr);
1926 #if NRND > 0
1927 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1928 #endif
1929
1930 #ifdef WM_EVENT_COUNTERS
1931 /* Attach event counters. */
1932 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1933 NULL, xname, "txsstall");
1934 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1935 NULL, xname, "txdstall");
1936 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1937 NULL, xname, "txfifo_stall");
1938 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1939 NULL, xname, "txdw");
1940 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1941 NULL, xname, "txqe");
1942 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1943 NULL, xname, "rxintr");
1944 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1945 NULL, xname, "linkintr");
1946
1947 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1948 NULL, xname, "rxipsum");
1949 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1950 NULL, xname, "rxtusum");
1951 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1952 NULL, xname, "txipsum");
1953 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1954 NULL, xname, "txtusum");
1955 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1956 NULL, xname, "txtusum6");
1957
1958 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1959 NULL, xname, "txtso");
1960 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1961 NULL, xname, "txtso6");
1962 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1963 NULL, xname, "txtsopain");
1964
1965 for (i = 0; i < WM_NTXSEGS; i++) {
1966 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1967 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1968 NULL, xname, wm_txseg_evcnt_names[i]);
1969 }
1970
1971 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1972 NULL, xname, "txdrop");
1973
1974 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1975 NULL, xname, "tu");
1976
1977 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1978 NULL, xname, "tx_xoff");
1979 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1980 NULL, xname, "tx_xon");
1981 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1982 NULL, xname, "rx_xoff");
1983 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1984 NULL, xname, "rx_xon");
1985 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1986 NULL, xname, "rx_macctl");
1987 #endif /* WM_EVENT_COUNTERS */
1988
1989 if (pmf_device_register(self, wm_suspend, wm_resume))
1990 pmf_class_network_register(self, ifp);
1991 else
1992 aprint_error_dev(self, "couldn't establish power handler\n");
1993
1994 return;
1995
1996 /*
1997 * Free any resources we've allocated during the failed attach
1998 * attempt. Do this in reverse order and fall through.
1999 */
2000 fail_5:
2001 for (i = 0; i < WM_NRXDESC; i++) {
2002 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2003 bus_dmamap_destroy(sc->sc_dmat,
2004 sc->sc_rxsoft[i].rxs_dmamap);
2005 }
2006 fail_4:
2007 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2008 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2009 bus_dmamap_destroy(sc->sc_dmat,
2010 sc->sc_txsoft[i].txs_dmamap);
2011 }
2012 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2013 fail_3:
2014 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2015 fail_2:
2016 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2017 sc->sc_cd_size);
2018 fail_1:
2019 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2020 fail_0:
2021 return;
2022 }
2023
2024 static int
2025 wm_detach(device_t self, int flags __unused)
2026 {
2027 struct wm_softc *sc = device_private(self);
2028 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2029 int i, s;
2030
2031 s = splnet();
2032 /* Stop the interface; callouts are stopped inside wm_stop(). */
2033 wm_stop(ifp, 1);
2034 splx(s);
2035
2036 pmf_device_deregister(self);
2037
2038 /* Tell the firmware about the release */
2039 wm_release_manageability(sc);
2040
2041 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2042
2043 /* Delete all remaining media. */
2044 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2045
2046 ether_ifdetach(ifp);
2047 if_detach(ifp);
2048
2049
2050 /* Unload RX dmamaps and free mbufs */
2051 wm_rxdrain(sc);
2052
2053 /* Free DMA maps; this mirrors the failure path at the end of wm_attach(). */
2054 for (i = 0; i < WM_NRXDESC; i++) {
2055 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2056 bus_dmamap_destroy(sc->sc_dmat,
2057 sc->sc_rxsoft[i].rxs_dmamap);
2058 }
2059 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2060 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2061 bus_dmamap_destroy(sc->sc_dmat,
2062 sc->sc_txsoft[i].txs_dmamap);
2063 }
2064 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2065 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2066 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2067 sc->sc_cd_size);
2068 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2069
2070 /* Disestablish the interrupt handler */
2071 if (sc->sc_ih != NULL) {
2072 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2073 sc->sc_ih = NULL;
2074 }
2075
2076 /* Unmap the register */
2077 if (sc->sc_ss) {
2078 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2079 sc->sc_ss = 0;
2080 }
2081
2082 wm_release_hw_control(sc);
2083
2084 return 0;
2085 }
2086
2087 /*
2088 * wm_tx_offload:
2089 *
2090 * Set up TCP/IP checksumming parameters for the
2091 * specified packet.
2092 */
2093 static int
2094 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2095 uint8_t *fieldsp)
2096 {
2097 struct mbuf *m0 = txs->txs_mbuf;
2098 struct livengood_tcpip_ctxdesc *t;
2099 uint32_t ipcs, tucs, cmd, cmdlen, seg;
2100 uint32_t ipcse;
2101 struct ether_header *eh;
2102 int offset, iphl;
2103 uint8_t fields;
2104
2105 /*
2106 * XXX It would be nice if the mbuf pkthdr had offset
2107 * fields for the protocol headers.
2108 */
2109
2110 eh = mtod(m0, struct ether_header *);
2111 switch (htons(eh->ether_type)) {
2112 case ETHERTYPE_IP:
2113 case ETHERTYPE_IPV6:
2114 offset = ETHER_HDR_LEN;
2115 break;
2116
2117 case ETHERTYPE_VLAN:
2118 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2119 break;
2120
2121 default:
2122 /*
2123 * We don't support this protocol or encapsulation.
2124 */
2125 *fieldsp = 0;
2126 *cmdp = 0;
2127 return 0;
2128 }
2129
2130 if ((m0->m_pkthdr.csum_flags &
2131 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2132 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2133 } else {
2134 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2135 }
2136 ipcse = offset + iphl - 1;
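	/*
	 * Example: an untagged IPv4 frame has offset = ETHER_HDR_LEN
	 * (14) and iphl = 20, so ipcse = 33, the offset of the last
	 * byte covered by the hardware IP checksum.
	 */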
2137
2138 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2139 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2140 seg = 0;
2141 fields = 0;
2142
2143 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2144 int hlen = offset + iphl;
2145 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2146
2147 if (__predict_false(m0->m_len <
2148 (hlen + sizeof(struct tcphdr)))) {
2149 /*
2150 * TCP/IP headers are not in the first mbuf; we need
2151 * to do this the slow and painful way. Let's just
2152 * hope this doesn't happen very often.
2153 */
2154 struct tcphdr th;
2155
2156 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2157
2158 m_copydata(m0, hlen, sizeof(th), &th);
2159 if (v4) {
2160 struct ip ip;
2161
2162 m_copydata(m0, offset, sizeof(ip), &ip);
2163 ip.ip_len = 0;
2164 m_copyback(m0,
2165 offset + offsetof(struct ip, ip_len),
2166 sizeof(ip.ip_len), &ip.ip_len);
2167 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2168 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2169 } else {
2170 struct ip6_hdr ip6;
2171
2172 m_copydata(m0, offset, sizeof(ip6), &ip6);
2173 ip6.ip6_plen = 0;
2174 m_copyback(m0,
2175 offset + offsetof(struct ip6_hdr, ip6_plen),
2176 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2177 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2178 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2179 }
2180 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2181 sizeof(th.th_sum), &th.th_sum);
2182
2183 hlen += th.th_off << 2;
2184 } else {
2185 /*
2186 * TCP/IP headers are in the first mbuf; we can do
2187 * this the easy way.
2188 */
2189 struct tcphdr *th;
2190
2191 if (v4) {
2192 struct ip *ip =
2193 (void *)(mtod(m0, char *) + offset);
2194 th = (void *)(mtod(m0, char *) + hlen);
2195
2196 ip->ip_len = 0;
2197 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2198 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2199 } else {
2200 struct ip6_hdr *ip6 =
2201 (void *)(mtod(m0, char *) + offset);
2202 th = (void *)(mtod(m0, char *) + hlen);
2203
2204 ip6->ip6_plen = 0;
2205 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2206 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2207 }
2208 hlen += th->th_off << 2;
2209 }
2210
2211 if (v4) {
2212 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2213 cmdlen |= WTX_TCPIP_CMD_IP;
2214 } else {
2215 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2216 ipcse = 0;
2217 }
2218 cmd |= WTX_TCPIP_CMD_TSE;
2219 cmdlen |= WTX_TCPIP_CMD_TSE |
2220 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2221 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2222 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2223 }
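/*
 * Worked example with assumed sizes: 14-byte Ethernet, 20-byte IPv4
 * and 20-byte TCP headers give hlen = 54, so cmdlen carries the TCP
 * payload length (m0->m_pkthdr.len - 54) and seg packs HDRLEN(54)
 * together with the MSS from m0->m_pkthdr.segsz (e.g. 1460).
 */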
2224
2225 /*
2226 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2227 * offload feature, if we load the context descriptor, we
2228 * MUST provide valid values for IPCSS and TUCSS fields.
2229 */
2230
2231 ipcs = WTX_TCPIP_IPCSS(offset) |
2232 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2233 WTX_TCPIP_IPCSE(ipcse);
2234 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2235 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2236 fields |= WTX_IXSM;
2237 }
2238
2239 offset += iphl;
2240
2241 if (m0->m_pkthdr.csum_flags &
2242 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2243 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2244 fields |= WTX_TXSM;
2245 tucs = WTX_TCPIP_TUCSS(offset) |
2246 WTX_TCPIP_TUCSO(offset +
2247 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2248 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2249 } else if ((m0->m_pkthdr.csum_flags &
2250 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2251 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2252 fields |= WTX_TXSM;
2253 tucs = WTX_TCPIP_TUCSS(offset) |
2254 WTX_TCPIP_TUCSO(offset +
2255 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2256 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2257 } else {
2258 /* Just initialize it to a valid TCP context. */
2259 tucs = WTX_TCPIP_TUCSS(offset) |
2260 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2261 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2262 }
2263
2264 /* Fill in the context descriptor. */
2265 t = (struct livengood_tcpip_ctxdesc *)
2266 &sc->sc_txdescs[sc->sc_txnext];
2267 t->tcpip_ipcs = htole32(ipcs);
2268 t->tcpip_tucs = htole32(tucs);
2269 t->tcpip_cmdlen = htole32(cmdlen);
2270 t->tcpip_seg = htole32(seg);
2271 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2272
2273 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2274 txs->txs_ndesc++;
2275
2276 *cmdp = cmd;
2277 *fieldsp = fields;
2278
2279 return 0;
2280 }
2281
2282 static void
2283 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2284 {
2285 struct mbuf *m;
2286 int i;
2287
2288 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2289 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2290 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2291 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2292 m->m_data, m->m_len, m->m_flags);
2293 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2294 i, i == 1 ? "" : "s");
2295 }
2296
2297 /*
2298 * wm_82547_txfifo_stall:
2299 *
2300 * Callout used to wait for the 82547 Tx FIFO to drain,
2301 * reset the FIFO pointers, and restart packet transmission.
2302 */
2303 static void
2304 wm_82547_txfifo_stall(void *arg)
2305 {
2306 struct wm_softc *sc = arg;
2307 int s;
2308
2309 s = splnet();
2310
2311 if (sc->sc_txfifo_stall) {
2312 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2313 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2314 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2315 /*
2316 * Packets have drained. Stop transmitter, reset
2317 * FIFO pointers, restart transmitter, and kick
2318 * the packet queue.
2319 */
2320 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2321 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2322 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2323 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2324 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2325 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2326 CSR_WRITE(sc, WMREG_TCTL, tctl);
2327 CSR_WRITE_FLUSH(sc);
2328
2329 sc->sc_txfifo_head = 0;
2330 sc->sc_txfifo_stall = 0;
2331 wm_start(&sc->sc_ethercom.ec_if);
2332 } else {
2333 /*
2334 * Still waiting for packets to drain; try again in
2335 * another tick.
2336 */
2337 callout_schedule(&sc->sc_txfifo_ch, 1);
2338 }
2339 }
2340
2341 splx(s);
2342 }
2343
2344 /*
2345 * wm_82547_txfifo_bugchk:
2346 *
2347 * Check for a bug condition in the 82547 Tx FIFO. We need to
2348 * prevent enqueueing a packet that would wrap around the end
2349 * of the Tx FIFO ring buffer; otherwise the chip will croak.
2350 *
2351 * We do this by checking the amount of space before the end
2352 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2353 * the Tx FIFO, wait for all remaining packets to drain, reset
2354 * the internal FIFO pointers to the beginning, and restart
2355 * transmission on the interface.
2356 */
2357 #define WM_FIFO_HDR 0x10
2358 #define WM_82547_PAD_LEN 0x3e0
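/*
 * Sketch of the check below with assumed numbers: a 1514-byte frame
 * occupies roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 FIFO
 * bytes.  If only space = 512 bytes remain before the end of the
 * FIFO, then len >= WM_82547_PAD_LEN + space (1536 >= 992 + 512)
 * and we stall until the FIFO drains instead of wrapping.
 */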
2359 static int
2360 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2361 {
2362 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2363 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2364
2365 /* Just return if already stalled. */
2366 if (sc->sc_txfifo_stall)
2367 return 1;
2368
2369 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2370 /* Stall only occurs in half-duplex mode. */
2371 goto send_packet;
2372 }
2373
2374 if (len >= WM_82547_PAD_LEN + space) {
2375 sc->sc_txfifo_stall = 1;
2376 callout_schedule(&sc->sc_txfifo_ch, 1);
2377 return 1;
2378 }
2379
2380 send_packet:
2381 sc->sc_txfifo_head += len;
2382 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2383 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2384
2385 return 0;
2386 }
2387
2388 /*
2389 * wm_start: [ifnet interface function]
2390 *
2391 * Start packet transmission on the interface.
2392 */
2393 static void
2394 wm_start(struct ifnet *ifp)
2395 {
2396 struct wm_softc *sc = ifp->if_softc;
2397 struct mbuf *m0;
2398 struct m_tag *mtag;
2399 struct wm_txsoft *txs;
2400 bus_dmamap_t dmamap;
2401 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2402 bus_addr_t curaddr;
2403 bus_size_t seglen, curlen;
2404 uint32_t cksumcmd;
2405 uint8_t cksumfields;
2406
2407 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2408 return;
2409
2410 /*
2411 * Remember the previous number of free descriptors.
2412 */
2413 ofree = sc->sc_txfree;
2414
2415 /*
2416 * Loop through the send queue, setting up transmit descriptors
2417 * until we drain the queue, or use up all available transmit
2418 * descriptors.
2419 */
2420 for (;;) {
2421 /* Grab a packet off the queue. */
2422 IFQ_POLL(&ifp->if_snd, m0);
2423 if (m0 == NULL)
2424 break;
2425
2426 DPRINTF(WM_DEBUG_TX,
2427 ("%s: TX: have packet to transmit: %p\n",
2428 device_xname(sc->sc_dev), m0));
2429
2430 /* Get a work queue entry. */
2431 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2432 wm_txintr(sc);
2433 if (sc->sc_txsfree == 0) {
2434 DPRINTF(WM_DEBUG_TX,
2435 ("%s: TX: no free job descriptors\n",
2436 device_xname(sc->sc_dev)));
2437 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2438 break;
2439 }
2440 }
2441
2442 txs = &sc->sc_txsoft[sc->sc_txsnext];
2443 dmamap = txs->txs_dmamap;
2444
2445 use_tso = (m0->m_pkthdr.csum_flags &
2446 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2447
2448 /*
2449 * So says the Linux driver:
2450 * The controller does a simple calculation to make sure
2451 * there is enough room in the FIFO before initiating the
2452 * DMA for each buffer. The calc is:
2453 * 4 = ceil(buffer len / MSS)
2454 * To make sure we don't overrun the FIFO, adjust the max
2455 * buffer len if the MSS drops.
2456 */
2457 dmamap->dm_maxsegsz =
2458 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2459 ? m0->m_pkthdr.segsz << 2
2460 : WTX_MAX_LEN;
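/*
 * E.g. with an (assumed) MSS of 536, dm_maxsegsz becomes
 * 536 << 2 = 2144 bytes, so no single DMA segment covers more
 * than four MSS worth of data and the FIFO estimate above
 * stays valid.
 */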
2461
2462 /*
2463 * Load the DMA map. If this fails, the packet either
2464 * didn't fit in the allotted number of segments, or we
2465 * were short on resources. For the too-many-segments
2466 * case, we simply report an error and drop the packet,
2467 * since we can't sanely copy a jumbo packet to a single
2468 * buffer.
2469 */
2470 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2471 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2472 if (error) {
2473 if (error == EFBIG) {
2474 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2475 log(LOG_ERR, "%s: Tx packet consumes too many "
2476 "DMA segments, dropping...\n",
2477 device_xname(sc->sc_dev));
2478 IFQ_DEQUEUE(&ifp->if_snd, m0);
2479 wm_dump_mbuf_chain(sc, m0);
2480 m_freem(m0);
2481 continue;
2482 }
2483 /*
2484 * Short on resources, just stop for now.
2485 */
2486 DPRINTF(WM_DEBUG_TX,
2487 ("%s: TX: dmamap load failed: %d\n",
2488 device_xname(sc->sc_dev), error));
2489 break;
2490 }
2491
2492 segs_needed = dmamap->dm_nsegs;
2493 if (use_tso) {
2494 /* For sentinel descriptor; see below. */
2495 segs_needed++;
2496 }
2497
2498 /*
2499 * Ensure we have enough descriptors free to describe
2500 * the packet. Note, we always reserve one descriptor
2501 * at the end of the ring due to the semantics of the
2502 * TDT register, plus one more in the event we need
2503 * to load offload context.
2504 */
2505 if (segs_needed > sc->sc_txfree - 2) {
2506 /*
2507 * Not enough free descriptors to transmit this
2508 * packet. We haven't committed anything yet,
2509 * so just unload the DMA map, put the packet
2510 * back on the queue, and punt. Notify the upper
2511 * layer that there are no more slots left.
2512 */
2513 DPRINTF(WM_DEBUG_TX,
2514 ("%s: TX: need %d (%d) descriptors, have %d\n",
2515 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2516 segs_needed, sc->sc_txfree - 1));
2517 ifp->if_flags |= IFF_OACTIVE;
2518 bus_dmamap_unload(sc->sc_dmat, dmamap);
2519 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2520 break;
2521 }
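/*
 * E.g. with sc_txfree == 10 a packet may consume at most 8
 * descriptors: one is held back for the TDT semantics and one
 * more for a possible offload context descriptor.
 */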
2522
2523 /*
2524 * Check for 82547 Tx FIFO bug. We need to do this
2525 * once we know we can transmit the packet, since we
2526 * do some internal FIFO space accounting here.
2527 */
2528 if (sc->sc_type == WM_T_82547 &&
2529 wm_82547_txfifo_bugchk(sc, m0)) {
2530 DPRINTF(WM_DEBUG_TX,
2531 ("%s: TX: 82547 Tx FIFO bug detected\n",
2532 device_xname(sc->sc_dev)));
2533 ifp->if_flags |= IFF_OACTIVE;
2534 bus_dmamap_unload(sc->sc_dmat, dmamap);
2535 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2536 break;
2537 }
2538
2539 IFQ_DEQUEUE(&ifp->if_snd, m0);
2540
2541 /*
2542 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2543 */
2544
2545 DPRINTF(WM_DEBUG_TX,
2546 ("%s: TX: packet has %d (%d) DMA segments\n",
2547 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2548
2549 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2550
2551 /*
2552 * Store a pointer to the packet so that we can free it
2553 * later.
2554 *
2555 * Initially, we take the number of descriptors the
2556 * packet uses to be the number of DMA segments. This may be
2557 * incremented by 1 if we do checksum offload (a descriptor
2558 * is used to set the checksum context).
2559 */
2560 txs->txs_mbuf = m0;
2561 txs->txs_firstdesc = sc->sc_txnext;
2562 txs->txs_ndesc = segs_needed;
2563
2564 /* Set up offload parameters for this packet. */
2565 if (m0->m_pkthdr.csum_flags &
2566 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2567 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2568 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2569 if (wm_tx_offload(sc, txs, &cksumcmd,
2570 &cksumfields) != 0) {
2571 /* Error message already displayed. */
2572 bus_dmamap_unload(sc->sc_dmat, dmamap);
2573 continue;
2574 }
2575 } else {
2576 cksumcmd = 0;
2577 cksumfields = 0;
2578 }
2579
2580 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2581
2582 /* Sync the DMA map. */
2583 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2584 BUS_DMASYNC_PREWRITE);
2585
2586 /*
2587 * Initialize the transmit descriptors.
2588 */
2589 for (nexttx = sc->sc_txnext, seg = 0;
2590 seg < dmamap->dm_nsegs; seg++) {
2591 for (seglen = dmamap->dm_segs[seg].ds_len,
2592 curaddr = dmamap->dm_segs[seg].ds_addr;
2593 seglen != 0;
2594 curaddr += curlen, seglen -= curlen,
2595 nexttx = WM_NEXTTX(sc, nexttx)) {
2596 curlen = seglen;
2597
2598 /*
2599 * So says the Linux driver:
2600 * Work around for premature descriptor
2601 * write-backs in TSO mode. Append a
2602 * 4-byte sentinel descriptor.
2603 */
2604 if (use_tso &&
2605 seg == dmamap->dm_nsegs - 1 &&
2606 curlen > 8)
2607 curlen -= 4;
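/*
 * Shaving 4 bytes here leaves seglen nonzero, so the
 * loop runs one extra time and emits a trailing 4-byte
 * descriptor: that is the sentinel.
 */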
2608
2609 wm_set_dma_addr(
2610 &sc->sc_txdescs[nexttx].wtx_addr,
2611 curaddr);
2612 sc->sc_txdescs[nexttx].wtx_cmdlen =
2613 htole32(cksumcmd | curlen);
2614 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2615 0;
2616 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2617 cksumfields;
2618 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2619 lasttx = nexttx;
2620
2621 DPRINTF(WM_DEBUG_TX,
2622 ("%s: TX: desc %d: low 0x%08lx, "
2623 "len 0x%04x\n",
2624 device_xname(sc->sc_dev), nexttx,
2625 curaddr & 0xffffffffUL, (unsigned)curlen));
2626 }
2627 }
2628
2629 KASSERT(lasttx != -1);
2630
2631 /*
2632 * Set up the command byte on the last descriptor of
2633 * the packet. If we're in the interrupt delay window,
2634 * delay the interrupt.
2635 */
2636 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2637 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2638
2639 /*
2640 * If VLANs are enabled and the packet has a VLAN tag, set
2641 * up the descriptor to encapsulate the packet for us.
2642 *
2643 * This is only valid on the last descriptor of the packet.
2644 */
2645 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2646 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2647 htole32(WTX_CMD_VLE);
2648 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2649 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2650 }
2651
2652 txs->txs_lastdesc = lasttx;
2653
2654 DPRINTF(WM_DEBUG_TX,
2655 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2656 device_xname(sc->sc_dev),
2657 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2658
2659 /* Sync the descriptors we're using. */
2660 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2661 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2662
2663 /* Give the packet to the chip. */
2664 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2665
2666 DPRINTF(WM_DEBUG_TX,
2667 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2668
2669 DPRINTF(WM_DEBUG_TX,
2670 ("%s: TX: finished transmitting packet, job %d\n",
2671 device_xname(sc->sc_dev), sc->sc_txsnext));
2672
2673 /* Advance the tx pointer. */
2674 sc->sc_txfree -= txs->txs_ndesc;
2675 sc->sc_txnext = nexttx;
2676
2677 sc->sc_txsfree--;
2678 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2679
2680 /* Pass the packet to any BPF listeners. */
2681 bpf_mtap(ifp, m0);
2682 }
2683
2684 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2685 /* No more slots; notify upper layer. */
2686 ifp->if_flags |= IFF_OACTIVE;
2687 }
2688
2689 if (sc->sc_txfree != ofree) {
2690 /* Set a watchdog timer in case the chip flakes out. */
2691 ifp->if_timer = 5;
2692 }
2693 }
2694
2695 /*
2696 * wm_watchdog: [ifnet interface function]
2697 *
2698 * Watchdog timer handler.
2699 */
2700 static void
2701 wm_watchdog(struct ifnet *ifp)
2702 {
2703 struct wm_softc *sc = ifp->if_softc;
2704
2705 /*
2706 * Since we're using delayed interrupts, sweep up
2707 * before we report an error.
2708 */
2709 wm_txintr(sc);
2710
2711 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2712 log(LOG_ERR,
2713 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2714 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2715 sc->sc_txnext);
2716 ifp->if_oerrors++;
2717
2718 /* Reset the interface. */
2719 (void) wm_init(ifp);
2720 }
2721
2722 /* Try to get more packets going. */
2723 wm_start(ifp);
2724 }
2725
2726 /*
2727 * wm_ioctl: [ifnet interface function]
2728 *
2729 * Handle control requests from the operator.
2730 */
2731 static int
2732 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2733 {
2734 struct wm_softc *sc = ifp->if_softc;
2735 struct ifreq *ifr = (struct ifreq *) data;
2736 struct ifaddr *ifa = (struct ifaddr *)data;
2737 struct sockaddr_dl *sdl;
2738 int diff, s, error;
2739
2740 s = splnet();
2741
2742 switch (cmd) {
2743 case SIOCSIFFLAGS:
2744 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
2745 break;
2746 if (ifp->if_flags & IFF_UP) {
2747 diff = (ifp->if_flags ^ sc->sc_if_flags)
2748 & (IFF_PROMISC | IFF_ALLMULTI);
2749 if ((diff & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2750 /*
2751 * If the only difference between the last and
2752 * the new flags is IFF_PROMISC or
2753 * IFF_ALLMULTI, just set the multicast filter
2754 * (don't reset the chip, to avoid a link down).
2755 */
2756 wm_set_filter(sc);
2757 } else {
2758 /*
2759 * Reset the interface to pick up changes in
2760 * any other flags that affect the hardware
2761 * state.
2762 */
2763 wm_init(ifp);
2764 }
2765 } else {
2766 if (ifp->if_flags & IFF_RUNNING)
2767 wm_stop(ifp, 1);
2768 }
2769 sc->sc_if_flags = ifp->if_flags;
2770 error = 0;
2771 break;
2772 case SIOCSIFMEDIA:
2773 case SIOCGIFMEDIA:
2774 /* Flow control requires full-duplex mode. */
2775 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2776 (ifr->ifr_media & IFM_FDX) == 0)
2777 ifr->ifr_media &= ~IFM_ETH_FMASK;
2778 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2779 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2780 /* We can do both TXPAUSE and RXPAUSE. */
2781 ifr->ifr_media |=
2782 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2783 }
2784 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2785 }
2786 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2787 break;
2788 case SIOCINITIFADDR:
2789 if (ifa->ifa_addr->sa_family == AF_LINK) {
2790 sdl = satosdl(ifp->if_dl->ifa_addr);
2791 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2792 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2793 /* unicast address is first multicast entry */
2794 wm_set_filter(sc);
2795 error = 0;
2796 break;
2797 }
2798 /* Fall through for rest */
2799 default:
2800 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2801 break;
2802
2803 error = 0;
2804
2805 if (cmd == SIOCSIFCAP)
2806 error = (*ifp->if_init)(ifp);
2807 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2808 ;
2809 else if (ifp->if_flags & IFF_RUNNING) {
2810 /*
2811 * Multicast list has changed; set the hardware filter
2812 * accordingly.
2813 */
2814 wm_set_filter(sc);
2815 }
2816 break;
2817 }
2818
2819 /* Try to get more packets going. */
2820 wm_start(ifp);
2821
2822 splx(s);
2823 return error;
2824 }
2825
2826 /*
2827 * wm_intr:
2828 *
2829 * Interrupt service routine.
2830 */
2831 static int
2832 wm_intr(void *arg)
2833 {
2834 struct wm_softc *sc = arg;
2835 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2836 uint32_t icr;
2837 int handled = 0;
2838
2839 while (1 /* CONSTCOND */) {
2840 icr = CSR_READ(sc, WMREG_ICR);
2841 if ((icr & sc->sc_icr) == 0)
2842 break;
2843 #if 0 /*NRND > 0*/
2844 if (RND_ENABLED(&sc->rnd_source))
2845 rnd_add_uint32(&sc->rnd_source, icr);
2846 #endif
2847
2848 handled = 1;
2849
2850 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2851 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2852 DPRINTF(WM_DEBUG_RX,
2853 ("%s: RX: got Rx intr 0x%08x\n",
2854 device_xname(sc->sc_dev),
2855 icr & (ICR_RXDMT0|ICR_RXT0)));
2856 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2857 }
2858 #endif
2859 wm_rxintr(sc);
2860
2861 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2862 if (icr & ICR_TXDW) {
2863 DPRINTF(WM_DEBUG_TX,
2864 ("%s: TX: got TXDW interrupt\n",
2865 device_xname(sc->sc_dev)));
2866 WM_EVCNT_INCR(&sc->sc_ev_txdw);
2867 }
2868 #endif
2869 wm_txintr(sc);
2870
2871 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2872 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2873 wm_linkintr(sc, icr);
2874 }
2875
2876 if (icr & ICR_RXO) {
2877 #if defined(WM_DEBUG)
2878 log(LOG_WARNING, "%s: Receive overrun\n",
2879 device_xname(sc->sc_dev));
2880 #endif /* defined(WM_DEBUG) */
2881 }
2882 }
2883
2884 if (handled) {
2885 /* Try to get more packets going. */
2886 wm_start(ifp);
2887 }
2888
2889 return handled;
2890 }
2891
2892 /*
2893 * wm_txintr:
2894 *
2895 * Helper; handle transmit interrupts.
2896 */
2897 static void
2898 wm_txintr(struct wm_softc *sc)
2899 {
2900 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2901 struct wm_txsoft *txs;
2902 uint8_t status;
2903 int i;
2904
2905 ifp->if_flags &= ~IFF_OACTIVE;
2906
2907 /*
2908 * Go through the Tx list and free mbufs for those
2909 * frames which have been transmitted.
2910 */
2911 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2912 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2913 txs = &sc->sc_txsoft[i];
2914
2915 DPRINTF(WM_DEBUG_TX,
2916 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2917
2918 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2919 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2920
2921 status =
2922 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2923 if ((status & WTX_ST_DD) == 0) {
2924 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2925 BUS_DMASYNC_PREREAD);
2926 break;
2927 }
2928
2929 DPRINTF(WM_DEBUG_TX,
2930 ("%s: TX: job %d done: descs %d..%d\n",
2931 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2932 txs->txs_lastdesc));
2933
2934 /*
2935 * XXX We should probably be using the statistics
2936 * XXX registers, but I don't know if they exist
2937 * XXX on chips before the i82544.
2938 */
2939
2940 #ifdef WM_EVENT_COUNTERS
2941 if (status & WTX_ST_TU)
2942 WM_EVCNT_INCR(&sc->sc_ev_tu);
2943 #endif /* WM_EVENT_COUNTERS */
2944
2945 if (status & (WTX_ST_EC|WTX_ST_LC)) {
2946 ifp->if_oerrors++;
2947 if (status & WTX_ST_LC)
2948 log(LOG_WARNING, "%s: late collision\n",
2949 device_xname(sc->sc_dev));
2950 else if (status & WTX_ST_EC) {
2951 ifp->if_collisions += 16;
2952 log(LOG_WARNING, "%s: excessive collisions\n",
2953 device_xname(sc->sc_dev));
2954 }
2955 } else
2956 ifp->if_opackets++;
2957
2958 sc->sc_txfree += txs->txs_ndesc;
2959 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2960 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2961 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2962 m_freem(txs->txs_mbuf);
2963 txs->txs_mbuf = NULL;
2964 }
2965
2966 /* Update the dirty transmit buffer pointer. */
2967 sc->sc_txsdirty = i;
2968 DPRINTF(WM_DEBUG_TX,
2969 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
2970
2971 /*
2972 * If there are no more pending transmissions, cancel the watchdog
2973 * timer.
2974 */
2975 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2976 ifp->if_timer = 0;
2977 }
2978
2979 /*
2980 * wm_rxintr:
2981 *
2982 * Helper; handle receive interrupts.
2983 */
2984 static void
2985 wm_rxintr(struct wm_softc *sc)
2986 {
2987 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2988 struct wm_rxsoft *rxs;
2989 struct mbuf *m;
2990 int i, len;
2991 uint8_t status, errors;
2992 uint16_t vlantag;
2993
2994 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2995 rxs = &sc->sc_rxsoft[i];
2996
2997 DPRINTF(WM_DEBUG_RX,
2998 ("%s: RX: checking descriptor %d\n",
2999 device_xname(sc->sc_dev), i));
3000
3001 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3002
3003 status = sc->sc_rxdescs[i].wrx_status;
3004 errors = sc->sc_rxdescs[i].wrx_errors;
3005 len = le16toh(sc->sc_rxdescs[i].wrx_len);
3006 vlantag = sc->sc_rxdescs[i].wrx_special;
3007
3008 if ((status & WRX_ST_DD) == 0) {
3009 /*
3010 * We have processed all of the receive descriptors.
3011 */
3012 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3013 break;
3014 }
3015
3016 if (__predict_false(sc->sc_rxdiscard)) {
3017 DPRINTF(WM_DEBUG_RX,
3018 ("%s: RX: discarding contents of descriptor %d\n",
3019 device_xname(sc->sc_dev), i));
3020 WM_INIT_RXDESC(sc, i);
3021 if (status & WRX_ST_EOP) {
3022 /* Reset our state. */
3023 DPRINTF(WM_DEBUG_RX,
3024 ("%s: RX: resetting rxdiscard -> 0\n",
3025 device_xname(sc->sc_dev)));
3026 sc->sc_rxdiscard = 0;
3027 }
3028 continue;
3029 }
3030
3031 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3032 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3033
3034 m = rxs->rxs_mbuf;
3035
3036 /*
3037 * Add a new receive buffer to the ring, unless of
3038 * course the length is zero. Treat the latter as a
3039 * failed mapping.
3040 */
3041 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3042 /*
3043 * Failed, throw away what we've done so
3044 * far, and discard the rest of the packet.
3045 */
3046 ifp->if_ierrors++;
3047 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3048 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3049 WM_INIT_RXDESC(sc, i);
3050 if ((status & WRX_ST_EOP) == 0)
3051 sc->sc_rxdiscard = 1;
3052 if (sc->sc_rxhead != NULL)
3053 m_freem(sc->sc_rxhead);
3054 WM_RXCHAIN_RESET(sc);
3055 DPRINTF(WM_DEBUG_RX,
3056 ("%s: RX: Rx buffer allocation failed, "
3057 "dropping packet%s\n", device_xname(sc->sc_dev),
3058 sc->sc_rxdiscard ? " (discard)" : ""));
3059 continue;
3060 }
3061
3062 m->m_len = len;
3063 sc->sc_rxlen += len;
3064 DPRINTF(WM_DEBUG_RX,
3065 ("%s: RX: buffer at %p len %d\n",
3066 device_xname(sc->sc_dev), m->m_data, len));
3067
3068 /*
3069 * If this is not the end of the packet, keep
3070 * looking.
3071 */
3072 if ((status & WRX_ST_EOP) == 0) {
3073 WM_RXCHAIN_LINK(sc, m);
3074 DPRINTF(WM_DEBUG_RX,
3075 ("%s: RX: not yet EOP, rxlen -> %d\n",
3076 device_xname(sc->sc_dev), sc->sc_rxlen));
3077 continue;
3078 }
3079
3080 /*
3081 * Okay, we have the entire packet now. The chip is
3082 * configured to include the FCS (not all chips can
3083 * be configured to strip it), so we need to trim it.
3084 * We may need to adjust the length of the previous mbuf in
3085 * the chain if the current mbuf is too short.
3086 */
3087 if (m->m_len < ETHER_CRC_LEN) {
3088 sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
3089 m->m_len = 0;
3090 } else {
3091 m->m_len -= ETHER_CRC_LEN;
3092 }
3093 len = sc->sc_rxlen - ETHER_CRC_LEN;
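/*
 * Example: a 1518-byte frame whose final Rx buffer holds only
 * 2 bytes.  The 4-byte FCS straddles the last two mbufs, so the
 * previous mbuf gives up 2 bytes, the final mbuf shrinks to 0,
 * and len = 1518 - 4 = 1514.
 */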
3094
3095 WM_RXCHAIN_LINK(sc, m);
3096
3097 *sc->sc_rxtailp = NULL;
3098 m = sc->sc_rxhead;
3099
3100 WM_RXCHAIN_RESET(sc);
3101
3102 DPRINTF(WM_DEBUG_RX,
3103 ("%s: RX: have entire packet, len -> %d\n",
3104 device_xname(sc->sc_dev), len));
3105
3106 /*
3107 * If an error occurred, update stats and drop the packet.
3108 */
3109 if (errors &
3110 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3111 if (errors & WRX_ER_SE)
3112 log(LOG_WARNING, "%s: symbol error\n",
3113 device_xname(sc->sc_dev));
3114 else if (errors & WRX_ER_SEQ)
3115 log(LOG_WARNING, "%s: receive sequence error\n",
3116 device_xname(sc->sc_dev));
3117 else if (errors & WRX_ER_CE)
3118 log(LOG_WARNING, "%s: CRC error\n",
3119 device_xname(sc->sc_dev));
3120 m_freem(m);
3121 continue;
3122 }
3123
3124 /*
3125 * No errors. Receive the packet.
3126 */
3127 m->m_pkthdr.rcvif = ifp;
3128 m->m_pkthdr.len = len;
3129
3130 /*
3131 * If VLANs are enabled, VLAN packets have been unwrapped
3132 * for us. Associate the tag with the packet.
3133 */
3134 if ((status & WRX_ST_VP) != 0) {
3135 VLAN_INPUT_TAG(ifp, m,
3136 le16toh(vlantag),
3137 continue);
3138 }
3139
3140 /*
3141 * Set up checksum info for this packet.
3142 */
3143 if ((status & WRX_ST_IXSM) == 0) {
3144 if (status & WRX_ST_IPCS) {
3145 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3146 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3147 if (errors & WRX_ER_IPE)
3148 m->m_pkthdr.csum_flags |=
3149 M_CSUM_IPv4_BAD;
3150 }
3151 if (status & WRX_ST_TCPCS) {
3152 /*
3153 * Note: we don't know if this was TCP or UDP,
3154 * so we just set both bits, and expect the
3155 * upper layers to deal.
3156 */
3157 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3158 m->m_pkthdr.csum_flags |=
3159 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3160 M_CSUM_TCPv6 | M_CSUM_UDPv6;
3161 if (errors & WRX_ER_TCPE)
3162 m->m_pkthdr.csum_flags |=
3163 M_CSUM_TCP_UDP_BAD;
3164 }
3165 }
3166
3167 ifp->if_ipackets++;
3168
3169 /* Pass this up to any BPF listeners. */
3170 bpf_mtap(ifp, m);
3171
3172 /* Pass it on. */
3173 (*ifp->if_input)(ifp, m);
3174 }
3175
3176 /* Update the receive pointer. */
3177 sc->sc_rxptr = i;
3178
3179 DPRINTF(WM_DEBUG_RX,
3180 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3181 }
3182
3183 /*
3184 * wm_linkintr_gmii:
3185 *
3186 * Helper; handle link interrupts for GMII.
3187 */
3188 static void
3189 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3190 {
3191
3192 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3193 __func__));
3194
3195 if (icr & ICR_LSC) {
3196 DPRINTF(WM_DEBUG_LINK,
3197 ("%s: LINK: LSC -> mii_tick\n",
3198 device_xname(sc->sc_dev)));
3199 mii_tick(&sc->sc_mii);
3200 if (sc->sc_type == WM_T_82543) {
3201 int miistatus, active;
3202
3203 /*
3204 * With the 82543, we need to force the MAC's
3205 * speed and duplex settings to match the
3206 * PHY's configuration.
3207 */
3208 miistatus = sc->sc_mii.mii_media_status;
3209
3210 if (miistatus & IFM_ACTIVE) {
3211 active = sc->sc_mii.mii_media_active;
3212 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3213 switch (IFM_SUBTYPE(active)) {
3214 case IFM_10_T:
3215 sc->sc_ctrl |= CTRL_SPEED_10;
3216 break;
3217 case IFM_100_TX:
3218 sc->sc_ctrl |= CTRL_SPEED_100;
3219 break;
3220 case IFM_1000_T:
3221 sc->sc_ctrl |= CTRL_SPEED_1000;
3222 break;
3223 default:
3224 /*
3225 * Fiber?
3226 * Should not enter here.
3227 */
3228 printf("unknown media (%x)\n",
3229 active);
3230 break;
3231 }
3232 if (active & IFM_FDX)
3233 sc->sc_ctrl |= CTRL_FD;
3234 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3235 }
3236 } else if ((sc->sc_type == WM_T_ICH8)
3237 && (sc->sc_phytype == WMPHY_IGP_3)) {
3238 wm_kmrn_lock_loss_workaround_ich8lan(sc);
3239 } else if (sc->sc_type == WM_T_PCH) {
3240 wm_k1_gig_workaround_hv(sc,
3241 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3242 }
3243
3244 if ((sc->sc_phytype == WMPHY_82578)
3245 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3246 == IFM_1000_T)) {
3247
3248 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3249 delay(200*1000); /* XXX too big */
3250
3251 /* Link stall fix for link up */
3252 wm_gmii_hv_writereg(sc->sc_dev, 1,
3253 HV_MUX_DATA_CTRL,
3254 HV_MUX_DATA_CTRL_GEN_TO_MAC
3255 | HV_MUX_DATA_CTRL_FORCE_SPEED);
3256 wm_gmii_hv_writereg(sc->sc_dev, 1,
3257 HV_MUX_DATA_CTRL,
3258 HV_MUX_DATA_CTRL_GEN_TO_MAC);
3259 }
3260 }
3261 } else if (icr & ICR_RXSEQ) {
3262 DPRINTF(WM_DEBUG_LINK,
3263 ("%s: LINK Receive sequence error\n",
3264 device_xname(sc->sc_dev)));
3265 }
3266 }
3267
3268 /*
3269 * wm_linkintr_tbi:
3270 *
3271 * Helper; handle link interrupts for TBI mode.
3272 */
3273 static void
3274 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3275 {
3276 uint32_t status;
3277
3278 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3279 __func__));
3280
3281 status = CSR_READ(sc, WMREG_STATUS);
3282 if (icr & ICR_LSC) {
3283 if (status & STATUS_LU) {
3284 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3285 device_xname(sc->sc_dev),
3286 (status & STATUS_FD) ? "FDX" : "HDX"));
3287 /*
3288 * NOTE: CTRL will update TFCE and RFCE automatically,
3289 * so we should update sc->sc_ctrl
3290 */
3291
3292 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3293 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3294 sc->sc_fcrtl &= ~FCRTL_XONE;
3295 if (status & STATUS_FD)
3296 sc->sc_tctl |=
3297 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3298 else
3299 sc->sc_tctl |=
3300 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3301 if (sc->sc_ctrl & CTRL_TFCE)
3302 sc->sc_fcrtl |= FCRTL_XONE;
3303 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3304 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3305 WMREG_OLD_FCRTL : WMREG_FCRTL,
3306 sc->sc_fcrtl);
3307 sc->sc_tbi_linkup = 1;
3308 } else {
3309 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3310 device_xname(sc->sc_dev)));
3311 sc->sc_tbi_linkup = 0;
3312 }
3313 wm_tbi_set_linkled(sc);
3314 } else if (icr & ICR_RXCFG) {
3315 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3316 device_xname(sc->sc_dev)));
3317 sc->sc_tbi_nrxcfg++;
3318 wm_check_for_link(sc);
3319 } else if (icr & ICR_RXSEQ) {
3320 DPRINTF(WM_DEBUG_LINK,
3321 ("%s: LINK: Receive sequence error\n",
3322 device_xname(sc->sc_dev)));
3323 }
3324 }
3325
3326 /*
3327 * wm_linkintr:
3328 *
3329 * Helper; handle link interrupts.
3330 */
3331 static void
3332 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3333 {
3334
3335 if (sc->sc_flags & WM_F_HAS_MII)
3336 wm_linkintr_gmii(sc, icr);
3337 else
3338 wm_linkintr_tbi(sc, icr);
3339 }
3340
3341 /*
3342 * wm_tick:
3343 *
3344 * One second timer, used to check link status, sweep up
3345 * completed transmit jobs, etc.
3346 */
3347 static void
3348 wm_tick(void *arg)
3349 {
3350 struct wm_softc *sc = arg;
3351 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3352 int s;
3353
3354 s = splnet();
3355
3356 if (sc->sc_type >= WM_T_82542_2_1) {
3357 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3358 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3359 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3360 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3361 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3362 }
3363
3364 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3365 ifp->if_ierrors += 0ULL + /* ensure quad_t */
3366 + CSR_READ(sc, WMREG_CRCERRS)
3367 + CSR_READ(sc, WMREG_ALGNERRC)
3368 + CSR_READ(sc, WMREG_SYMERRC)
3369 + CSR_READ(sc, WMREG_RXERRC)
3370 + CSR_READ(sc, WMREG_SEC)
3371 + CSR_READ(sc, WMREG_CEXTERR)
3372 + CSR_READ(sc, WMREG_RLEC);
3373 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
3374
3375 if (sc->sc_flags & WM_F_HAS_MII)
3376 mii_tick(&sc->sc_mii);
3377 else
3378 wm_tbi_check_link(sc);
3379
3380 splx(s);
3381
3382 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3383 }
3384
3385 /*
3386 * wm_reset:
3387 *
3388 * Reset the i82542 chip.
3389 */
3390 static void
3391 wm_reset(struct wm_softc *sc)
3392 {
3393 int phy_reset = 0;
3394 uint32_t reg, mask;
3395 int i;
3396
3397 /*
3398 * Allocate on-chip memory according to the MTU size.
3399 * The Packet Buffer Allocation register must be written
3400 * before the chip is reset.
3401 */
3402 switch (sc->sc_type) {
3403 case WM_T_82547:
3404 case WM_T_82547_2:
3405 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3406 PBA_22K : PBA_30K;
3407 sc->sc_txfifo_head = 0;
3408 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3409 sc->sc_txfifo_size =
3410 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3411 sc->sc_txfifo_stall = 0;
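/*
 * The 82547 splits its packet buffer between Rx (sc_pba) and
 * Tx; e.g. at the standard MTU, PBA_30K leaves a
 * (PBA_40K - PBA_30K) = 10 KB Tx FIFO starting at the 30 KB
 * boundary (illustrative split).
 */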
3412 break;
3413 case WM_T_82571:
3414 case WM_T_82572:
3415 case WM_T_82575: /* XXX need special handling for jumbo frames */
3416 case WM_T_80003:
3417 sc->sc_pba = PBA_32K;
3418 break;
3419 case WM_T_82580:
3420 case WM_T_82580ER:
3421 sc->sc_pba = PBA_35K;
3422 break;
3423 case WM_T_82576:
3424 sc->sc_pba = PBA_64K;
3425 break;
3426 case WM_T_82573:
3427 sc->sc_pba = PBA_12K;
3428 break;
3429 case WM_T_82574:
3430 case WM_T_82583:
3431 sc->sc_pba = PBA_20K;
3432 break;
3433 case WM_T_ICH8:
3434 sc->sc_pba = PBA_8K;
3435 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3436 break;
3437 case WM_T_ICH9:
3438 case WM_T_ICH10:
3439 case WM_T_PCH:
3440 sc->sc_pba = PBA_10K;
3441 break;
3442 default:
3443 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3444 PBA_40K : PBA_48K;
3445 break;
3446 }
3447 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3448
3449 /* Prevent the PCI-E bus from sticking */
3450 if (sc->sc_flags & WM_F_PCIE) {
3451 int timeout = 800;
3452
3453 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3454 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3455
3456 while (timeout--) {
3457 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
3458 break;
3459 delay(100);
3460 }
3461 }
3462
3463 /* Set the completion timeout for interface */
3464 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
3465 wm_set_pcie_completion_timeout(sc);
3466
3467 /* Clear interrupt */
3468 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3469
3470 /* Stop the transmit and receive processes. */
3471 CSR_WRITE(sc, WMREG_RCTL, 0);
3472 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3473 sc->sc_rctl &= ~RCTL_EN;
3474
3475 /* XXX set_tbi_sbp_82543() */
3476
3477 delay(10*1000);
3478
3479 /* Must acquire the MDIO ownership before MAC reset */
3480 switch (sc->sc_type) {
3481 case WM_T_82573:
3482 case WM_T_82574:
3483 case WM_T_82583:
3484 i = 0;
3485 reg = CSR_READ(sc, WMREG_EXTCNFCTR)
3486 | EXTCNFCTR_MDIO_SW_OWNERSHIP;
3487 do {
3488 CSR_WRITE(sc, WMREG_EXTCNFCTR,
3489 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
3490 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
3491 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
3492 break;
3493 reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
3494 delay(2*1000);
3495 i++;
3496 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
3497 break;
3498 default:
3499 break;
3500 }
3501
3502 /*
3503 * 82541 Errata 29? & 82547 Errata 28?
3504 * See also the description about PHY_RST bit in CTRL register
3505 * in 8254x_GBe_SDM.pdf.
3506 */
3507 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3508 CSR_WRITE(sc, WMREG_CTRL,
3509 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3510 delay(5000);
3511 }
3512
3513 switch (sc->sc_type) {
3514 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3515 case WM_T_82541:
3516 case WM_T_82541_2:
3517 case WM_T_82547:
3518 case WM_T_82547_2:
3519 /*
3520 * On some chipsets, a reset through a memory-mapped write
3521 * cycle can cause the chip to reset before completing the
3522 * write cycle. This causes a major headache that can be
3523 * avoided by issuing the reset via indirect register writes
3524 * through I/O space.
3525 *
3526 * So, if we successfully mapped the I/O BAR at attach time,
3527 * use that. Otherwise, try our luck with a memory-mapped
3528 * reset.
3529 */
3530 if (sc->sc_flags & WM_F_IOH_VALID)
3531 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3532 else
3533 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3534 break;
3535 case WM_T_82545_3:
3536 case WM_T_82546_3:
3537 /* Use the shadow control register on these chips. */
3538 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3539 break;
3540 case WM_T_80003:
3541 mask = swfwphysem[sc->sc_funcid];
3542 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3543 wm_get_swfw_semaphore(sc, mask);
3544 CSR_WRITE(sc, WMREG_CTRL, reg);
3545 wm_put_swfw_semaphore(sc, mask);
3546 break;
3547 case WM_T_ICH8:
3548 case WM_T_ICH9:
3549 case WM_T_ICH10:
3550 case WM_T_PCH:
3551 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3552 if (wm_check_reset_block(sc) == 0) {
3553 if (sc->sc_type >= WM_T_PCH) {
3554 uint32_t status;
3555
3556 status = CSR_READ(sc, WMREG_STATUS);
3557 CSR_WRITE(sc, WMREG_STATUS,
3558 status & ~STATUS_PHYRA);
3559 }
3560
3561 reg |= CTRL_PHY_RESET;
3562 phy_reset = 1;
3563 }
3564 wm_get_swfwhw_semaphore(sc);
3565 CSR_WRITE(sc, WMREG_CTRL, reg);
3566 delay(20*1000);
3567 wm_put_swfwhw_semaphore(sc);
3568 break;
3569 case WM_T_82542_2_0:
3570 case WM_T_82542_2_1:
3571 case WM_T_82543:
3572 case WM_T_82540:
3573 case WM_T_82545:
3574 case WM_T_82546:
3575 case WM_T_82571:
3576 case WM_T_82572:
3577 case WM_T_82573:
3578 case WM_T_82574:
3579 case WM_T_82575:
3580 case WM_T_82576:
3581 case WM_T_82580:
3582 case WM_T_82580ER:
3583 case WM_T_82583:
3584 default:
3585 /* Everything else can safely use the documented method. */
3586 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3587 break;
3588 }
3589
3590 if (phy_reset != 0)
3591 wm_get_cfg_done(sc);
3592
3593 /* reload EEPROM */
3594 switch (sc->sc_type) {
3595 case WM_T_82542_2_0:
3596 case WM_T_82542_2_1:
3597 case WM_T_82543:
3598 case WM_T_82544:
3599 delay(10);
3600 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3601 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3602 delay(2000);
3603 break;
3604 case WM_T_82540:
3605 case WM_T_82545:
3606 case WM_T_82545_3:
3607 case WM_T_82546:
3608 case WM_T_82546_3:
3609 delay(5*1000);
3610 /* XXX Disable HW ARPs on ASF enabled adapters */
3611 break;
3612 case WM_T_82541:
3613 case WM_T_82541_2:
3614 case WM_T_82547:
3615 case WM_T_82547_2:
3616 delay(20000);
3617 /* XXX Disable HW ARPs on ASF enabled adapters */
3618 break;
3619 case WM_T_82571:
3620 case WM_T_82572:
3621 case WM_T_82573:
3622 case WM_T_82574:
3623 case WM_T_82583:
3624 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3625 delay(10);
3626 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3627 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3628 }
3629 /* check EECD_EE_AUTORD */
3630 wm_get_auto_rd_done(sc);
3631 /*
3632 * PHY configuration from the NVM starts only after EECD_AUTO_RD
3633 * is set.
3634 */
3635 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3636 || (sc->sc_type == WM_T_82583))
3637 delay(25*1000);
3638 break;
3639 case WM_T_82575:
3640 case WM_T_82576:
3641 case WM_T_82580:
3642 case WM_T_82580ER:
3643 case WM_T_80003:
3644 case WM_T_ICH8:
3645 case WM_T_ICH9:
3646 /* check EECD_EE_AUTORD */
3647 wm_get_auto_rd_done(sc);
3648 break;
3649 case WM_T_ICH10:
3650 case WM_T_PCH:
3651 wm_lan_init_done(sc);
3652 break;
3653 default:
3654 panic("%s: unknown type\n", __func__);
3655 }
3656
3657 /* Check whether EEPROM is present or not */
3658 switch (sc->sc_type) {
3659 case WM_T_82575:
3660 case WM_T_82576:
3661 #if 0 /* XXX */
3662 case WM_T_82580:
3663 case WM_T_82580ER:
3664 #endif
3665 case WM_T_ICH8:
3666 case WM_T_ICH9:
3667 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3668 /* Not found */
3669 sc->sc_flags |= WM_F_EEPROM_INVALID;
3670 if ((sc->sc_type == WM_T_82575)
3671 || (sc->sc_type == WM_T_82576)
3672 || (sc->sc_type == WM_T_82580)
3673 || (sc->sc_type == WM_T_82580ER))
3674 wm_reset_init_script_82575(sc);
3675 }
3676 break;
3677 default:
3678 break;
3679 }
3680
3681 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)) {
3682 /* clear global device reset status bit */
3683 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3684 }
3685
3686 /* Clear any pending interrupt events. */
3687 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3688 reg = CSR_READ(sc, WMREG_ICR);
3689
3690 /* reload sc_ctrl */
3691 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3692
3693 /* dummy read from WUC */
3694 if (sc->sc_type == WM_T_PCH)
3695 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3696 /*
3697 * For PCH, this write will make sure that any noise will be detected
3698 * as a CRC error and be dropped rather than show up as a bad packet
3699 * to the DMA engine.
3700 */
3701 if (sc->sc_type == WM_T_PCH)
3702 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3703
3704 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3705 CSR_WRITE(sc, WMREG_WUC, 0);
3706
3707 /* XXX need special handling for 82580 */
3708 }
3709
3710 /*
3711 * wm_init: [ifnet interface function]
3712 *
3713 * Initialize the interface. Must be called at splnet().
3714 */
3715 static int
3716 wm_init(struct ifnet *ifp)
3717 {
3718 struct wm_softc *sc = ifp->if_softc;
3719 struct wm_rxsoft *rxs;
3720 int i, error = 0;
3721 uint32_t reg;
3722
3723 /*
3724 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3725 * There is a small but measurable benefit to avoiding the adjustment
3726 * of the descriptor so that the headers are aligned, for a normal MTU,
3727 * on such platforms. One possibility is that the DMA itself is
3728 * slightly more efficient if the front of the entire packet (instead
3729 * of the front of the headers) is aligned.
3730 *
3731 * Note we must always set align_tweak to 0 if we are using
3732 * jumbo frames.
3733 */
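/*
 * Illustrative example (not from the datasheet): with sc_align_tweak
 * set to 2, the 14-byte Ethernet header starts at offset 2 in the
 * receive buffer, so the IP header that follows lands on a 4-byte
 * boundary -- the usual 2-byte RX alignment tweak.
 */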
3734 #ifdef __NO_STRICT_ALIGNMENT
3735 sc->sc_align_tweak = 0;
3736 #else
3737 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3738 sc->sc_align_tweak = 0;
3739 else
3740 sc->sc_align_tweak = 2;
3741 #endif /* __NO_STRICT_ALIGNMENT */
3742
3743 /* Cancel any pending I/O. */
3744 wm_stop(ifp, 0);
3745
3746 /* update statistics before reset */
3747 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3748 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3749
3750 /* Reset the chip to a known state. */
3751 wm_reset(sc);
3752
3753 switch (sc->sc_type) {
3754 case WM_T_82571:
3755 case WM_T_82572:
3756 case WM_T_82573:
3757 case WM_T_82574:
3758 case WM_T_82583:
3759 case WM_T_80003:
3760 case WM_T_ICH8:
3761 case WM_T_ICH9:
3762 case WM_T_ICH10:
3763 case WM_T_PCH:
3764 if (wm_check_mng_mode(sc) != 0)
3765 wm_get_hw_control(sc);
3766 break;
3767 default:
3768 break;
3769 }
3770
3771 /* Reset the PHY. */
3772 if (sc->sc_flags & WM_F_HAS_MII)
3773 wm_gmii_reset(sc);
3774
3775 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3776 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3777 if (sc->sc_type == WM_T_PCH)
3778 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3779
3780 /* Initialize the transmit descriptor ring. */
3781 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3782 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3783 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3784 sc->sc_txfree = WM_NTXDESC(sc);
3785 sc->sc_txnext = 0;
3786
3787 if (sc->sc_type < WM_T_82543) {
3788 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3789 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3790 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3791 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3792 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3793 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3794 } else {
3795 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3796 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3797 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3798 CSR_WRITE(sc, WMREG_TDH, 0);
3799 CSR_WRITE(sc, WMREG_TDT, 0);
3800 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3801 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3802
3803 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3804 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3805 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3806 | TXDCTL_WTHRESH(0));
3807 else {
3808 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3809 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3810 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3811 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3812 }
3813 }
3814 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3815 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3816
3817 /* Initialize the transmit job descriptors. */
3818 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3819 sc->sc_txsoft[i].txs_mbuf = NULL;
3820 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3821 sc->sc_txsnext = 0;
3822 sc->sc_txsdirty = 0;
3823
3824 /*
3825 * Initialize the receive descriptor and receive job
3826 * descriptor rings.
3827 */
3828 if (sc->sc_type < WM_T_82543) {
3829 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3830 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3831 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3832 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3833 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3834 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3835
3836 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3837 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3838 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3839 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3840 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3841 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3842 } else {
3843 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3844 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3845 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3846 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3847 CSR_WRITE(sc, WMREG_EITR(0), 450);
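/*
 * SRRCTL expresses the packet buffer size in units of
 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, so MCLBYTES must be a
 * whole multiple of that unit; otherwise we panic below.
 */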
3848 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
3849 panic("%s: MCLBYTES %d unsupported for i82575 or higher\n", __func__, MCLBYTES);
3850 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3851 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
3852 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3853 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3854 | RXDCTL_WTHRESH(1));
3855 } else {
3856 CSR_WRITE(sc, WMREG_RDH, 0);
3857 CSR_WRITE(sc, WMREG_RDT, 0);
3858 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3859 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3860 }
3861 }
3862 for (i = 0; i < WM_NRXDESC; i++) {
3863 rxs = &sc->sc_rxsoft[i];
3864 if (rxs->rxs_mbuf == NULL) {
3865 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3866 log(LOG_ERR, "%s: unable to allocate or map rx "
3867 "buffer %d, error = %d\n",
3868 device_xname(sc->sc_dev), i, error);
3869 /*
3870 * XXX Should attempt to run with fewer receive
3871 * XXX buffers instead of just failing.
3872 */
3873 wm_rxdrain(sc);
3874 goto out;
3875 }
3876 } else {
3877 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3878 WM_INIT_RXDESC(sc, i);
3879 }
3880 }
3881 sc->sc_rxptr = 0;
3882 sc->sc_rxdiscard = 0;
3883 WM_RXCHAIN_RESET(sc);
3884
3885 /*
3886 * Clear out the VLAN table -- we don't use it (yet).
3887 */
3888 CSR_WRITE(sc, WMREG_VET, 0);
3889 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3890 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3891
3892 /*
3893 * Set up flow-control parameters.
3894 *
3895 * XXX Values could probably stand some tuning.
3896 */
3897 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3898 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
3899 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3900 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3901 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3902 }
3903
3904 sc->sc_fcrtl = FCRTL_DFLT;
3905 if (sc->sc_type < WM_T_82543) {
3906 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3907 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3908 } else {
3909 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3910 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3911 }
3912
3913 if (sc->sc_type == WM_T_80003)
3914 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3915 else
3916 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3917
3918 /* Deal with VLAN enables. */
3919 if (VLAN_ATTACHED(&sc->sc_ethercom))
3920 sc->sc_ctrl |= CTRL_VME;
3921 else
3922 sc->sc_ctrl &= ~CTRL_VME;
3923
3924 /* Write the control registers. */
3925 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3926
3927 if (sc->sc_flags & WM_F_HAS_MII) {
3928 int val;
3929
3930 switch (sc->sc_type) {
3931 case WM_T_80003:
3932 case WM_T_ICH8:
3933 case WM_T_ICH9:
3934 case WM_T_ICH10:
3935 case WM_T_PCH:
3936 /*
3937 * Set the MAC to wait the maximum time between each
3938 * iteration and increase the max iterations when
3939 * polling the PHY; this fixes erroneous timeouts at
3940 * 10Mbps.
3941 */
3942 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3943 0xFFFF);
3944 val = wm_kmrn_readreg(sc,
3945 KUMCTRLSTA_OFFSET_INB_PARAM);
3946 val |= 0x3F;
3947 wm_kmrn_writereg(sc,
3948 KUMCTRLSTA_OFFSET_INB_PARAM, val);
3949 break;
3950 default:
3951 break;
3952 }
3953
3954 if (sc->sc_type == WM_T_80003) {
3955 val = CSR_READ(sc, WMREG_CTRL_EXT);
3956 val &= ~CTRL_EXT_LINK_MODE_MASK;
3957 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3958
3959 /* Bypass RX and TX FIFO's */
3960 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3961 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
3962 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3963 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3964 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3965 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3966 }
3967 }
3968 #if 0
3969 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3970 #endif
3971
3972 /*
3973 * Set up checksum offload parameters.
3974 */
3975 reg = CSR_READ(sc, WMREG_RXCSUM);
3976 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3977 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3978 reg |= RXCSUM_IPOFL;
3979 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3980 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3981 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3982 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3983 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3984
3985 /* Reset TBI's RXCFG count */
3986 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
3987
3988 /*
3989 * Set up the interrupt registers.
3990 */
3991 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3992 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3993 ICR_RXO | ICR_RXT0;
3994 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3995 sc->sc_icr |= ICR_RXCFG;
3996 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3997
3998 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3999 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
4000 reg = CSR_READ(sc, WMREG_KABGTXD);
4001 reg |= KABGTXD_BGSQLBIAS;
4002 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4003 }
4004
4005 /* Set up the inter-packet gap. */
4006 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4007
4008 if (sc->sc_type >= WM_T_82543) {
4009 /*
4010 * Set up the interrupt throttling register (units of 256ns)
4011 * Note that a footnote in Intel's documentation says this
4012 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4013 * or 10Mbit mode. Empirically, this also appears to hold for
4014 * the 1024ns units of the other
4015 * interrupt-related timer registers -- so, really, we ought
4016 * to divide this value by 4 when the link speed is low.
4017 *
4018 * XXX implement this division at link speed change!
4019 */
4020
4021 /*
4022 * For N interrupts/sec, set this value to:
4023 * 1000000000 / (N * 256). Note that we set the
4024 * absolute and packet timer values to this value
4025 * divided by 4 to get "simple timer" behavior.
4026 */
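/*
 * Worked example: sc_itr = 1500 gives
 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec, and the
 * "simple timer" values programmed earlier (TIDV/TADV and
 * RDTR/RADV) are 1500 / 4 = 375.
 */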
4027
4028 sc->sc_itr = 1500; /* 2604 ints/sec */
4029 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4030 }
4031
4032 /* Set the VLAN ethernetype. */
4033 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4034
4035 /*
4036 * Set up the transmit control register; we start out with
4037 * a collision distance suitable for FDX, but update it when
4038 * we resolve the media type.
4039 */
4040 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4041 | TCTL_CT(TX_COLLISION_THRESHOLD)
4042 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4043 if (sc->sc_type >= WM_T_82571)
4044 sc->sc_tctl |= TCTL_MULR;
4045 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4046
4047 if (sc->sc_type == WM_T_80003) {
4048 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4049 reg &= ~TCTL_EXT_GCEX_MASK;
4050 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4051 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4052 }
4053
4054 /* Set the media. */
4055 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4056 goto out;
4057
4058 /* Configure for OS presence */
4059 wm_init_manageability(sc);
4060
4061 /*
4062 * Set up the receive control register; we actually program
4063 * the register when we set the receive filter. Use multicast
4064 * address offset type 0.
4065 *
4066 * Only the i82544 has the ability to strip the incoming
4067 * CRC, so we don't enable that feature.
4068 */
4069 sc->sc_mchash_type = 0;
4070 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4071 | RCTL_MO(sc->sc_mchash_type);
4072
4073 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4074 && (ifp->if_mtu > ETHERMTU)) {
4075 sc->sc_rctl |= RCTL_LPE;
4076 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4077 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4078 }
4079
4080 if (MCLBYTES == 2048) {
4081 sc->sc_rctl |= RCTL_2k;
4082 } else {
4083 if (sc->sc_type >= WM_T_82543) {
4084 switch (MCLBYTES) {
4085 case 4096:
4086 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4087 break;
4088 case 8192:
4089 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4090 break;
4091 case 16384:
4092 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4093 break;
4094 default:
4095 panic("wm_init: MCLBYTES %d unsupported",
4096 MCLBYTES);
4097 break;
4098 }
4099 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4100 }
4101
4102 /* Set the receive filter. */
4103 wm_set_filter(sc);
4104
4105 /* On 575 and later set RDT only if RX enabled... */
4106 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4107 for (i = 0; i < WM_NRXDESC; i++)
4108 WM_INIT_RXDESC(sc, i);
4109
4110 /* Start the one second link check clock. */
4111 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4112
4113 /* ...all done! */
4114 ifp->if_flags |= IFF_RUNNING;
4115 ifp->if_flags &= ~IFF_OACTIVE;
4116
4117 out:
4118 if (error)
4119 log(LOG_ERR, "%s: interface not running\n",
4120 device_xname(sc->sc_dev));
4121 return error;
4122 }
4123
4124 /*
4125 * wm_rxdrain:
4126 *
4127 * Drain the receive queue.
4128 */
4129 static void
4130 wm_rxdrain(struct wm_softc *sc)
4131 {
4132 struct wm_rxsoft *rxs;
4133 int i;
4134
4135 for (i = 0; i < WM_NRXDESC; i++) {
4136 rxs = &sc->sc_rxsoft[i];
4137 if (rxs->rxs_mbuf != NULL) {
4138 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4139 m_freem(rxs->rxs_mbuf);
4140 rxs->rxs_mbuf = NULL;
4141 }
4142 }
4143 }
4144
4145 /*
4146 * wm_stop: [ifnet interface function]
4147 *
4148 * Stop transmission on the interface.
4149 */
4150 static void
4151 wm_stop(struct ifnet *ifp, int disable)
4152 {
4153 struct wm_softc *sc = ifp->if_softc;
4154 struct wm_txsoft *txs;
4155 int i;
4156
4157 /* Stop the one second clock. */
4158 callout_stop(&sc->sc_tick_ch);
4159
4160 /* Stop the 82547 Tx FIFO stall check timer. */
4161 if (sc->sc_type == WM_T_82547)
4162 callout_stop(&sc->sc_txfifo_ch);
4163
4164 if (sc->sc_flags & WM_F_HAS_MII) {
4165 /* Down the MII. */
4166 mii_down(&sc->sc_mii);
4167 } else {
4168 #if 0
4169 /* Should we clear PHY's status properly? */
4170 wm_reset(sc);
4171 #endif
4172 }
4173
4174 /* Stop the transmit and receive processes. */
4175 CSR_WRITE(sc, WMREG_TCTL, 0);
4176 CSR_WRITE(sc, WMREG_RCTL, 0);
4177 sc->sc_rctl &= ~RCTL_EN;
4178
4179 /*
4180 * Clear the interrupt mask to ensure the device cannot assert its
4181 * interrupt line.
4182 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4183 * any currently pending or shared interrupt.
4184 */
4185 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4186 sc->sc_icr = 0;
4187
4188 /* Release any queued transmit buffers. */
4189 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4190 txs = &sc->sc_txsoft[i];
4191 if (txs->txs_mbuf != NULL) {
4192 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4193 m_freem(txs->txs_mbuf);
4194 txs->txs_mbuf = NULL;
4195 }
4196 }
4197
4198 /* Mark the interface as down and cancel the watchdog timer. */
4199 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4200 ifp->if_timer = 0;
4201
4202 if (disable)
4203 wm_rxdrain(sc);
4204
4205 #if 0 /* notyet */
4206 if (sc->sc_type >= WM_T_82544)
4207 CSR_WRITE(sc, WMREG_WUC, 0);
4208 #endif
4209 }
4210
4211 void
4212 wm_get_auto_rd_done(struct wm_softc *sc)
4213 {
4214 int i;
4215
4216 /* wait for eeprom to reload */
4217 switch (sc->sc_type) {
4218 case WM_T_82571:
4219 case WM_T_82572:
4220 case WM_T_82573:
4221 case WM_T_82574:
4222 case WM_T_82583:
4223 case WM_T_82575:
4224 case WM_T_82576:
4225 case WM_T_82580:
4226 case WM_T_82580ER:
4227 case WM_T_80003:
4228 case WM_T_ICH8:
4229 case WM_T_ICH9:
4230 for (i = 0; i < 10; i++) {
4231 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4232 break;
4233 delay(1000);
4234 }
4235 if (i == 10) {
4236 log(LOG_ERR, "%s: auto read from eeprom failed to "
4237 "complete\n", device_xname(sc->sc_dev));
4238 }
4239 break;
4240 default:
4241 break;
4242 }
4243 }
4244
4245 void
4246 wm_lan_init_done(struct wm_softc *sc)
4247 {
4248 uint32_t reg = 0;
4249 int i;
4250
4251 /* Wait for the hardware to signal LAN init done after reset */
4252 switch (sc->sc_type) {
4253 case WM_T_ICH10:
4254 case WM_T_PCH:
4255 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4256 reg = CSR_READ(sc, WMREG_STATUS);
4257 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4258 break;
4259 delay(100);
4260 }
4261 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4262 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4263 "complete\n", device_xname(sc->sc_dev), __func__);
4264 }
4265 break;
4266 default:
4267 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4268 __func__);
4269 break;
4270 }
4271
4272 reg &= ~STATUS_LAN_INIT_DONE;
4273 CSR_WRITE(sc, WMREG_STATUS, reg);
4274 }
4275
4276 void
4277 wm_get_cfg_done(struct wm_softc *sc)
4278 {
4279 int mask;
4280 uint32_t reg;
4281 int i;
4282
4283 /* Wait for the post-reset configuration cycle to complete */
4284 switch (sc->sc_type) {
4285 case WM_T_82542_2_0:
4286 case WM_T_82542_2_1:
4287 /* null */
4288 break;
4289 case WM_T_82543:
4290 case WM_T_82544:
4291 case WM_T_82540:
4292 case WM_T_82545:
4293 case WM_T_82545_3:
4294 case WM_T_82546:
4295 case WM_T_82546_3:
4296 case WM_T_82541:
4297 case WM_T_82541_2:
4298 case WM_T_82547:
4299 case WM_T_82547_2:
4300 case WM_T_82573:
4301 case WM_T_82574:
4302 case WM_T_82583:
4303 /* generic */
4304 delay(10*1000);
4305 break;
4306 case WM_T_80003:
4307 case WM_T_82571:
4308 case WM_T_82572:
4309 case WM_T_82575:
4310 case WM_T_82576:
4311 case WM_T_82580:
4312 case WM_T_82580ER:
4313 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4314 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4315 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4316 break;
4317 delay(1000);
4318 }
4319 if (i >= WM_PHY_CFG_TIMEOUT) {
4320 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
4321 device_xname(sc->sc_dev), __func__));
4322 }
4323 break;
4324 case WM_T_ICH8:
4325 case WM_T_ICH9:
4326 case WM_T_ICH10:
4327 case WM_T_PCH:
4328 if (sc->sc_type >= WM_T_PCH) {
4329 reg = CSR_READ(sc, WMREG_STATUS);
4330 if ((reg & STATUS_PHYRA) != 0)
4331 CSR_WRITE(sc, WMREG_STATUS,
4332 reg & ~STATUS_PHYRA);
4333 }
4334 delay(10*1000);
4335 break;
4336 default:
4337 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4338 __func__);
4339 break;
4340 }
4341 }
4342
4343 /*
4344 * wm_acquire_eeprom:
4345 *
4346 * Perform the EEPROM handshake required on some chips.
4347 */
4348 static int
4349 wm_acquire_eeprom(struct wm_softc *sc)
4350 {
4351 uint32_t reg;
4352 int x;
4353 int ret = 0;
4354
4355 /* always succeeds */
4356 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4357 return 0;
4358
4359 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
4360 ret = wm_get_swfwhw_semaphore(sc);
4361 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
4362 /* this will also do wm_get_swsm_semaphore() if needed */
4363 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
4364 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4365 ret = wm_get_swsm_semaphore(sc);
4366 }
4367
4368 if (ret) {
4369 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4370 __func__);
4371 return 1;
4372 }
4373
4374 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4375 reg = CSR_READ(sc, WMREG_EECD);
4376
4377 /* Request EEPROM access. */
4378 reg |= EECD_EE_REQ;
4379 CSR_WRITE(sc, WMREG_EECD, reg);
4380
4381 /* ...and wait for it to be granted. */
4382 for (x = 0; x < 1000; x++) {
4383 reg = CSR_READ(sc, WMREG_EECD);
4384 if (reg & EECD_EE_GNT)
4385 break;
4386 delay(5);
4387 }
4388 if ((reg & EECD_EE_GNT) == 0) {
4389 aprint_error_dev(sc->sc_dev,
4390 "could not acquire EEPROM GNT\n");
4391 reg &= ~EECD_EE_REQ;
4392 CSR_WRITE(sc, WMREG_EECD, reg);
4393 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4394 wm_put_swfwhw_semaphore(sc);
4395 if (sc->sc_flags & WM_F_SWFW_SYNC)
4396 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4397 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4398 wm_put_swsm_semaphore(sc);
4399 return 1;
4400 }
4401 }
4402
4403 return 0;
4404 }
4405
4406 /*
4407 * wm_release_eeprom:
4408 *
4409 * Release the EEPROM mutex.
4410 */
4411 static void
4412 wm_release_eeprom(struct wm_softc *sc)
4413 {
4414 uint32_t reg;
4415
4416 /* always succeeds */
4417 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4418 return;
4419
4420 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4421 reg = CSR_READ(sc, WMREG_EECD);
4422 reg &= ~EECD_EE_REQ;
4423 CSR_WRITE(sc, WMREG_EECD, reg);
4424 }
4425
4426 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4427 wm_put_swfwhw_semaphore(sc);
4428 if (sc->sc_flags & WM_F_SWFW_SYNC)
4429 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4430 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4431 wm_put_swsm_semaphore(sc);
4432 }
4433
4434 /*
4435 * wm_eeprom_sendbits:
4436 *
4437 * Send a series of bits to the EEPROM.
4438 */
4439 static void
4440 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
4441 {
4442 uint32_t reg;
4443 int x;
4444
4445 reg = CSR_READ(sc, WMREG_EECD);
4446
4447 for (x = nbits; x > 0; x--) {
4448 if (bits & (1U << (x - 1)))
4449 reg |= EECD_DI;
4450 else
4451 reg &= ~EECD_DI;
4452 CSR_WRITE(sc, WMREG_EECD, reg);
4453 delay(2);
4454 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4455 delay(2);
4456 CSR_WRITE(sc, WMREG_EECD, reg);
4457 delay(2);
4458 }
4459 }
4460
4461 /*
4462 * wm_eeprom_recvbits:
4463 *
4464 * Receive a series of bits from the EEPROM.
4465 */
4466 static void
4467 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
4468 {
4469 uint32_t reg, val;
4470 int x;
4471
4472 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
4473
4474 val = 0;
4475 for (x = nbits; x > 0; x--) {
4476 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4477 delay(2);
4478 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
4479 val |= (1U << (x - 1));
4480 CSR_WRITE(sc, WMREG_EECD, reg);
4481 delay(2);
4482 }
4483 *valp = val;
4484 }
4485
4486 /*
4487 * wm_read_eeprom_uwire:
4488 *
4489 * Read a word from the EEPROM using the MicroWire protocol.
4490 */
4491 static int
4492 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4493 {
4494 uint32_t reg, val;
4495 int i;
4496
4497 for (i = 0; i < wordcnt; i++) {
4498 /* Clear SK and DI. */
4499 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
4500 CSR_WRITE(sc, WMREG_EECD, reg);
4501
4502 /* Set CHIP SELECT. */
4503 reg |= EECD_CS;
4504 CSR_WRITE(sc, WMREG_EECD, reg);
4505 delay(2);
4506
4507 /* Shift in the READ command. */
4508 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
4509
4510 /* Shift in address. */
4511 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
4512
4513 /* Shift out the data. */
4514 wm_eeprom_recvbits(sc, &val, 16);
4515 data[i] = val & 0xffff;
4516
4517 /* Clear CHIP SELECT. */
4518 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
4519 CSR_WRITE(sc, WMREG_EECD, reg);
4520 delay(2);
4521 }
4522
4523 return 0;
4524 }
4525
4526 /*
4527 * wm_spi_eeprom_ready:
4528 *
4529 * Wait for a SPI EEPROM to be ready for commands.
4530 */
4531 static int
4532 wm_spi_eeprom_ready(struct wm_softc *sc)
4533 {
4534 uint32_t val;
4535 int usec;
4536
4537 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
4538 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
4539 wm_eeprom_recvbits(sc, &val, 8);
4540 if ((val & SPI_SR_RDY) == 0)
4541 break;
4542 }
4543 if (usec >= SPI_MAX_RETRIES) {
4544 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
4545 return 1;
4546 }
4547 return 0;
4548 }
4549
4550 /*
4551 * wm_read_eeprom_spi:
4552 *
4553 * Read a word from the EEPROM using the SPI protocol.
4554 */
4555 static int
4556 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4557 {
4558 uint32_t reg, val;
4559 int i;
4560 uint8_t opc;
4561
4562 /* Clear SK and CS. */
4563 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
4564 CSR_WRITE(sc, WMREG_EECD, reg);
4565 delay(2);
4566
4567 if (wm_spi_eeprom_ready(sc))
4568 return 1;
4569
4570 /* Toggle CS to flush commands. */
4571 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
4572 delay(2);
4573 CSR_WRITE(sc, WMREG_EECD, reg);
4574 delay(2);
4575
4576 opc = SPI_OPC_READ;
4577 if (sc->sc_ee_addrbits == 8 && word >= 128)
4578 opc |= SPI_OPC_A8;
4579
4580 wm_eeprom_sendbits(sc, opc, 8);
4581 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
4582
4583 for (i = 0; i < wordcnt; i++) {
4584 wm_eeprom_recvbits(sc, &val, 16);
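/* The SPI part shifts the high byte out first; swap to host word order. */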
4585 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
4586 }
4587
4588 /* Raise CS and clear SK. */
4589 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
4590 CSR_WRITE(sc, WMREG_EECD, reg);
4591 delay(2);
4592
4593 return 0;
4594 }
4595
4596 #define EEPROM_CHECKSUM 0xBABA
4597 #define EEPROM_SIZE 0x0040
4598
4599 /*
4600 * wm_validate_eeprom_checksum
4601 *
4602 * The checksum is defined as the sum (mod 2^16) of the first 64 16-bit words.
4603 */
4604 static int
4605 wm_validate_eeprom_checksum(struct wm_softc *sc)
4606 {
4607 uint16_t checksum;
4608 uint16_t eeprom_data;
4609 int i;
4610
4611 checksum = 0;
4612
4613 for (i = 0; i < EEPROM_SIZE; i++) {
4614 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
4615 return 1;
4616 checksum += eeprom_data;
4617 }
4618
4619 if (checksum != (uint16_t) EEPROM_CHECKSUM)
4620 return 1;
4621
4622 return 0;
4623 }
4624
4625 /*
4626 * wm_read_eeprom:
4627 *
4628 * Read data from the serial EEPROM.
4629 */
4630 static int
4631 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4632 {
4633 int rv;
4634
4635 if (sc->sc_flags & WM_F_EEPROM_INVALID)
4636 return 1;
4637
4638 if (wm_acquire_eeprom(sc))
4639 return 1;
4640
4641 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4642 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4643 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
4644 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
4645 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
4646 else if (sc->sc_flags & WM_F_EEPROM_SPI)
4647 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
4648 else
4649 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
4650
4651 wm_release_eeprom(sc);
4652 return rv;
4653 }
4654
4655 static int
4656 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
4657 uint16_t *data)
4658 {
4659 int i, eerd = 0;
4660 int error = 0;
4661
4662 for (i = 0; i < wordcnt; i++) {
4663 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
4664
4665 CSR_WRITE(sc, WMREG_EERD, eerd);
4666 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
4667 if (error != 0)
4668 break;
4669
4670 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
4671 }
4672
4673 return error;
4674 }
4675
4676 static int
4677 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
4678 {
4679 uint32_t attempts = 100000;
4680 uint32_t i, reg = 0;
4681 int32_t done = -1;
4682
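/* Up to 100000 polls at 5us apart: roughly a 500ms ceiling. */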
4683 for (i = 0; i < attempts; i++) {
4684 reg = CSR_READ(sc, rw);
4685
4686 if (reg & EERD_DONE) {
4687 done = 0;
4688 break;
4689 }
4690 delay(5);
4691 }
4692
4693 return done;
4694 }
4695
4696 static int
4697 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
4698 {
4699 uint16_t myea[ETHER_ADDR_LEN / 2];
4700 uint16_t offset;
4701 int do_invert = 0;
4702
4703 if (sc->sc_funcid == 0)
4704 offset = EEPROM_OFF_MACADDR;
4705 else {
4706 switch (sc->sc_type) {
4707 case WM_T_82580:
4708 case WM_T_82580ER:
4709 switch (sc->sc_funcid) {
4710 case 1:
4711 offset = EEPROM_OFF_LAN1;
4712 break;
4713 case 2:
4714 offset = EEPROM_OFF_LAN2;
4715 break;
4716 case 3:
4717 offset = EEPROM_OFF_LAN3;
4718 break;
4719 default:
4720 goto bad;
4721 /* NOTREACHED */
4722 break;
4723 }
4724 break;
4725 case WM_T_82571:
4726 case WM_T_82575:
4727 case WM_T_82576:
4728 case WM_T_80003:
4729 if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1,
4730 &offset) != 0) {
4731 goto bad;
4732 }
4733
4734 /* no pointer */
4735 if (offset == 0xffff) {
4736 /* reset the offset to LAN0 */
4737 offset = EEPROM_OFF_MACADDR;
4738 do_invert = 1;
4739 goto do_read;
4740 }
4741
4742 switch (sc->sc_funcid) {
4743 case 1:
4744 offset += EEPROM_OFF_MACADDR_LAN1;
4745 break;
4746 case 2:
4747 offset += EEPROM_OFF_MACADDR_LAN2;
4748 break;
4749 case 3:
4750 offset += EEPROM_OFF_MACADDR_LAN3;
4751 break;
4752 default:
4753 goto bad;
4754 /* NOTREACHED */
4755 break;
4756 }
4757 break;
4758 default:
4759 do_invert = 1;
4760 break;
4761 }
4762 }
4763 do_read:
4764 if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
4765 myea) != 0) {
4766 goto bad;
4767 }
4768
4769 enaddr[0] = myea[0] & 0xff;
4770 enaddr[1] = myea[0] >> 8;
4771 enaddr[2] = myea[1] & 0xff;
4772 enaddr[3] = myea[1] >> 8;
4773 enaddr[4] = myea[2] & 0xff;
4774 enaddr[5] = myea[2] >> 8;
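/*
 * Illustrative example: EEPROM words {0x1200, 0x5634, 0x9a78}
 * unpack low-byte-first to the address 00:12:34:56:78:9a.
 */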
4775
4776 /*
4777 * Toggle the LSB of the MAC address on the second port
4778 * of some dual port cards.
4779 */
4780 if (do_invert != 0)
4781 enaddr[5] ^= 1;
4782
4783 return 0;
4784
4785 bad:
4786 aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
4787
4788 return -1;
4789 }
4790
4791 /*
4792 * wm_add_rxbuf:
4793 *
4794 * Add a receive buffer to the indicated descriptor.
4795 */
4796 static int
4797 wm_add_rxbuf(struct wm_softc *sc, int idx)
4798 {
4799 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4800 struct mbuf *m;
4801 int error;
4802
4803 MGETHDR(m, M_DONTWAIT, MT_DATA);
4804 if (m == NULL)
4805 return ENOBUFS;
4806
4807 MCLGET(m, M_DONTWAIT);
4808 if ((m->m_flags & M_EXT) == 0) {
4809 m_freem(m);
4810 return ENOBUFS;
4811 }
4812
4813 if (rxs->rxs_mbuf != NULL)
4814 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4815
4816 rxs->rxs_mbuf = m;
4817
4818 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4819 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4820 BUS_DMA_READ|BUS_DMA_NOWAIT);
4821 if (error) {
4822 /* XXX XXX XXX */
4823 aprint_error_dev(sc->sc_dev,
4824 "unable to load rx DMA map %d, error = %d\n",
4825 idx, error);
4826 panic("wm_add_rxbuf");
4827 }
4828
4829 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4830 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4831
4832 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4833 if ((sc->sc_rctl & RCTL_EN) != 0)
4834 WM_INIT_RXDESC(sc, idx);
4835 } else
4836 WM_INIT_RXDESC(sc, idx);
4837
4838 return 0;
4839 }
4840
4841 /*
4842 * wm_set_ral:
4843 *
4844 * Set an entry in the receive address list.
4845 */
4846 static void
4847 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4848 {
4849 uint32_t ral_lo, ral_hi;
4850
4851 if (enaddr != NULL) {
4852 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
4853 (enaddr[3] << 24);
4854 ral_hi = enaddr[4] | (enaddr[5] << 8);
4855 ral_hi |= RAL_AV;
4856 } else {
4857 ral_lo = 0;
4858 ral_hi = 0;
4859 }
4860
4861 if (sc->sc_type >= WM_T_82544) {
4862 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
4863 ral_lo);
4864 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
4865 ral_hi);
4866 } else {
4867 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
4868 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
4869 }
4870 }
4871
4872 /*
4873 * wm_mchash:
4874 *
4875 * Compute the hash of the multicast address for the 4096-bit
4876 * multicast filter (1024-bit on the ICH/PCH variants).
4877 */
4878 static uint32_t
4879 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4880 {
4881 static const int lo_shift[4] = { 4, 3, 2, 0 };
4882 static const int hi_shift[4] = { 4, 5, 6, 8 };
4883 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4884 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4885 uint32_t hash;
4886
4887 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4888 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
4889 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4890 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4891 return (hash & 0x3ff);
4892 }
4893 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4894 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4895
4896 return (hash & 0xfff);
4897 }
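/*
 * Illustrative example (non-ICH, sc_mchash_type = 0): for the
 * multicast address 01:00:5e:00:00:01, enaddr[4] = 0x00 and
 * enaddr[5] = 0x01, so hash = (0x00 >> 4) | (0x01 << 4) = 0x010;
 * wm_set_filter() below then sets bit (hash & 0x1f) = 16 in
 * multicast table register (hash >> 5) = 0.
 */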
4898
4899 /*
4900 * wm_set_filter:
4901 *
4902 * Set up the receive filter.
4903 */
4904 static void
4905 wm_set_filter(struct wm_softc *sc)
4906 {
4907 struct ethercom *ec = &sc->sc_ethercom;
4908 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4909 struct ether_multi *enm;
4910 struct ether_multistep step;
4911 bus_addr_t mta_reg;
4912 uint32_t hash, reg, bit;
4913 int i, size;
4914
4915 if (sc->sc_type >= WM_T_82544)
4916 mta_reg = WMREG_CORDOVA_MTA;
4917 else
4918 mta_reg = WMREG_MTA;
4919
4920 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4921
4922 if (ifp->if_flags & IFF_BROADCAST)
4923 sc->sc_rctl |= RCTL_BAM;
4924 if (ifp->if_flags & IFF_PROMISC) {
4925 sc->sc_rctl |= RCTL_UPE;
4926 goto allmulti;
4927 }
4928
4929 /*
4930 * Set the station address in the first RAL slot, and
4931 * clear the remaining slots.
4932 */
4933 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4934 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4935 size = WM_ICH8_RAL_TABSIZE;
4936 else
4937 size = WM_RAL_TABSIZE;
4938 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4939 for (i = 1; i < size; i++)
4940 wm_set_ral(sc, NULL, i);
4941
4942 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4943 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4944 size = WM_ICH8_MC_TABSIZE;
4945 else
4946 size = WM_MC_TABSIZE;
4947 /* Clear out the multicast table. */
4948 for (i = 0; i < size; i++)
4949 CSR_WRITE(sc, mta_reg + (i << 2), 0);
4950
4951 ETHER_FIRST_MULTI(step, ec, enm);
4952 while (enm != NULL) {
4953 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4954 /*
4955 * We must listen to a range of multicast addresses.
4956 * For now, just accept all multicasts, rather than
4957 * trying to set only those filter bits needed to match
4958 * the range. (At this time, the only use of address
4959 * ranges is for IP multicast routing, for which the
4960 * range is big enough to require all bits set.)
4961 */
4962 goto allmulti;
4963 }
4964
4965 hash = wm_mchash(sc, enm->enm_addrlo);
4966
4967 reg = (hash >> 5);
4968 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4969 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4970 reg &= 0x1f;
4971 else
4972 reg &= 0x7f;
4973 bit = hash & 0x1f;
4974
4975 hash = CSR_READ(sc, mta_reg + (reg << 2));
4976 hash |= 1U << bit;
4977
4978 /* XXX Hardware bug?? */
4979 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
4980 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4981 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4982 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4983 } else
4984 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4985
4986 ETHER_NEXT_MULTI(step, enm);
4987 }
4988
4989 ifp->if_flags &= ~IFF_ALLMULTI;
4990 goto setit;
4991
4992 allmulti:
4993 ifp->if_flags |= IFF_ALLMULTI;
4994 sc->sc_rctl |= RCTL_MPE;
4995
4996 setit:
4997 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4998 }
4999
5000 /*
5001 * wm_tbi_mediainit:
5002 *
5003 * Initialize media for use on 1000BASE-X devices.
5004 */
5005 static void
5006 wm_tbi_mediainit(struct wm_softc *sc)
5007 {
5008 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5009 const char *sep = "";
5010
5011 if (sc->sc_type < WM_T_82543)
5012 sc->sc_tipg = TIPG_WM_DFLT;
5013 else
5014 sc->sc_tipg = TIPG_LG_DFLT;
5015
5016 sc->sc_tbi_anegticks = 5;
5017
5018 /* Initialize our media structures */
5019 sc->sc_mii.mii_ifp = ifp;
5020
5021 sc->sc_ethercom.ec_mii = &sc->sc_mii;
5022 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5023 wm_tbi_mediastatus);
5024
5025 /*
5026 * SWD Pins:
5027 *
5028 * 0 = Link LED (output)
5029 * 1 = Loss Of Signal (input)
5030 */
5031 sc->sc_ctrl |= CTRL_SWDPIO(0);
5032 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5033
5034 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5035
5036 #define ADD(ss, mm, dd) \
5037 do { \
5038 aprint_normal("%s%s", sep, ss); \
5039 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
5040 sep = ", "; \
5041 } while (/*CONSTCOND*/0)
5042
5043 aprint_normal_dev(sc->sc_dev, "");
5044 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5045 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5046 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5047 aprint_normal("\n");
5048
5049 #undef ADD
5050
5051 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5052 }
5053
5054 /*
5055 * wm_tbi_mediastatus: [ifmedia interface function]
5056 *
5057 * Get the current interface media status on a 1000BASE-X device.
5058 */
5059 static void
5060 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5061 {
5062 struct wm_softc *sc = ifp->if_softc;
5063 uint32_t ctrl, status;
5064
5065 ifmr->ifm_status = IFM_AVALID;
5066 ifmr->ifm_active = IFM_ETHER;
5067
5068 status = CSR_READ(sc, WMREG_STATUS);
5069 if ((status & STATUS_LU) == 0) {
5070 ifmr->ifm_active |= IFM_NONE;
5071 return;
5072 }
5073
5074 ifmr->ifm_status |= IFM_ACTIVE;
5075 ifmr->ifm_active |= IFM_1000_SX;
5076 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5077 ifmr->ifm_active |= IFM_FDX;
5078 ctrl = CSR_READ(sc, WMREG_CTRL);
5079 if (ctrl & CTRL_RFCE)
5080 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5081 if (ctrl & CTRL_TFCE)
5082 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5083 }
5084
5085 /*
5086 * wm_tbi_mediachange: [ifmedia interface function]
5087 *
5088 * Set hardware to newly-selected media on a 1000BASE-X device.
5089 */
5090 static int
5091 wm_tbi_mediachange(struct ifnet *ifp)
5092 {
5093 struct wm_softc *sc = ifp->if_softc;
5094 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5095 uint32_t status;
5096 int i;
5097
5098 sc->sc_txcw = 0;
5099 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5100 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5101 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5102 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5103 sc->sc_txcw |= TXCW_ANE;
5104 } else {
5105 /*
5106 * If autonegotiation is turned off, force link up and turn on
5107 * full duplex
5108 */
5109 sc->sc_txcw &= ~TXCW_ANE;
5110 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5111 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5112 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5113 delay(1000);
5114 }
5115
5116 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
5117 device_xname(sc->sc_dev),sc->sc_txcw));
5118 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5119 delay(10000);
5120
5121 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
5122 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
5123
5124 /*
5125 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
5126 * optics detect a signal, 0 if they don't.
5127 */
5128 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
5129 /* Have signal; wait for the link to come up. */
5130
5131 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5132 /*
5133 * Reset the link, and let autonegotiation do its thing
5134 */
5135 sc->sc_ctrl |= CTRL_LRST;
5136 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5137 delay(1000);
5138 sc->sc_ctrl &= ~CTRL_LRST;
5139 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5140 delay(1000);
5141 }
5142
5143 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
5144 delay(10000);
5145 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
5146 break;
5147 }
5148
5149 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
5150 device_xname(sc->sc_dev),i));
5151
5152 status = CSR_READ(sc, WMREG_STATUS);
5153 DPRINTF(WM_DEBUG_LINK,
5154 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
5155 device_xname(sc->sc_dev),status, STATUS_LU));
5156 if (status & STATUS_LU) {
5157 /* Link is up. */
5158 DPRINTF(WM_DEBUG_LINK,
5159 ("%s: LINK: set media -> link up %s\n",
5160 device_xname(sc->sc_dev),
5161 (status & STATUS_FD) ? "FDX" : "HDX"));
5162
5163 /*
5164 * NOTE: CTRL will update TFCE and RFCE automatically,
5165 * so we should update sc->sc_ctrl
5166 */
5167 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5168 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5169 sc->sc_fcrtl &= ~FCRTL_XONE;
5170 if (status & STATUS_FD)
5171 sc->sc_tctl |=
5172 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5173 else
5174 sc->sc_tctl |=
5175 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5176 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
5177 sc->sc_fcrtl |= FCRTL_XONE;
5178 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5179 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5180 WMREG_OLD_FCRTL : WMREG_FCRTL,
5181 sc->sc_fcrtl);
5182 sc->sc_tbi_linkup = 1;
5183 } else {
5184 if (i == WM_LINKUP_TIMEOUT)
5185 wm_check_for_link(sc);
5186 /* Link is down. */
5187 DPRINTF(WM_DEBUG_LINK,
5188 ("%s: LINK: set media -> link down\n",
5189 device_xname(sc->sc_dev)));
5190 sc->sc_tbi_linkup = 0;
5191 }
5192 } else {
5193 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
5194 device_xname(sc->sc_dev)));
5195 sc->sc_tbi_linkup = 0;
5196 }
5197
5198 wm_tbi_set_linkled(sc);
5199
5200 return 0;
5201 }
5202
5203 /*
5204 * wm_tbi_set_linkled:
5205 *
5206 * Update the link LED on 1000BASE-X devices.
5207 */
5208 static void
5209 wm_tbi_set_linkled(struct wm_softc *sc)
5210 {
5211
5212 if (sc->sc_tbi_linkup)
5213 sc->sc_ctrl |= CTRL_SWDPIN(0);
5214 else
5215 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
5216
5217 /* 82540 or newer devices are active low */
5218 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
5219
5220 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5221 }
5222
5223 /*
5224 * wm_tbi_check_link:
5225 *
5226 * Check the link on 1000BASE-X devices.
5227 */
5228 static void
5229 wm_tbi_check_link(struct wm_softc *sc)
5230 {
5231 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5232 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5233 uint32_t rxcw, ctrl, status;
5234
5235 status = CSR_READ(sc, WMREG_STATUS);
5236
5237 rxcw = CSR_READ(sc, WMREG_RXCW);
5238 ctrl = CSR_READ(sc, WMREG_CTRL);
5239
5240 /* set link status */
5241 if ((status & STATUS_LU) == 0) {
5242 DPRINTF(WM_DEBUG_LINK,
5243 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
5244 sc->sc_tbi_linkup = 0;
5245 } else if (sc->sc_tbi_linkup == 0) {
5246 DPRINTF(WM_DEBUG_LINK,
5247 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
5248 (status & STATUS_FD) ? "FDX" : "HDX"));
5249 sc->sc_tbi_linkup = 1;
5250 }
5251
5252 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
5253 && ((status & STATUS_LU) == 0)) {
5254 sc->sc_tbi_linkup = 0;
5255 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
5256 /* RXCFG storm! */
5257 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
5258 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
5259 wm_init(ifp);
5260 wm_start(ifp);
5261 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5262 /* If the timer expired, retry autonegotiation */
5263 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
5264 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
5265 sc->sc_tbi_ticks = 0;
5266 /*
5267 * Reset the link, and let autonegotiation do
5268 * its thing
5269 */
5270 sc->sc_ctrl |= CTRL_LRST;
5271 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5272 delay(1000);
5273 sc->sc_ctrl &= ~CTRL_LRST;
5274 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5275 delay(1000);
5276 CSR_WRITE(sc, WMREG_TXCW,
5277 sc->sc_txcw & ~TXCW_ANE);
5278 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5279 }
5280 }
5281 }
5282
5283 wm_tbi_set_linkled(sc);
5284 }
5285
5286 /*
5287 * wm_gmii_reset:
5288 *
5289 * Reset the PHY.
5290 */
5291 static void
5292 wm_gmii_reset(struct wm_softc *sc)
5293 {
5294 uint32_t reg;
5295 int rv;
5296
5297 /* get phy semaphore */
5298 switch (sc->sc_type) {
5299 case WM_T_82571:
5300 case WM_T_82572:
5301 case WM_T_82573:
5302 case WM_T_82574:
5303 case WM_T_82583:
5304 /* XXX should get sw semaphore, too */
5305 rv = wm_get_swsm_semaphore(sc);
5306 break;
5307 case WM_T_82575:
5308 case WM_T_82576:
5309 case WM_T_82580:
5310 case WM_T_82580ER:
5311 case WM_T_80003:
5312 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5313 break;
5314 case WM_T_ICH8:
5315 case WM_T_ICH9:
5316 case WM_T_ICH10:
5317 case WM_T_PCH:
5318 rv = wm_get_swfwhw_semaphore(sc);
5319 break;
5320 default:
5321 /* nothing to do */
5322 rv = 0;
5323 break;
5324 }
5325 if (rv != 0) {
5326 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5327 __func__);
5328 return;
5329 }
5330
5331 switch (sc->sc_type) {
5332 case WM_T_82542_2_0:
5333 case WM_T_82542_2_1:
5334 /* null */
5335 break;
5336 case WM_T_82543:
5337 /*
5338 * With 82543, we need to force speed and duplex on the MAC
5339 * equal to what the PHY speed and duplex configuration is.
5340 * In addition, we need to perform a hardware reset on the PHY
5341 * to take it out of reset.
5342 */
5343 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5344 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5345
5346 /* The PHY reset pin is active-low. */
5347 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5348 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
5349 CTRL_EXT_SWDPIN(4));
5350 reg |= CTRL_EXT_SWDPIO(4);
5351
5352 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5353 delay(10*1000);
5354
5355 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
5356 delay(150);
5357 #if 0
5358 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
5359 #endif
5360 delay(20*1000); /* XXX extra delay to get PHY ID? */
5361 break;
5362 case WM_T_82544: /* reset 10000us */
5363 case WM_T_82540:
5364 case WM_T_82545:
5365 case WM_T_82545_3:
5366 case WM_T_82546:
5367 case WM_T_82546_3:
5368 case WM_T_82541:
5369 case WM_T_82541_2:
5370 case WM_T_82547:
5371 case WM_T_82547_2:
5372 case WM_T_82571: /* reset 100us */
5373 case WM_T_82572:
5374 case WM_T_82573:
5375 case WM_T_82574:
5376 case WM_T_82575:
5377 case WM_T_82576:
5378 case WM_T_82580:
5379 case WM_T_82580ER:
5380 case WM_T_82583:
5381 case WM_T_80003:
5382 /* generic reset */
5383 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5384 delay((sc->sc_type >= WM_T_82571) ? 100 : 10*1000);
5385 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5386 delay(150);
5387
5388 if ((sc->sc_type == WM_T_82541)
5389 || (sc->sc_type == WM_T_82541_2)
5390 || (sc->sc_type == WM_T_82547)
5391 || (sc->sc_type == WM_T_82547_2)) {
5392 /* workarounds for igp are done in igp_reset() */
5393 /* XXX add code to set LED after phy reset */
5394 }
5395 break;
5396 case WM_T_ICH8:
5397 case WM_T_ICH9:
5398 case WM_T_ICH10:
5399 case WM_T_PCH:
5400 /* generic reset */
5401 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5402 delay(100);
5403 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5404 delay(150);
5405 break;
5406 default:
5407 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5408 __func__);
5409 break;
5410 }
5411
5412 /* release PHY semaphore */
5413 switch (sc->sc_type) {
5414 case WM_T_82571:
5415 case WM_T_82572:
5416 case WM_T_82573:
5417 case WM_T_82574:
5418 case WM_T_82583:
5419 /* XXX should put sw semaphore, too */
5420 wm_put_swsm_semaphore(sc);
5421 break;
5422 case WM_T_82575:
5423 case WM_T_82576:
5424 case WM_T_82580:
5425 case WM_T_82580ER:
5426 case WM_T_80003:
5427 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5428 break;
5429 case WM_T_ICH8:
5430 case WM_T_ICH9:
5431 case WM_T_ICH10:
5432 case WM_T_PCH:
5433 wm_put_swfwhw_semaphore(sc);
5434 break;
5435 default:
5436 /* nothing to do */
5438 break;
5439 }
5440
5441 /* get_cfg_done */
5442 wm_get_cfg_done(sc);
5443
5444 /* extra setup */
5445 switch (sc->sc_type) {
5446 case WM_T_82542_2_0:
5447 case WM_T_82542_2_1:
5448 case WM_T_82543:
5449 case WM_T_82544:
5450 case WM_T_82540:
5451 case WM_T_82545:
5452 case WM_T_82545_3:
5453 case WM_T_82546:
5454 case WM_T_82546_3:
5455 case WM_T_82541_2:
5456 case WM_T_82547_2:
5457 case WM_T_82571:
5458 case WM_T_82572:
5459 case WM_T_82573:
5460 case WM_T_82574:
5461 case WM_T_82575:
5462 case WM_T_82576:
5463 case WM_T_82580:
5464 case WM_T_82580ER:
5465 case WM_T_82583:
5466 case WM_T_80003:
5467 /* null */
5468 break;
5469 case WM_T_82541:
5470 case WM_T_82547:
5471 /* XXX Configure the activity LED after PHY reset */
5472 break;
5473 case WM_T_ICH8:
5474 case WM_T_ICH9:
5475 case WM_T_ICH10:
5476 case WM_T_PCH:
5477 /* Allow time for h/w to get to a quiescent state after reset */
5478 delay(10*1000);
5479
5480 if (sc->sc_type == WM_T_PCH) {
5481 wm_hv_phy_workaround_ich8lan(sc);
5482
5483 /*
5484 * dummy read to clear the phy wakeup bit after lcd
5485 * reset
5486 */
5487 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
5488 }
5489
5490 /*
5491 * XXX Configure the LCD with the extended configuration region
5492 * in NVM
5493 */
5494
5495 /* Configure the LCD with the OEM bits in NVM */
5496 if (sc->sc_type == WM_T_PCH) {
5497 /*
5498 * Disable LPLU.
5499 * XXX It seems that 82567 has LPLU, too.
5500 */
5501 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
5502 reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
5503 reg |= HV_OEM_BITS_ANEGNOW;
5504 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
5505 }
5506 break;
5507 default:
5508 panic("%s: unknown type\n", __func__);
5509 break;
5510 }
5511 }
5512
5513 /*
5514 * wm_gmii_mediainit:
5515 *
5516 * Initialize media for use on 1000BASE-T devices.
5517 */
5518 static void
5519 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
5520 {
5521 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5522
5523 /* We have MII. */
5524 sc->sc_flags |= WM_F_HAS_MII;
5525
5526 if (sc->sc_type == WM_T_80003)
5527 sc->sc_tipg = TIPG_1000T_80003_DFLT;
5528 else
5529 sc->sc_tipg = TIPG_1000T_DFLT;
5530
5531 /*
5532 * Let the chip set speed/duplex on its own based on
5533 * signals from the PHY.
5534 * XXXbouyer - I'm not sure this is right for the 80003,
5535 * the em driver only sets CTRL_SLU here - but it seems to work.
5536 */
5537 sc->sc_ctrl |= CTRL_SLU;
5538 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5539
5540 /* Initialize our media structures and probe the GMII. */
5541 sc->sc_mii.mii_ifp = ifp;
5542
5543 switch (prodid) {
5544 case PCI_PRODUCT_INTEL_PCH_M_LM:
5545 case PCI_PRODUCT_INTEL_PCH_M_LC:
5546 /* 82577 */
5547 sc->sc_phytype = WMPHY_82577;
5548 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5549 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5550 break;
5551 case PCI_PRODUCT_INTEL_PCH_D_DM:
5552 case PCI_PRODUCT_INTEL_PCH_D_DC:
5553 /* 82578 */
5554 sc->sc_phytype = WMPHY_82578;
5555 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5556 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5557 break;
5558 case PCI_PRODUCT_INTEL_82801I_BM:
5559 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
5560 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
5561 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
5562 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
5563 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
5564 /* 82567 */
5565 sc->sc_phytype = WMPHY_BM;
5566 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5567 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5568 break;
5569 default:
5570 if ((sc->sc_flags & WM_F_SGMII) != 0) {
5571 sc->sc_mii.mii_readreg = wm_sgmii_readreg;
5572 sc->sc_mii.mii_writereg = wm_sgmii_writereg;
5573 } else if (sc->sc_type >= WM_T_80003) {
5574 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
5575 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
5576 } else if (sc->sc_type >= WM_T_82544) {
5577 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
5578 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
5579 } else {
5580 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
5581 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
5582 }
5583 break;
5584 }
5585 sc->sc_mii.mii_statchg = wm_gmii_statchg;
5586
5587 wm_gmii_reset(sc);
5588
5589 sc->sc_ethercom.ec_mii = &sc->sc_mii;
5590 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
5591 wm_gmii_mediastatus);
5592
5593 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
5594 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)) {
5595 if ((sc->sc_flags & WM_F_SGMII) == 0) {
5596 /* Attach only one port */
5597 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
5598 MII_OFFSET_ANY, MIIF_DOPAUSE);
5599 } else {
5600 int i;
5601 uint32_t ctrl_ext;
5602
5603 /* Power on sgmii phy if it is disabled */
5604 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
5605 CSR_WRITE(sc, WMREG_CTRL_EXT,
5606 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
5607 CSR_WRITE_FLUSH(sc);
5608 delay(300*1000); /* XXX too long */
5609
5610 /* from 1 to 8 */
5611 for (i = 1; i < 8; i++)
5612 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
5613 i, MII_OFFSET_ANY, MIIF_DOPAUSE);
5614
5615 /* restore previous sfp cage power state */
5616 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
5617 }
5618 } else {
5619 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5620 MII_OFFSET_ANY, MIIF_DOPAUSE);
5621 }
5622
5623 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5624 /* if failed, retry with *_bm_* */
5625 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5626 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5627
5628 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5629 MII_OFFSET_ANY, MIIF_DOPAUSE);
5630 }
5631 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5632 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
5633 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
5634 sc->sc_phytype = WMPHY_NONE;
5635 } else {
5636 /* Check PHY type */
5637 uint32_t model;
5638 struct mii_softc *child;
5639
5640 child = LIST_FIRST(&sc->sc_mii.mii_phys);
5641 if (device_is_a(child->mii_dev, "igphy")) {
5642 struct igphy_softc *isc = (struct igphy_softc *)child;
5643
5644 model = isc->sc_mii.mii_mpd_model;
5645 if (model == MII_MODEL_yyINTEL_I82566)
5646 sc->sc_phytype = WMPHY_IGP_3;
5647 }
5648
5649 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5650 }
5651 }
5652
5653 /*
5654 * wm_gmii_mediastatus: [ifmedia interface function]
5655 *
5656 * Get the current interface media status on a 1000BASE-T device.
5657 */
5658 static void
5659 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5660 {
5661 struct wm_softc *sc = ifp->if_softc;
5662
5663 ether_mediastatus(ifp, ifmr);
5664 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
5665 | sc->sc_flowflags;
5666 }
5667
5668 /*
5669 * wm_gmii_mediachange: [ifmedia interface function]
5670 *
5671 * Set hardware to newly-selected media on a 1000BASE-T device.
5672 */
5673 static int
5674 wm_gmii_mediachange(struct ifnet *ifp)
5675 {
5676 struct wm_softc *sc = ifp->if_softc;
5677 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5678 int rc;
5679
5680 if ((ifp->if_flags & IFF_UP) == 0)
5681 return 0;
5682
5683 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5684 sc->sc_ctrl |= CTRL_SLU;
5685 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
5686 || (sc->sc_type > WM_T_82543)) {
5687 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
5688 } else {
5689 sc->sc_ctrl &= ~CTRL_ASDE;
5690 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5691 if (ife->ifm_media & IFM_FDX)
5692 sc->sc_ctrl |= CTRL_FD;
5693 switch (IFM_SUBTYPE(ife->ifm_media)) {
5694 case IFM_10_T:
5695 sc->sc_ctrl |= CTRL_SPEED_10;
5696 break;
5697 case IFM_100_TX:
5698 sc->sc_ctrl |= CTRL_SPEED_100;
5699 break;
5700 case IFM_1000_T:
5701 sc->sc_ctrl |= CTRL_SPEED_1000;
5702 break;
5703 default:
5704 panic("wm_gmii_mediachange: bad media 0x%x",
5705 ife->ifm_media);
5706 }
5707 }
5708 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5709 if (sc->sc_type <= WM_T_82543)
5710 wm_gmii_reset(sc);
5711
5712 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
5713 return 0;
5714 return rc;
5715 }
5716
5717 #define MDI_IO CTRL_SWDPIN(2)
5718 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
5719 #define MDI_CLK CTRL_SWDPIN(3)
5720
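/*
 * On the i82543 the MII management interface is bit-banged through
 * software-controllable pins: MDI_IO carries the data, MDI_CLK is the
 * management clock, and MDI_DIR selects the direction of the data pin.
 */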
5721 static void
5722 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
5723 {
5724 uint32_t i, v;
5725
5726 v = CSR_READ(sc, WMREG_CTRL);
5727 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5728 v |= MDI_DIR | CTRL_SWDPIO(3);
5729
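	/* Clock each bit out MSB first, toggling MDI_CLK by hand. */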
5730 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
5731 if (data & i)
5732 v |= MDI_IO;
5733 else
5734 v &= ~MDI_IO;
5735 CSR_WRITE(sc, WMREG_CTRL, v);
5736 delay(10);
5737 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5738 delay(10);
5739 CSR_WRITE(sc, WMREG_CTRL, v);
5740 delay(10);
5741 }
5742 }
5743
5744 static uint32_t
5745 i82543_mii_recvbits(struct wm_softc *sc)
5746 {
5747 uint32_t v, i, data = 0;
5748
5749 v = CSR_READ(sc, WMREG_CTRL);
5750 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5751 v |= CTRL_SWDPIO(3);
5752
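	/*
	 * MDI_DIR is left clear, so the PHY now drives MDI_IO; clock the
	 * turnaround.
	 */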
5753 CSR_WRITE(sc, WMREG_CTRL, v);
5754 delay(10);
5755 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5756 delay(10);
5757 CSR_WRITE(sc, WMREG_CTRL, v);
5758 delay(10);
5759
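	/* Shift in the 16 data bits, sampling MDI_IO while MDI_CLK is high. */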
5760 for (i = 0; i < 16; i++) {
5761 data <<= 1;
5762 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5763 delay(10);
5764 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
5765 data |= 1;
5766 CSR_WRITE(sc, WMREG_CTRL, v);
5767 delay(10);
5768 }
5769
5770 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5771 delay(10);
5772 CSR_WRITE(sc, WMREG_CTRL, v);
5773 delay(10);
5774
5775 return data;
5776 }
5777
5778 #undef MDI_IO
5779 #undef MDI_DIR
5780 #undef MDI_CLK
5781
5782 /*
5783 * wm_gmii_i82543_readreg: [mii interface function]
5784 *
5785 * Read a PHY register on the GMII (i82543 version).
5786 */
5787 static int
5788 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
5789 {
5790 struct wm_softc *sc = device_private(self);
5791 int rv;
5792
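	/* 32 bits of preamble, then the 14-bit read command frame. */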
5793 i82543_mii_sendbits(sc, 0xffffffffU, 32);
5794 i82543_mii_sendbits(sc, reg | (phy << 5) |
5795 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
5796 rv = i82543_mii_recvbits(sc) & 0xffff;
5797
5798 DPRINTF(WM_DEBUG_GMII,
5799 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
5800 device_xname(sc->sc_dev), phy, reg, rv));
5801
5802 return rv;
5803 }
5804
5805 /*
5806 * wm_gmii_i82543_writereg: [mii interface function]
5807 *
5808 * Write a PHY register on the GMII (i82543 version).
5809 */
5810 static void
5811 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
5812 {
5813 struct wm_softc *sc = device_private(self);
5814
5815 i82543_mii_sendbits(sc, 0xffffffffU, 32);
5816 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
5817 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
5818 (MII_COMMAND_START << 30), 32);
5819 }
5820
5821 /*
5822 * wm_gmii_i82544_readreg: [mii interface function]
5823 *
5824 * Read a PHY register on the GMII.
5825 */
5826 static int
5827 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
5828 {
5829 struct wm_softc *sc = device_private(self);
5830 uint32_t mdic = 0;
5831 int i, rv;
5832
5833 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
5834 MDIC_REGADD(reg));
5835
5836 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
5837 mdic = CSR_READ(sc, WMREG_MDIC);
5838 if (mdic & MDIC_READY)
5839 break;
5840 delay(50);
5841 }
5842
5843 if ((mdic & MDIC_READY) == 0) {
5844 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
5845 device_xname(sc->sc_dev), phy, reg);
5846 rv = 0;
5847 } else if (mdic & MDIC_E) {
5848 #if 0 /* This is normal if no PHY is present. */
5849 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
5850 device_xname(sc->sc_dev), phy, reg);
5851 #endif
5852 rv = 0;
5853 } else {
5854 rv = MDIC_DATA(mdic);
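		/* An all-ones result usually means no PHY is responding. */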
5855 if (rv == 0xffff)
5856 rv = 0;
5857 }
5858
5859 return rv;
5860 }
5861
5862 /*
5863 * wm_gmii_i82544_writereg: [mii interface function]
5864 *
5865 * Write a PHY register on the GMII.
5866 */
5867 static void
5868 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
5869 {
5870 struct wm_softc *sc = device_private(self);
5871 uint32_t mdic = 0;
5872 int i;
5873
5874 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
5875 MDIC_REGADD(reg) | MDIC_DATA(val));
5876
5877 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
5878 mdic = CSR_READ(sc, WMREG_MDIC);
5879 if (mdic & MDIC_READY)
5880 break;
5881 delay(50);
5882 }
5883
5884 if ((mdic & MDIC_READY) == 0)
5885 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
5886 device_xname(sc->sc_dev), phy, reg);
5887 else if (mdic & MDIC_E)
5888 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
5889 device_xname(sc->sc_dev), phy, reg);
5890 }
5891
5892 /*
5893 * wm_gmii_i80003_readreg: [mii interface function]
5894 *
 *	Read a PHY register on the Kumeran bus (GG82563 PHY).
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
5898 */
5899 static int
5900 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
5901 {
5902 struct wm_softc *sc = device_private(self);
5903 int sem;
5904 int rv;
5905
5906 if (phy != 1) /* only one PHY on kumeran bus */
5907 return 0;
5908
5909 sem = swfwphysem[sc->sc_funcid];
5910 if (wm_get_swfw_semaphore(sc, sem)) {
5911 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5912 __func__);
5913 return 0;
5914 }
5915
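	/*
	 * The GG82563 registers are paged; registers at or above
	 * GG82563_MIN_ALT_REG are reached through the alternate page
	 * select register.
	 */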
5916 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5917 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5918 reg >> GG82563_PAGE_SHIFT);
5919 } else {
5920 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5921 reg >> GG82563_PAGE_SHIFT);
5922 }
	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
5924 delay(200);
5925 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5926 delay(200);
5927
5928 wm_put_swfw_semaphore(sc, sem);
5929 return rv;
5930 }
5931
5932 /*
5933 * wm_gmii_i80003_writereg: [mii interface function]
5934 *
 *	Write a PHY register on the Kumeran bus (GG82563 PHY).
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
5938 */
5939 static void
5940 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
5941 {
5942 struct wm_softc *sc = device_private(self);
5943 int sem;
5944
5945 if (phy != 1) /* only one PHY on kumeran bus */
5946 return;
5947
5948 sem = swfwphysem[sc->sc_funcid];
5949 if (wm_get_swfw_semaphore(sc, sem)) {
5950 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5951 __func__);
5952 return;
5953 }
5954
5955 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5956 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5957 reg >> GG82563_PAGE_SHIFT);
5958 } else {
5959 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5960 reg >> GG82563_PAGE_SHIFT);
5961 }
	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
5963 delay(200);
5964 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5965 delay(200);
5966
5967 wm_put_swfw_semaphore(sc, sem);
5968 }
5969
5970 /*
5971 * wm_gmii_bm_readreg: [mii interface function]
5972 *
 *	Read a PHY register on the BM PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
5976 */
5977 static int
5978 wm_gmii_bm_readreg(device_t self, int phy, int reg)
5979 {
5980 struct wm_softc *sc = device_private(self);
5981 int sem;
5982 int rv;
5983
5984 sem = swfwphysem[sc->sc_funcid];
5985 if (wm_get_swfw_semaphore(sc, sem)) {
5986 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5987 __func__);
5988 return 0;
5989 }
5990
5991 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
5992 if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
		else
			wm_gmii_i82544_writereg(self, phy,
			    GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);
5999 }
6000
6001 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6002 wm_put_swfw_semaphore(sc, sem);
6003 return rv;
6004 }
6005
6006 /*
6007 * wm_gmii_bm_writereg: [mii interface function]
6008 *
 *	Write a PHY register on the BM PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6012 */
6013 static void
6014 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6015 {
6016 struct wm_softc *sc = device_private(self);
6017 int sem;
6018
6019 sem = swfwphysem[sc->sc_funcid];
6020 if (wm_get_swfw_semaphore(sc, sem)) {
6021 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6022 __func__);
6023 return;
6024 }
6025
6026 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6027 if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
		else
			wm_gmii_i82544_writereg(self, phy,
			    GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);
6034 }
6035
6036 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6037 wm_put_swfw_semaphore(sc, sem);
6038 }
6039
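/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register (page 800): select page 769
 *	and enable wakeup register access, select page 800 and set the
 *	register address, read or write the data through the data opcode
 *	register, then restore the original page 769 enable state.
 */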
6040 static void
6041 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6042 {
6043 struct wm_softc *sc = device_private(self);
6044 uint16_t regnum = BM_PHY_REG_NUM(offset);
6045 uint16_t wuce;
6046
6047 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6048 if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
6050 }
6051
6052 /* Set page 769 */
6053 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6054 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6055
6056 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6057
6058 wuce &= ~BM_WUC_HOST_WU_BIT;
6059 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6060 wuce | BM_WUC_ENABLE_BIT);
6061
6062 /* Select page 800 */
6063 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6064 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6065
6066 /* Write page 800 */
6067 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6068
6069 if (rd)
6070 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6071 else
6072 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6073
6074 /* Set page 769 */
6075 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6076 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6077
6078 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6079 }
6080
6081 /*
6082 * wm_gmii_hv_readreg: [mii interface function]
6083 *
 *	Read a PHY register on the HV (PCH) PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6087 */
6088 static int
6089 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6090 {
6091 struct wm_softc *sc = device_private(self);
6092 uint16_t page = BM_PHY_REG_PAGE(reg);
6093 uint16_t regnum = BM_PHY_REG_NUM(reg);
6094 uint16_t val;
6095 int rv;
6096
6097 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6098 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6099 __func__);
6100 return 0;
6101 }
6102
6103 /* XXX Workaround failure in MDIO access while cable is disconnected */
6104 if (sc->sc_phytype == WMPHY_82577) {
6105 /* XXX must write */
6106 }
6107
6108 /* Page 800 works differently than the rest so it has its own func */
6109 if (page == BM_WUC_PAGE) {
		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return val;
6112 }
6113
	/*
	 * Pages below 768 (other than page 0) work differently than the
	 * rest and are not handled yet.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_readreg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return 0;
	}
6122
6123 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6124 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6125 page << BME1000_PAGE_SHIFT);
6126 }
6127
6128 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6129 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6130 return rv;
6131 }
6132
6133 /*
6134 * wm_gmii_hv_writereg: [mii interface function]
6135 *
 *	Write a PHY register on the HV (PCH) PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6139 */
6140 static void
6141 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6142 {
6143 struct wm_softc *sc = device_private(self);
6144 uint16_t page = BM_PHY_REG_PAGE(reg);
6145 uint16_t regnum = BM_PHY_REG_NUM(reg);
6146
6147 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6148 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6149 __func__);
6150 return;
6151 }
6152
6153 /* XXX Workaround failure in MDIO access while cable is disconnected */
6154
6155 /* Page 800 works differently than the rest so it has its own func */
6156 if (page == BM_WUC_PAGE) {
6157 uint16_t tmp;
6158
6159 tmp = val;
		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return;
6162 }
6163
	/*
	 * Pages below 768 (other than page 0) work differently than the
	 * rest and are not handled yet.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_writereg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return;
	}
6172
6173 /*
6174 * XXX Workaround MDIO accesses being disabled after entering IEEE
6175 * Power Down (whenever bit 11 of the PHY control register is set)
6176 */
6177
6178 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6179 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6180 page << BME1000_PAGE_SHIFT);
6181 }
6182
6183 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6184 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6185 }
6186
6187 /*
 * wm_sgmii_readreg:	[mii interface function]
 *
 *	Read a PHY register on the SGMII, through the I2C command register.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6193 */
6194 static int
6195 wm_sgmii_readreg(device_t self, int phy, int reg)
6196 {
6197 struct wm_softc *sc = device_private(self);
6198 uint32_t i2ccmd;
6199 int i, rv;
6200
6201 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6202 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6203 __func__);
6204 return 0;
6205 }
6206
6207 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6208 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6209 | I2CCMD_OPCODE_READ;
6210 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6211
6212 /* Poll the ready bit */
6213 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6214 delay(50);
6215 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6216 if (i2ccmd & I2CCMD_READY)
6217 break;
6218 }
6219 if ((i2ccmd & I2CCMD_READY) == 0)
6220 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
6221 if ((i2ccmd & I2CCMD_ERROR) != 0)
6222 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6223
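	/* The data comes back byte-swapped; convert it to host order. */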
6224 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
6225
6226 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6227 return rv;
6228 }
6229
6230 /*
 * wm_sgmii_writereg:	[mii interface function]
 *
 *	Write a PHY register on the SGMII, through the I2C command register.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6236 */
6237 static void
6238 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
6239 {
6240 struct wm_softc *sc = device_private(self);
6241 uint32_t i2ccmd;
6242 int i;
6243
6244 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6245 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6246 __func__);
6247 return;
6248 }
6249
6250 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6251 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6252 | I2CCMD_OPCODE_WRITE;
6253 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6254
6255 /* Poll the ready bit */
6256 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6257 delay(50);
6258 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6259 if (i2ccmd & I2CCMD_READY)
6260 break;
6261 }
6262 if ((i2ccmd & I2CCMD_READY) == 0)
6263 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
6264 if ((i2ccmd & I2CCMD_ERROR) != 0)
6265 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6266
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6268 }
6269
6270 /*
6271 * wm_gmii_statchg: [mii interface function]
6272 *
6273 * Callback from MII layer when media changes.
6274 */
6275 static void
6276 wm_gmii_statchg(device_t self)
6277 {
6278 struct wm_softc *sc = device_private(self);
6279 struct mii_data *mii = &sc->sc_mii;
6280
6281 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6282 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6283 sc->sc_fcrtl &= ~FCRTL_XONE;
6284
6285 /*
6286 * Get flow control negotiation result.
6287 */
6288 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
6289 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
6290 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
6291 mii->mii_media_active &= ~IFM_ETH_FMASK;
6292 }
6293
6294 if (sc->sc_flowflags & IFM_FLOW) {
6295 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
6296 sc->sc_ctrl |= CTRL_TFCE;
6297 sc->sc_fcrtl |= FCRTL_XONE;
6298 }
6299 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
6300 sc->sc_ctrl |= CTRL_RFCE;
6301 }
6302
6303 if (sc->sc_mii.mii_media_active & IFM_FDX) {
6304 DPRINTF(WM_DEBUG_LINK,
6305 ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
6306 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6307 } else {
6308 DPRINTF(WM_DEBUG_LINK,
6309 ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
6310 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6311 }
6312
6313 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6314 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6315 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
6316 : WMREG_FCRTL, sc->sc_fcrtl);
6317 if (sc->sc_type == WM_T_80003) {
6318 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
6319 case IFM_1000_T:
6320 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6321 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
6322 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6323 break;
6324 default:
6325 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6326 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
6327 sc->sc_tipg = TIPG_10_100_80003_DFLT;
6328 break;
6329 }
6330 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6331 }
6332 }
6333
6334 /*
6335 * wm_kmrn_readreg:
6336 *
6337 * Read a kumeran register
6338 */
6339 static int
6340 wm_kmrn_readreg(struct wm_softc *sc, int reg)
6341 {
6342 int rv;
6343
	if (sc->sc_flags & WM_F_SWFW_SYNC) {
6345 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6346 aprint_error_dev(sc->sc_dev,
6347 "%s: failed to get semaphore\n", __func__);
6348 return 0;
6349 }
	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
6351 if (wm_get_swfwhw_semaphore(sc)) {
6352 aprint_error_dev(sc->sc_dev,
6353 "%s: failed to get semaphore\n", __func__);
6354 return 0;
6355 }
6356 }
6357
6358 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6359 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6360 KUMCTRLSTA_REN);
6361 delay(2);
6362
6363 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
6364
	if (sc->sc_flags & WM_F_SWFW_SYNC)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
6368 wm_put_swfwhw_semaphore(sc);
6369
6370 return rv;
6371 }
6372
6373 /*
6374 * wm_kmrn_writereg:
6375 *
6376 * Write a kumeran register
6377 */
6378 static void
6379 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
6380 {
6381
	if (sc->sc_flags & WM_F_SWFW_SYNC) {
6383 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6384 aprint_error_dev(sc->sc_dev,
6385 "%s: failed to get semaphore\n", __func__);
6386 return;
6387 }
	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
6389 if (wm_get_swfwhw_semaphore(sc)) {
6390 aprint_error_dev(sc->sc_dev,
6391 "%s: failed to get semaphore\n", __func__);
6392 return;
6393 }
6394 }
6395
6396 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6397 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6398 (val & KUMCTRLSTA_MASK));
6399
	if (sc->sc_flags & WM_F_SWFW_SYNC)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
6403 wm_put_swfwhw_semaphore(sc);
6404 }
6405
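/*
 * wm_is_onboard_nvm_eeprom:
 *
 *	Return 1 if the NVM is an on-board EEPROM, or 0 if it is Flash
 *	(possible on the 82573, 82574 and 82583 only).
 */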
6406 static int
6407 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
6408 {
6409 uint32_t eecd = 0;
6410
6411 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
6412 || sc->sc_type == WM_T_82583) {
6413 eecd = CSR_READ(sc, WMREG_EECD);
6414
6415 /* Isolate bits 15 & 16 */
6416 eecd = ((eecd >> 15) & 0x03);
6417
6418 /* If both bits are set, device is Flash type */
6419 if (eecd == 0x03)
6420 return 0;
6421 }
6422 return 1;
6423 }
6424
6425 static int
6426 wm_get_swsm_semaphore(struct wm_softc *sc)
6427 {
6428 int32_t timeout;
6429 uint32_t swsm;
6430
6431 /* Get the FW semaphore. */
6432 timeout = 1000 + 1; /* XXX */
6433 while (timeout) {
6434 swsm = CSR_READ(sc, WMREG_SWSM);
6435 swsm |= SWSM_SWESMBI;
6436 CSR_WRITE(sc, WMREG_SWSM, swsm);
6437 /* if we managed to set the bit we got the semaphore. */
6438 swsm = CSR_READ(sc, WMREG_SWSM);
6439 if (swsm & SWSM_SWESMBI)
6440 break;
6441
6442 delay(50);
6443 timeout--;
6444 }
6445
6446 if (timeout == 0) {
6447 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
6448 /* Release semaphores */
6449 wm_put_swsm_semaphore(sc);
6450 return 1;
6451 }
6452 return 0;
6453 }
6454
6455 static void
6456 wm_put_swsm_semaphore(struct wm_softc *sc)
6457 {
6458 uint32_t swsm;
6459
6460 swsm = CSR_READ(sc, WMREG_SWSM);
6461 swsm &= ~(SWSM_SWESMBI);
6462 CSR_WRITE(sc, WMREG_SWSM, swsm);
6463 }
6464
6465 static int
6466 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6467 {
6468 uint32_t swfw_sync;
6469 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
6470 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
6472
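	/*
	 * Retry for up to about a second: take the inter-firmware SWSM
	 * semaphore if the device has one, then try to claim our software
	 * bits in SW_FW_SYNC while neither software nor firmware holds
	 * them.
	 */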
6473 for (timeout = 0; timeout < 200; timeout++) {
6474 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6475 if (wm_get_swsm_semaphore(sc)) {
6476 aprint_error_dev(sc->sc_dev,
6477 "%s: failed to get semaphore\n",
6478 __func__);
6479 return 1;
6480 }
6481 }
6482 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6483 if ((swfw_sync & (swmask | fwmask)) == 0) {
6484 swfw_sync |= swmask;
6485 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6486 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6487 wm_put_swsm_semaphore(sc);
6488 return 0;
6489 }
6490 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6491 wm_put_swsm_semaphore(sc);
6492 delay(5000);
6493 }
6494 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
6495 device_xname(sc->sc_dev), mask, swfw_sync);
6496 return 1;
6497 }
6498
6499 static void
6500 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6501 {
6502 uint32_t swfw_sync;
6503
6504 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6505 while (wm_get_swsm_semaphore(sc) != 0)
6506 continue;
6507 }
6508 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6509 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
6510 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6511 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6512 wm_put_swsm_semaphore(sc);
6513 }
6514
6515 static int
6516 wm_get_swfwhw_semaphore(struct wm_softc *sc)
6517 {
6518 uint32_t ext_ctrl;
	int timeout;
6520
6521 for (timeout = 0; timeout < 200; timeout++) {
6522 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6523 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
6524 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6525
6526 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6527 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
6528 return 0;
6529 delay(5000);
6530 }
6531 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
6532 device_xname(sc->sc_dev), ext_ctrl);
6533 return 1;
6534 }
6535
6536 static void
6537 wm_put_swfwhw_semaphore(struct wm_softc *sc)
6538 {
6539 uint32_t ext_ctrl;
6540 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6541 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
6542 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6543 }
6544
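/*
 * wm_valid_nvm_bank_detect_ich8lan:
 *
 *	Detect which flash bank holds the valid NVM image.  On most parts
 *	the EECD SEC1VAL bit reports the bank; on ICH10 and PCH the
 *	signature byte of each bank is examined instead.
 */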
6545 static int
6546 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
6547 {
6548 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
6549 uint8_t bank_high_byte;
6550 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
6551
6552 if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
6553 /* Value of bit 22 corresponds to the flash bank we're on. */
6554 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
6555 } else {
6556 wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
6557 if ((bank_high_byte & 0xc0) == 0x80)
6558 *bank = 0;
6559 else {
6560 wm_read_ich8_byte(sc, act_offset + bank1_offset,
6561 &bank_high_byte);
6562 if ((bank_high_byte & 0xc0) == 0x80)
6563 *bank = 1;
6564 else {
6565 aprint_error_dev(sc->sc_dev,
6566 "EEPROM not present\n");
6567 return -1;
6568 }
6569 }
6570 }
6571
6572 return 0;
6573 }
6574
6575 /******************************************************************************
6576 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
6577 * register.
6578 *
6579 * sc - Struct containing variables accessed by shared code
6580 * offset - offset of word in the EEPROM to read
6581 * data - word read from the EEPROM
6582 * words - number of words to read
6583 *****************************************************************************/
6584 static int
6585 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
6586 {
6587 int32_t error = 0;
6588 uint32_t flash_bank = 0;
6589 uint32_t act_offset = 0;
6590 uint32_t bank_offset = 0;
6591 uint16_t word = 0;
6592 uint16_t i = 0;
6593
	/*
	 * We need to know which is the valid flash bank.  In the event
	 * that we didn't allocate eeprom_shadow_ram, we may not be
	 * managing flash_bank, so it cannot be trusted and needs to be
	 * updated with each read.
	 */
6599 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
6600 if (error) {
6601 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
6602 __func__);
6603 return error;
6604 }
6605
6606 /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
6607 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
6608
6609 error = wm_get_swfwhw_semaphore(sc);
6610 if (error) {
6611 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6612 __func__);
6613 return error;
6614 }
6615
6616 for (i = 0; i < words; i++) {
6617 /* The NVM part needs a byte offset, hence * 2 */
6618 act_offset = bank_offset + ((offset + i) * 2);
6619 error = wm_read_ich8_word(sc, act_offset, &word);
6620 if (error) {
6621 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
6622 __func__);
6623 break;
6624 }
6625 data[i] = word;
6626 }
6627
6628 wm_put_swfwhw_semaphore(sc);
6629 return error;
6630 }
6631
6632 /******************************************************************************
6633 * This function does initial flash setup so that a new read/write/erase cycle
6634 * can be started.
6635 *
6636 * sc - The pointer to the hw structure
6637 ****************************************************************************/
6638 static int32_t
6639 wm_ich8_cycle_init(struct wm_softc *sc)
6640 {
6641 uint16_t hsfsts;
6642 int32_t error = 1;
6643 int32_t i = 0;
6644
6645 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6646
	/* Check the Flash Descriptor Valid bit in HW status */
6648 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
6649 return error;
6650 }
6651
	/* Clear FCERR and DAEL in HW status by writing 1s */
6654 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
6655
6656 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6657
	/*
	 * Either there should be a hardware SPI cycle-in-progress bit to
	 * check against, in order to start a new cycle, or the FDONE bit
	 * should read as 1 after a hardware reset, so that it can be used
	 * to tell whether a cycle is in progress or has completed.  There
	 * should also be a software semaphore mechanism guarding FDONE and
	 * the cycle-in-progress bit, so that accesses by two threads are
	 * serialized and two threads don't start a cycle at the same time.
	 */
6668
6669 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6670 /*
6671 * There is no cycle running at present, so we can start a
6672 * cycle
6673 */
6674
6675 /* Begin by setting Flash Cycle Done. */
6676 hsfsts |= HSFSTS_DONE;
6677 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6678 error = 0;
6679 } else {
6680 /*
		 * Otherwise poll for some time so the current cycle has a
6682 * chance to end before giving up.
6683 */
6684 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
6685 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6686 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6687 error = 0;
6688 break;
6689 }
6690 delay(1);
6691 }
6692 if (error == 0) {
6693 /*
			 * The previous cycle ended within the timeout; now
			 * set the Flash Cycle Done bit.
6696 */
6697 hsfsts |= HSFSTS_DONE;
6698 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6699 }
6700 }
6701 return error;
6702 }
6703
6704 /******************************************************************************
6705 * This function starts a flash cycle and waits for its completion
6706 *
6707 * sc - The pointer to the hw structure
6708 ****************************************************************************/
6709 static int32_t
6710 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
6711 {
6712 uint16_t hsflctl;
6713 uint16_t hsfsts;
6714 int32_t error = 1;
6715 uint32_t i = 0;
6716
6717 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
6718 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6719 hsflctl |= HSFCTL_GO;
6720 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6721
6722 /* wait till FDONE bit is set to 1 */
6723 do {
6724 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6725 if (hsfsts & HSFSTS_DONE)
6726 break;
6727 delay(1);
6728 i++;
6729 } while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
6731 error = 0;
6732
6733 return error;
6734 }
6735
6736 /******************************************************************************
6737 * Reads a byte or word from the NVM using the ICH8 flash access registers.
6738 *
6739 * sc - The pointer to the hw structure
6740 * index - The index of the byte or word to read.
6741 * size - Size of data to read, 1=byte 2=word
6742 * data - Pointer to the word to store the value read.
6743 *****************************************************************************/
6744 static int32_t
6745 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
6746 uint32_t size, uint16_t* data)
6747 {
6748 uint16_t hsfsts;
6749 uint16_t hsflctl;
6750 uint32_t flash_linear_address;
6751 uint32_t flash_data = 0;
6752 int32_t error = 1;
6753 int32_t count = 0;
6754
	if (size < 1 || size > 2 || data == NULL ||
6756 index > ICH_FLASH_LINEAR_ADDR_MASK)
6757 return error;
6758
6759 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
6760 sc->sc_ich8_flash_base;
6761
6762 do {
6763 delay(1);
6764 /* Steps */
6765 error = wm_ich8_cycle_init(sc);
6766 if (error)
6767 break;
6768
6769 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6770 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
6771 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
6772 & HSFCTL_BCOUNT_MASK;
6773 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
6774 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6775
6776 /*
6777 * Write the last 24 bits of index into Flash Linear address
6778 * field in Flash Address
6779 */
6780 /* TODO: TBD maybe check the index against the size of flash */
6781
6782 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
6783
6784 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
6785
6786 /*
		 * If FCERR is set, clear it and retry the whole sequence a
		 * few more times; otherwise read the result out of Flash
		 * Data0, least significant byte first.
6791 */
6792 if (error == 0) {
6793 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
6794 if (size == 1)
6795 *data = (uint8_t)(flash_data & 0x000000FF);
6796 else if (size == 2)
6797 *data = (uint16_t)(flash_data & 0x0000FFFF);
6798 break;
6799 } else {
6800 /*
6801 * If we've gotten here, then things are probably
6802 * completely hosed, but if the error condition is
6803 * detected, it won't hurt to give it another try...
6804 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
6805 */
6806 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6807 if (hsfsts & HSFSTS_ERR) {
6808 /* Repeat for some time before giving up. */
6809 continue;
6810 } else if ((hsfsts & HSFSTS_DONE) == 0)
6811 break;
6812 }
6813 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
6814
6815 return error;
6816 }
6817
6818 /******************************************************************************
6819 * Reads a single byte from the NVM using the ICH8 flash access registers.
6820 *
6821 * sc - pointer to wm_hw structure
6822 * index - The index of the byte to read.
6823 * data - Pointer to a byte to store the value read.
6824 *****************************************************************************/
6825 static int32_t
6826 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
6827 {
6828 int32_t status;
6829 uint16_t word = 0;
6830
6831 status = wm_read_ich8_data(sc, index, 1, &word);
6832 if (status == 0)
6833 *data = (uint8_t)word;
6834
6835 return status;
6836 }
6837
6838 /******************************************************************************
6839 * Reads a word from the NVM using the ICH8 flash access registers.
6840 *
6841 * sc - pointer to wm_hw structure
6842 * index - The starting byte index of the word to read.
6843 * data - Pointer to a word to store the value read.
6844 *****************************************************************************/
6845 static int32_t
6846 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
6847 {
6848 int32_t status;
6849
6850 status = wm_read_ich8_data(sc, index, 2, data);
6851 return status;
6852 }
6853
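/*
 * wm_check_mng_mode:
 *
 *	Return non-zero if manageability firmware is active on this
 *	adapter.
 */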
6854 static int
6855 wm_check_mng_mode(struct wm_softc *sc)
6856 {
6857 int rv;
6858
6859 switch (sc->sc_type) {
6860 case WM_T_ICH8:
6861 case WM_T_ICH9:
6862 case WM_T_ICH10:
6863 case WM_T_PCH:
6864 rv = wm_check_mng_mode_ich8lan(sc);
6865 break;
6866 case WM_T_82574:
6867 case WM_T_82583:
6868 rv = wm_check_mng_mode_82574(sc);
6869 break;
6870 case WM_T_82571:
6871 case WM_T_82572:
6872 case WM_T_82573:
6873 case WM_T_80003:
6874 rv = wm_check_mng_mode_generic(sc);
6875 break;
6876 default:
		/* nothing to do */
6878 rv = 0;
6879 break;
6880 }
6881
6882 return rv;
6883 }
6884
6885 static int
6886 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
6887 {
6888 uint32_t fwsm;
6889
6890 fwsm = CSR_READ(sc, WMREG_FWSM);
6891
6892 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
6893 return 1;
6894
6895 return 0;
6896 }
6897
6898 static int
6899 wm_check_mng_mode_82574(struct wm_softc *sc)
6900 {
6901 uint16_t data;
6902
6903 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
6904
6905 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
6906 return 1;
6907
6908 return 0;
6909 }
6910
6911 static int
6912 wm_check_mng_mode_generic(struct wm_softc *sc)
6913 {
6914 uint32_t fwsm;
6915
6916 fwsm = CSR_READ(sc, WMREG_FWSM);
6917
6918 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
6919 return 1;
6920
6921 return 0;
6922 }
6923
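/*
 * wm_enable_mng_pass_thru:
 *
 *	Return 1 if the firmware needs management packets passed through
 *	to the host, i.e. TCO reception and MAC address filtering are
 *	enabled and ASF does not own the interface.
 */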
6924 static int
6925 wm_enable_mng_pass_thru(struct wm_softc *sc)
6926 {
6927 uint32_t manc, fwsm, factps;
6928
6929 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
6930 return 0;
6931
6932 manc = CSR_READ(sc, WMREG_MANC);
6933
6934 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
6935 device_xname(sc->sc_dev), manc));
6936 if (((manc & MANC_RECV_TCO_EN) == 0)
6937 || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
6938 return 0;
6939
6940 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
6941 fwsm = CSR_READ(sc, WMREG_FWSM);
6942 factps = CSR_READ(sc, WMREG_FACTPS);
6943 if (((factps & FACTPS_MNGCG) == 0)
6944 && ((fwsm & FWSM_MODE_MASK)
6945 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
6946 return 1;
6947 } else if (((manc & MANC_SMBUS_EN) != 0)
6948 && ((manc & MANC_ASF_EN) == 0))
6949 return 1;
6950
6951 return 0;
6952 }
6953
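/*
 * wm_check_reset_block:
 *
 *	Return 0 if a PHY reset is currently allowed, or -1 if the
 *	firmware is blocking PHY resets.
 */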
6954 static int
6955 wm_check_reset_block(struct wm_softc *sc)
6956 {
6957 uint32_t reg;
6958
6959 switch (sc->sc_type) {
6960 case WM_T_ICH8:
6961 case WM_T_ICH9:
6962 case WM_T_ICH10:
6963 case WM_T_PCH:
6964 reg = CSR_READ(sc, WMREG_FWSM);
6965 if ((reg & FWSM_RSPCIPHY) != 0)
6966 return 0;
6967 else
6968 return -1;
6969 break;
6970 case WM_T_82571:
6971 case WM_T_82572:
6972 case WM_T_82573:
6973 case WM_T_82574:
6974 case WM_T_82583:
6975 case WM_T_80003:
6976 reg = CSR_READ(sc, WMREG_MANC);
6977 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
6978 return -1;
6979 else
6980 return 0;
6981 break;
6982 default:
6983 /* no problem */
6984 break;
6985 }
6986
6987 return 0;
6988 }
6989
6990 static void
6991 wm_get_hw_control(struct wm_softc *sc)
6992 {
6993 uint32_t reg;
6994
6995 switch (sc->sc_type) {
6996 case WM_T_82573:
6997 reg = CSR_READ(sc, WMREG_SWSM);
6998 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
6999 break;
7000 case WM_T_82571:
7001 case WM_T_82572:
7002 case WM_T_82574:
7003 case WM_T_82583:
7004 case WM_T_80003:
7005 case WM_T_ICH8:
7006 case WM_T_ICH9:
7007 case WM_T_ICH10:
7008 case WM_T_PCH:
7009 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7010 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
7011 break;
7012 default:
7013 break;
7014 }
7015 }
7016
7017 static void
7018 wm_release_hw_control(struct wm_softc *sc)
7019 {
7020 uint32_t reg;
7021
7022 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
7023 return;
7024
7025 if (sc->sc_type == WM_T_82573) {
7026 reg = CSR_READ(sc, WMREG_SWSM);
		reg &= ~SWSM_DRV_LOAD;
		CSR_WRITE(sc, WMREG_SWSM, reg);
7029 } else {
7030 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7031 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
7032 }
7033 }
7034
7035 /* XXX Currently TBI only */
7036 static int
7037 wm_check_for_link(struct wm_softc *sc)
7038 {
7039 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7040 uint32_t rxcw;
7041 uint32_t ctrl;
7042 uint32_t status;
7043 uint32_t sig;
7044
7045 rxcw = CSR_READ(sc, WMREG_RXCW);
7046 ctrl = CSR_READ(sc, WMREG_CTRL);
7047 status = CSR_READ(sc, WMREG_STATUS);
7048
7049 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7050
7051 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7052 device_xname(sc->sc_dev), __func__,
7053 ((ctrl & CTRL_SWDPIN(1)) == sig),
7054 ((status & STATUS_LU) != 0),
7055 ((rxcw & RXCW_C) != 0)
7056 ));
7057
7058 /*
7059 * SWDPIN LU RXCW
7060 * 0 0 0
7061 * 0 0 1 (should not happen)
7062 * 0 1 0 (should not happen)
7063 * 0 1 1 (should not happen)
7064 * 1 0 0 Disable autonego and force linkup
7065 * 1 0 1 got /C/ but not linkup yet
7066 * 1 1 0 (linkup)
7067 * 1 1 1 If IFM_AUTO, back to autonego
7068 *
7069 */
7070 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7071 && ((status & STATUS_LU) == 0)
7072 && ((rxcw & RXCW_C) == 0)) {
7073 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7074 __func__));
7075 sc->sc_tbi_linkup = 0;
7076 /* Disable auto-negotiation in the TXCW register */
7077 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7078
7079 /*
7080 * Force link-up and also force full-duplex.
7081 *
		 * NOTE: the hardware updates TFCE and RFCE in CTRL
		 * automatically, so base sc->sc_ctrl on the value just read.
7084 */
7085 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7086 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7087 } else if (((status & STATUS_LU) != 0)
7088 && ((rxcw & RXCW_C) != 0)
7089 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7090 sc->sc_tbi_linkup = 1;
7091 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7092 __func__));
7093 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7094 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7095 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7096 && ((rxcw & RXCW_C) != 0)) {
7097 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7098 } else {
7099 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7100 status));
7101 }
7102
7103 return 0;
7104 }
7105
7106 /* Work-around for 82566 Kumeran PCS lock loss */
7107 static void
7108 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
7109 {
7110 int miistatus, active, i;
7111 int reg;
7112
7113 miistatus = sc->sc_mii.mii_media_status;
7114
7115 /* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
7117 return;
7118
7119 active = sc->sc_mii.mii_media_active;
7120
7121 /* Nothing to do if the link is other than 1Gbps */
7122 if (IFM_SUBTYPE(active) != IFM_1000_T)
7123 return;
7124
7125 for (i = 0; i < 10; i++) {
7126 /* read twice */
7127 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7128 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
7130 goto out; /* GOOD! */
7131
7132 /* Reset the PHY */
7133 wm_gmii_reset(sc);
7134 delay(5*1000);
7135 }
7136
7137 /* Disable GigE link negotiation */
7138 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7139 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7140 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7141
7142 /*
7143 * Call gig speed drop workaround on Gig disable before accessing
7144 * any PHY registers.
7145 */
7146 wm_gig_downshift_workaround_ich8lan(sc);
7147
7148 out:
7149 return;
7150 }
7151
/* Work-around for "WOL from S5 stops working" on igp3 PHYs */
7153 static void
7154 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
7155 {
7156 uint16_t kmrn_reg;
7157
7158 /* Only for igp3 */
7159 if (sc->sc_phytype == WMPHY_IGP_3) {
7160 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
7161 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
7162 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7163 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
7164 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7165 }
7166 }
7167
7168 #ifdef WM_WOL
7169 /* Power down workaround on D3 */
7170 static void
7171 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
7172 {
7173 uint32_t reg;
7174 int i;
7175
7176 for (i = 0; i < 2; i++) {
7177 /* Disable link */
7178 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7179 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7180 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7181
7182 /*
7183 * Call gig speed drop workaround on Gig disable before
7184 * accessing any PHY registers
7185 */
7186 if (sc->sc_type == WM_T_ICH8)
7187 wm_gig_downshift_workaround_ich8lan(sc);
7188
7189 /* Write VR power-down enable */
7190 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7191 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7192 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
7193 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
7194
7195 /* Read it back and test */
7196 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7197 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7198 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
7199 break;
7200
7201 /* Issue PHY reset and repeat at most one more time */
7202 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7203 }
7204 }
7205 #endif /* WM_WOL */
7206
7207 /*
7208 * Workaround for pch's PHYs
7209 * XXX should be moved to new PHY driver?
7210 */
7211 static void
7212 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
7213 {
7214
7215 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
7216
7217 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
7218
7219 /* 82578 */
7220 if (sc->sc_phytype == WMPHY_82578) {
7221 /* PCH rev. < 3 */
7222 if (sc->sc_rev < 3) {
7223 /* XXX 6 bit shift? Why? Is it page2? */
7224 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
7225 0x66c0);
7226 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
7227 0xffff);
7228 }
7229
7230 /* XXX phy rev. < 2 */
7231 }
7232
7233 /* Select page 0 */
7234
7235 /* XXX acquire semaphore */
7236 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
7237 /* XXX release semaphore */
7238
7239 /*
7240 * Configure the K1 Si workaround during phy reset assuming there is
7241 * link so that it disables K1 if link is in 1Gbps.
7242 */
7243 wm_k1_gig_workaround_hv(sc, 1);
7244 }
7245
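/*
 * wm_k1_gig_workaround_hv:
 *
 *	If the link is up, K1 (a Kumeran power-save state) must be
 *	disabled; otherwise the K1 setting from the NVM is applied.
 */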
7246 static void
7247 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
7248 {
7249 int k1_enable = sc->sc_nvm_k1_enabled;
7250
7251 /* XXX acquire semaphore */
7252
7253 if (link) {
7254 k1_enable = 0;
7255
7256 /* Link stall fix for link up */
7257 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
7258 } else {
7259 /* Link stall fix for link down */
7260 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
7261 }
7262
7263 wm_configure_k1_ich8lan(sc, k1_enable);
7264
7265 /* XXX release semaphore */
7266 }
7267
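/*
 * wm_configure_k1_ich8lan:
 *
 *	Enable or disable the K1 power-save state through the Kumeran
 *	K1_CONFIG register, then briefly force the MAC speed setting
 *	(FRCSPD/SPD_BYPS) so that the change takes effect.
 */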
7268 static void
7269 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
7270 {
7271 uint32_t ctrl, ctrl_ext, tmp;
7272 uint16_t kmrn_reg;
7273
7274 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
7275
7276 if (k1_enable)
7277 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
7278 else
7279 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
7280
7281 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
7282
7283 delay(20);
7284
7285 ctrl = CSR_READ(sc, WMREG_CTRL);
7286 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7287
7288 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
7289 tmp |= CTRL_FRCSPD;
7290
7291 CSR_WRITE(sc, WMREG_CTRL, tmp);
7292 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
7293 delay(20);
7294
7295 CSR_WRITE(sc, WMREG_CTRL, ctrl);
7296 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7297 delay(20);
7298 }
7299
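/*
 * wm_set_pcie_completion_timeout:
 *
 *	If the PCIe completion timeout is still at its default of 0, set
 *	it to 10ms through GCR, or to 16ms through the PCIe device control
 *	2 register when capability version 2 is supported; completion
 *	timeout resend is disabled in either case.
 */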
7300 static void
7301 wm_set_pcie_completion_timeout(struct wm_softc *sc)
7302 {
7303 uint32_t gcr;
7304 pcireg_t ctrl2;
7305
7306 gcr = CSR_READ(sc, WMREG_GCR);
7307
7308 /* Only take action if timeout value is defaulted to 0 */
7309 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
7310 goto out;
7311
7312 if ((gcr & GCR_CAP_VER2) == 0) {
7313 gcr |= GCR_CMPL_TMOUT_10MS;
7314 goto out;
7315 }
7316
7317 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
7318 sc->sc_pcixe_capoff + PCI_PCIE_DCSR2);
7319 ctrl2 |= WM_PCI_PCIE_DCSR2_16MS;
7320 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
7321 sc->sc_pcixe_capoff + PCI_PCIE_DCSR2, ctrl2);
7322
7323 out:
7324 /* Disable completion timeout resend */
7325 gcr &= ~GCR_CMPL_TMOUT_RESEND;
7326
7327 CSR_WRITE(sc, WMREG_GCR, gcr);
7328 }
7329
7330 /* special case - for 82575 - need to do manual init ... */
7331 static void
7332 wm_reset_init_script_82575(struct wm_softc *sc)
7333 {
	/*
	 * Remark: this is untested code - we have no board without an
	 * EEPROM.  Same setup as mentioned in the FreeBSD driver for
	 * the i82575.
	 */
7338
7339 /* SerDes configuration via SERDESCTRL */
7340 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
7341 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
7342 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
7343 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
7344
7345 /* CCM configuration via CCMCTL register */
7346 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
7347 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
7348
7349 /* PCIe lanes configuration */
7350 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
7351 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
7352 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
7353 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
7354
7355 /* PCIe PLL Configuration */
7356 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
7357 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
7358 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
7359 }
7360
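/*
 * wm_init_manageability:
 *
 *	Set up management packet routing: keep the hardware from
 *	intercepting ARP, and on the 82571 and later also pass management
 *	packets on ports 623 and 624 through to the host.
 */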
7361 static void
7362 wm_init_manageability(struct wm_softc *sc)
7363 {
7364
7365 if (sc->sc_flags & WM_F_HAS_MANAGE) {
7366 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
7367 uint32_t manc = CSR_READ(sc, WMREG_MANC);
7368
		/* Disable hardware interception of ARP */
7370 manc &= ~MANC_ARP_EN;
7371
7372 /* enable receiving management packets to the host */
7373 if (sc->sc_type >= WM_T_82571) {
7374 manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
		}
7379
7380 CSR_WRITE(sc, WMREG_MANC, manc);
7381 }
7382 }
7383
7384 static void
7385 wm_release_manageability(struct wm_softc *sc)
7386 {
7387
7388 if (sc->sc_flags & WM_F_HAS_MANAGE) {
7389 uint32_t manc = CSR_READ(sc, WMREG_MANC);
7390
7391 if (sc->sc_type >= WM_T_82571)
7392 manc &= ~MANC_EN_MNG2HOST;
7393
7394 CSR_WRITE(sc, WMREG_MANC, manc);
7395 }
7396 }
7397
7398 static void
7399 wm_get_wakeup(struct wm_softc *sc)
7400 {
7401
7402 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
7403 switch (sc->sc_type) {
7404 case WM_T_82573:
7405 case WM_T_82583:
7406 sc->sc_flags |= WM_F_HAS_AMT;
7407 /* FALLTHROUGH */
7408 case WM_T_80003:
7409 case WM_T_82541:
7410 case WM_T_82547:
7411 case WM_T_82571:
7412 case WM_T_82572:
7413 case WM_T_82574:
7414 case WM_T_82575:
7415 case WM_T_82576:
7416 #if 0 /* XXX */
7417 case WM_T_82580:
7418 case WM_T_82580ER:
7419 #endif
7420 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
7421 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
7422 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
7423 break;
7424 case WM_T_ICH8:
7425 case WM_T_ICH9:
7426 case WM_T_ICH10:
7427 case WM_T_PCH:
7428 sc->sc_flags |= WM_F_HAS_AMT;
7429 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
7430 break;
7431 default:
7432 break;
7433 }
7434
7435 /* 1: HAS_MANAGE */
7436 if (wm_enable_mng_pass_thru(sc) != 0)
7437 sc->sc_flags |= WM_F_HAS_MANAGE;
7438
7439 #ifdef WM_DEBUG
7440 printf("\n");
7441 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
7442 printf("HAS_AMT,");
7443 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
7444 printf("ARC_SUBSYS_VALID,");
7445 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
7446 printf("ASF_FIRMWARE_PRES,");
7447 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
7448 printf("HAS_MANAGE,");
7449 printf("\n");
7450 #endif
	/*
	 * Note that the WOL flags get set after the EEPROM-related reset
	 * code has run.
	 */
7455 }
7456
7457 #ifdef WM_WOL
7458 /* WOL in the newer chipset interfaces (pchlan) */
7459 static void
7460 wm_enable_phy_wakeup(struct wm_softc *sc)
7461 {
7462 #if 0
7463 uint16_t preg;
7464
7465 /* Copy MAC RARs to PHY RARs */
7466
7467 /* Copy MAC MTA to PHY MTA */
7468
7469 /* Configure PHY Rx Control register */
7470
7471 /* Enable PHY wakeup in MAC register */
7472
7473 /* Configure and enable PHY wakeup in PHY registers */
7474
7475 /* Activate PHY wakeup */
7476
7477 /* XXX */
7478 #endif
7479 }
7480
7481 static void
7482 wm_enable_wakeup(struct wm_softc *sc)
7483 {
7484 uint32_t reg, pmreg;
7485 pcireg_t pmode;
7486
7487 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
7488 &pmreg, NULL) == 0)
7489 return;
7490
7491 /* Advertise the wakeup capability */
7492 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
7493 | CTRL_SWDPIN(3));
7494 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
7495
7496 /* ICH workaround */
7497 switch (sc->sc_type) {
7498 case WM_T_ICH8:
7499 case WM_T_ICH9:
7500 case WM_T_ICH10:
7501 case WM_T_PCH:
7502 /* Disable gig during WOL */
7503 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7504 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
7505 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7506 if (sc->sc_type == WM_T_PCH)
7507 wm_gmii_reset(sc);
7508
7509 /* Power down workaround */
7510 if (sc->sc_phytype == WMPHY_82577) {
7511 struct mii_softc *child;
7512
7513 /* Assume that the PHY is copper */
7514 child = LIST_FIRST(&sc->sc_mii.mii_phys);
7515 if (child->mii_mpd_rev <= 2)
7516 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
7517 (768 << 5) | 25, 0x0444); /* magic num */
7518 }
7519 break;
7520 default:
7521 break;
7522 }
7523
7524 /* Keep the laser running on fiber adapters */
	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
	    || ((sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0)) {
7527 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7528 reg |= CTRL_EXT_SWDPIN(3);
7529 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7530 }
7531
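	/* Enable wake on Magic Packet */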
7532 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
7533 #if 0 /* for the multicast packet */
7534 reg |= WUFC_MC;
7535 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
7536 #endif
7537
7538 if (sc->sc_type == WM_T_PCH) {
7539 wm_enable_phy_wakeup(sc);
7540 } else {
7541 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
7542 CSR_WRITE(sc, WMREG_WUFC, reg);
7543 }
7544
7545 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
7546 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
7547 && (sc->sc_phytype == WMPHY_IGP_3))
7548 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
7549
7550 /* Request PME */
7551 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
7552 #if 0
7553 /* Disable WOL */
7554 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
7555 #else
7556 /* For WOL */
7557 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
7558 #endif
7559 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
7560 }
7561 #endif /* WM_WOL */
7562
7563 static bool
7564 wm_suspend(device_t self, const pmf_qual_t *qual)
7565 {
7566 struct wm_softc *sc = device_private(self);
7567
7568 wm_release_manageability(sc);
7569 wm_release_hw_control(sc);
7570 #ifdef WM_WOL
7571 wm_enable_wakeup(sc);
7572 #endif
7573
7574 return true;
7575 }
7576
7577 static bool
7578 wm_resume(device_t self, const pmf_qual_t *qual)
7579 {
7580 struct wm_softc *sc = device_private(self);
7581
7582 wm_init_manageability(sc);
7583
7584 return true;
7585 }
7586