/* $NetBSD: if_wm.c,v 1.210 2010/06/28 01:43:39 msaitoh Exp $ */

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.210 2010/06/28 01:43:39 msaitoh Exp $");

#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define WM_DEBUG_LINK		0x01
#define WM_DEBUG_TX		0x02
#define WM_DEBUG_RX		0x04
#define WM_DEBUG_GMII		0x08
#define WM_DEBUG_MANAGE		0x10
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE;

#define DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define WM_NTXSEGS		256
#define WM_IFQUEUELEN		256
#define WM_TXQUEUELEN_MAX	64
#define WM_TXQUEUELEN_MAX_82547	16
#define WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define WM_NTXDESC_82542	256
#define WM_NTXDESC_82544	4096
#define WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
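
/*
 * Note: WM_NTXDESC(sc) and WM_TXQUEUELEN(sc) are powers of two, so the
 * WM_NEXTTX()/WM_NEXTTXS() macros above advance a ring index with a
 * cheap mask instead of a modulo; e.g. with a 4096-entry ring,
 * WM_NEXTTX(sc, 4095) == (4096 & 4095) == 0, wrapping to the start.
 * The Rx ring macros below rely on the same trick.
 */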

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define WM_NRXDESC		256
#define WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
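
/*
 * The WM_CDTXOFF()/WM_CDRXOFF() macros compute a descriptor's byte
 * offset within the control-data clump via offsetof(), so one DMA map
 * covers both rings; e.g. in the 82544 layout above, WM_CDRXOFF(0) is
 * 0 and WM_CDTXOFF(0) is sizeof(wcd_rxdescs).
 */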

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
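
/*
 * swfwphysem[] maps a MAC function ID (sc_funcid below) to the
 * software/firmware semaphore bit that guards that function's PHY.
 */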

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define sc_txdescs	sc_control_data->wcd_txdescs
#define sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall; /* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
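
/*
 * The Rx chain macros keep sc_rxtailp pointing at the m_next field of
 * the last mbuf in the chain, making each link operation O(1):
 *
 *	WM_RXCHAIN_RESET(sc);		sc_rxhead = NULL
 *	WM_RXCHAIN_LINK(sc, m1);	sc_rxhead = m1
 *	WM_RXCHAIN_LINK(sc, m2);	m1->m_next = m2
 */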

#ifdef WM_EVENT_COUNTERS
#define WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define WM_EVCNT_INCR(ev)	/* nothing */
#define WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
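
/*
 * The _LO/_HI macros split a descriptor ring bus address into the two
 * 32-bit halves the hardware wants; when bus_addr_t is 32 bits wide
 * the high half is a constant 0 and the test folds away at compile
 * time.
 */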

#define WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
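
/*
 * Illustration: with a 256-descriptor ring, WM_CDTXSYNC(sc, 254, 4, ops)
 * makes two bus_dmamap_sync() calls, one covering descriptors 254-255
 * and one covering descriptors 0-1, because the range wraps.
 */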

#define WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
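
/*
 * Note that WM_INIT_RXDESC() finishes by writing the ring index to
 * RDT, which hands ownership of the freshly initialized descriptor
 * back to the hardware.
 */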

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(device_t);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define WMP_F_1000X		0x01
#define WMP_F_1000T		0x02
#define WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571, WMP_F_1000T, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572, WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003, WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003, WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575, WMP_F_1000T },
#if 0
	/*
	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
	 * disabled for now ...
	 */
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576, WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER, WMP_F_1000T },
	{ 0, 0,
	  NULL,
	  0, 0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
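
/*
 * wm_io_read()/wm_io_write() go through the chip's indirect I/O
 * window: offset 0 of the I/O BAR is the address register (which CSR
 * to access) and offset 4 is the data register.  For example,
 * wm_io_write(sc, WMREG_CTRL, v) selects CTRL, then writes v through
 * the data window.
 */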

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

static void
wm_set_spiaddrbits(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}
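
/*
 * For SPI parts, EECD_EE_ABITS distinguishes 16-bit from 8-bit
 * addressing, so wm_set_spiaddrbits() records 16 or 8 EEPROM address
 * bits along with the WM_F_EEPROM_SPI flag.
 */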

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, sc->sc_rev);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	wm_get_wakeup(sc);

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: unable to find I/O BAR\n");
		else {
			/*
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)) {
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
			/* ICH* and PCH have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
			    NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcixe_capoff + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
	    &sc->sc_cd_rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
	    sc->sc_cd_rseg, sc->sc_cd_size,
	    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
	    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/*
	 * Get some information about the EEPROM.
	 */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Microwire */
		sc->sc_ee_addrbits = 6;
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		/* Microwire */
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			/* SPI */
			wm_set_spiaddrbits(sc);
		} else
			/* Microwire */
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82571:
	case WM_T_82572:
		/* SPI */
		wm_set_spiaddrbits(sc);
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (wm_is_onboard_nvm_eeprom(sc) == 0)
			sc->sc_flags |= WM_F_EEPROM_FLASH;
		else {
			/* SPI */
			wm_set_spiaddrbits(sc);
		}
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_80003:
		/* SPI */
		wm_set_spiaddrbits(sc);
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		/* FLASH */
		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
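		/*
		 * GFPREG gives the gigabit NVM region bounds in flash-sector
		 * units: the low field is the region's first sector, the
		 * field at bit 16 its last.  The code below converts these
		 * into a byte base address and a per-bank size in 16-bit
		 * words (the region holds two banks).
		 */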
		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (reg & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
		break;
	default:
		break;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
1567 * that no EEPROM is attached.
1568 */
1569 /*
1570 * Validate the EEPROM checksum. If the checksum fails, flag
1571 * this for later, so we can fail future reads from the EEPROM.
1572 */
1573 if (wm_validate_eeprom_checksum(sc)) {
1574 /*
1575 * Read twice again because some PCI-e parts fail the
1576 * first check due to the link being in sleep state.
1577 */
1578 if (wm_validate_eeprom_checksum(sc))
1579 sc->sc_flags |= WM_F_EEPROM_INVALID;
1580 }
1581
1582 /* Set device properties (macflags) */
1583 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1584
1585 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1586 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1587 else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1588 aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1589 } else {
1590 if (sc->sc_flags & WM_F_EEPROM_SPI)
1591 eetype = "SPI";
1592 else
1593 eetype = "MicroWire";
1594 aprint_verbose_dev(sc->sc_dev,
1595 "%u word (%d address bits) %s EEPROM\n",
1596 1U << sc->sc_ee_addrbits,
1597 sc->sc_ee_addrbits, eetype);
1598 }
1599
1600 /*
1601 * Read the Ethernet address from the EEPROM, if not first found
1602 * in device properties.
1603 */
1604 ea = prop_dictionary_get(dict, "mac-address");
1605 if (ea != NULL) {
1606 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1607 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1608 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1609 } else {
1610 if (wm_read_mac_addr(sc, enaddr) != 0) {
1611 aprint_error_dev(sc->sc_dev,
1612 "unable to read Ethernet address\n");
1613 return;
1614 }
1615 }
1616
1617 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1618 ether_sprintf(enaddr));
1619
1620 /*
1621 * Read the config info from the EEPROM, and set up various
1622 * bits in the control registers based on their contents.
1623 */
1624 pn = prop_dictionary_get(dict, "i82543-cfg1");
1625 if (pn != NULL) {
1626 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1627 cfg1 = (uint16_t) prop_number_integer_value(pn);
1628 } else {
1629 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1630 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1631 return;
1632 }
1633 }
1634
1635 pn = prop_dictionary_get(dict, "i82543-cfg2");
1636 if (pn != NULL) {
1637 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1638 cfg2 = (uint16_t) prop_number_integer_value(pn);
1639 } else {
1640 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1641 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1642 return;
1643 }
1644 }
1645
1646 /* check for WM_F_WOL */
1647 switch (sc->sc_type) {
1648 case WM_T_82542_2_0:
1649 case WM_T_82542_2_1:
1650 case WM_T_82543:
1651 /* dummy? */
1652 eeprom_data = 0;
1653 apme_mask = EEPROM_CFG3_APME;
1654 break;
1655 case WM_T_82544:
1656 apme_mask = EEPROM_CFG2_82544_APM_EN;
1657 eeprom_data = cfg2;
1658 break;
1659 case WM_T_82546:
1660 case WM_T_82546_3:
1661 case WM_T_82571:
1662 case WM_T_82572:
1663 case WM_T_82573:
1664 case WM_T_82574:
1665 case WM_T_82583:
1666 case WM_T_80003:
1667 default:
1668 apme_mask = EEPROM_CFG3_APME;
1669 wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1670 : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1671 break;
1672 case WM_T_82575:
1673 case WM_T_82576:
1674 case WM_T_82580:
1675 case WM_T_82580ER:
1676 case WM_T_ICH8:
1677 case WM_T_ICH9:
1678 case WM_T_ICH10:
1679 case WM_T_PCH:
1680 apme_mask = WUC_APME;
1681 eeprom_data = CSR_READ(sc, WMREG_WUC);
1682 break;
1683 }
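/*
 * (For the 82575 and newer / ICH parts above, the APM-enable bit
 * lives in the WUC register rather than in the EEPROM image, so
 * eeprom_data is reused to hold a CSR read in that case.)
 */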
1684
1685 /* Set WM_F_WOL if the APM-enable bit gathered above is set */
1686 if ((eeprom_data & apme_mask) != 0)
1687 sc->sc_flags |= WM_F_WOL;
1688 #ifdef WM_DEBUG
1689 if ((sc->sc_flags & WM_F_WOL) != 0)
1690 printf("WOL\n");
1691 #endif
1692
1693 /*
1694 * XXX need special handling for some multiple port cards
1695 * to disable a particular port.
1696 */
1697
1698 if (sc->sc_type >= WM_T_82544) {
1699 pn = prop_dictionary_get(dict, "i82543-swdpin");
1700 if (pn != NULL) {
1701 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1702 swdpin = (uint16_t) prop_number_integer_value(pn);
1703 } else {
1704 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1705 aprint_error_dev(sc->sc_dev,
1706 "unable to read SWDPIN\n");
1707 return;
1708 }
1709 }
1710 }
1711
1712 if (cfg1 & EEPROM_CFG1_ILOS)
1713 sc->sc_ctrl |= CTRL_ILOS;
1714 if (sc->sc_type >= WM_T_82544) {
1715 sc->sc_ctrl |=
1716 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1717 CTRL_SWDPIO_SHIFT;
1718 sc->sc_ctrl |=
1719 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1720 CTRL_SWDPINS_SHIFT;
1721 } else {
1722 sc->sc_ctrl |=
1723 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1724 CTRL_SWDPIO_SHIFT;
1725 }
1726
1727 #if 0
1728 if (sc->sc_type >= WM_T_82544) {
1729 if (cfg1 & EEPROM_CFG1_IPS0)
1730 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1731 if (cfg1 & EEPROM_CFG1_IPS1)
1732 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1733 sc->sc_ctrl_ext |=
1734 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1735 CTRL_EXT_SWDPIO_SHIFT;
1736 sc->sc_ctrl_ext |=
1737 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1738 CTRL_EXT_SWDPINS_SHIFT;
1739 } else {
1740 sc->sc_ctrl_ext |=
1741 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1742 CTRL_EXT_SWDPIO_SHIFT;
1743 }
1744 #endif
1745
1746 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1747 #if 0
1748 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1749 #endif
1750
1751 /*
1752 * Set up some register offsets that are different between
1753 * the i82542 and the i82543 and later chips.
1754 */
1755 if (sc->sc_type < WM_T_82543) {
1756 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1757 sc->sc_tdt_reg = WMREG_OLD_TDT;
1758 } else {
1759 sc->sc_rdt_reg = WMREG_RDT;
1760 sc->sc_tdt_reg = WMREG_TDT;
1761 }
1762
1763 if (sc->sc_type == WM_T_PCH) {
1764 uint16_t val;
1765
1766 /* Save the NVM K1 bit setting */
1767 wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1768
1769 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1770 sc->sc_nvm_k1_enabled = 1;
1771 else
1772 sc->sc_nvm_k1_enabled = 0;
1773 }
1774
1775 /*
1776 * Determine if we're TBI,GMII or SGMII mode, and initialize the
1777 * media structures accordingly.
1778 */
1779 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1780 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1781 || sc->sc_type == WM_T_82573
1782 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1783 /* STATUS_TBIMODE reserved/reused, can't rely on it */
1784 wm_gmii_mediainit(sc, wmp->wmp_product);
1785 } else if (sc->sc_type < WM_T_82543 ||
1786 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1787 if (wmp->wmp_flags & WMP_F_1000T)
1788 aprint_error_dev(sc->sc_dev,
1789 "WARNING: TBIMODE set on 1000BASE-T product!\n");
1790 wm_tbi_mediainit(sc);
1791 } else {
1792 switch (sc->sc_type) {
1793 case WM_T_82575:
1794 case WM_T_82576:
1795 case WM_T_82580:
1796 case WM_T_82580ER:
1797 reg = CSR_READ(sc, WMREG_CTRL_EXT);
1798 switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1799 case CTRL_EXT_LINK_MODE_SGMII:
1800 aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1801 sc->sc_flags |= WM_F_SGMII;
1802 CSR_WRITE(sc, WMREG_CTRL_EXT,
1803 reg | CTRL_EXT_I2C_ENA);
1804 wm_gmii_mediainit(sc, wmp->wmp_product);
1805 break;
1806 case CTRL_EXT_LINK_MODE_1000KX:
1807 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1808 aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1809 CSR_WRITE(sc, WMREG_CTRL_EXT,
1810 reg | CTRL_EXT_I2C_ENA);
1811 panic("not supported yet\n");
1812 break;
1813 case CTRL_EXT_LINK_MODE_GMII:
1814 default:
1815 CSR_WRITE(sc, WMREG_CTRL_EXT,
1816 reg & ~CTRL_EXT_I2C_ENA);
1817 wm_gmii_mediainit(sc, wmp->wmp_product);
1818 break;
1819 }
1820 break;
1821 default:
1822 if (wmp->wmp_flags & WMP_F_1000X)
1823 aprint_error_dev(sc->sc_dev,
1824 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1825 wm_gmii_mediainit(sc, wmp->wmp_product);
1826 }
1827 }
1828
1829 ifp = &sc->sc_ethercom.ec_if;
1830 xname = device_xname(sc->sc_dev);
1831 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1832 ifp->if_softc = sc;
1833 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1834 ifp->if_ioctl = wm_ioctl;
1835 ifp->if_start = wm_start;
1836 ifp->if_watchdog = wm_watchdog;
1837 ifp->if_init = wm_init;
1838 ifp->if_stop = wm_stop;
1839 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1840 IFQ_SET_READY(&ifp->if_snd);
1841
1842 /* Check for jumbo frame */
1843 switch (sc->sc_type) {
1844 case WM_T_82573:
1845 /* XXX limited to 9234 if ASPM is disabled */
1846 wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1847 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1848 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1849 break;
1850 case WM_T_82571:
1851 case WM_T_82572:
1852 case WM_T_82574:
1853 case WM_T_82575:
1854 case WM_T_82576:
1855 case WM_T_82580:
1856 case WM_T_82580ER:
1857 case WM_T_80003:
1858 case WM_T_ICH9:
1859 case WM_T_ICH10:
1860 /* XXX limited to 9234 */
1861 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1862 break;
1863 case WM_T_PCH:
1864 /* XXX limited to 4096 */
1865 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1866 break;
1867 case WM_T_82542_2_0:
1868 case WM_T_82542_2_1:
1869 case WM_T_82583:
1870 case WM_T_ICH8:
1871 /* No support for jumbo frame */
1872 break;
1873 default:
1874 /* ETHER_MAX_LEN_JUMBO */
1875 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1876 break;
1877 }
1878
1879 /*
1880 * If we're an i82543 or greater, we can support VLANs.
1881 */
1882 if (sc->sc_type >= WM_T_82543)
1883 sc->sc_ethercom.ec_capabilities |=
1884 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1885
1886 /*
1887 * We can perform TCPv4 and UDPv4 checksums in-bound, but
1888 * only on i82543 and later.
1889 */
1890 if (sc->sc_type >= WM_T_82543) {
1891 ifp->if_capabilities |=
1892 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1893 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1894 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1895 IFCAP_CSUM_TCPv6_Tx |
1896 IFCAP_CSUM_UDPv6_Tx;
1897 }
1898
1899 /*
1900 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
1901 *
1902 * 82541GI (8086:1076) ... no
1903 * 82572EI (8086:10b9) ... yes
1904 */
1905 if (sc->sc_type >= WM_T_82571) {
1906 ifp->if_capabilities |=
1907 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1908 }
1909
1910 /*
1911 * If we're an i82544 or greater (except i82547), we can do
1912 * TCP segmentation offload.
1913 */
1914 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1915 ifp->if_capabilities |= IFCAP_TSOv4;
1916 }
1917
1918 if (sc->sc_type >= WM_T_82571) {
1919 ifp->if_capabilities |= IFCAP_TSOv6;
1920 }
1921
1922 /*
1923 * Attach the interface.
1924 */
1925 if_attach(ifp);
1926 ether_ifattach(ifp, enaddr);
1927 #if NRND > 0
1928 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1929 #endif
1930
1931 #ifdef WM_EVENT_COUNTERS
1932 /* Attach event counters. */
1933 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1934 NULL, xname, "txsstall");
1935 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1936 NULL, xname, "txdstall");
1937 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1938 NULL, xname, "txfifo_stall");
1939 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1940 NULL, xname, "txdw");
1941 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1942 NULL, xname, "txqe");
1943 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1944 NULL, xname, "rxintr");
1945 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1946 NULL, xname, "linkintr");
1947
1948 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1949 NULL, xname, "rxipsum");
1950 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1951 NULL, xname, "rxtusum");
1952 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1953 NULL, xname, "txipsum");
1954 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1955 NULL, xname, "txtusum");
1956 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1957 NULL, xname, "txtusum6");
1958
1959 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1960 NULL, xname, "txtso");
1961 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1962 NULL, xname, "txtso6");
1963 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1964 NULL, xname, "txtsopain");
1965
1966 for (i = 0; i < WM_NTXSEGS; i++) {
1967 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1968 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1969 NULL, xname, wm_txseg_evcnt_names[i]);
1970 }
1971
1972 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1973 NULL, xname, "txdrop");
1974
1975 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1976 NULL, xname, "tu");
1977
1978 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1979 NULL, xname, "tx_xoff");
1980 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1981 NULL, xname, "tx_xon");
1982 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1983 NULL, xname, "rx_xoff");
1984 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1985 NULL, xname, "rx_xon");
1986 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1987 NULL, xname, "rx_macctl");
1988 #endif /* WM_EVENT_COUNTERS */
1989
1990 if (pmf_device_register(self, wm_suspend, wm_resume))
1991 pmf_class_network_register(self, ifp);
1992 else
1993 aprint_error_dev(self, "couldn't establish power handler\n");
1994
1995 return;
1996
1997 /*
1998 * Free any resources we've allocated during the failed attach
1999 * attempt. Do this in reverse order and fall through.
2000 */
2001 fail_5:
2002 for (i = 0; i < WM_NRXDESC; i++) {
2003 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2004 bus_dmamap_destroy(sc->sc_dmat,
2005 sc->sc_rxsoft[i].rxs_dmamap);
2006 }
2007 fail_4:
2008 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2009 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2010 bus_dmamap_destroy(sc->sc_dmat,
2011 sc->sc_txsoft[i].txs_dmamap);
2012 }
2013 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2014 fail_3:
2015 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2016 fail_2:
2017 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2018 sc->sc_cd_size);
2019 fail_1:
2020 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2021 fail_0:
2022 return;
2023 }
2024
2025 static int
2026 wm_detach(device_t self, int flags __unused)
2027 {
2028 struct wm_softc *sc = device_private(self);
2029 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2030 int i, s;
2031
2032 s = splnet();
2033 /* Stop the interface. Callouts are stopped in wm_stop(). */
2034 wm_stop(ifp, 1);
2035 splx(s);
2036
2037 pmf_device_deregister(self);
2038
2039 /* Tell the firmware about the release */
2040 wm_release_manageability(sc);
2041
2042 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2043
2044 /* Delete all remaining media. */
2045 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2046
2047 ether_ifdetach(ifp);
2048 if_detach(ifp);
2049
2050
2051 /* Unload RX dmamaps and free mbufs */
2052 wm_rxdrain(sc);
2053
2054 /* Free the DMA maps; this mirrors the failure path at the end of wm_attach() */
2055 for (i = 0; i < WM_NRXDESC; i++) {
2056 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2057 bus_dmamap_destroy(sc->sc_dmat,
2058 sc->sc_rxsoft[i].rxs_dmamap);
2059 }
2060 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2061 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2062 bus_dmamap_destroy(sc->sc_dmat,
2063 sc->sc_txsoft[i].txs_dmamap);
2064 }
2065 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2066 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2067 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2068 sc->sc_cd_size);
2069 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2070
2071 /* Disestablish the interrupt handler */
2072 if (sc->sc_ih != NULL) {
2073 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2074 sc->sc_ih = NULL;
2075 }
2076
2077 /* Unmap the register */
2078 if (sc->sc_ss) {
2079 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2080 sc->sc_ss = 0;
2081 }
2082
2083 wm_release_hw_control(sc);
2084
2085 return 0;
2086 }
2087
2088 /*
2089 * wm_tx_offload:
2090 *
2091 * Set up TCP/IP checksumming parameters for the
2092 * specified packet.
2093 */
2094 static int
2095 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2096 uint8_t *fieldsp)
2097 {
2098 struct mbuf *m0 = txs->txs_mbuf;
2099 struct livengood_tcpip_ctxdesc *t;
2100 uint32_t ipcs, tucs, cmd, cmdlen, seg;
2101 uint32_t ipcse;
2102 struct ether_header *eh;
2103 int offset, iphl;
2104 uint8_t fields;
2105
2106 /*
2107 * XXX It would be nice if the mbuf pkthdr had offset
2108 * fields for the protocol headers.
2109 */
2110
2111 eh = mtod(m0, struct ether_header *);
2112 switch (htons(eh->ether_type)) {
2113 case ETHERTYPE_IP:
2114 case ETHERTYPE_IPV6:
2115 offset = ETHER_HDR_LEN;
2116 break;
2117
2118 case ETHERTYPE_VLAN:
2119 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2120 break;
2121
2122 default:
2123 /*
2124 * Don't support this protocol or encapsulation.
2125 */
2126 *fieldsp = 0;
2127 *cmdp = 0;
2128 return 0;
2129 }
2130
2131 if ((m0->m_pkthdr.csum_flags &
2132 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2133 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2134 } else {
2135 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2136 }
2137 ipcse = offset + iphl - 1;
2138
2139 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2140 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2141 seg = 0;
2142 fields = 0;
2143
2144 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2145 int hlen = offset + iphl;
2146 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2147
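/*
 * For TSO the hardware regenerates the IP length and the TCP
 * checksum for each segment it emits, so the fixups below zero the
 * length field and seed th_sum with only the pseudo-header sum
 * (addresses and protocol, no length) -- a reading of the code that
 * follows, matching the usual large-send convention.
 */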
2148 if (__predict_false(m0->m_len <
2149 (hlen + sizeof(struct tcphdr)))) {
2150 /*
2151 * TCP/IP headers are not in the first mbuf; we need
2152 * to do this the slow and painful way. Let's just
2153 * hope this doesn't happen very often.
2154 */
2155 struct tcphdr th;
2156
2157 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2158
2159 m_copydata(m0, hlen, sizeof(th), &th);
2160 if (v4) {
2161 struct ip ip;
2162
2163 m_copydata(m0, offset, sizeof(ip), &ip);
2164 ip.ip_len = 0;
2165 m_copyback(m0,
2166 offset + offsetof(struct ip, ip_len),
2167 sizeof(ip.ip_len), &ip.ip_len);
2168 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2169 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2170 } else {
2171 struct ip6_hdr ip6;
2172
2173 m_copydata(m0, offset, sizeof(ip6), &ip6);
2174 ip6.ip6_plen = 0;
2175 m_copyback(m0,
2176 offset + offsetof(struct ip6_hdr, ip6_plen),
2177 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2178 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2179 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2180 }
2181 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2182 sizeof(th.th_sum), &th.th_sum);
2183
2184 hlen += th.th_off << 2;
2185 } else {
2186 /*
2187 * TCP/IP headers are in the first mbuf; we can do
2188 * this the easy way.
2189 */
2190 struct tcphdr *th;
2191
2192 if (v4) {
2193 struct ip *ip =
2194 (void *)(mtod(m0, char *) + offset);
2195 th = (void *)(mtod(m0, char *) + hlen);
2196
2197 ip->ip_len = 0;
2198 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2199 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2200 } else {
2201 struct ip6_hdr *ip6 =
2202 (void *)(mtod(m0, char *) + offset);
2203 th = (void *)(mtod(m0, char *) + hlen);
2204
2205 ip6->ip6_plen = 0;
2206 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2207 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2208 }
2209 hlen += th->th_off << 2;
2210 }
2211
2212 if (v4) {
2213 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2214 cmdlen |= WTX_TCPIP_CMD_IP;
2215 } else {
2216 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2217 ipcse = 0;
2218 }
2219 cmd |= WTX_TCPIP_CMD_TSE;
2220 cmdlen |= WTX_TCPIP_CMD_TSE |
2221 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2222 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2223 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2224 }
2225
2226 /*
2227 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2228 * offload feature, if we load the context descriptor, we
2229 * MUST provide valid values for IPCSS and TUCSS fields.
2230 */
2231
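/*
 * Worked example for a plain IPv4/TCP frame (no VLAN, 20-byte IP
 * header): offset = ETHER_HDR_LEN = 14, so IPCSS = 14,
 * IPCSO = 14 + offsetof(struct ip, ip_sum) = 24 and
 * IPCSE = 14 + 20 - 1 = 33; after "offset += iphl" below,
 * TUCSS = 34 and TUCSO = 34 + offsetof(struct tcphdr, th_sum) = 50.
 */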
2232 ipcs = WTX_TCPIP_IPCSS(offset) |
2233 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2234 WTX_TCPIP_IPCSE(ipcse);
2235 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2236 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2237 fields |= WTX_IXSM;
2238 }
2239
2240 offset += iphl;
2241
2242 if (m0->m_pkthdr.csum_flags &
2243 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2244 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2245 fields |= WTX_TXSM;
2246 tucs = WTX_TCPIP_TUCSS(offset) |
2247 WTX_TCPIP_TUCSO(offset +
2248 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2249 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2250 } else if ((m0->m_pkthdr.csum_flags &
2251 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2252 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2253 fields |= WTX_TXSM;
2254 tucs = WTX_TCPIP_TUCSS(offset) |
2255 WTX_TCPIP_TUCSO(offset +
2256 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2257 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2258 } else {
2259 /* Just initialize it to a valid TCP context. */
2260 tucs = WTX_TCPIP_TUCSS(offset) |
2261 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2262 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2263 }
2264
2265 /* Fill in the context descriptor. */
2266 t = (struct livengood_tcpip_ctxdesc *)
2267 &sc->sc_txdescs[sc->sc_txnext];
2268 t->tcpip_ipcs = htole32(ipcs);
2269 t->tcpip_tucs = htole32(tucs);
2270 t->tcpip_cmdlen = htole32(cmdlen);
2271 t->tcpip_seg = htole32(seg);
2272 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2273
2274 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2275 txs->txs_ndesc++;
2276
2277 *cmdp = cmd;
2278 *fieldsp = fields;
2279
2280 return 0;
2281 }
2282
2283 static void
2284 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2285 {
2286 struct mbuf *m;
2287 int i;
2288
2289 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2290 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2291 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2292 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2293 m->m_data, m->m_len, m->m_flags);
2294 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2295 i, i == 1 ? "" : "s");
2296 }
2297
2298 /*
2299 * wm_82547_txfifo_stall:
2300 *
2301 * Callout used to wait for the 82547 Tx FIFO to drain,
2302 * reset the FIFO pointers, and restart packet transmission.
2303 */
2304 static void
2305 wm_82547_txfifo_stall(void *arg)
2306 {
2307 struct wm_softc *sc = arg;
2308 int s;
2309
2310 s = splnet();
2311
2312 if (sc->sc_txfifo_stall) {
2313 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2314 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2315 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2316 /*
2317 * Packets have drained. Stop transmitter, reset
2318 * FIFO pointers, restart transmitter, and kick
2319 * the packet queue.
2320 */
2321 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2322 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2323 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2324 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2325 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2326 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2327 CSR_WRITE(sc, WMREG_TCTL, tctl);
2328 CSR_WRITE_FLUSH(sc);
2329
2330 sc->sc_txfifo_head = 0;
2331 sc->sc_txfifo_stall = 0;
2332 wm_start(&sc->sc_ethercom.ec_if);
2333 } else {
2334 /*
2335 * Still waiting for packets to drain; try again in
2336 * another tick.
2337 */
2338 callout_schedule(&sc->sc_txfifo_ch, 1);
2339 }
2340 }
2341
2342 splx(s);
2343 }
2344
2345 /*
2346 * wm_82547_txfifo_bugchk:
2347 *
2348 * Check for bug condition in the 82547 Tx FIFO. We need to
2349 * prevent enqueueing a packet that would wrap around the end
2350 * of the Tx FIFO ring buffer, otherwise the chip will croak.
2351 *
2352 * We do this by checking the amount of space before the end
2353 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2354 * the Tx FIFO, wait for all remaining packets to drain, reset
2355 * the internal FIFO pointers to the beginning, and restart
2356 * transmission on the interface.
2357 */
2358 #define WM_FIFO_HDR 0x10
2359 #define WM_82547_PAD_LEN 0x3e0
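/*
 * A reading of the check below (of the code, not of the errata
 * text): lengths are rounded up to 16-byte (WM_FIFO_HDR) units,
 * including one FIFO header.  With "space" bytes left before the
 * end of the FIFO, we stall once len >= WM_82547_PAD_LEN + space,
 * i.e. once the portion of the packet that would wrap past the end
 * reaches 0x3e0 bytes.
 */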
2360 static int
2361 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2362 {
2363 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2364 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2365
2366 /* Just return if already stalled. */
2367 if (sc->sc_txfifo_stall)
2368 return 1;
2369
2370 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2371 /* Stall only occurs in half-duplex mode. */
2372 goto send_packet;
2373 }
2374
2375 if (len >= WM_82547_PAD_LEN + space) {
2376 sc->sc_txfifo_stall = 1;
2377 callout_schedule(&sc->sc_txfifo_ch, 1);
2378 return 1;
2379 }
2380
2381 send_packet:
2382 sc->sc_txfifo_head += len;
2383 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2384 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2385
2386 return 0;
2387 }
2388
2389 /*
2390 * wm_start: [ifnet interface function]
2391 *
2392 * Start packet transmission on the interface.
2393 */
2394 static void
2395 wm_start(struct ifnet *ifp)
2396 {
2397 struct wm_softc *sc = ifp->if_softc;
2398 struct mbuf *m0;
2399 struct m_tag *mtag;
2400 struct wm_txsoft *txs;
2401 bus_dmamap_t dmamap;
2402 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2403 bus_addr_t curaddr;
2404 bus_size_t seglen, curlen;
2405 uint32_t cksumcmd;
2406 uint8_t cksumfields;
2407
2408 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2409 return;
2410
2411 /*
2412 * Remember the previous number of free descriptors.
2413 */
2414 ofree = sc->sc_txfree;
2415
2416 /*
2417 * Loop through the send queue, setting up transmit descriptors
2418 * until we drain the queue, or use up all available transmit
2419 * descriptors.
2420 */
2421 for (;;) {
2422 /* Grab a packet off the queue. */
2423 IFQ_POLL(&ifp->if_snd, m0);
2424 if (m0 == NULL)
2425 break;
2426
2427 DPRINTF(WM_DEBUG_TX,
2428 ("%s: TX: have packet to transmit: %p\n",
2429 device_xname(sc->sc_dev), m0));
2430
2431 /* Get a work queue entry. */
2432 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2433 wm_txintr(sc);
2434 if (sc->sc_txsfree == 0) {
2435 DPRINTF(WM_DEBUG_TX,
2436 ("%s: TX: no free job descriptors\n",
2437 device_xname(sc->sc_dev)));
2438 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2439 break;
2440 }
2441 }
2442
2443 txs = &sc->sc_txsoft[sc->sc_txsnext];
2444 dmamap = txs->txs_dmamap;
2445
2446 use_tso = (m0->m_pkthdr.csum_flags &
2447 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2448
2449 /*
2450 * So says the Linux driver:
2451 * The controller does a simple calculation to make sure
2452 * there is enough room in the FIFO before initiating the
2453 * DMA for each buffer, assuming each buffer covers at most
2454 * 4 MSS-sized segments: 4 = ceil(buffer len / MSS).
2455 * To make sure we don't overrun the FIFO, adjust the max
2456 * buffer len if the MSS drops.
2457 */
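/*
 * E.g., an MSS of 536 clamps each DMA buffer below to
 * 4 * 536 = 2144 bytes; once 4 * MSS reaches WTX_MAX_LEN, that
 * constant becomes the limit instead.
 */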
2458 dmamap->dm_maxsegsz =
2459 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2460 ? m0->m_pkthdr.segsz << 2
2461 : WTX_MAX_LEN;
2462
2463 /*
2464 * Load the DMA map. If this fails, the packet either
2465 * didn't fit in the allotted number of segments, or we
2466 * were short on resources. For the too-many-segments
2467 * case, we simply report an error and drop the packet,
2468 * since we can't sanely copy a jumbo packet to a single
2469 * buffer.
2470 */
2471 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2472 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2473 if (error) {
2474 if (error == EFBIG) {
2475 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2476 log(LOG_ERR, "%s: Tx packet consumes too many "
2477 "DMA segments, dropping...\n",
2478 device_xname(sc->sc_dev));
2479 IFQ_DEQUEUE(&ifp->if_snd, m0);
2480 wm_dump_mbuf_chain(sc, m0);
2481 m_freem(m0);
2482 continue;
2483 }
2484 /*
2485 * Short on resources, just stop for now.
2486 */
2487 DPRINTF(WM_DEBUG_TX,
2488 ("%s: TX: dmamap load failed: %d\n",
2489 device_xname(sc->sc_dev), error));
2490 break;
2491 }
2492
2493 segs_needed = dmamap->dm_nsegs;
2494 if (use_tso) {
2495 /* For sentinel descriptor; see below. */
2496 segs_needed++;
2497 }
2498
2499 /*
2500 * Ensure we have enough descriptors free to describe
2501 * the packet. Note, we always reserve one descriptor
2502 * at the end of the ring due to the semantics of the
2503 * TDT register, plus one more in the event we need
2504 * to load offload context.
2505 */
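/*
 * Example of the test below: with sc_txfree == 5, a packet needing
 * 4 segments is deferred (4 > 5 - 2), keeping one descriptor free
 * for the TDT ring convention and one for a possible offload
 * context descriptor.
 */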
2506 if (segs_needed > sc->sc_txfree - 2) {
2507 /*
2508 * Not enough free descriptors to transmit this
2509 * packet. We haven't committed anything yet,
2510 * so just unload the DMA map, put the packet
2511 * back on the queue, and punt. Notify the upper
2512 * layer that there are no more slots left.
2513 */
2514 DPRINTF(WM_DEBUG_TX,
2515 ("%s: TX: need %d (%d) descriptors, have %d\n",
2516 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2517 segs_needed, sc->sc_txfree - 1));
2518 ifp->if_flags |= IFF_OACTIVE;
2519 bus_dmamap_unload(sc->sc_dmat, dmamap);
2520 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2521 break;
2522 }
2523
2524 /*
2525 * Check for 82547 Tx FIFO bug. We need to do this
2526 * once we know we can transmit the packet, since we
2527 * do some internal FIFO space accounting here.
2528 */
2529 if (sc->sc_type == WM_T_82547 &&
2530 wm_82547_txfifo_bugchk(sc, m0)) {
2531 DPRINTF(WM_DEBUG_TX,
2532 ("%s: TX: 82547 Tx FIFO bug detected\n",
2533 device_xname(sc->sc_dev)));
2534 ifp->if_flags |= IFF_OACTIVE;
2535 bus_dmamap_unload(sc->sc_dmat, dmamap);
2536 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2537 break;
2538 }
2539
2540 IFQ_DEQUEUE(&ifp->if_snd, m0);
2541
2542 /*
2543 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2544 */
2545
2546 DPRINTF(WM_DEBUG_TX,
2547 ("%s: TX: packet has %d (%d) DMA segments\n",
2548 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2549
2550 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2551
2552 /*
2553 * Store a pointer to the packet so that we can free it
2554 * later.
2555 *
2556 * Initially, we consider the number of descriptors the
2557 * packet uses to be the number of DMA segments. This may be
2558 * incremented by 1 if we do checksum offload (a descriptor
2559 * is used to set the checksum context).
2560 */
2561 txs->txs_mbuf = m0;
2562 txs->txs_firstdesc = sc->sc_txnext;
2563 txs->txs_ndesc = segs_needed;
2564
2565 /* Set up offload parameters for this packet. */
2566 if (m0->m_pkthdr.csum_flags &
2567 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2568 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2569 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2570 if (wm_tx_offload(sc, txs, &cksumcmd,
2571 &cksumfields) != 0) {
2572 /* Error message already displayed. */
2573 bus_dmamap_unload(sc->sc_dmat, dmamap);
2574 continue;
2575 }
2576 } else {
2577 cksumcmd = 0;
2578 cksumfields = 0;
2579 }
2580
2581 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2582
2583 /* Sync the DMA map. */
2584 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2585 BUS_DMASYNC_PREWRITE);
2586
2587 /*
2588 * Initialize the transmit descriptor.
2589 */
2590 for (nexttx = sc->sc_txnext, seg = 0;
2591 seg < dmamap->dm_nsegs; seg++) {
2592 for (seglen = dmamap->dm_segs[seg].ds_len,
2593 curaddr = dmamap->dm_segs[seg].ds_addr;
2594 seglen != 0;
2595 curaddr += curlen, seglen -= curlen,
2596 nexttx = WM_NEXTTX(sc, nexttx)) {
2597 curlen = seglen;
2598
2599 /*
2600 * So says the Linux driver:
2601 * Work around for premature descriptor
2602 * write-backs in TSO mode. Append a
2603 * 4-byte sentinel descriptor.
2604 */
2605 if (use_tso &&
2606 seg == dmamap->dm_nsegs - 1 &&
2607 curlen > 8)
2608 curlen -= 4;
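/*
 * (The 4 bytes shaved off here leave seglen nonzero, so the next
 * pass of this inner loop emits them as a separate final
 * descriptor -- which is why segs_needed was incremented by one
 * for the TSO case above.)
 */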
2609
2610 wm_set_dma_addr(
2611 &sc->sc_txdescs[nexttx].wtx_addr,
2612 curaddr);
2613 sc->sc_txdescs[nexttx].wtx_cmdlen =
2614 htole32(cksumcmd | curlen);
2615 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2616 0;
2617 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2618 cksumfields;
2619 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2620 lasttx = nexttx;
2621
2622 DPRINTF(WM_DEBUG_TX,
2623 ("%s: TX: desc %d: low 0x%08lx, "
2624 "len 0x%04x\n",
2625 device_xname(sc->sc_dev), nexttx,
2626 curaddr & 0xffffffffUL, (unsigned)curlen));
2627 }
2628 }
2629
2630 KASSERT(lasttx != -1);
2631
2632 /*
2633 * Set up the command byte on the last descriptor of
2634 * the packet. If we're in the interrupt delay window,
2635 * delay the interrupt.
2636 */
2637 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2638 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2639
2640 /*
2641 * If VLANs are enabled and the packet has a VLAN tag, set
2642 * up the descriptor to encapsulate the packet for us.
2643 *
2644 * This is only valid on the last descriptor of the packet.
2645 */
2646 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2647 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2648 htole32(WTX_CMD_VLE);
2649 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2650 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2651 }
2652
2653 txs->txs_lastdesc = lasttx;
2654
2655 DPRINTF(WM_DEBUG_TX,
2656 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2657 device_xname(sc->sc_dev),
2658 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2659
2660 /* Sync the descriptors we're using. */
2661 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2662 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2663
2664 /* Give the packet to the chip. */
2665 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2666
2667 DPRINTF(WM_DEBUG_TX,
2668 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2669
2670 DPRINTF(WM_DEBUG_TX,
2671 ("%s: TX: finished transmitting packet, job %d\n",
2672 device_xname(sc->sc_dev), sc->sc_txsnext));
2673
2674 /* Advance the tx pointer. */
2675 sc->sc_txfree -= txs->txs_ndesc;
2676 sc->sc_txnext = nexttx;
2677
2678 sc->sc_txsfree--;
2679 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2680
2681 /* Pass the packet to any BPF listeners. */
2682 bpf_mtap(ifp, m0);
2683 }
2684
2685 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2686 /* No more slots; notify upper layer. */
2687 ifp->if_flags |= IFF_OACTIVE;
2688 }
2689
2690 if (sc->sc_txfree != ofree) {
2691 /* Set a watchdog timer in case the chip flakes out. */
2692 ifp->if_timer = 5;
2693 }
2694 }
2695
2696 /*
2697 * wm_watchdog: [ifnet interface function]
2698 *
2699 * Watchdog timer handler.
2700 */
2701 static void
2702 wm_watchdog(struct ifnet *ifp)
2703 {
2704 struct wm_softc *sc = ifp->if_softc;
2705
2706 /*
2707 * Since we're using delayed interrupts, sweep up
2708 * before we report an error.
2709 */
2710 wm_txintr(sc);
2711
2712 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2713 log(LOG_ERR,
2714 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2715 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2716 sc->sc_txnext);
2717 ifp->if_oerrors++;
2718
2719 /* Reset the interface. */
2720 (void) wm_init(ifp);
2721 }
2722
2723 /* Try to get more packets going. */
2724 wm_start(ifp);
2725 }
2726
2727 /*
2728 * wm_ioctl: [ifnet interface function]
2729 *
2730 * Handle control requests from the operator.
2731 */
2732 static int
2733 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2734 {
2735 struct wm_softc *sc = ifp->if_softc;
2736 struct ifreq *ifr = (struct ifreq *) data;
2737 struct ifaddr *ifa = (struct ifaddr *)data;
2738 struct sockaddr_dl *sdl;
2739 int diff, s, error;
2740
2741 s = splnet();
2742
2743 switch (cmd) {
2744 case SIOCSIFFLAGS:
2745 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
2746 break;
2747 if (ifp->if_flags & IFF_UP) {
2748 diff = (ifp->if_flags ^ sc->sc_if_flags)
2749 & (IFF_PROMISC | IFF_ALLMULTI);
2750 if ((diff & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2751 /*
2752 * If the difference between the previous flags and
2753 * the new flags is only IFF_PROMISC or
2754 * IFF_ALLMULTI, just set the multicast filter
2755 * (don't reset, to avoid taking the link down).
2756 */
2757 wm_set_filter(sc);
2758 } else {
2759 /*
2760 * Reset the interface to pick up changes in
2761 * any other flags that affect the hardware
2762 * state.
2763 */
2764 wm_init(ifp);
2765 }
2766 } else {
2767 if (ifp->if_flags & IFF_RUNNING)
2768 wm_stop(ifp, 1);
2769 }
2770 sc->sc_if_flags = ifp->if_flags;
2771 error = 0;
2772 break;
2773 case SIOCSIFMEDIA:
2774 case SIOCGIFMEDIA:
2775 /* Flow control requires full-duplex mode. */
2776 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2777 (ifr->ifr_media & IFM_FDX) == 0)
2778 ifr->ifr_media &= ~IFM_ETH_FMASK;
2779 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2780 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2781 /* We can do both TXPAUSE and RXPAUSE. */
2782 ifr->ifr_media |=
2783 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2784 }
2785 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2786 }
2787 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2788 break;
2789 case SIOCINITIFADDR:
2790 if (ifa->ifa_addr->sa_family == AF_LINK) {
2791 sdl = satosdl(ifp->if_dl->ifa_addr);
2792 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2793 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2794 /* unicast address is first multicast entry */
2795 wm_set_filter(sc);
2796 error = 0;
2797 break;
2798 }
2799 /* Fall through for rest */
2800 default:
2801 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2802 break;
2803
2804 error = 0;
2805
2806 if (cmd == SIOCSIFCAP)
2807 error = (*ifp->if_init)(ifp);
2808 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2809 ;
2810 else if (ifp->if_flags & IFF_RUNNING) {
2811 /*
2812 * Multicast list has changed; set the hardware filter
2813 * accordingly.
2814 */
2815 wm_set_filter(sc);
2816 }
2817 break;
2818 }
2819
2820 /* Try to get more packets going. */
2821 wm_start(ifp);
2822
2823 splx(s);
2824 return error;
2825 }
2826
2827 /*
2828 * wm_intr:
2829 *
2830 * Interrupt service routine.
2831 */
2832 static int
2833 wm_intr(void *arg)
2834 {
2835 struct wm_softc *sc = arg;
2836 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2837 uint32_t icr;
2838 int handled = 0;
2839
2840 while (1 /* CONSTCOND */) {
2841 icr = CSR_READ(sc, WMREG_ICR);
2842 if ((icr & sc->sc_icr) == 0)
2843 break;
2844 #if 0 /*NRND > 0*/
2845 if (RND_ENABLED(&sc->rnd_source))
2846 rnd_add_uint32(&sc->rnd_source, icr);
2847 #endif
2848
2849 handled = 1;
2850
2851 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2852 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2853 DPRINTF(WM_DEBUG_RX,
2854 ("%s: RX: got Rx intr 0x%08x\n",
2855 device_xname(sc->sc_dev),
2856 icr & (ICR_RXDMT0|ICR_RXT0)));
2857 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2858 }
2859 #endif
2860 wm_rxintr(sc);
2861
2862 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2863 if (icr & ICR_TXDW) {
2864 DPRINTF(WM_DEBUG_TX,
2865 ("%s: TX: got TXDW interrupt\n",
2866 device_xname(sc->sc_dev)));
2867 WM_EVCNT_INCR(&sc->sc_ev_txdw);
2868 }
2869 #endif
2870 wm_txintr(sc);
2871
2872 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2873 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2874 wm_linkintr(sc, icr);
2875 }
2876
2877 if (icr & ICR_RXO) {
2878 #if defined(WM_DEBUG)
2879 log(LOG_WARNING, "%s: Receive overrun\n",
2880 device_xname(sc->sc_dev));
2881 #endif /* defined(WM_DEBUG) */
2882 }
2883 }
2884
2885 if (handled) {
2886 /* Try to get more packets going. */
2887 wm_start(ifp);
2888 }
2889
2890 return handled;
2891 }
2892
2893 /*
2894 * wm_txintr:
2895 *
2896 * Helper; handle transmit interrupts.
2897 */
2898 static void
2899 wm_txintr(struct wm_softc *sc)
2900 {
2901 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2902 struct wm_txsoft *txs;
2903 uint8_t status;
2904 int i;
2905
2906 ifp->if_flags &= ~IFF_OACTIVE;
2907
2908 /*
2909 * Go through the Tx list and free mbufs for those
2910 * frames which have been transmitted.
2911 */
2912 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2913 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2914 txs = &sc->sc_txsoft[i];
2915
2916 DPRINTF(WM_DEBUG_TX,
2917 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2918
2919 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2920 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2921
2922 status =
2923 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2924 if ((status & WTX_ST_DD) == 0) {
2925 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2926 BUS_DMASYNC_PREREAD);
2927 break;
2928 }
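/*
 * (WTX_ST_DD, tested above, is the descriptor-done bit the chip
 * writes back; if a job's last descriptor is still owned by the
 * hardware, everything after it is still in flight, so the sweep
 * stops here.)
 */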
2929
2930 DPRINTF(WM_DEBUG_TX,
2931 ("%s: TX: job %d done: descs %d..%d\n",
2932 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2933 txs->txs_lastdesc));
2934
2935 /*
2936 * XXX We should probably be using the statistics
2937 * XXX registers, but I don't know if they exist
2938 * XXX on chips before the i82544.
2939 */
2940
2941 #ifdef WM_EVENT_COUNTERS
2942 if (status & WTX_ST_TU)
2943 WM_EVCNT_INCR(&sc->sc_ev_tu);
2944 #endif /* WM_EVENT_COUNTERS */
2945
2946 if (status & (WTX_ST_EC|WTX_ST_LC)) {
2947 ifp->if_oerrors++;
2948 if (status & WTX_ST_LC)
2949 log(LOG_WARNING, "%s: late collision\n",
2950 device_xname(sc->sc_dev));
2951 else if (status & WTX_ST_EC) {
2952 ifp->if_collisions += 16;
2953 log(LOG_WARNING, "%s: excessive collisions\n",
2954 device_xname(sc->sc_dev));
2955 }
2956 } else
2957 ifp->if_opackets++;
2958
2959 sc->sc_txfree += txs->txs_ndesc;
2960 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2961 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2962 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2963 m_freem(txs->txs_mbuf);
2964 txs->txs_mbuf = NULL;
2965 }
2966
2967 /* Update the dirty transmit buffer pointer. */
2968 sc->sc_txsdirty = i;
2969 DPRINTF(WM_DEBUG_TX,
2970 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
2971
2972 /*
2973 * If there are no more pending transmissions, cancel the watchdog
2974 * timer.
2975 */
2976 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2977 ifp->if_timer = 0;
2978 }
2979
2980 /*
2981 * wm_rxintr:
2982 *
2983 * Helper; handle receive interrupts.
2984 */
2985 static void
2986 wm_rxintr(struct wm_softc *sc)
2987 {
2988 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2989 struct wm_rxsoft *rxs;
2990 struct mbuf *m;
2991 int i, len;
2992 uint8_t status, errors;
2993 uint16_t vlantag;
2994
2995 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2996 rxs = &sc->sc_rxsoft[i];
2997
2998 DPRINTF(WM_DEBUG_RX,
2999 ("%s: RX: checking descriptor %d\n",
3000 device_xname(sc->sc_dev), i));
3001
3002 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3003
3004 status = sc->sc_rxdescs[i].wrx_status;
3005 errors = sc->sc_rxdescs[i].wrx_errors;
3006 len = le16toh(sc->sc_rxdescs[i].wrx_len);
3007 vlantag = sc->sc_rxdescs[i].wrx_special;
3008
3009 if ((status & WRX_ST_DD) == 0) {
3010 /*
3011 * We have processed all of the receive descriptors.
3012 */
3013 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3014 break;
3015 }
3016
3017 if (__predict_false(sc->sc_rxdiscard)) {
3018 DPRINTF(WM_DEBUG_RX,
3019 ("%s: RX: discarding contents of descriptor %d\n",
3020 device_xname(sc->sc_dev), i));
3021 WM_INIT_RXDESC(sc, i);
3022 if (status & WRX_ST_EOP) {
3023 /* Reset our state. */
3024 DPRINTF(WM_DEBUG_RX,
3025 ("%s: RX: resetting rxdiscard -> 0\n",
3026 device_xname(sc->sc_dev)));
3027 sc->sc_rxdiscard = 0;
3028 }
3029 continue;
3030 }
3031
3032 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3033 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3034
3035 m = rxs->rxs_mbuf;
3036
3037 /*
3038 * Add a new receive buffer to the ring, unless of
3039 * course the length is zero. Treat the latter as a
3040 * failed mapping.
3041 */
3042 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3043 /*
3044 * Failed, throw away what we've done so
3045 * far, and discard the rest of the packet.
3046 */
3047 ifp->if_ierrors++;
3048 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3049 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3050 WM_INIT_RXDESC(sc, i);
3051 if ((status & WRX_ST_EOP) == 0)
3052 sc->sc_rxdiscard = 1;
3053 if (sc->sc_rxhead != NULL)
3054 m_freem(sc->sc_rxhead);
3055 WM_RXCHAIN_RESET(sc);
3056 DPRINTF(WM_DEBUG_RX,
3057 ("%s: RX: Rx buffer allocation failed, "
3058 "dropping packet%s\n", device_xname(sc->sc_dev),
3059 sc->sc_rxdiscard ? " (discard)" : ""));
3060 continue;
3061 }
3062
3063 m->m_len = len;
3064 sc->sc_rxlen += len;
3065 DPRINTF(WM_DEBUG_RX,
3066 ("%s: RX: buffer at %p len %d\n",
3067 device_xname(sc->sc_dev), m->m_data, len));
3068
3069 /*
3070 * If this is not the end of the packet, keep
3071 * looking.
3072 */
3073 if ((status & WRX_ST_EOP) == 0) {
3074 WM_RXCHAIN_LINK(sc, m);
3075 DPRINTF(WM_DEBUG_RX,
3076 ("%s: RX: not yet EOP, rxlen -> %d\n",
3077 device_xname(sc->sc_dev), sc->sc_rxlen));
3078 continue;
3079 }
3080
3081 /*
3082 * Okay, we have the entire packet now. The chip is
3083 * configured to include the FCS (not all chips can
3084 * be configured to strip it), so we need to trim it.
3085 * We may need to adjust the length of the previous mbuf in
3086 * the chain if the current mbuf is too short.
3087 */
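/*
 * For example, with ETHER_CRC_LEN == 4: if the final mbuf holds
 * only 2 bytes, both are CRC, so we zero its length and trim the
 * remaining 2 CRC bytes from the previous mbuf (sc_rxtail).
 */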
3088 if (m->m_len < ETHER_CRC_LEN) {
3089 sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
3090 m->m_len = 0;
3091 } else {
3092 m->m_len -= ETHER_CRC_LEN;
3093 }
3094 len = sc->sc_rxlen - ETHER_CRC_LEN;
3095
3096 WM_RXCHAIN_LINK(sc, m);
3097
3098 *sc->sc_rxtailp = NULL;
3099 m = sc->sc_rxhead;
3100
3101 WM_RXCHAIN_RESET(sc);
3102
3103 DPRINTF(WM_DEBUG_RX,
3104 ("%s: RX: have entire packet, len -> %d\n",
3105 device_xname(sc->sc_dev), len));
3106
3107 /*
3108 * If an error occurred, update stats and drop the packet.
3109 */
3110 if (errors &
3111 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3112 if (errors & WRX_ER_SE)
3113 log(LOG_WARNING, "%s: symbol error\n",
3114 device_xname(sc->sc_dev));
3115 else if (errors & WRX_ER_SEQ)
3116 log(LOG_WARNING, "%s: receive sequence error\n",
3117 device_xname(sc->sc_dev));
3118 else if (errors & WRX_ER_CE)
3119 log(LOG_WARNING, "%s: CRC error\n",
3120 device_xname(sc->sc_dev));
3121 m_freem(m);
3122 continue;
3123 }
3124
3125 /*
3126 * No errors. Receive the packet.
3127 */
3128 m->m_pkthdr.rcvif = ifp;
3129 m->m_pkthdr.len = len;
3130
3131 /*
3132 * If VLANs are enabled, VLAN packets have been unwrapped
3133 * for us. Associate the tag with the packet.
3134 */
3135 if ((status & WRX_ST_VP) != 0) {
3136 VLAN_INPUT_TAG(ifp, m,
3137 le16toh(vlantag),
3138 continue);
3139 }
3140
3141 /*
3142 * Set up checksum info for this packet.
3143 */
3144 if ((status & WRX_ST_IXSM) == 0) {
3145 if (status & WRX_ST_IPCS) {
3146 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3147 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3148 if (errors & WRX_ER_IPE)
3149 m->m_pkthdr.csum_flags |=
3150 M_CSUM_IPv4_BAD;
3151 }
3152 if (status & WRX_ST_TCPCS) {
3153 /*
3154 * Note: we don't know if this was TCP or UDP,
3155 * so we just set both bits, and expect the
3156 * upper layers to deal.
3157 */
3158 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3159 m->m_pkthdr.csum_flags |=
3160 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3161 M_CSUM_TCPv6 | M_CSUM_UDPv6;
3162 if (errors & WRX_ER_TCPE)
3163 m->m_pkthdr.csum_flags |=
3164 M_CSUM_TCP_UDP_BAD;
3165 }
3166 }
3167
3168 ifp->if_ipackets++;
3169
3170 /* Pass this up to any BPF listeners. */
3171 bpf_mtap(ifp, m);
3172
3173 /* Pass it on. */
3174 (*ifp->if_input)(ifp, m);
3175 }
3176
3177 /* Update the receive pointer. */
3178 sc->sc_rxptr = i;
3179
3180 DPRINTF(WM_DEBUG_RX,
3181 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3182 }
3183
3184 /*
3185 * wm_linkintr_gmii:
3186 *
3187 * Helper; handle link interrupts for GMII.
3188 */
3189 static void
3190 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3191 {
3192
3193 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3194 __func__));
3195
3196 if (icr & ICR_LSC) {
3197 DPRINTF(WM_DEBUG_LINK,
3198 ("%s: LINK: LSC -> mii_tick\n",
3199 device_xname(sc->sc_dev)));
3200 mii_tick(&sc->sc_mii);
3201 if (sc->sc_type == WM_T_82543) {
3202 int miistatus, active;
3203
3204 /*
3205 * With 82543, we need to force speed and
3206 * duplex on the MAC equal to what the PHY
3207 * speed and duplex configuration is.
3208 */
3209 miistatus = sc->sc_mii.mii_media_status;
3210
3211 if (miistatus & IFM_ACTIVE) {
3212 active = sc->sc_mii.mii_media_active;
3213 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3214 switch (IFM_SUBTYPE(active)) {
3215 case IFM_10_T:
3216 sc->sc_ctrl |= CTRL_SPEED_10;
3217 break;
3218 case IFM_100_TX:
3219 sc->sc_ctrl |= CTRL_SPEED_100;
3220 break;
3221 case IFM_1000_T:
3222 sc->sc_ctrl |= CTRL_SPEED_1000;
3223 break;
3224 default:
3225 /*
3226 * fiber?
3227 * Should not enter here.
3228 */
3229 printf("unknown media (%x)\n",
3230 active);
3231 break;
3232 }
3233 if (active & IFM_FDX)
3234 sc->sc_ctrl |= CTRL_FD;
3235 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3236 }
3237 } else if ((sc->sc_type == WM_T_ICH8)
3238 && (sc->sc_phytype == WMPHY_IGP_3)) {
3239 wm_kmrn_lock_loss_workaround_ich8lan(sc);
3240 } else if (sc->sc_type == WM_T_PCH) {
3241 wm_k1_gig_workaround_hv(sc,
3242 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3243 }
3244
3245 if ((sc->sc_phytype == WMPHY_82578)
3246 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3247 == IFM_1000_T)) {
3248
3249 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3250 delay(200*1000); /* XXX too big */
3251
3252 /* Link stall fix for link up */
3253 wm_gmii_hv_writereg(sc->sc_dev, 1,
3254 HV_MUX_DATA_CTRL,
3255 HV_MUX_DATA_CTRL_GEN_TO_MAC
3256 | HV_MUX_DATA_CTRL_FORCE_SPEED);
3257 wm_gmii_hv_writereg(sc->sc_dev, 1,
3258 HV_MUX_DATA_CTRL,
3259 HV_MUX_DATA_CTRL_GEN_TO_MAC);
3260 }
3261 }
3262 } else if (icr & ICR_RXSEQ) {
3263 DPRINTF(WM_DEBUG_LINK,
3264 ("%s: LINK Receive sequence error\n",
3265 device_xname(sc->sc_dev)));
3266 }
3267 }
3268
3269 /*
3270 * wm_linkintr_tbi:
3271 *
3272 * Helper; handle link interrupts for TBI mode.
3273 */
3274 static void
3275 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3276 {
3277 uint32_t status;
3278
3279 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3280 __func__));
3281
3282 status = CSR_READ(sc, WMREG_STATUS);
3283 if (icr & ICR_LSC) {
3284 if (status & STATUS_LU) {
3285 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3286 device_xname(sc->sc_dev),
3287 (status & STATUS_FD) ? "FDX" : "HDX"));
3288 /*
3289 * NOTE: CTRL will update TFCE and RFCE automatically,
3290 * so we should update sc->sc_ctrl
3291 */
3292
3293 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3294 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3295 sc->sc_fcrtl &= ~FCRTL_XONE;
3296 if (status & STATUS_FD)
3297 sc->sc_tctl |=
3298 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3299 else
3300 sc->sc_tctl |=
3301 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3302 if (sc->sc_ctrl & CTRL_TFCE)
3303 sc->sc_fcrtl |= FCRTL_XONE;
3304 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3305 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3306 WMREG_OLD_FCRTL : WMREG_FCRTL,
3307 sc->sc_fcrtl);
3308 sc->sc_tbi_linkup = 1;
3309 } else {
3310 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3311 device_xname(sc->sc_dev)));
3312 sc->sc_tbi_linkup = 0;
3313 }
3314 wm_tbi_set_linkled(sc);
3315 } else if (icr & ICR_RXCFG) {
3316 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3317 device_xname(sc->sc_dev)));
3318 sc->sc_tbi_nrxcfg++;
3319 wm_check_for_link(sc);
3320 } else if (icr & ICR_RXSEQ) {
3321 DPRINTF(WM_DEBUG_LINK,
3322 ("%s: LINK: Receive sequence error\n",
3323 device_xname(sc->sc_dev)));
3324 }
3325 }
3326
3327 /*
3328 * wm_linkintr:
3329 *
3330 * Helper; handle link interrupts.
3331 */
3332 static void
3333 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3334 {
3335
3336 if (sc->sc_flags & WM_F_HAS_MII)
3337 wm_linkintr_gmii(sc, icr);
3338 else
3339 wm_linkintr_tbi(sc, icr);
3340 }
3341
3342 /*
3343 * wm_tick:
3344 *
3345 * One second timer, used to check link status, sweep up
3346 * completed transmit jobs, etc.
3347 */
3348 static void
3349 wm_tick(void *arg)
3350 {
3351 struct wm_softc *sc = arg;
3352 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3353 int s;
3354
3355 s = splnet();
3356
3357 if (sc->sc_type >= WM_T_82542_2_1) {
3358 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3359 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3360 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3361 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3362 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3363 }
3364
3365 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3366 ifp->if_ierrors += 0ULL + /* ensure quad_t */
3367 + CSR_READ(sc, WMREG_CRCERRS)
3368 + CSR_READ(sc, WMREG_ALGNERRC)
3369 + CSR_READ(sc, WMREG_SYMERRC)
3370 + CSR_READ(sc, WMREG_RXERRC)
3371 + CSR_READ(sc, WMREG_SEC)
3372 + CSR_READ(sc, WMREG_CEXTERR)
3373 + CSR_READ(sc, WMREG_RLEC);
3374 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
3375
3376 if (sc->sc_flags & WM_F_HAS_MII)
3377 mii_tick(&sc->sc_mii);
3378 else
3379 wm_tbi_check_link(sc);
3380
3381 splx(s);
3382
3383 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3384 }
3385
3386 /*
3387 * wm_reset:
3388 *
3389 * Reset the i82542 chip.
3390 */
3391 static void
3392 wm_reset(struct wm_softc *sc)
3393 {
3394 int phy_reset = 0;
3395 uint32_t reg, mask;
3396 int i;
3397
3398 /*
3399 * Allocate on-chip memory according to the MTU size.
3400 * The Packet Buffer Allocation register must be written
3401 * before the chip is reset.
3402 */
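/*
 * A reading of the 82547 case below: the on-chip memory is split
 * between the receive packet buffer (sc_pba) and the Tx FIFO,
 * which gets the remainder of the 40K block -- 10K with PBA_30K
 * for a normal MTU, 18K with PBA_22K for jumbo frames.
 */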
3403 switch (sc->sc_type) {
3404 case WM_T_82547:
3405 case WM_T_82547_2:
3406 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3407 PBA_22K : PBA_30K;
3408 sc->sc_txfifo_head = 0;
3409 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3410 sc->sc_txfifo_size =
3411 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3412 sc->sc_txfifo_stall = 0;
3413 break;
3414 case WM_T_82571:
3415 case WM_T_82572:
3416 case WM_T_82575: /* XXX need special handling for jumbo frames */
3417 case WM_T_80003:
3418 sc->sc_pba = PBA_32K;
3419 break;
3420 case WM_T_82580:
3421 case WM_T_82580ER:
3422 sc->sc_pba = PBA_35K;
3423 break;
3424 case WM_T_82576:
3425 sc->sc_pba = PBA_64K;
3426 break;
3427 case WM_T_82573:
3428 sc->sc_pba = PBA_12K;
3429 break;
3430 case WM_T_82574:
3431 case WM_T_82583:
3432 sc->sc_pba = PBA_20K;
3433 break;
3434 case WM_T_ICH8:
3435 sc->sc_pba = PBA_8K;
3436 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3437 break;
3438 case WM_T_ICH9:
3439 case WM_T_ICH10:
3440 case WM_T_PCH:
3441 sc->sc_pba = PBA_10K;
3442 break;
3443 default:
3444 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3445 PBA_40K : PBA_48K;
3446 break;
3447 }
3448 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3449
3450 /* Prevent the PCI-E bus from sticking */
3451 if (sc->sc_flags & WM_F_PCIE) {
3452 int timeout = 800;
3453
3454 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3455 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3456
3457 while (timeout--) {
3458 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
3459 break;
3460 delay(100);
3461 }
3462 }
3463
3464 /* Set the completion timeout for interface */
3465 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
3466 wm_set_pcie_completion_timeout(sc);
3467
3468 /* Clear interrupt */
3469 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3470
3471 /* Stop the transmit and receive processes. */
3472 CSR_WRITE(sc, WMREG_RCTL, 0);
3473 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3474 sc->sc_rctl &= ~RCTL_EN;
3475
3476 /* XXX set_tbi_sbp_82543() */
3477
3478 delay(10*1000);
3479
3480 /* Must acquire the MDIO ownership before MAC reset */
3481 switch (sc->sc_type) {
3482 case WM_T_82573:
3483 case WM_T_82574:
3484 case WM_T_82583:
3485 i = 0;
3486 reg = CSR_READ(sc, WMREG_EXTCNFCTR)
3487 | EXTCNFCTR_MDIO_SW_OWNERSHIP;
3488 do {
3489 CSR_WRITE(sc, WMREG_EXTCNFCTR,
3490 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
3491 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
3492 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
3493 break;
3494 reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
3495 delay(2*1000);
3496 i++;
3497 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
3498 break;
3499 default:
3500 break;
3501 }
3502
3503 /*
3504 * 82541 Errata 29? & 82547 Errata 28?
3505 * See also the description about PHY_RST bit in CTRL register
3506 * in 8254x_GBe_SDM.pdf.
3507 */
3508 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3509 CSR_WRITE(sc, WMREG_CTRL,
3510 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3511 delay(5000);
3512 }
3513
3514 switch (sc->sc_type) {
3515 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3516 case WM_T_82541:
3517 case WM_T_82541_2:
3518 case WM_T_82547:
3519 case WM_T_82547_2:
3520 /*
3521 * On some chipsets, a reset through a memory-mapped write
3522 * cycle can cause the chip to reset before completing the
3523 * write cycle. This causes major headaches that can be
3524 * avoided by issuing the reset via indirect register writes
3525 * through I/O space.
3526 *
3527 * So, if we successfully mapped the I/O BAR at attach time,
3528 * use that. Otherwise, try our luck with a memory-mapped
3529 * reset.
3530 */
3531 if (sc->sc_flags & WM_F_IOH_VALID)
3532 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3533 else
3534 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3535 break;
3536 case WM_T_82545_3:
3537 case WM_T_82546_3:
3538 /* Use the shadow control register on these chips. */
3539 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3540 break;
3541 case WM_T_80003:
3542 mask = swfwphysem[sc->sc_funcid];
3543 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3544 wm_get_swfw_semaphore(sc, mask);
3545 CSR_WRITE(sc, WMREG_CTRL, reg);
3546 wm_put_swfw_semaphore(sc, mask);
3547 break;
3548 case WM_T_ICH8:
3549 case WM_T_ICH9:
3550 case WM_T_ICH10:
3551 case WM_T_PCH:
3552 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3553 if (wm_check_reset_block(sc) == 0) {
3554 if (sc->sc_type >= WM_T_PCH) {
3555 uint32_t status;
3556
3557 status = CSR_READ(sc, WMREG_STATUS);
3558 CSR_WRITE(sc, WMREG_STATUS,
3559 status & ~STATUS_PHYRA);
3560 }
3561
3562 reg |= CTRL_PHY_RESET;
3563 phy_reset = 1;
3564 }
3565 wm_get_swfwhw_semaphore(sc);
3566 CSR_WRITE(sc, WMREG_CTRL, reg);
3567 delay(20*1000);
3568 wm_put_swfwhw_semaphore(sc);
3569 break;
3570 case WM_T_82542_2_0:
3571 case WM_T_82542_2_1:
3572 case WM_T_82543:
3573 case WM_T_82540:
3574 case WM_T_82545:
3575 case WM_T_82546:
3576 case WM_T_82571:
3577 case WM_T_82572:
3578 case WM_T_82573:
3579 case WM_T_82574:
3580 case WM_T_82575:
3581 case WM_T_82576:
3582 case WM_T_82580:
3583 case WM_T_82580ER:
3584 case WM_T_82583:
3585 default:
3586 /* Everything else can safely use the documented method. */
3587 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3588 break;
3589 }
3590
3591 if (phy_reset != 0)
3592 wm_get_cfg_done(sc);
3593
3594 /* reload EEPROM */
3595 switch (sc->sc_type) {
3596 case WM_T_82542_2_0:
3597 case WM_T_82542_2_1:
3598 case WM_T_82543:
3599 case WM_T_82544:
3600 delay(10);
3601 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3602 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3603 delay(2000);
3604 break;
3605 case WM_T_82540:
3606 case WM_T_82545:
3607 case WM_T_82545_3:
3608 case WM_T_82546:
3609 case WM_T_82546_3:
3610 delay(5*1000);
3611 /* XXX Disable HW ARPs on ASF enabled adapters */
3612 break;
3613 case WM_T_82541:
3614 case WM_T_82541_2:
3615 case WM_T_82547:
3616 case WM_T_82547_2:
3617 delay(20000);
3618 /* XXX Disable HW ARPs on ASF enabled adapters */
3619 break;
3620 case WM_T_82571:
3621 case WM_T_82572:
3622 case WM_T_82573:
3623 case WM_T_82574:
3624 case WM_T_82583:
3625 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3626 delay(10);
3627 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3628 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3629 }
3630 /* check EECD_EE_AUTORD */
3631 wm_get_auto_rd_done(sc);
3632 /*
		 * PHY configuration from NVM starts only after EECD_AUTO_RD
		 * is set.
3635 */
3636 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3637 || (sc->sc_type == WM_T_82583))
3638 delay(25*1000);
3639 break;
3640 case WM_T_82575:
3641 case WM_T_82576:
3642 case WM_T_82580:
3643 case WM_T_82580ER:
3644 case WM_T_80003:
3645 case WM_T_ICH8:
3646 case WM_T_ICH9:
3647 /* check EECD_EE_AUTORD */
3648 wm_get_auto_rd_done(sc);
3649 break;
3650 case WM_T_ICH10:
3651 case WM_T_PCH:
3652 wm_lan_init_done(sc);
3653 break;
3654 default:
3655 panic("%s: unknown type\n", __func__);
3656 }
3657
3658 /* Check whether EEPROM is present or not */
3659 switch (sc->sc_type) {
3660 case WM_T_82575:
3661 case WM_T_82576:
3662 #if 0 /* XXX */
3663 case WM_T_82580:
3664 case WM_T_82580ER:
3665 #endif
3666 case WM_T_ICH8:
3667 case WM_T_ICH9:
3668 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3669 /* Not found */
3670 sc->sc_flags |= WM_F_EEPROM_INVALID;
3671 if ((sc->sc_type == WM_T_82575)
3672 || (sc->sc_type == WM_T_82576)
3673 || (sc->sc_type == WM_T_82580)
3674 || (sc->sc_type == WM_T_82580ER))
3675 wm_reset_init_script_82575(sc);
3676 }
3677 break;
3678 default:
3679 break;
3680 }
3681
3682 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)) {
3683 /* clear global device reset status bit */
3684 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3685 }
3686
3687 /* Clear any pending interrupt events. */
3688 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3689 reg = CSR_READ(sc, WMREG_ICR);
3690
3691 /* reload sc_ctrl */
3692 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3693
3694 /* dummy read from WUC */
3695 if (sc->sc_type == WM_T_PCH)
3696 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3697 /*
	 * For PCH, this write ensures that any noise is detected as a
	 * CRC error and dropped, rather than showing up as a bad packet
	 * to the DMA engine.
3701 */
3702 if (sc->sc_type == WM_T_PCH)
3703 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3704
3705 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3706 CSR_WRITE(sc, WMREG_WUC, 0);
3707
3708 /* XXX need special handling for 82580 */
3709 }
3710
3711 /*
3712 * wm_init: [ifnet interface function]
3713 *
3714 * Initialize the interface. Must be called at splnet().
3715 */
3716 static int
3717 wm_init(struct ifnet *ifp)
3718 {
3719 struct wm_softc *sc = ifp->if_softc;
3720 struct wm_rxsoft *rxs;
3721 int i, error = 0;
3722 uint32_t reg;
3723
3724 /*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal MTU,
3728 * on such platforms. One possibility is that the DMA itself is
3729 * slightly more efficient if the front of the entire packet (instead
3730 * of the front of the headers) is aligned.
3731 *
3732 * Note we must always set align_tweak to 0 if we are using
3733 * jumbo frames.
3734 */
3735 #ifdef __NO_STRICT_ALIGNMENT
3736 sc->sc_align_tweak = 0;
3737 #else
3738 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3739 sc->sc_align_tweak = 0;
3740 else
3741 sc->sc_align_tweak = 2;
3742 #endif /* __NO_STRICT_ALIGNMENT */
3743
3744 /* Cancel any pending I/O. */
3745 wm_stop(ifp, 0);
3746
3747 /* update statistics before reset */
3748 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3749 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3750
3751 /* Reset the chip to a known state. */
3752 wm_reset(sc);
3753
3754 switch (sc->sc_type) {
3755 case WM_T_82571:
3756 case WM_T_82572:
3757 case WM_T_82573:
3758 case WM_T_82574:
3759 case WM_T_82583:
3760 case WM_T_80003:
3761 case WM_T_ICH8:
3762 case WM_T_ICH9:
3763 case WM_T_ICH10:
3764 case WM_T_PCH:
3765 if (wm_check_mng_mode(sc) != 0)
3766 wm_get_hw_control(sc);
3767 break;
3768 default:
3769 break;
3770 }
3771
3772 /* Reset the PHY. */
3773 if (sc->sc_flags & WM_F_HAS_MII)
3774 wm_gmii_reset(sc);
3775
3776 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3777 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3778 if (sc->sc_type == WM_T_PCH)
3779 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3780
3781 /* Initialize the transmit descriptor ring. */
3782 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3783 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3784 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3785 sc->sc_txfree = WM_NTXDESC(sc);
3786 sc->sc_txnext = 0;
3787
3788 if (sc->sc_type < WM_T_82543) {
3789 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3790 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3791 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3792 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3793 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3794 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3795 } else {
3796 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3797 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3798 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3799 CSR_WRITE(sc, WMREG_TDH, 0);
3800 CSR_WRITE(sc, WMREG_TDT, 0);
3801 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3802 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3803
3804 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3805 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3806 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3807 | TXDCTL_WTHRESH(0));
3808 else {
3809 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3810 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3811 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3812 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3813 }
3814 }
3815 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3816 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3817
3818 /* Initialize the transmit job descriptors. */
3819 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3820 sc->sc_txsoft[i].txs_mbuf = NULL;
3821 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3822 sc->sc_txsnext = 0;
3823 sc->sc_txsdirty = 0;
3824
3825 /*
3826 * Initialize the receive descriptor and receive job
3827 * descriptor rings.
3828 */
3829 if (sc->sc_type < WM_T_82543) {
3830 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3831 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3832 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3833 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3834 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3835 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3836
3837 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3838 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3839 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3840 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3841 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3842 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3843 } else {
3844 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3845 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3846 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3847 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3848 CSR_WRITE(sc, WMREG_EITR(0), 450);
3849 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
				panic("%s: MCLBYTES %d unsupported for "
				    "82575 or newer\n", __func__, MCLBYTES);
3851 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3852 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
3853 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3854 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3855 | RXDCTL_WTHRESH(1));
3856 } else {
3857 CSR_WRITE(sc, WMREG_RDH, 0);
3858 CSR_WRITE(sc, WMREG_RDT, 0);
3859 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3860 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3861 }
3862 }
3863 for (i = 0; i < WM_NRXDESC; i++) {
3864 rxs = &sc->sc_rxsoft[i];
3865 if (rxs->rxs_mbuf == NULL) {
3866 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3867 log(LOG_ERR, "%s: unable to allocate or map rx "
3868 "buffer %d, error = %d\n",
3869 device_xname(sc->sc_dev), i, error);
3870 /*
3871 * XXX Should attempt to run with fewer receive
3872 * XXX buffers instead of just failing.
3873 */
3874 wm_rxdrain(sc);
3875 goto out;
3876 }
3877 } else {
3878 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3879 WM_INIT_RXDESC(sc, i);
3880 }
3881 }
3882 sc->sc_rxptr = 0;
3883 sc->sc_rxdiscard = 0;
3884 WM_RXCHAIN_RESET(sc);
3885
3886 /*
3887 * Clear out the VLAN table -- we don't use it (yet).
3888 */
3889 CSR_WRITE(sc, WMREG_VET, 0);
3890 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3891 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3892
3893 /*
3894 * Set up flow-control parameters.
3895 *
3896 * XXX Values could probably stand some tuning.
3897 */
3898 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3899 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
3900 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3901 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3902 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3903 }
3904
3905 sc->sc_fcrtl = FCRTL_DFLT;
3906 if (sc->sc_type < WM_T_82543) {
3907 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3908 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3909 } else {
3910 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3911 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3912 }
3913
3914 if (sc->sc_type == WM_T_80003)
3915 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3916 else
3917 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3918
3919 /* Deal with VLAN enables. */
3920 if (VLAN_ATTACHED(&sc->sc_ethercom))
3921 sc->sc_ctrl |= CTRL_VME;
3922 else
3923 sc->sc_ctrl &= ~CTRL_VME;
3924
3925 /* Write the control registers. */
3926 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3927
3928 if (sc->sc_flags & WM_F_HAS_MII) {
3929 int val;
3930
3931 switch (sc->sc_type) {
3932 case WM_T_80003:
3933 case WM_T_ICH8:
3934 case WM_T_ICH9:
3935 case WM_T_ICH10:
3936 case WM_T_PCH:
3937 /*
3938 * Set the mac to wait the maximum time between each
3939 * iteration and increase the max iterations when
3940 * polling the phy; this fixes erroneous timeouts at
3941 * 10Mbps.
3942 */
3943 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3944 0xFFFF);
3945 val = wm_kmrn_readreg(sc,
3946 KUMCTRLSTA_OFFSET_INB_PARAM);
3947 val |= 0x3F;
3948 wm_kmrn_writereg(sc,
3949 KUMCTRLSTA_OFFSET_INB_PARAM, val);
3950 break;
3951 default:
3952 break;
3953 }
3954
3955 if (sc->sc_type == WM_T_80003) {
3956 val = CSR_READ(sc, WMREG_CTRL_EXT);
3957 val &= ~CTRL_EXT_LINK_MODE_MASK;
3958 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3959
3960 /* Bypass RX and TX FIFO's */
3961 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3962 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
3963 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3964 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3965 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3966 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3967 }
3968 }
3969 #if 0
3970 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3971 #endif
3972
3973 /*
3974 * Set up checksum offload parameters.
3975 */
3976 reg = CSR_READ(sc, WMREG_RXCSUM);
3977 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3978 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3979 reg |= RXCSUM_IPOFL;
3980 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3981 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3982 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3983 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3984 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3985
3986 /* Reset TBI's RXCFG count */
3987 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
3988
3989 /*
3990 * Set up the interrupt registers.
3991 */
3992 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3993 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3994 ICR_RXO | ICR_RXT0;
3995 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3996 sc->sc_icr |= ICR_RXCFG;
3997 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3998
3999 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4000 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
4001 reg = CSR_READ(sc, WMREG_KABGTXD);
4002 reg |= KABGTXD_BGSQLBIAS;
4003 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4004 }
4005
4006 /* Set up the inter-packet gap. */
4007 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4008
4009 if (sc->sc_type >= WM_T_82543) {
4010 /*
4011 * Set up the interrupt throttling register (units of 256ns)
4012 * Note that a footnote in Intel's documentation says this
4013 * ticker runs at 1/4 the rate when the chip is in 100Mbit
	 * or 10Mbit mode.  Empirically, this also appears to hold for
	 * the 1024ns units of the other interrupt-related timer
	 * registers -- so, really, we ought to divide this value by 4
	 * when the link speed is low.
4018 *
4019 * XXX implement this division at link speed change!
4020 */
4021
4022 /*
4023 * For N interrupts/sec, set this value to:
4024 * 1000000000 / (N * 256). Note that we set the
4025 * absolute and packet timer values to this value
4026 * divided by 4 to get "simple timer" behavior.
4027 */
4028
4029 sc->sc_itr = 1500; /* 2604 ints/sec */
4030 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4031 }
4032
4033 /* Set the VLAN ethernetype. */
4034 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4035
4036 /*
4037 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
4039 * we resolve the media type.
4040 */
4041 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4042 | TCTL_CT(TX_COLLISION_THRESHOLD)
4043 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4044 if (sc->sc_type >= WM_T_82571)
4045 sc->sc_tctl |= TCTL_MULR;
4046 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4047
4048 if (sc->sc_type == WM_T_80003) {
4049 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4050 reg &= ~TCTL_EXT_GCEX_MASK;
4051 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4052 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4053 }
4054
4055 /* Set the media. */
4056 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4057 goto out;
4058
4059 /* Configure for OS presence */
4060 wm_init_manageability(sc);
4061
4062 /*
4063 * Set up the receive control register; we actually program
4064 * the register when we set the receive filter. Use multicast
4065 * address offset type 0.
4066 *
4067 * Only the i82544 has the ability to strip the incoming
4068 * CRC, so we don't enable that feature.
4069 */
4070 sc->sc_mchash_type = 0;
4071 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4072 | RCTL_MO(sc->sc_mchash_type);
4073
4074 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4075 && (ifp->if_mtu > ETHERMTU)) {
4076 sc->sc_rctl |= RCTL_LPE;
4077 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4078 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4079 }
4080
4081 if (MCLBYTES == 2048) {
4082 sc->sc_rctl |= RCTL_2k;
4083 } else {
4084 if (sc->sc_type >= WM_T_82543) {
4085 switch (MCLBYTES) {
4086 case 4096:
4087 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4088 break;
4089 case 8192:
4090 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4091 break;
4092 case 16384:
4093 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4094 break;
4095 default:
4096 panic("wm_init: MCLBYTES %d unsupported",
4097 MCLBYTES);
4098 break;
4099 }
		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
4101 }
4102
4103 /* Set the receive filter. */
4104 wm_set_filter(sc);
4105
4106 /* On 575 and later set RDT only if RX enabled... */
4107 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4108 for (i = 0; i < WM_NRXDESC; i++)
4109 WM_INIT_RXDESC(sc, i);
4110
4111 /* Start the one second link check clock. */
4112 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4113
4114 /* ...all done! */
4115 ifp->if_flags |= IFF_RUNNING;
4116 ifp->if_flags &= ~IFF_OACTIVE;
4117
4118 out:
4119 if (error)
4120 log(LOG_ERR, "%s: interface not running\n",
4121 device_xname(sc->sc_dev));
4122 return error;
4123 }
4124
4125 /*
4126 * wm_rxdrain:
4127 *
4128 * Drain the receive queue.
4129 */
4130 static void
4131 wm_rxdrain(struct wm_softc *sc)
4132 {
4133 struct wm_rxsoft *rxs;
4134 int i;
4135
4136 for (i = 0; i < WM_NRXDESC; i++) {
4137 rxs = &sc->sc_rxsoft[i];
4138 if (rxs->rxs_mbuf != NULL) {
4139 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4140 m_freem(rxs->rxs_mbuf);
4141 rxs->rxs_mbuf = NULL;
4142 }
4143 }
4144 }
4145
4146 /*
4147 * wm_stop: [ifnet interface function]
4148 *
4149 * Stop transmission on the interface.
4150 */
4151 static void
4152 wm_stop(struct ifnet *ifp, int disable)
4153 {
4154 struct wm_softc *sc = ifp->if_softc;
4155 struct wm_txsoft *txs;
4156 int i;
4157
4158 /* Stop the one second clock. */
4159 callout_stop(&sc->sc_tick_ch);
4160
4161 /* Stop the 82547 Tx FIFO stall check timer. */
4162 if (sc->sc_type == WM_T_82547)
4163 callout_stop(&sc->sc_txfifo_ch);
4164
4165 if (sc->sc_flags & WM_F_HAS_MII) {
4166 /* Down the MII. */
4167 mii_down(&sc->sc_mii);
4168 } else {
4169 #if 0
4170 /* Should we clear PHY's status properly? */
4171 wm_reset(sc);
4172 #endif
4173 }
4174
4175 /* Stop the transmit and receive processes. */
4176 CSR_WRITE(sc, WMREG_TCTL, 0);
4177 CSR_WRITE(sc, WMREG_RCTL, 0);
4178 sc->sc_rctl &= ~RCTL_EN;
4179
4180 /*
4181 * Clear the interrupt mask to ensure the device cannot assert its
4182 * interrupt line.
4183 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4184 * any currently pending or shared interrupt.
4185 */
4186 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4187 sc->sc_icr = 0;
4188
4189 /* Release any queued transmit buffers. */
4190 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4191 txs = &sc->sc_txsoft[i];
4192 if (txs->txs_mbuf != NULL) {
4193 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4194 m_freem(txs->txs_mbuf);
4195 txs->txs_mbuf = NULL;
4196 }
4197 }
4198
4199 /* Mark the interface as down and cancel the watchdog timer. */
4200 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4201 ifp->if_timer = 0;
4202
4203 if (disable)
4204 wm_rxdrain(sc);
4205
4206 #if 0 /* notyet */
4207 if (sc->sc_type >= WM_T_82544)
4208 CSR_WRITE(sc, WMREG_WUC, 0);
4209 #endif
4210 }
4211
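/*
 * wm_get_auto_rd_done:
 *
 *	Wait for the EEPROM auto-read (EECD_EE_AUTORD) to complete
 *	after a reset.
 */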
4212 void
4213 wm_get_auto_rd_done(struct wm_softc *sc)
4214 {
4215 int i;
4216
4217 /* wait for eeprom to reload */
4218 switch (sc->sc_type) {
4219 case WM_T_82571:
4220 case WM_T_82572:
4221 case WM_T_82573:
4222 case WM_T_82574:
4223 case WM_T_82583:
4224 case WM_T_82575:
4225 case WM_T_82576:
4226 case WM_T_82580:
4227 case WM_T_82580ER:
4228 case WM_T_80003:
4229 case WM_T_ICH8:
4230 case WM_T_ICH9:
4231 for (i = 0; i < 10; i++) {
4232 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4233 break;
4234 delay(1000);
4235 }
4236 if (i == 10) {
4237 log(LOG_ERR, "%s: auto read from eeprom failed to "
4238 "complete\n", device_xname(sc->sc_dev));
4239 }
4240 break;
4241 default:
4242 break;
4243 }
4244 }
4245
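/*
 * wm_lan_init_done:
 *
 *	On ICH10 and PCH, wait for the LAN_INIT_DONE bit in the STATUS
 *	register to be set after a reset, then clear it.
 */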
4246 void
4247 wm_lan_init_done(struct wm_softc *sc)
4248 {
4249 uint32_t reg = 0;
4250 int i;
4251
4252 /* wait for eeprom to reload */
4253 switch (sc->sc_type) {
4254 case WM_T_ICH10:
4255 case WM_T_PCH:
4256 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4257 reg = CSR_READ(sc, WMREG_STATUS);
4258 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4259 break;
4260 delay(100);
4261 }
4262 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4263 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4264 "complete\n", device_xname(sc->sc_dev), __func__);
4265 }
4266 break;
4267 default:
4268 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4269 __func__);
4270 break;
4271 }
4272
4273 reg &= ~STATUS_LAN_INIT_DONE;
4274 CSR_WRITE(sc, WMREG_STATUS, reg);
4275 }
4276
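/*
 * wm_get_cfg_done:
 *
 *	Wait for the hardware to finish loading its configuration after
 *	a reset; the mechanism (fixed delay, EEMNGCTL CFGDONE polling,
 *	or STATUS_PHYRA handling) depends on the chip type.
 */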
4277 void
4278 wm_get_cfg_done(struct wm_softc *sc)
4279 {
4280 int mask;
4281 uint32_t reg;
4282 int i;
4283
4284 /* wait for eeprom to reload */
4285 switch (sc->sc_type) {
4286 case WM_T_82542_2_0:
4287 case WM_T_82542_2_1:
4288 /* null */
4289 break;
4290 case WM_T_82543:
4291 case WM_T_82544:
4292 case WM_T_82540:
4293 case WM_T_82545:
4294 case WM_T_82545_3:
4295 case WM_T_82546:
4296 case WM_T_82546_3:
4297 case WM_T_82541:
4298 case WM_T_82541_2:
4299 case WM_T_82547:
4300 case WM_T_82547_2:
4301 case WM_T_82573:
4302 case WM_T_82574:
4303 case WM_T_82583:
4304 /* generic */
4305 delay(10*1000);
4306 break;
4307 case WM_T_80003:
4308 case WM_T_82571:
4309 case WM_T_82572:
4310 case WM_T_82575:
4311 case WM_T_82576:
4312 case WM_T_82580:
4313 case WM_T_82580ER:
4314 if (sc->sc_type == WM_T_82571) {
4315 /* Only 82571 shares port 0 */
4316 mask = EEMNGCTL_CFGDONE_0;
4317 } else
4318 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4319 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4320 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4321 break;
4322 delay(1000);
4323 }
4324 if (i >= WM_PHY_CFG_TIMEOUT) {
4325 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
4326 device_xname(sc->sc_dev), __func__));
4327 }
4328 break;
4329 case WM_T_ICH8:
4330 case WM_T_ICH9:
4331 case WM_T_ICH10:
4332 case WM_T_PCH:
4333 if (sc->sc_type >= WM_T_PCH) {
4334 reg = CSR_READ(sc, WMREG_STATUS);
4335 if ((reg & STATUS_PHYRA) != 0)
4336 CSR_WRITE(sc, WMREG_STATUS,
4337 reg & ~STATUS_PHYRA);
4338 }
4339 delay(10*1000);
4340 break;
4341 default:
4342 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4343 __func__);
4344 break;
4345 }
4346 }
4347
4348 /*
4349 * wm_acquire_eeprom:
4350 *
4351 * Perform the EEPROM handshake required on some chips.
4352 */
4353 static int
4354 wm_acquire_eeprom(struct wm_softc *sc)
4355 {
4356 uint32_t reg;
4357 int x;
4358 int ret = 0;
4359
	/* always succeeds with flash-based NVM; no handshake needed */
4361 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4362 return 0;
4363
4364 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
4365 ret = wm_get_swfwhw_semaphore(sc);
4366 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
4367 /* this will also do wm_get_swsm_semaphore() if needed */
4368 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
4369 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4370 ret = wm_get_swsm_semaphore(sc);
4371 }
4372
4373 if (ret) {
4374 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4375 __func__);
4376 return 1;
4377 }
4378
4379 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4380 reg = CSR_READ(sc, WMREG_EECD);
4381
4382 /* Request EEPROM access. */
4383 reg |= EECD_EE_REQ;
4384 CSR_WRITE(sc, WMREG_EECD, reg);
4385
		/* ...and wait for it to be granted. */
4387 for (x = 0; x < 1000; x++) {
4388 reg = CSR_READ(sc, WMREG_EECD);
4389 if (reg & EECD_EE_GNT)
4390 break;
4391 delay(5);
4392 }
4393 if ((reg & EECD_EE_GNT) == 0) {
4394 aprint_error_dev(sc->sc_dev,
4395 "could not acquire EEPROM GNT\n");
4396 reg &= ~EECD_EE_REQ;
4397 CSR_WRITE(sc, WMREG_EECD, reg);
4398 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4399 wm_put_swfwhw_semaphore(sc);
4400 if (sc->sc_flags & WM_F_SWFW_SYNC)
4401 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4402 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4403 wm_put_swsm_semaphore(sc);
4404 return 1;
4405 }
4406 }
4407
4408 return 0;
4409 }
4410
4411 /*
4412 * wm_release_eeprom:
4413 *
4414 * Release the EEPROM mutex.
4415 */
4416 static void
4417 wm_release_eeprom(struct wm_softc *sc)
4418 {
4419 uint32_t reg;
4420
	/* flash-based NVM; nothing to release */
4422 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4423 return;
4424
4425 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4426 reg = CSR_READ(sc, WMREG_EECD);
4427 reg &= ~EECD_EE_REQ;
4428 CSR_WRITE(sc, WMREG_EECD, reg);
4429 }
4430
4431 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4432 wm_put_swfwhw_semaphore(sc);
4433 if (sc->sc_flags & WM_F_SWFW_SYNC)
4434 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4435 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4436 wm_put_swsm_semaphore(sc);
4437 }
4438
4439 /*
4440 * wm_eeprom_sendbits:
4441 *
4442 * Send a series of bits to the EEPROM.
4443 */
4444 static void
4445 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
4446 {
4447 uint32_t reg;
4448 int x;
4449
4450 reg = CSR_READ(sc, WMREG_EECD);
4451
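	/*
	 * Clock the bits out MSB first: present each bit on DI, then
	 * pulse SK high and low so the EEPROM samples it.
	 */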
4452 for (x = nbits; x > 0; x--) {
4453 if (bits & (1U << (x - 1)))
4454 reg |= EECD_DI;
4455 else
4456 reg &= ~EECD_DI;
4457 CSR_WRITE(sc, WMREG_EECD, reg);
4458 delay(2);
4459 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4460 delay(2);
4461 CSR_WRITE(sc, WMREG_EECD, reg);
4462 delay(2);
4463 }
4464 }
4465
4466 /*
4467 * wm_eeprom_recvbits:
4468 *
4469 * Receive a series of bits from the EEPROM.
4470 */
4471 static void
4472 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
4473 {
4474 uint32_t reg, val;
4475 int x;
4476
4477 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
4478
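	/*
	 * Clock the bits in MSB first: raise SK, sample DO while the
	 * clock is high, then lower SK again.
	 */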
4479 val = 0;
4480 for (x = nbits; x > 0; x--) {
4481 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4482 delay(2);
4483 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
4484 val |= (1U << (x - 1));
4485 CSR_WRITE(sc, WMREG_EECD, reg);
4486 delay(2);
4487 }
4488 *valp = val;
4489 }
4490
4491 /*
4492 * wm_read_eeprom_uwire:
4493 *
4494 * Read a word from the EEPROM using the MicroWire protocol.
4495 */
4496 static int
4497 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4498 {
4499 uint32_t reg, val;
4500 int i;
4501
4502 for (i = 0; i < wordcnt; i++) {
4503 /* Clear SK and DI. */
4504 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
4505 CSR_WRITE(sc, WMREG_EECD, reg);
4506
4507 /* Set CHIP SELECT. */
4508 reg |= EECD_CS;
4509 CSR_WRITE(sc, WMREG_EECD, reg);
4510 delay(2);
4511
4512 /* Shift in the READ command. */
4513 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
4514
4515 /* Shift in address. */
4516 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
4517
4518 /* Shift out the data. */
4519 wm_eeprom_recvbits(sc, &val, 16);
4520 data[i] = val & 0xffff;
4521
4522 /* Clear CHIP SELECT. */
4523 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
4524 CSR_WRITE(sc, WMREG_EECD, reg);
4525 delay(2);
4526 }
4527
4528 return 0;
4529 }
4530
4531 /*
4532 * wm_spi_eeprom_ready:
4533 *
4534 * Wait for a SPI EEPROM to be ready for commands.
4535 */
4536 static int
4537 wm_spi_eeprom_ready(struct wm_softc *sc)
4538 {
4539 uint32_t val;
4540 int usec;
4541
4542 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
4543 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
4544 wm_eeprom_recvbits(sc, &val, 8);
4545 if ((val & SPI_SR_RDY) == 0)
4546 break;
4547 }
4548 if (usec >= SPI_MAX_RETRIES) {
4549 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
4550 return 1;
4551 }
4552 return 0;
4553 }
4554
4555 /*
4556 * wm_read_eeprom_spi:
4557 *
 *	Read a word from the EEPROM using the SPI protocol.
4559 */
4560 static int
4561 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4562 {
4563 uint32_t reg, val;
4564 int i;
4565 uint8_t opc;
4566
4567 /* Clear SK and CS. */
4568 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
4569 CSR_WRITE(sc, WMREG_EECD, reg);
4570 delay(2);
4571
4572 if (wm_spi_eeprom_ready(sc))
4573 return 1;
4574
4575 /* Toggle CS to flush commands. */
4576 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
4577 delay(2);
4578 CSR_WRITE(sc, WMREG_EECD, reg);
4579 delay(2);
4580
4581 opc = SPI_OPC_READ;
4582 if (sc->sc_ee_addrbits == 8 && word >= 128)
4583 opc |= SPI_OPC_A8;
4584
4585 wm_eeprom_sendbits(sc, opc, 8);
4586 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
4587
4588 for (i = 0; i < wordcnt; i++) {
4589 wm_eeprom_recvbits(sc, &val, 16);
4590 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
4591 }
4592
4593 /* Raise CS and clear SK. */
4594 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
4595 CSR_WRITE(sc, WMREG_EECD, reg);
4596 delay(2);
4597
4598 return 0;
4599 }
4600
4601 #define EEPROM_CHECKSUM 0xBABA
4602 #define EEPROM_SIZE 0x0040
4603
4604 /*
4605 * wm_validate_eeprom_checksum
4606 *
 * The checksum is defined as the sum of the first 64 (16 bit) words,
 * which must equal EEPROM_CHECKSUM (0xBABA); the checksum word stored
 * in the EEPROM is chosen so that the words sum to that value.
4608 */
4609 static int
4610 wm_validate_eeprom_checksum(struct wm_softc *sc)
4611 {
4612 uint16_t checksum;
4613 uint16_t eeprom_data;
4614 int i;
4615
4616 checksum = 0;
4617
4618 for (i = 0; i < EEPROM_SIZE; i++) {
4619 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
4620 return 1;
4621 checksum += eeprom_data;
4622 }
4623
4624 if (checksum != (uint16_t) EEPROM_CHECKSUM)
4625 return 1;
4626
4627 return 0;
4628 }
4629
4630 /*
4631 * wm_read_eeprom:
4632 *
4633 * Read data from the serial EEPROM.
4634 */
4635 static int
4636 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4637 {
4638 int rv;
4639
4640 if (sc->sc_flags & WM_F_EEPROM_INVALID)
4641 return 1;
4642
4643 if (wm_acquire_eeprom(sc))
4644 return 1;
4645
4646 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4647 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4648 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
4649 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
4650 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
4651 else if (sc->sc_flags & WM_F_EEPROM_SPI)
4652 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
4653 else
4654 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
4655
4656 wm_release_eeprom(sc);
4657 return rv;
4658 }
4659
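/*
 * wm_read_eeprom_eerd:
 *
 *	Read data from the EEPROM using the EERD register interface:
 *	write the word address with the START bit set, poll for DONE,
 *	then read the data out of the same register.
 */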
4660 static int
4661 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
4662 uint16_t *data)
4663 {
4664 int i, eerd = 0;
4665 int error = 0;
4666
4667 for (i = 0; i < wordcnt; i++) {
4668 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
4669
4670 CSR_WRITE(sc, WMREG_EERD, eerd);
4671 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
4672 if (error != 0)
4673 break;
4674
4675 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
4676 }
4677
4678 return error;
4679 }
4680
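/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll the given EERD/EEWR register until the DONE bit is set.
 *	Returns 0 on completion and -1 on timeout (100000 polls of 5us
 *	each, i.e. roughly half a second).
 */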
4681 static int
4682 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
4683 {
4684 uint32_t attempts = 100000;
4685 uint32_t i, reg = 0;
4686 int32_t done = -1;
4687
4688 for (i = 0; i < attempts; i++) {
4689 reg = CSR_READ(sc, rw);
4690
4691 if (reg & EERD_DONE) {
4692 done = 0;
4693 break;
4694 }
4695 delay(5);
4696 }
4697
4698 return done;
4699 }
4700
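/*
 * wm_read_mac_addr:
 *
 *	Read the Ethernet address from the EEPROM.  On multi-port
 *	devices each function may have its own address area; when none
 *	exists, the LAN0 address is used with the least significant bit
 *	of the last octet inverted.  Returns 0 on success, -1 on failure.
 */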
4701 static int
4702 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
4703 {
4704 uint16_t myea[ETHER_ADDR_LEN / 2];
4705 uint16_t offset = EEPROM_OFF_MACADDR;
4706 int do_invert = 0;
4707
4708 if (sc->sc_funcid != 0)
4709 switch (sc->sc_type) {
4710 case WM_T_82580:
4711 case WM_T_82580ER:
4712 switch (sc->sc_funcid) {
4713 case 1:
4714 offset = EEPROM_OFF_LAN1;
4715 break;
4716 case 2:
4717 offset = EEPROM_OFF_LAN2;
4718 break;
4719 case 3:
4720 offset = EEPROM_OFF_LAN3;
4721 break;
4722 default:
4723 goto bad;
4724 /* NOTREACHED */
4725 break;
4726 }
4727 break;
4728 case WM_T_82571:
4729 case WM_T_82575:
4730 case WM_T_82576:
4731 case WM_T_80003:
4732 if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1,
4733 &offset) != 0) {
4734 goto bad;
4735 }
4736
4737 /* no pointer */
4738 if (offset == 0xffff) {
4739 /* reset the offset to LAN0 */
4740 offset = EEPROM_OFF_MACADDR;
4741 do_invert = 1;
4742 goto do_read;
4743 }
4744
4745 switch (sc->sc_funcid) {
4746 case 1:
4747 offset += EEPROM_OFF_MACADDR_LAN1;
4748 break;
4749 case 2:
4750 offset += EEPROM_OFF_MACADDR_LAN2;
4751 break;
4752 case 3:
4753 offset += EEPROM_OFF_MACADDR_LAN3;
4754 break;
4755 default:
4756 goto bad;
4757 /* NOTREACHED */
4758 break;
4759 }
4760 break;
4761 default:
4762 do_invert = 1;
4763 break;
4764 }
4765
4766 do_read:
4767 if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
4768 myea) != 0) {
4769 goto bad;
4770 }
4771
4772 enaddr[0] = myea[0] & 0xff;
4773 enaddr[1] = myea[0] >> 8;
4774 enaddr[2] = myea[1] & 0xff;
4775 enaddr[3] = myea[1] >> 8;
4776 enaddr[4] = myea[2] & 0xff;
4777 enaddr[5] = myea[2] >> 8;
4778
4779 /*
4780 * Toggle the LSB of the MAC address on the second port
4781 * of some dual port cards.
4782 */
4783 if (do_invert != 0)
4784 enaddr[5] ^= 1;
4785
4786 return 0;
4787
4788 bad:
4789 aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
4790
4791 return -1;
4792 }
4793
4794 /*
4795 * wm_add_rxbuf:
4796 *
 *	Add a receive buffer to the indicated descriptor.
4798 */
4799 static int
4800 wm_add_rxbuf(struct wm_softc *sc, int idx)
4801 {
4802 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4803 struct mbuf *m;
4804 int error;
4805
4806 MGETHDR(m, M_DONTWAIT, MT_DATA);
4807 if (m == NULL)
4808 return ENOBUFS;
4809
4810 MCLGET(m, M_DONTWAIT);
4811 if ((m->m_flags & M_EXT) == 0) {
4812 m_freem(m);
4813 return ENOBUFS;
4814 }
4815
4816 if (rxs->rxs_mbuf != NULL)
4817 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4818
4819 rxs->rxs_mbuf = m;
4820
4821 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4822 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4823 BUS_DMA_READ|BUS_DMA_NOWAIT);
4824 if (error) {
4825 /* XXX XXX XXX */
4826 aprint_error_dev(sc->sc_dev,
4827 "unable to load rx DMA map %d, error = %d\n",
4828 idx, error);
4829 panic("wm_add_rxbuf");
4830 }
4831
4832 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4833 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4834
4835 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4836 if ((sc->sc_rctl & RCTL_EN) != 0)
4837 WM_INIT_RXDESC(sc, idx);
4838 } else
4839 WM_INIT_RXDESC(sc, idx);
4840
4841 return 0;
4842 }
4843
4844 /*
4845 * wm_set_ral:
4846 *
 *	Set an entry in the receive address list.  A NULL enaddr
 *	clears (invalidates) the slot.
4848 */
4849 static void
4850 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4851 {
4852 uint32_t ral_lo, ral_hi;
4853
4854 if (enaddr != NULL) {
4855 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
4856 (enaddr[3] << 24);
4857 ral_hi = enaddr[4] | (enaddr[5] << 8);
4858 ral_hi |= RAL_AV;
4859 } else {
4860 ral_lo = 0;
4861 ral_hi = 0;
4862 }
4863
4864 if (sc->sc_type >= WM_T_82544) {
4865 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
4866 ral_lo);
4867 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
4868 ral_hi);
4869 } else {
4870 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
4871 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
4872 }
4873 }
4874
4875 /*
4876 * wm_mchash:
4877 *
4878 * Compute the hash of the multicast address for the 4096-bit
 *	Compute the hash of the multicast address for the multicast
 *	filter: a 12-bit hash (4096-bit table), or a 10-bit hash
 *	(1024-bit table) on ICH/PCH devices.
4881 static uint32_t
4882 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4883 {
4884 static const int lo_shift[4] = { 4, 3, 2, 0 };
4885 static const int hi_shift[4] = { 4, 5, 6, 8 };
4886 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4887 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4888 uint32_t hash;
4889
4890 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4891 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
4892 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4893 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4894 return (hash & 0x3ff);
4895 }
4896 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4897 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4898
4899 return (hash & 0xfff);
4900 }
4901
4902 /*
4903 * wm_set_filter:
4904 *
4905 * Set up the receive filter.
4906 */
4907 static void
4908 wm_set_filter(struct wm_softc *sc)
4909 {
4910 struct ethercom *ec = &sc->sc_ethercom;
4911 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4912 struct ether_multi *enm;
4913 struct ether_multistep step;
4914 bus_addr_t mta_reg;
4915 uint32_t hash, reg, bit;
4916 int i, size;
4917
4918 if (sc->sc_type >= WM_T_82544)
4919 mta_reg = WMREG_CORDOVA_MTA;
4920 else
4921 mta_reg = WMREG_MTA;
4922
4923 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4924
4925 if (ifp->if_flags & IFF_BROADCAST)
4926 sc->sc_rctl |= RCTL_BAM;
4927 if (ifp->if_flags & IFF_PROMISC) {
4928 sc->sc_rctl |= RCTL_UPE;
4929 goto allmulti;
4930 }
4931
4932 /*
4933 * Set the station address in the first RAL slot, and
4934 * clear the remaining slots.
4935 */
4936 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4937 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4938 size = WM_ICH8_RAL_TABSIZE;
4939 else
4940 size = WM_RAL_TABSIZE;
4941 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4942 for (i = 1; i < size; i++)
4943 wm_set_ral(sc, NULL, i);
4944
4945 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4946 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4947 size = WM_ICH8_MC_TABSIZE;
4948 else
4949 size = WM_MC_TABSIZE;
4950 /* Clear out the multicast table. */
4951 for (i = 0; i < size; i++)
4952 CSR_WRITE(sc, mta_reg + (i << 2), 0);
4953
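	/*
	 * Set the hash table bit for each multicast address: the upper
	 * bits of the hash select the 32-bit MTA word and the low 5
	 * bits select the bit within it.
	 */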
4954 ETHER_FIRST_MULTI(step, ec, enm);
4955 while (enm != NULL) {
4956 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4957 /*
4958 * We must listen to a range of multicast addresses.
4959 * For now, just accept all multicasts, rather than
4960 * trying to set only those filter bits needed to match
4961 * the range. (At this time, the only use of address
4962 * ranges is for IP multicast routing, for which the
4963 * range is big enough to require all bits set.)
4964 */
4965 goto allmulti;
4966 }
4967
4968 hash = wm_mchash(sc, enm->enm_addrlo);
4969
4970 reg = (hash >> 5);
4971 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4972 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4973 reg &= 0x1f;
4974 else
4975 reg &= 0x7f;
4976 bit = hash & 0x1f;
4977
4978 hash = CSR_READ(sc, mta_reg + (reg << 2));
4979 hash |= 1U << bit;
4980
		/*
		 * XXX Hardware bug?  On the 82544, writing this MTA
		 * slot appears to clobber the previous one, so save
		 * the neighbour and write it back afterwards.
		 */
4982 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
4983 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4984 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4985 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4986 } else
4987 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4988
4989 ETHER_NEXT_MULTI(step, enm);
4990 }
4991
4992 ifp->if_flags &= ~IFF_ALLMULTI;
4993 goto setit;
4994
4995 allmulti:
4996 ifp->if_flags |= IFF_ALLMULTI;
4997 sc->sc_rctl |= RCTL_MPE;
4998
4999 setit:
5000 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
5001 }
5002
5003 /*
5004 * wm_tbi_mediainit:
5005 *
5006 * Initialize media for use on 1000BASE-X devices.
5007 */
5008 static void
5009 wm_tbi_mediainit(struct wm_softc *sc)
5010 {
5011 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5012 const char *sep = "";
5013
5014 if (sc->sc_type < WM_T_82543)
5015 sc->sc_tipg = TIPG_WM_DFLT;
5016 else
5017 sc->sc_tipg = TIPG_LG_DFLT;
5018
5019 sc->sc_tbi_anegticks = 5;
5020
5021 /* Initialize our media structures */
5022 sc->sc_mii.mii_ifp = ifp;
5023
5024 sc->sc_ethercom.ec_mii = &sc->sc_mii;
5025 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5026 wm_tbi_mediastatus);
5027
5028 /*
5029 * SWD Pins:
5030 *
5031 * 0 = Link LED (output)
5032 * 1 = Loss Of Signal (input)
5033 */
5034 sc->sc_ctrl |= CTRL_SWDPIO(0);
5035 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5036
5037 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5038
5039 #define ADD(ss, mm, dd) \
5040 do { \
5041 aprint_normal("%s%s", sep, ss); \
5042 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
5043 sep = ", "; \
5044 } while (/*CONSTCOND*/0)
5045
5046 aprint_normal_dev(sc->sc_dev, "");
5047 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5048 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5049 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5050 aprint_normal("\n");
5051
5052 #undef ADD
5053
5054 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5055 }
5056
5057 /*
5058 * wm_tbi_mediastatus: [ifmedia interface function]
5059 *
5060 * Get the current interface media status on a 1000BASE-X device.
5061 */
5062 static void
5063 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5064 {
5065 struct wm_softc *sc = ifp->if_softc;
5066 uint32_t ctrl, status;
5067
5068 ifmr->ifm_status = IFM_AVALID;
5069 ifmr->ifm_active = IFM_ETHER;
5070
5071 status = CSR_READ(sc, WMREG_STATUS);
5072 if ((status & STATUS_LU) == 0) {
5073 ifmr->ifm_active |= IFM_NONE;
5074 return;
5075 }
5076
5077 ifmr->ifm_status |= IFM_ACTIVE;
5078 ifmr->ifm_active |= IFM_1000_SX;
5079 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5080 ifmr->ifm_active |= IFM_FDX;
5081 ctrl = CSR_READ(sc, WMREG_CTRL);
5082 if (ctrl & CTRL_RFCE)
5083 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5084 if (ctrl & CTRL_TFCE)
5085 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5086 }
5087
5088 /*
5089 * wm_tbi_mediachange: [ifmedia interface function]
5090 *
5091 * Set hardware to newly-selected media on a 1000BASE-X device.
5092 */
5093 static int
5094 wm_tbi_mediachange(struct ifnet *ifp)
5095 {
5096 struct wm_softc *sc = ifp->if_softc;
5097 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5098 uint32_t status;
5099 int i;
5100
5101 sc->sc_txcw = 0;
5102 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5103 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5104 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5105 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5106 sc->sc_txcw |= TXCW_ANE;
5107 } else {
5108 /*
5109 * If autonegotiation is turned off, force link up and turn on
5110 * full duplex
5111 */
5112 sc->sc_txcw &= ~TXCW_ANE;
5113 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5114 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5115 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5116 delay(1000);
5117 }
5118
5119 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
5120 device_xname(sc->sc_dev),sc->sc_txcw));
5121 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5122 delay(10000);
5123
5124 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
5125 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
5126
5127 /*
5128 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
5129 * optics detect a signal, 0 if they don't.
5130 */
5131 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
5132 /* Have signal; wait for the link to come up. */
5133
5134 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5135 /*
5136 * Reset the link, and let autonegotiation do its thing
5137 */
5138 sc->sc_ctrl |= CTRL_LRST;
5139 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5140 delay(1000);
5141 sc->sc_ctrl &= ~CTRL_LRST;
5142 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5143 delay(1000);
5144 }
5145
5146 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
5147 delay(10000);
5148 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
5149 break;
5150 }
5151
5152 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
5153 device_xname(sc->sc_dev),i));
5154
5155 status = CSR_READ(sc, WMREG_STATUS);
5156 DPRINTF(WM_DEBUG_LINK,
5157 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
5158 device_xname(sc->sc_dev),status, STATUS_LU));
5159 if (status & STATUS_LU) {
5160 /* Link is up. */
5161 DPRINTF(WM_DEBUG_LINK,
5162 ("%s: LINK: set media -> link up %s\n",
5163 device_xname(sc->sc_dev),
5164 (status & STATUS_FD) ? "FDX" : "HDX"));
5165
5166 /*
5167 * NOTE: CTRL will update TFCE and RFCE automatically,
5168 * so we should update sc->sc_ctrl
5169 */
5170 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5171 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5172 sc->sc_fcrtl &= ~FCRTL_XONE;
5173 if (status & STATUS_FD)
5174 sc->sc_tctl |=
5175 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5176 else
5177 sc->sc_tctl |=
5178 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5179 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
5180 sc->sc_fcrtl |= FCRTL_XONE;
5181 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5182 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5183 WMREG_OLD_FCRTL : WMREG_FCRTL,
5184 sc->sc_fcrtl);
5185 sc->sc_tbi_linkup = 1;
5186 } else {
5187 if (i == WM_LINKUP_TIMEOUT)
5188 wm_check_for_link(sc);
5189 /* Link is down. */
5190 DPRINTF(WM_DEBUG_LINK,
5191 ("%s: LINK: set media -> link down\n",
5192 device_xname(sc->sc_dev)));
5193 sc->sc_tbi_linkup = 0;
5194 }
5195 } else {
5196 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
5197 device_xname(sc->sc_dev)));
5198 sc->sc_tbi_linkup = 0;
5199 }
5200
5201 wm_tbi_set_linkled(sc);
5202
5203 return 0;
5204 }
5205
5206 /*
5207 * wm_tbi_set_linkled:
5208 *
5209 * Update the link LED on 1000BASE-X devices.
5210 */
5211 static void
5212 wm_tbi_set_linkled(struct wm_softc *sc)
5213 {
5214
5215 if (sc->sc_tbi_linkup)
5216 sc->sc_ctrl |= CTRL_SWDPIN(0);
5217 else
5218 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
5219
5220 /* 82540 or newer devices are active low */
5221 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
5222
5223 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5224 }
5225
5226 /*
5227 * wm_tbi_check_link:
5228 *
5229 * Check the link on 1000BASE-X devices.
5230 */
5231 static void
5232 wm_tbi_check_link(struct wm_softc *sc)
5233 {
5234 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5235 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5236 uint32_t rxcw, ctrl, status;
5237
5238 status = CSR_READ(sc, WMREG_STATUS);
5239
5240 rxcw = CSR_READ(sc, WMREG_RXCW);
5241 ctrl = CSR_READ(sc, WMREG_CTRL);
5242
5243 /* set link status */
5244 if ((status & STATUS_LU) == 0) {
5245 DPRINTF(WM_DEBUG_LINK,
5246 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
5247 sc->sc_tbi_linkup = 0;
5248 } else if (sc->sc_tbi_linkup == 0) {
5249 DPRINTF(WM_DEBUG_LINK,
5250 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
5251 (status & STATUS_FD) ? "FDX" : "HDX"));
5252 sc->sc_tbi_linkup = 1;
5253 }
5254
5255 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
5256 && ((status & STATUS_LU) == 0)) {
5257 sc->sc_tbi_linkup = 0;
5258 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
5259 /* RXCFG storm! */
5260 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
5261 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
5262 wm_init(ifp);
5263 wm_start(ifp);
5264 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5265 /* If the timer expired, retry autonegotiation */
5266 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
5267 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
5268 sc->sc_tbi_ticks = 0;
5269 /*
5270 * Reset the link, and let autonegotiation do
5271 * its thing
5272 */
5273 sc->sc_ctrl |= CTRL_LRST;
5274 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5275 delay(1000);
5276 sc->sc_ctrl &= ~CTRL_LRST;
5277 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5278 delay(1000);
5279 CSR_WRITE(sc, WMREG_TXCW,
5280 sc->sc_txcw & ~TXCW_ANE);
5281 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5282 }
5283 }
5284 }
5285
5286 wm_tbi_set_linkled(sc);
5287 }
5288
5289 /*
5290 * wm_gmii_reset:
5291 *
5292 * Reset the PHY.
5293 */
5294 static void
5295 wm_gmii_reset(struct wm_softc *sc)
5296 {
5297 uint32_t reg;
5298 int rv;
5299
5300 /* get phy semaphore */
5301 switch (sc->sc_type) {
5302 case WM_T_82571:
5303 case WM_T_82572:
5304 case WM_T_82573:
5305 case WM_T_82574:
5306 case WM_T_82583:
5307 /* XXX should get sw semaphore, too */
5308 rv = wm_get_swsm_semaphore(sc);
5309 break;
5310 case WM_T_82575:
5311 case WM_T_82576:
5312 case WM_T_82580:
5313 case WM_T_82580ER:
5314 case WM_T_80003:
5315 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5316 break;
5317 case WM_T_ICH8:
5318 case WM_T_ICH9:
5319 case WM_T_ICH10:
5320 case WM_T_PCH:
5321 rv = wm_get_swfwhw_semaphore(sc);
5322 break;
5323 default:
		/* nothing to do */
5325 rv = 0;
5326 break;
5327 }
5328 if (rv != 0) {
5329 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5330 __func__);
5331 return;
5332 }
5333
5334 switch (sc->sc_type) {
5335 case WM_T_82542_2_0:
5336 case WM_T_82542_2_1:
5337 /* null */
5338 break;
5339 case WM_T_82543:
5340 /*
5341 * With 82543, we need to force speed and duplex on the MAC
5342 * equal to what the PHY speed and duplex configuration is.
5343 * In addition, we need to perform a hardware reset on the PHY
5344 * to take it out of reset.
5345 */
5346 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5347 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5348
5349 /* The PHY reset pin is active-low. */
5350 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5351 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
5352 CTRL_EXT_SWDPIN(4));
5353 reg |= CTRL_EXT_SWDPIO(4);
5354
5355 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5356 delay(10*1000);
5357
5358 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
5359 delay(150);
5360 #if 0
5361 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
5362 #endif
5363 delay(20*1000); /* XXX extra delay to get PHY ID? */
5364 break;
5365 case WM_T_82544: /* reset 10000us */
5366 case WM_T_82540:
5367 case WM_T_82545:
5368 case WM_T_82545_3:
5369 case WM_T_82546:
5370 case WM_T_82546_3:
5371 case WM_T_82541:
5372 case WM_T_82541_2:
5373 case WM_T_82547:
5374 case WM_T_82547_2:
5375 case WM_T_82571: /* reset 100us */
5376 case WM_T_82572:
5377 case WM_T_82573:
5378 case WM_T_82574:
5379 case WM_T_82575:
5380 case WM_T_82576:
5381 case WM_T_82580:
5382 case WM_T_82580ER:
5383 case WM_T_82583:
5384 case WM_T_80003:
5385 /* generic reset */
5386 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5387 delay((sc->sc_type >= WM_T_82571) ? 100 : 10*1000);
5388 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5389 delay(150);
5390
5391 if ((sc->sc_type == WM_T_82541)
5392 || (sc->sc_type == WM_T_82541_2)
5393 || (sc->sc_type == WM_T_82547)
5394 || (sc->sc_type == WM_T_82547_2)) {
			/* workarounds for IGP are done in igp_reset() */
5396 /* XXX add code to set LED after phy reset */
5397 }
5398 break;
5399 case WM_T_ICH8:
5400 case WM_T_ICH9:
5401 case WM_T_ICH10:
5402 case WM_T_PCH:
5403 /* generic reset */
5404 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5405 delay(100);
5406 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5407 delay(150);
5408 break;
5409 default:
5410 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5411 __func__);
5412 break;
5413 }
5414
5415 /* release PHY semaphore */
5416 switch (sc->sc_type) {
5417 case WM_T_82571:
5418 case WM_T_82572:
5419 case WM_T_82573:
5420 case WM_T_82574:
5421 case WM_T_82583:
5422 /* XXX should put sw semaphore, too */
5423 wm_put_swsm_semaphore(sc);
5424 break;
5425 case WM_T_82575:
5426 case WM_T_82576:
5427 case WM_T_82580:
5428 case WM_T_82580ER:
5429 case WM_T_80003:
5430 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5431 break;
5432 case WM_T_ICH8:
5433 case WM_T_ICH9:
5434 case WM_T_ICH10:
5435 case WM_T_PCH:
5436 wm_put_swfwhw_semaphore(sc);
5437 break;
5438 default:
		/* nothing to do */
5440 rv = 0;
5441 break;
5442 }
5443
5444 /* get_cfg_done */
5445 wm_get_cfg_done(sc);
5446
5447 /* extra setup */
5448 switch (sc->sc_type) {
5449 case WM_T_82542_2_0:
5450 case WM_T_82542_2_1:
5451 case WM_T_82543:
5452 case WM_T_82544:
5453 case WM_T_82540:
5454 case WM_T_82545:
5455 case WM_T_82545_3:
5456 case WM_T_82546:
5457 case WM_T_82546_3:
5458 case WM_T_82541_2:
5459 case WM_T_82547_2:
5460 case WM_T_82571:
5461 case WM_T_82572:
5462 case WM_T_82573:
5463 case WM_T_82574:
5464 case WM_T_82575:
5465 case WM_T_82576:
5466 case WM_T_82580:
5467 case WM_T_82580ER:
5468 case WM_T_82583:
5469 case WM_T_80003:
5470 /* null */
5471 break;
5472 case WM_T_82541:
5473 case WM_T_82547:
		/* XXX Configure the activity LED after PHY reset */
5475 break;
5476 case WM_T_ICH8:
5477 case WM_T_ICH9:
5478 case WM_T_ICH10:
5479 case WM_T_PCH:
		/* Allow time for h/w to get to a quiescent state after reset */
5481 delay(10*1000);
5482
5483 if (sc->sc_type == WM_T_PCH) {
5484 wm_hv_phy_workaround_ich8lan(sc);
5485
5486 /*
5487 * dummy read to clear the phy wakeup bit after lcd
5488 * reset
5489 */
5490 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
5491 }
5492
5493 /*
		 * XXX Configure the LCD with the extended configuration
		 * region in NVM.
5496 */
5497
5498 /* Configure the LCD with the OEM bits in NVM */
5499 if (sc->sc_type == WM_T_PCH) {
5500 /*
5501 * Disable LPLU.
5502 * XXX It seems that 82567 has LPLU, too.
5503 */
5504 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
5506 reg |= HV_OEM_BITS_ANEGNOW;
5507 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
5508 }
5509 break;
5510 default:
5511 panic("%s: unknown type\n", __func__);
5512 break;
5513 }
5514 }
5515
5516 /*
5517 * wm_gmii_mediainit:
5518 *
5519 * Initialize media for use on 1000BASE-T devices.
5520 */
5521 static void
5522 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
5523 {
5524 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5525
5526 /* We have MII. */
5527 sc->sc_flags |= WM_F_HAS_MII;
5528
5529 if (sc->sc_type == WM_T_80003)
5530 sc->sc_tipg = TIPG_1000T_80003_DFLT;
5531 else
5532 sc->sc_tipg = TIPG_1000T_DFLT;
5533
5534 /*
5535 * Let the chip set speed/duplex on its own based on
5536 * signals from the PHY.
5537 * XXXbouyer - I'm not sure this is right for the 80003,
5538 * the em driver only sets CTRL_SLU here - but it seems to work.
5539 */
5540 sc->sc_ctrl |= CTRL_SLU;
5541 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5542
5543 /* Initialize our media structures and probe the GMII. */
5544 sc->sc_mii.mii_ifp = ifp;
5545
5546 switch (prodid) {
5547 case PCI_PRODUCT_INTEL_PCH_M_LM:
5548 case PCI_PRODUCT_INTEL_PCH_M_LC:
5549 /* 82577 */
5550 sc->sc_phytype = WMPHY_82577;
5551 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5552 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5553 break;
5554 case PCI_PRODUCT_INTEL_PCH_D_DM:
5555 case PCI_PRODUCT_INTEL_PCH_D_DC:
5556 /* 82578 */
5557 sc->sc_phytype = WMPHY_82578;
5558 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5559 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5560 break;
5561 case PCI_PRODUCT_INTEL_82801I_BM:
5562 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
5563 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
5564 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
5565 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
5566 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
5567 /* 82567 */
5568 sc->sc_phytype = WMPHY_BM;
5569 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5570 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5571 break;
5572 default:
5573 if ((sc->sc_flags & WM_F_SGMII) != 0) {
5574 sc->sc_mii.mii_readreg = wm_sgmii_readreg;
5575 sc->sc_mii.mii_writereg = wm_sgmii_writereg;
5576 } else if (sc->sc_type >= WM_T_80003) {
5577 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
5578 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
5579 } else if (sc->sc_type >= WM_T_82544) {
5580 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
5581 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
5582 } else {
5583 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
5584 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
5585 }
5586 break;
5587 }
5588 sc->sc_mii.mii_statchg = wm_gmii_statchg;
5589
5590 wm_gmii_reset(sc);
5591
5592 sc->sc_ethercom.ec_mii = &sc->sc_mii;
5593 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
5594 wm_gmii_mediastatus);
5595
5596 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
5597 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)) {
5598 if ((sc->sc_flags & WM_F_SGMII) == 0) {
5599 /* Attach only one port */
5600 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
5601 MII_OFFSET_ANY, MIIF_DOPAUSE);
5602 } else {
5603 int i;
5604 uint32_t ctrl_ext;
5605
5606 /* Power on sgmii phy if it is disabled */
5607 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
5608 CSR_WRITE(sc, WMREG_CTRL_EXT,
5609 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
5610 CSR_WRITE_FLUSH(sc);
5611 delay(300*1000); /* XXX too long */
5612
			/* try PHY addresses 1 through 7 */
5614 for (i = 1; i < 8; i++)
5615 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
5616 i, MII_OFFSET_ANY, MIIF_DOPAUSE);
5617
5618 /* restore previous sfp cage power state */
5619 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
5620 }
5621 } else {
5622 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5623 MII_OFFSET_ANY, MIIF_DOPAUSE);
5624 }
5625
5626 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5627 /* if failed, retry with *_bm_* */
5628 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5629 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5630
5631 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5632 MII_OFFSET_ANY, MIIF_DOPAUSE);
5633 }
5634 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5635 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
5636 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
5637 sc->sc_phytype = WMPHY_NONE;
5638 } else {
5639 /* Check PHY type */
5640 uint32_t model;
5641 struct mii_softc *child;
5642
5643 child = LIST_FIRST(&sc->sc_mii.mii_phys);
5644 if (device_is_a(child->mii_dev, "igphy")) {
5645 struct igphy_softc *isc = (struct igphy_softc *)child;
5646
5647 model = isc->sc_mii.mii_mpd_model;
5648 if (model == MII_MODEL_yyINTEL_I82566)
5649 sc->sc_phytype = WMPHY_IGP_3;
5650 }
5651
5652 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5653 }
5654 }
5655
5656 /*
5657 * wm_gmii_mediastatus: [ifmedia interface function]
5658 *
5659 * Get the current interface media status on a 1000BASE-T device.
5660 */
5661 static void
5662 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5663 {
5664 struct wm_softc *sc = ifp->if_softc;
5665
5666 ether_mediastatus(ifp, ifmr);
5667 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
5668 | sc->sc_flowflags;
5669 }
5670
5671 /*
5672 * wm_gmii_mediachange: [ifmedia interface function]
5673 *
5674 * Set hardware to newly-selected media on a 1000BASE-T device.
5675 */
5676 static int
5677 wm_gmii_mediachange(struct ifnet *ifp)
5678 {
5679 struct wm_softc *sc = ifp->if_softc;
5680 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5681 int rc;
5682
5683 if ((ifp->if_flags & IFF_UP) == 0)
5684 return 0;
5685
5686 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5687 sc->sc_ctrl |= CTRL_SLU;
5688 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
5689 || (sc->sc_type > WM_T_82543)) {
5690 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
5691 } else {
5692 sc->sc_ctrl &= ~CTRL_ASDE;
5693 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5694 if (ife->ifm_media & IFM_FDX)
5695 sc->sc_ctrl |= CTRL_FD;
5696 switch (IFM_SUBTYPE(ife->ifm_media)) {
5697 case IFM_10_T:
5698 sc->sc_ctrl |= CTRL_SPEED_10;
5699 break;
5700 case IFM_100_TX:
5701 sc->sc_ctrl |= CTRL_SPEED_100;
5702 break;
5703 case IFM_1000_T:
5704 sc->sc_ctrl |= CTRL_SPEED_1000;
5705 break;
5706 default:
5707 panic("wm_gmii_mediachange: bad media 0x%x",
5708 ife->ifm_media);
5709 }
5710 }
5711 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5712 if (sc->sc_type <= WM_T_82543)
5713 wm_gmii_reset(sc);
5714
5715 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
5716 return 0;
5717 return rc;
5718 }
5719
5720 #define MDI_IO CTRL_SWDPIN(2)
5721 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
5722 #define MDI_CLK CTRL_SWDPIN(3)
5723
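/*
 * The helpers below bit-bang a standard IEEE 802.3 clause 22 MDIO frame
 * over the 82543's software-definable pins: 32 preamble bits of all ones,
 * then a 14-bit header (start, opcode, PHY address, register address),
 * a turnaround, and 16 data bits, toggling MDI_CLK by hand with ~10us
 * between edges.
 */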
5724 static void
5725 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
5726 {
5727 uint32_t i, v;
5728
5729 v = CSR_READ(sc, WMREG_CTRL);
5730 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5731 v |= MDI_DIR | CTRL_SWDPIO(3);
5732
5733 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
5734 if (data & i)
5735 v |= MDI_IO;
5736 else
5737 v &= ~MDI_IO;
5738 CSR_WRITE(sc, WMREG_CTRL, v);
5739 delay(10);
5740 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5741 delay(10);
5742 CSR_WRITE(sc, WMREG_CTRL, v);
5743 delay(10);
5744 }
5745 }
5746
5747 static uint32_t
5748 i82543_mii_recvbits(struct wm_softc *sc)
5749 {
5750 uint32_t v, i, data = 0;
5751
5752 v = CSR_READ(sc, WMREG_CTRL);
5753 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5754 v |= CTRL_SWDPIO(3);
5755
5756 CSR_WRITE(sc, WMREG_CTRL, v);
5757 delay(10);
5758 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5759 delay(10);
5760 CSR_WRITE(sc, WMREG_CTRL, v);
5761 delay(10);
5762
5763 for (i = 0; i < 16; i++) {
5764 data <<= 1;
5765 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5766 delay(10);
5767 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
5768 data |= 1;
5769 CSR_WRITE(sc, WMREG_CTRL, v);
5770 delay(10);
5771 }
5772
5773 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5774 delay(10);
5775 CSR_WRITE(sc, WMREG_CTRL, v);
5776 delay(10);
5777
5778 return data;
5779 }
5780
5781 #undef MDI_IO
5782 #undef MDI_DIR
5783 #undef MDI_CLK
5784
5785 /*
5786 * wm_gmii_i82543_readreg: [mii interface function]
5787 *
5788 * Read a PHY register on the GMII (i82543 version).
5789 */
5790 static int
5791 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
5792 {
5793 struct wm_softc *sc = device_private(self);
5794 int rv;
5795
5796 i82543_mii_sendbits(sc, 0xffffffffU, 32);
5797 i82543_mii_sendbits(sc, reg | (phy << 5) |
5798 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
5799 rv = i82543_mii_recvbits(sc) & 0xffff;
5800
5801 DPRINTF(WM_DEBUG_GMII,
5802 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
5803 device_xname(sc->sc_dev), phy, reg, rv));
5804
5805 return rv;
5806 }
5807
5808 /*
5809 * wm_gmii_i82543_writereg: [mii interface function]
5810 *
5811 * Write a PHY register on the GMII (i82543 version).
5812 */
5813 static void
5814 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
5815 {
5816 struct wm_softc *sc = device_private(self);
5817
5818 i82543_mii_sendbits(sc, 0xffffffffU, 32);
5819 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
5820 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
5821 (MII_COMMAND_START << 30), 32);
5822 }
5823
5824 /*
5825 * wm_gmii_i82544_readreg: [mii interface function]
5826 *
5827 * Read a PHY register on the GMII.
5828 */
5829 static int
5830 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
5831 {
5832 struct wm_softc *sc = device_private(self);
5833 uint32_t mdic = 0;
5834 int i, rv;
5835
5836 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
5837 MDIC_REGADD(reg));
5838
5839 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
5840 mdic = CSR_READ(sc, WMREG_MDIC);
5841 if (mdic & MDIC_READY)
5842 break;
5843 delay(50);
5844 }
5845
5846 if ((mdic & MDIC_READY) == 0) {
5847 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
5848 device_xname(sc->sc_dev), phy, reg);
5849 rv = 0;
5850 } else if (mdic & MDIC_E) {
5851 #if 0 /* This is normal if no PHY is present. */
5852 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
5853 device_xname(sc->sc_dev), phy, reg);
5854 #endif
5855 rv = 0;
5856 } else {
5857 rv = MDIC_DATA(mdic);
5858 if (rv == 0xffff)
5859 rv = 0;
5860 }
5861
5862 return rv;
5863 }
5864
5865 /*
5866 * wm_gmii_i82544_writereg: [mii interface function]
5867 *
5868 * Write a PHY register on the GMII.
5869 */
5870 static void
5871 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
5872 {
5873 struct wm_softc *sc = device_private(self);
5874 uint32_t mdic = 0;
5875 int i;
5876
5877 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
5878 MDIC_REGADD(reg) | MDIC_DATA(val));
5879
5880 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
5881 mdic = CSR_READ(sc, WMREG_MDIC);
5882 if (mdic & MDIC_READY)
5883 break;
5884 delay(50);
5885 }
5886
5887 if ((mdic & MDIC_READY) == 0)
5888 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
5889 device_xname(sc->sc_dev), phy, reg);
5890 else if (mdic & MDIC_E)
5891 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
5892 device_xname(sc->sc_dev), phy, reg);
5893 }
5894
5895 /*
5896 * wm_gmii_i80003_readreg: [mii interface function]
5897 *
5898  *	Read a PHY register on the Kumeran interface.
5899  *	This could be handled by the PHY layer if we didn't have to lock
5900  *	the resource ...
5901 */
5902 static int
5903 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
5904 {
5905 struct wm_softc *sc = device_private(self);
5906 int sem;
5907 int rv;
5908
5909 if (phy != 1) /* only one PHY on kumeran bus */
5910 return 0;
5911
5912 sem = swfwphysem[sc->sc_funcid];
5913 if (wm_get_swfw_semaphore(sc, sem)) {
5914 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5915 __func__);
5916 return 0;
5917 }
5918
5919 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5920 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5921 reg >> GG82563_PAGE_SHIFT);
5922 } else {
5923 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5924 reg >> GG82563_PAGE_SHIFT);
5925 }
5926 	/* Wait another 200us to work around a bug in the MDIC ready bit */
5927 delay(200);
5928 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5929 delay(200);
5930
5931 wm_put_swfw_semaphore(sc, sem);
5932 return rv;
5933 }
5934
5935 /*
5936 * wm_gmii_i80003_writereg: [mii interface function]
5937 *
5938  *	Write a PHY register on the Kumeran interface.
5939  *	This could be handled by the PHY layer if we didn't have to lock
5940  *	the resource ...
5941 */
5942 static void
5943 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
5944 {
5945 struct wm_softc *sc = device_private(self);
5946 int sem;
5947
5948 if (phy != 1) /* only one PHY on kumeran bus */
5949 return;
5950
5951 sem = swfwphysem[sc->sc_funcid];
5952 if (wm_get_swfw_semaphore(sc, sem)) {
5953 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5954 __func__);
5955 return;
5956 }
5957
5958 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5959 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5960 reg >> GG82563_PAGE_SHIFT);
5961 } else {
5962 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5963 reg >> GG82563_PAGE_SHIFT);
5964 }
5965 	/* Wait another 200us to work around a bug in the MDIC ready bit */
5966 delay(200);
5967 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5968 delay(200);
5969
5970 wm_put_swfw_semaphore(sc, sem);
5971 }
5972
5973 /*
5974 * wm_gmii_bm_readreg: [mii interface function]
5975 *
5976  *	Read a PHY register on the BM PHY.
5977  *	This could be handled by the PHY layer if we didn't have to lock
5978  *	the resource ...
5979 */
5980 static int
5981 wm_gmii_bm_readreg(device_t self, int phy, int reg)
5982 {
5983 struct wm_softc *sc = device_private(self);
5984 int sem;
5985 int rv;
5986
5987 sem = swfwphysem[sc->sc_funcid];
5988 if (wm_get_swfw_semaphore(sc, sem)) {
5989 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5990 __func__);
5991 return 0;
5992 }
5993
5994 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
5995 if (phy == 1)
5996 wm_gmii_i82544_writereg(self, phy, 0x1f,
5997 reg);
5998 else
5999 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6000 reg >> GG82563_PAGE_SHIFT);
6001
6002 }
6003
6004 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6005 wm_put_swfw_semaphore(sc, sem);
6006 return rv;
6007 }
6008
6009 /*
6010 * wm_gmii_bm_writereg: [mii interface function]
6011 *
6012  *	Write a PHY register on the BM PHY.
6013  *	This could be handled by the PHY layer if we didn't have to lock
6014  *	the resource ...
6015 */
6016 static void
6017 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6018 {
6019 struct wm_softc *sc = device_private(self);
6020 int sem;
6021
6022 sem = swfwphysem[sc->sc_funcid];
6023 if (wm_get_swfw_semaphore(sc, sem)) {
6024 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6025 __func__);
6026 return;
6027 }
6028
6029 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6030 if (phy == 1)
6031 wm_gmii_i82544_writereg(self, phy, 0x1f,
6032 reg);
6033 else
6034 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6035 reg >> GG82563_PAGE_SHIFT);
6036
6037 }
6038
6039 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6040 wm_put_swfw_semaphore(sc, sem);
6041 }
6042
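/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write a BM PHY wakeup register on page 800.
 *	Page 800 can't be reached directly: host wakeup access has to be
 *	enabled on page 769 first, so the sequence is: select page 769,
 *	save the WUC enable state and enable access, select page 800,
 *	perform the access, then restore page 769's previous state.
 */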
6043 static void
6044 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6045 {
6046 struct wm_softc *sc = device_private(self);
6047 uint16_t regnum = BM_PHY_REG_NUM(offset);
6048 uint16_t wuce;
6049
6050 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6051 if (sc->sc_type == WM_T_PCH) {
6052 		/* XXX The e1000 driver does nothing here... why? */
6053 }
6054
6055 /* Set page 769 */
6056 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6057 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6058
6059 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6060
6061 wuce &= ~BM_WUC_HOST_WU_BIT;
6062 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6063 wuce | BM_WUC_ENABLE_BIT);
6064
6065 /* Select page 800 */
6066 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6067 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6068
6069 /* Write page 800 */
6070 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6071
6072 if (rd)
6073 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6074 else
6075 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6076
6077 /* Set page 769 */
6078 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6079 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6080
6081 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6082 }
6083
6084 /*
6085 * wm_gmii_hv_readreg: [mii interface function]
6086 *
6087  *	Read a PHY register on the HV PHY (82577/82578).
6088  *	This could be handled by the PHY layer if we didn't have to lock
6089  *	the resource ...
6090 */
6091 static int
6092 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6093 {
6094 struct wm_softc *sc = device_private(self);
6095 uint16_t page = BM_PHY_REG_PAGE(reg);
6096 uint16_t regnum = BM_PHY_REG_NUM(reg);
6097 uint16_t val;
6098 int rv;
6099
6100 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6101 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6102 __func__);
6103 return 0;
6104 }
6105
6106 /* XXX Workaround failure in MDIO access while cable is disconnected */
6107 if (sc->sc_phytype == WMPHY_82577) {
6108 /* XXX must write */
6109 }
6110
6111 /* Page 800 works differently than the rest so it has its own func */
6112 if (page == BM_WUC_PAGE) {
6113 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
6114 return val;
6115 }
6116
6117 /*
6118 	 * Pages lower than 768 (HV_INTC_FC_PAGE_START) work differently
6119 	 * than the rest and would need their own function; unimplemented.
6120 */
6121 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6122 printf("gmii_hv_readreg!!!\n");
6123 return 0;
6124 }
6125
6126 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6127 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6128 page << BME1000_PAGE_SHIFT);
6129 }
6130
6131 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6132 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6133 return rv;
6134 }
6135
6136 /*
6137 * wm_gmii_hv_writereg: [mii interface function]
6138 *
6139  *	Write a PHY register on the HV PHY (82577/82578).
6140  *	This could be handled by the PHY layer if we didn't have to lock
6141  *	the resource ...
6142 */
6143 static void
6144 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6145 {
6146 struct wm_softc *sc = device_private(self);
6147 uint16_t page = BM_PHY_REG_PAGE(reg);
6148 uint16_t regnum = BM_PHY_REG_NUM(reg);
6149
6150 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6151 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6152 __func__);
6153 return;
6154 }
6155
6156 /* XXX Workaround failure in MDIO access while cable is disconnected */
6157
6158 /* Page 800 works differently than the rest so it has its own func */
6159 if (page == BM_WUC_PAGE) {
6160 uint16_t tmp;
6161
6162 tmp = val;
6163 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
6164 return;
6165 }
6166
6167 /*
6168 	 * Pages lower than 768 (HV_INTC_FC_PAGE_START) work differently
6169 	 * than the rest and would need their own function; unimplemented.
6170 */
6171 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6172 printf("gmii_hv_writereg!!!\n");
6173 return;
6174 }
6175
6176 /*
6177 * XXX Workaround MDIO accesses being disabled after entering IEEE
6178 * Power Down (whenever bit 11 of the PHY control register is set)
6179 */
6180
6181 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6182 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6183 page << BME1000_PAGE_SHIFT);
6184 }
6185
6186 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6187 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6188 }
6189
6190 /*
6191  * wm_sgmii_readreg:	[mii interface function]
6192  *
6193  *	Read a PHY register on the SGMII interface via the I2CCMD register.
6194  *	This could be handled by the PHY layer if we didn't have to lock
6195  *	the resource ...
6196 */
6197 static int
6198 wm_sgmii_readreg(device_t self, int phy, int reg)
6199 {
6200 struct wm_softc *sc = device_private(self);
6201 uint32_t i2ccmd;
6202 int i, rv;
6203
6204 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6205 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6206 __func__);
6207 return 0;
6208 }
6209
6210 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6211 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6212 | I2CCMD_OPCODE_READ;
6213 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6214
6215 /* Poll the ready bit */
6216 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6217 delay(50);
6218 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6219 if (i2ccmd & I2CCMD_READY)
6220 break;
6221 }
6222 if ((i2ccmd & I2CCMD_READY) == 0)
6223 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
6224 if ((i2ccmd & I2CCMD_ERROR) != 0)
6225 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6226
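	/* The 16 data bits in I2CCMD are byte-swapped; restore host order. */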
6227 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
6228
6229 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6230 return rv;
6231 }
6232
6233 /*
6234  * wm_sgmii_writereg:	[mii interface function]
6235  *
6236  *	Write a PHY register on the SGMII interface via the I2CCMD register.
6237  *	This could be handled by the PHY layer if we didn't have to lock
6238  *	the resource ...
6239 */
6240 static void
6241 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
6242 {
6243 struct wm_softc *sc = device_private(self);
6244 uint32_t i2ccmd;
6245 int i;
6246
6247 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6248 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6249 __func__);
6250 return;
6251 }
6252
6253 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6254 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6255 | I2CCMD_OPCODE_WRITE;
6256 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6257
6258 /* Poll the ready bit */
6259 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6260 delay(50);
6261 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6262 if (i2ccmd & I2CCMD_READY)
6263 break;
6264 }
6265 if ((i2ccmd & I2CCMD_READY) == 0)
6266 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
6267 if ((i2ccmd & I2CCMD_ERROR) != 0)
6268 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6269
6270 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6271 }
6272
6273 /*
6274 * wm_gmii_statchg: [mii interface function]
6275 *
6276 * Callback from MII layer when media changes.
6277 */
6278 static void
6279 wm_gmii_statchg(device_t self)
6280 {
6281 struct wm_softc *sc = device_private(self);
6282 struct mii_data *mii = &sc->sc_mii;
6283
6284 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6285 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6286 sc->sc_fcrtl &= ~FCRTL_XONE;
6287
6288 /*
6289 * Get flow control negotiation result.
6290 */
6291 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
6292 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
6293 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
6294 mii->mii_media_active &= ~IFM_ETH_FMASK;
6295 }
6296
6297 if (sc->sc_flowflags & IFM_FLOW) {
6298 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
6299 sc->sc_ctrl |= CTRL_TFCE;
6300 sc->sc_fcrtl |= FCRTL_XONE;
6301 }
6302 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
6303 sc->sc_ctrl |= CTRL_RFCE;
6304 }
6305
6306 if (sc->sc_mii.mii_media_active & IFM_FDX) {
6307 DPRINTF(WM_DEBUG_LINK,
6308 ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
6309 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6310 } else {
6311 DPRINTF(WM_DEBUG_LINK,
6312 ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
6313 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6314 }
6315
6316 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6317 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6318 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
6319 : WMREG_FCRTL, sc->sc_fcrtl);
6320 if (sc->sc_type == WM_T_80003) {
6321 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
6322 case IFM_1000_T:
6323 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6324 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
6325 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6326 break;
6327 default:
6328 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6329 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
6330 sc->sc_tipg = TIPG_10_100_80003_DFLT;
6331 break;
6332 }
6333 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6334 }
6335 }
6336
6337 /*
6338 * wm_kmrn_readreg:
6339 *
6340 * Read a kumeran register
6341 */
6342 static int
6343 wm_kmrn_readreg(struct wm_softc *sc, int reg)
6344 {
6345 int rv;
6346
6347 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
6348 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6349 aprint_error_dev(sc->sc_dev,
6350 "%s: failed to get semaphore\n", __func__);
6351 return 0;
6352 }
6353 	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
6354 if (wm_get_swfwhw_semaphore(sc)) {
6355 aprint_error_dev(sc->sc_dev,
6356 "%s: failed to get semaphore\n", __func__);
6357 return 0;
6358 }
6359 }
6360
6361 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6362 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6363 KUMCTRLSTA_REN);
6364 delay(2);
6365
6366 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
6367
6368 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
6369 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
6370 	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
6371 wm_put_swfwhw_semaphore(sc);
6372
6373 return rv;
6374 }
6375
6376 /*
6377 * wm_kmrn_writereg:
6378 *
6379 * Write a kumeran register
6380 */
6381 static void
6382 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
6383 {
6384
6385 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
6386 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6387 aprint_error_dev(sc->sc_dev,
6388 "%s: failed to get semaphore\n", __func__);
6389 return;
6390 }
6391 	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
6392 if (wm_get_swfwhw_semaphore(sc)) {
6393 aprint_error_dev(sc->sc_dev,
6394 "%s: failed to get semaphore\n", __func__);
6395 return;
6396 }
6397 }
6398
6399 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6400 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6401 (val & KUMCTRLSTA_MASK));
6402
6403 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
6404 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
6405 	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
6406 wm_put_swfwhw_semaphore(sc);
6407 }
6408
6409 static int
6410 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
6411 {
6412 uint32_t eecd = 0;
6413
6414 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
6415 || sc->sc_type == WM_T_82583) {
6416 eecd = CSR_READ(sc, WMREG_EECD);
6417
6418 /* Isolate bits 15 & 16 */
6419 eecd = ((eecd >> 15) & 0x03);
6420
6421 /* If both bits are set, device is Flash type */
6422 if (eecd == 0x03)
6423 return 0;
6424 }
6425 return 1;
6426 }
6427
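/*
 * wm_is_onboard_nvm_eeprom:
 *
 *	Return 1 if the NVM is an on-board EEPROM, or 0 if it is Flash.
 *	The 82573/82574/82583 report the NVM type in EECD bits 15 and 16;
 *	when both bits are set the device uses Flash.
 */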
6428 static int
6429 wm_get_swsm_semaphore(struct wm_softc *sc)
6430 {
6431 int32_t timeout;
6432 uint32_t swsm;
6433
6434 /* Get the FW semaphore. */
6435 timeout = 1000 + 1; /* XXX */
6436 while (timeout) {
6437 swsm = CSR_READ(sc, WMREG_SWSM);
6438 swsm |= SWSM_SWESMBI;
6439 CSR_WRITE(sc, WMREG_SWSM, swsm);
6440 /* if we managed to set the bit we got the semaphore. */
6441 swsm = CSR_READ(sc, WMREG_SWSM);
6442 if (swsm & SWSM_SWESMBI)
6443 break;
6444
6445 delay(50);
6446 timeout--;
6447 }
6448
6449 if (timeout == 0) {
6450 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM semaphore\n");
6451 /* Release semaphores */
6452 wm_put_swsm_semaphore(sc);
6453 return 1;
6454 }
6455 return 0;
6456 }
6457
6458 static void
6459 wm_put_swsm_semaphore(struct wm_softc *sc)
6460 {
6461 uint32_t swsm;
6462
6463 swsm = CSR_READ(sc, WMREG_SWSM);
6464 swsm &= ~(SWSM_SWESMBI);
6465 CSR_WRITE(sc, WMREG_SWSM, swsm);
6466 }
6467
6468 static int
6469 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6470 {
6471 uint32_t swfw_sync;
6472 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
6473 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
6474 	int timeout;
6475
6476 for (timeout = 0; timeout < 200; timeout++) {
6477 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6478 if (wm_get_swsm_semaphore(sc)) {
6479 aprint_error_dev(sc->sc_dev,
6480 "%s: failed to get semaphore\n",
6481 __func__);
6482 return 1;
6483 }
6484 }
6485 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6486 if ((swfw_sync & (swmask | fwmask)) == 0) {
6487 swfw_sync |= swmask;
6488 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6489 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6490 wm_put_swsm_semaphore(sc);
6491 return 0;
6492 }
6493 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6494 wm_put_swsm_semaphore(sc);
6495 delay(5000);
6496 }
6497 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
6498 device_xname(sc->sc_dev), mask, swfw_sync);
6499 return 1;
6500 }
6501
6502 static void
6503 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6504 {
6505 uint32_t swfw_sync;
6506
6507 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6508 while (wm_get_swsm_semaphore(sc) != 0)
6509 continue;
6510 }
6511 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6512 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
6513 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6514 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6515 wm_put_swsm_semaphore(sc);
6516 }
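
/*
 * A minimal usage sketch of the SW/FW semaphore pair above (illustration
 * only, never compiled; WMREG_FOO is a hypothetical register name):
 */
#if 0
	uint32_t reg;

	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM) != 0)
		return 0;		/* resource is held by firmware */
	reg = CSR_READ(sc, WMREG_FOO);	/* exclusive access in here */
	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
#endif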
6517
6518 static int
6519 wm_get_swfwhw_semaphore(struct wm_softc *sc)
6520 {
6521 uint32_t ext_ctrl;
6522 	int timeout;
6523
6524 for (timeout = 0; timeout < 200; timeout++) {
6525 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6526 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
6527 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6528
6529 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6530 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
6531 return 0;
6532 delay(5000);
6533 }
6534 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
6535 device_xname(sc->sc_dev), ext_ctrl);
6536 return 1;
6537 }
6538
6539 static void
6540 wm_put_swfwhw_semaphore(struct wm_softc *sc)
6541 {
6542 uint32_t ext_ctrl;
6543 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6544 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
6545 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6546 }
6547
6548 static int
6549 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
6550 {
6551 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
6552 uint8_t bank_high_byte;
6553 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
6554
6555 if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
6556 /* Value of bit 22 corresponds to the flash bank we're on. */
6557 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
6558 } else {
6559 wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
6560 if ((bank_high_byte & 0xc0) == 0x80)
6561 *bank = 0;
6562 else {
6563 wm_read_ich8_byte(sc, act_offset + bank1_offset,
6564 &bank_high_byte);
6565 if ((bank_high_byte & 0xc0) == 0x80)
6566 *bank = 1;
6567 else {
6568 aprint_error_dev(sc->sc_dev,
6569 "EEPROM not present\n");
6570 return -1;
6571 }
6572 }
6573 }
6574
6575 return 0;
6576 }
6577
6578 /******************************************************************************
6579 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
6580 * register.
6581 *
6582 * sc - Struct containing variables accessed by shared code
6583 * offset - offset of word in the EEPROM to read
6584 * data - word read from the EEPROM
6585 * words - number of words to read
6586 *****************************************************************************/
6587 static int
6588 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
6589 {
6590 int32_t error = 0;
6591 uint32_t flash_bank = 0;
6592 uint32_t act_offset = 0;
6593 uint32_t bank_offset = 0;
6594 uint16_t word = 0;
6595 uint16_t i = 0;
6596
6597 /* We need to know which is the valid flash bank. In the event
6598 * that we didn't allocate eeprom_shadow_ram, we may not be
6599 * managing flash_bank. So it cannot be trusted and needs
6600 * to be updated with each read.
6601 */
6602 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
6603 if (error) {
6604 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
6605 __func__);
6606 return error;
6607 }
6608
6609 /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
6610 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
6611
6612 error = wm_get_swfwhw_semaphore(sc);
6613 if (error) {
6614 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6615 __func__);
6616 return error;
6617 }
6618
6619 for (i = 0; i < words; i++) {
6620 /* The NVM part needs a byte offset, hence * 2 */
6621 act_offset = bank_offset + ((offset + i) * 2);
6622 error = wm_read_ich8_word(sc, act_offset, &word);
6623 if (error) {
6624 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
6625 __func__);
6626 break;
6627 }
6628 data[i] = word;
6629 }
6630
6631 wm_put_swfwhw_semaphore(sc);
6632 return error;
6633 }
6634
6635 /******************************************************************************
6636 * This function does initial flash setup so that a new read/write/erase cycle
6637 * can be started.
6638 *
6639 * sc - The pointer to the hw structure
6640 ****************************************************************************/
6641 static int32_t
6642 wm_ich8_cycle_init(struct wm_softc *sc)
6643 {
6644 uint16_t hsfsts;
6645 int32_t error = 1;
6646 int32_t i = 0;
6647
6648 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6649
6650 	/* Check the Flash Descriptor Valid bit in HW status */
6651 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
6652 return error;
6653 }
6654
6655 /* Clear FCERR in Hw status by writing 1 */
6656 /* Clear DAEL in Hw status by writing a 1 */
6657 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
6658
6659 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6660
6661 /*
6662 	 * Either we should have a hardware SPI cycle-in-progress bit to
6663 	 * check against in order to start a new cycle, or the FDONE bit
6664 	 * should be set by the hardware after reset so that it can be used
6665 	 * to tell whether a cycle is in progress or has completed.  We
6666 	 * should also have some software semaphore mechanism to guard FDONE
6667 	 * or the cycle-in-progress bit so that two threads' accesses to
6668 	 * those bits are serialized, or some other way to keep two threads
6669 	 * from starting a cycle at the same time.
6670 */
6671
6672 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6673 /*
6674 * There is no cycle running at present, so we can start a
6675 * cycle
6676 */
6677
6678 /* Begin by setting Flash Cycle Done. */
6679 hsfsts |= HSFSTS_DONE;
6680 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6681 error = 0;
6682 } else {
6683 /*
6684 * otherwise poll for sometime so the current cycle has a
6685 * chance to end before giving up.
6686 */
6687 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
6688 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6689 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6690 error = 0;
6691 break;
6692 }
6693 delay(1);
6694 }
6695 if (error == 0) {
6696 /*
6697 * Successful in waiting for previous cycle to timeout,
6698 * now set the Flash Cycle Done.
6699 */
6700 hsfsts |= HSFSTS_DONE;
6701 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6702 }
6703 }
6704 return error;
6705 }
6706
6707 /******************************************************************************
6708 * This function starts a flash cycle and waits for its completion
6709 *
6710 * sc - The pointer to the hw structure
6711 ****************************************************************************/
6712 static int32_t
6713 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
6714 {
6715 uint16_t hsflctl;
6716 uint16_t hsfsts;
6717 int32_t error = 1;
6718 uint32_t i = 0;
6719
6720 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
6721 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6722 hsflctl |= HSFCTL_GO;
6723 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6724
6725 /* wait till FDONE bit is set to 1 */
6726 do {
6727 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6728 if (hsfsts & HSFSTS_DONE)
6729 break;
6730 delay(1);
6731 i++;
6732 } while (i < timeout);
6733 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
6734 error = 0;
6735
6736 return error;
6737 }
6738
6739 /******************************************************************************
6740 * Reads a byte or word from the NVM using the ICH8 flash access registers.
6741 *
6742 * sc - The pointer to the hw structure
6743 * index - The index of the byte or word to read.
6744 * size - Size of data to read, 1=byte 2=word
6745 * data - Pointer to the word to store the value read.
6746 *****************************************************************************/
6747 static int32_t
6748 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
6749 uint32_t size, uint16_t* data)
6750 {
6751 uint16_t hsfsts;
6752 uint16_t hsflctl;
6753 uint32_t flash_linear_address;
6754 uint32_t flash_data = 0;
6755 int32_t error = 1;
6756 int32_t count = 0;
6757
6758 	if (size < 1 || size > 2 || data == NULL ||
6759 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
6760 return error;
6761
6762 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
6763 sc->sc_ich8_flash_base;
6764
6765 do {
6766 delay(1);
6767 /* Steps */
6768 error = wm_ich8_cycle_init(sc);
6769 if (error)
6770 break;
6771
6772 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6773 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
6774 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
6775 & HSFCTL_BCOUNT_MASK;
6776 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
6777 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6778
6779 /*
6780 * Write the last 24 bits of index into Flash Linear address
6781 * field in Flash Address
6782 */
6783 /* TODO: TBD maybe check the index against the size of flash */
6784
6785 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
6786
6787 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
6788
6789 /*
6790 		 * If FCERR is set, clear it and retry the whole
6791 		 * sequence a few more times; otherwise read the data
6792 		 * out of the Flash Data0 register, least significant
6793 		 * byte first.
6794 */
6795 if (error == 0) {
6796 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
6797 if (size == 1)
6798 *data = (uint8_t)(flash_data & 0x000000FF);
6799 else if (size == 2)
6800 *data = (uint16_t)(flash_data & 0x0000FFFF);
6801 break;
6802 } else {
6803 /*
6804 * If we've gotten here, then things are probably
6805 * completely hosed, but if the error condition is
6806 * detected, it won't hurt to give it another try...
6807 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
6808 */
6809 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6810 if (hsfsts & HSFSTS_ERR) {
6811 /* Repeat for some time before giving up. */
6812 continue;
6813 } else if ((hsfsts & HSFSTS_DONE) == 0)
6814 break;
6815 }
6816 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
6817
6818 return error;
6819 }
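
/*
 * For reference, the read flow implemented above is, in order (a summary
 * of the code, not additional steps):
 *
 *	wm_ich8_cycle_init()	- clear FCERR/DAEL and wait out any cycle
 *				  already in progress
 *	HSFCTL			- program the byte count and READ cycle type
 *	FADDR			- program the 24-bit linear flash address
 *	wm_ich8_flash_cycle()	- set the GO bit, then poll FDONE
 *	FDATA0			- extract the 1 or 2 bytes that were read
 */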
6820
6821 /******************************************************************************
6822 * Reads a single byte from the NVM using the ICH8 flash access registers.
6823 *
6824 * sc - pointer to wm_hw structure
6825 * index - The index of the byte to read.
6826 * data - Pointer to a byte to store the value read.
6827 *****************************************************************************/
6828 static int32_t
6829 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
6830 {
6831 int32_t status;
6832 uint16_t word = 0;
6833
6834 status = wm_read_ich8_data(sc, index, 1, &word);
6835 if (status == 0)
6836 *data = (uint8_t)word;
6837
6838 return status;
6839 }
6840
6841 /******************************************************************************
6842 * Reads a word from the NVM using the ICH8 flash access registers.
6843 *
6844 * sc - pointer to wm_hw structure
6845 * index - The starting byte index of the word to read.
6846 * data - Pointer to a word to store the value read.
6847 *****************************************************************************/
6848 static int32_t
6849 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
6850 {
6851 int32_t status;
6852
6853 status = wm_read_ich8_data(sc, index, 2, data);
6854 return status;
6855 }
6856
6857 static int
6858 wm_check_mng_mode(struct wm_softc *sc)
6859 {
6860 int rv;
6861
6862 switch (sc->sc_type) {
6863 case WM_T_ICH8:
6864 case WM_T_ICH9:
6865 case WM_T_ICH10:
6866 case WM_T_PCH:
6867 rv = wm_check_mng_mode_ich8lan(sc);
6868 break;
6869 case WM_T_82574:
6870 case WM_T_82583:
6871 rv = wm_check_mng_mode_82574(sc);
6872 break;
6873 case WM_T_82571:
6874 case WM_T_82572:
6875 case WM_T_82573:
6876 case WM_T_80003:
6877 rv = wm_check_mng_mode_generic(sc);
6878 break;
6879 default:
6880 		/* nothing to do */
6881 rv = 0;
6882 break;
6883 }
6884
6885 return rv;
6886 }
6887
6888 static int
6889 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
6890 {
6891 uint32_t fwsm;
6892
6893 fwsm = CSR_READ(sc, WMREG_FWSM);
6894
6895 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
6896 return 1;
6897
6898 return 0;
6899 }
6900
6901 static int
6902 wm_check_mng_mode_82574(struct wm_softc *sc)
6903 {
6904 uint16_t data;
6905
6906 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
6907
6908 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
6909 return 1;
6910
6911 return 0;
6912 }
6913
6914 static int
6915 wm_check_mng_mode_generic(struct wm_softc *sc)
6916 {
6917 uint32_t fwsm;
6918
6919 fwsm = CSR_READ(sc, WMREG_FWSM);
6920
6921 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
6922 return 1;
6923
6924 return 0;
6925 }
6926
6927 static int
6928 wm_enable_mng_pass_thru(struct wm_softc *sc)
6929 {
6930 uint32_t manc, fwsm, factps;
6931
6932 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
6933 return 0;
6934
6935 manc = CSR_READ(sc, WMREG_MANC);
6936
6937 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
6938 device_xname(sc->sc_dev), manc));
6939 if (((manc & MANC_RECV_TCO_EN) == 0)
6940 || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
6941 return 0;
6942
6943 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
6944 fwsm = CSR_READ(sc, WMREG_FWSM);
6945 factps = CSR_READ(sc, WMREG_FACTPS);
6946 if (((factps & FACTPS_MNGCG) == 0)
6947 && ((fwsm & FWSM_MODE_MASK)
6948 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
6949 return 1;
6950 } else if (((manc & MANC_SMBUS_EN) != 0)
6951 && ((manc & MANC_ASF_EN) == 0))
6952 return 1;
6953
6954 return 0;
6955 }
6956
6957 static int
6958 wm_check_reset_block(struct wm_softc *sc)
6959 {
6960 uint32_t reg;
6961
6962 switch (sc->sc_type) {
6963 case WM_T_ICH8:
6964 case WM_T_ICH9:
6965 case WM_T_ICH10:
6966 case WM_T_PCH:
6967 reg = CSR_READ(sc, WMREG_FWSM);
6968 if ((reg & FWSM_RSPCIPHY) != 0)
6969 return 0;
6970 else
6971 return -1;
6972 break;
6973 case WM_T_82571:
6974 case WM_T_82572:
6975 case WM_T_82573:
6976 case WM_T_82574:
6977 case WM_T_82583:
6978 case WM_T_80003:
6979 reg = CSR_READ(sc, WMREG_MANC);
6980 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
6981 return -1;
6982 else
6983 return 0;
6984 break;
6985 default:
6986 /* no problem */
6987 break;
6988 }
6989
6990 return 0;
6991 }
6992
6993 static void
6994 wm_get_hw_control(struct wm_softc *sc)
6995 {
6996 uint32_t reg;
6997
6998 switch (sc->sc_type) {
6999 case WM_T_82573:
7000 reg = CSR_READ(sc, WMREG_SWSM);
7001 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
7002 break;
7003 case WM_T_82571:
7004 case WM_T_82572:
7005 case WM_T_82574:
7006 case WM_T_82583:
7007 case WM_T_80003:
7008 case WM_T_ICH8:
7009 case WM_T_ICH9:
7010 case WM_T_ICH10:
7011 case WM_T_PCH:
7012 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7013 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
7014 break;
7015 default:
7016 break;
7017 }
7018 }
7019
7020 static void
7021 wm_release_hw_control(struct wm_softc *sc)
7022 {
7023 uint32_t reg;
7024
7025 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
7026 return;
7027
7028 if (sc->sc_type == WM_T_82573) {
7029 		reg = CSR_READ(sc, WMREG_SWSM);
7030 		reg &= ~SWSM_DRV_LOAD;
7031 		CSR_WRITE(sc, WMREG_SWSM, reg);
7032 } else {
7033 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7034 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
7035 }
7036 }
7037
7038 /* XXX Currently TBI only */
7039 static int
7040 wm_check_for_link(struct wm_softc *sc)
7041 {
7042 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7043 uint32_t rxcw;
7044 uint32_t ctrl;
7045 uint32_t status;
7046 uint32_t sig;
7047
7048 rxcw = CSR_READ(sc, WMREG_RXCW);
7049 ctrl = CSR_READ(sc, WMREG_CTRL);
7050 status = CSR_READ(sc, WMREG_STATUS);
7051
7052 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7053
7054 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7055 device_xname(sc->sc_dev), __func__,
7056 ((ctrl & CTRL_SWDPIN(1)) == sig),
7057 ((status & STATUS_LU) != 0),
7058 ((rxcw & RXCW_C) != 0)
7059 ));
7060
7061 /*
7062 * SWDPIN LU RXCW
7063 * 0 0 0
7064 * 0 0 1 (should not happen)
7065 * 0 1 0 (should not happen)
7066 * 0 1 1 (should not happen)
7067 * 1 0 0 Disable autonego and force linkup
7068 * 1 0 1 got /C/ but not linkup yet
7069 * 1 1 0 (linkup)
7070 * 1 1 1 If IFM_AUTO, back to autonego
7071 *
7072 */
7073 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7074 && ((status & STATUS_LU) == 0)
7075 && ((rxcw & RXCW_C) == 0)) {
7076 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7077 __func__));
7078 sc->sc_tbi_linkup = 0;
7079 /* Disable auto-negotiation in the TXCW register */
7080 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7081
7082 /*
7083 * Force link-up and also force full-duplex.
7084 *
7085 		 * NOTE: the hardware updates CTRL's TFCE and RFCE bits
7086 		 * automatically, so we should refresh sc->sc_ctrl from it.
7087 */
7088 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7089 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7090 } else if (((status & STATUS_LU) != 0)
7091 && ((rxcw & RXCW_C) != 0)
7092 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7093 sc->sc_tbi_linkup = 1;
7094 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7095 __func__));
7096 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7097 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7098 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7099 && ((rxcw & RXCW_C) != 0)) {
7100 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7101 } else {
7102 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7103 status));
7104 }
7105
7106 return 0;
7107 }
7108
7109 /* Work-around for 82566 Kumeran PCS lock loss */
7110 static void
7111 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
7112 {
7113 int miistatus, active, i;
7114 int reg;
7115
7116 miistatus = sc->sc_mii.mii_media_status;
7117
7118 /* If the link is not up, do nothing */
7119 	if ((miistatus & IFM_ACTIVE) == 0)
7120 return;
7121
7122 active = sc->sc_mii.mii_media_active;
7123
7124 /* Nothing to do if the link is other than 1Gbps */
7125 if (IFM_SUBTYPE(active) != IFM_1000_T)
7126 return;
7127
7128 for (i = 0; i < 10; i++) {
7129 /* read twice */
7130 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7131 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7132 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
7133 goto out; /* GOOD! */
7134
7135 /* Reset the PHY */
7136 wm_gmii_reset(sc);
7137 delay(5*1000);
7138 }
7139
7140 /* Disable GigE link negotiation */
7141 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7142 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7143 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7144
7145 /*
7146 * Call gig speed drop workaround on Gig disable before accessing
7147 * any PHY registers.
7148 */
7149 wm_gig_downshift_workaround_ich8lan(sc);
7150
7151 out:
7152 return;
7153 }
7154
7155 /* WOL from S5 stops working */
7156 static void
7157 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
7158 {
7159 uint16_t kmrn_reg;
7160
7161 /* Only for igp3 */
7162 if (sc->sc_phytype == WMPHY_IGP_3) {
7163 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
7164 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
7165 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7166 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
7167 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7168 }
7169 }
7170
7171 #ifdef WM_WOL
7172 /* Power down workaround on D3 */
7173 static void
7174 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
7175 {
7176 uint32_t reg;
7177 int i;
7178
7179 for (i = 0; i < 2; i++) {
7180 /* Disable link */
7181 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7182 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7183 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7184
7185 /*
7186 * Call gig speed drop workaround on Gig disable before
7187 * accessing any PHY registers
7188 */
7189 if (sc->sc_type == WM_T_ICH8)
7190 wm_gig_downshift_workaround_ich8lan(sc);
7191
7192 /* Write VR power-down enable */
7193 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7194 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7195 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
7196 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
7197
7198 /* Read it back and test */
7199 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7200 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7201 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
7202 break;
7203
7204 /* Issue PHY reset and repeat at most one more time */
7205 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7206 }
7207 }
7208 #endif /* WM_WOL */
7209
7210 /*
7211 * Workaround for pch's PHYs
7212 * XXX should be moved to new PHY driver?
7213 */
7214 static void
7215 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
7216 {
7217
7218 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
7219
7220 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
7221
7222 /* 82578 */
7223 if (sc->sc_phytype == WMPHY_82578) {
7224 /* PCH rev. < 3 */
7225 if (sc->sc_rev < 3) {
7226 /* XXX 6 bit shift? Why? Is it page2? */
7227 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
7228 0x66c0);
7229 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
7230 0xffff);
7231 }
7232
7233 /* XXX phy rev. < 2 */
7234 }
7235
7236 /* Select page 0 */
7237
7238 /* XXX acquire semaphore */
7239 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
7240 /* XXX release semaphore */
7241
7242 /*
7243 * Configure the K1 Si workaround during phy reset assuming there is
7244 * link so that it disables K1 if link is in 1Gbps.
7245 */
7246 wm_k1_gig_workaround_hv(sc, 1);
7247 }
7248
7249 static void
7250 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
7251 {
7252 int k1_enable = sc->sc_nvm_k1_enabled;
7253
7254 /* XXX acquire semaphore */
7255
7256 if (link) {
7257 k1_enable = 0;
7258
7259 /* Link stall fix for link up */
7260 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
7261 } else {
7262 /* Link stall fix for link down */
7263 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
7264 }
7265
7266 wm_configure_k1_ich8lan(sc, k1_enable);
7267
7268 /* XXX release semaphore */
7269 }
7270
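/*
 * wm_configure_k1_ich8lan:
 *
 *	Enable or disable the K1 power-saving state on the Kumeran
 *	interface, then briefly force the MAC speed (with the
 *	CTRL_EXT_SPD_BYPS bypass) so that the change takes effect.
 */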
7271 static void
7272 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
7273 {
7274 uint32_t ctrl, ctrl_ext, tmp;
7275 uint16_t kmrn_reg;
7276
7277 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
7278
7279 if (k1_enable)
7280 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
7281 else
7282 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
7283
7284 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
7285
7286 delay(20);
7287
7288 ctrl = CSR_READ(sc, WMREG_CTRL);
7289 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7290
7291 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
7292 tmp |= CTRL_FRCSPD;
7293
7294 CSR_WRITE(sc, WMREG_CTRL, tmp);
7295 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
7296 delay(20);
7297
7298 CSR_WRITE(sc, WMREG_CTRL, ctrl);
7299 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7300 delay(20);
7301 }
7302
7303 static void
7304 wm_set_pcie_completion_timeout(struct wm_softc *sc)
7305 {
7306 uint32_t gcr;
7307 pcireg_t ctrl2;
7308
7309 gcr = CSR_READ(sc, WMREG_GCR);
7310
7311 /* Only take action if timeout value is defaulted to 0 */
7312 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
7313 goto out;
7314
7315 if ((gcr & GCR_CAP_VER2) == 0) {
7316 gcr |= GCR_CMPL_TMOUT_10MS;
7317 goto out;
7318 }
7319
7320 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
7321 sc->sc_pcixe_capoff + PCI_PCIE_DCSR2);
7322 ctrl2 |= WM_PCI_PCIE_DCSR2_16MS;
7323 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
7324 sc->sc_pcixe_capoff + PCI_PCIE_DCSR2, ctrl2);
7325
7326 out:
7327 /* Disable completion timeout resend */
7328 gcr &= ~GCR_CMPL_TMOUT_RESEND;
7329
7330 CSR_WRITE(sc, WMREG_GCR, gcr);
7331 }
7332
7333 /* special case - for 82575 - need to do manual init ... */
7334 static void
7335 wm_reset_init_script_82575(struct wm_softc *sc)
7336 {
7337 /*
7338 	 * Remark: this is untested code - we have no board without an EEPROM;
7339 	 * the setup is the same as in the FreeBSD driver for the i82575.
7340 */
7341
7342 /* SerDes configuration via SERDESCTRL */
7343 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
7344 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
7345 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
7346 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
7347
7348 /* CCM configuration via CCMCTL register */
7349 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
7350 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
7351
7352 /* PCIe lanes configuration */
7353 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
7354 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
7355 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
7356 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
7357
7358 /* PCIe PLL Configuration */
7359 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
7360 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
7361 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
7362 }
7363
7364 static void
7365 wm_init_manageability(struct wm_softc *sc)
7366 {
7367
7368 if (sc->sc_flags & WM_F_HAS_MANAGE) {
7369 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
7370 uint32_t manc = CSR_READ(sc, WMREG_MANC);
7371
7372 		/* Disable hardware interception of ARP */
7373 manc &= ~MANC_ARP_EN;
7374
7375 /* enable receiving management packets to the host */
7376 if (sc->sc_type >= WM_T_82571) {
7377 manc |= MANC_EN_MNG2HOST;
7378 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
7379 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
7380
7381 }
7382
7383 CSR_WRITE(sc, WMREG_MANC, manc);
7384 }
7385 }
7386
7387 static void
7388 wm_release_manageability(struct wm_softc *sc)
7389 {
7390
7391 if (sc->sc_flags & WM_F_HAS_MANAGE) {
7392 uint32_t manc = CSR_READ(sc, WMREG_MANC);
7393
7394 if (sc->sc_type >= WM_T_82571)
7395 manc &= ~MANC_EN_MNG2HOST;
7396
7397 CSR_WRITE(sc, WMREG_MANC, manc);
7398 }
7399 }
7400
7401 static void
7402 wm_get_wakeup(struct wm_softc *sc)
7403 {
7404
7405 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
7406 switch (sc->sc_type) {
7407 case WM_T_82573:
7408 case WM_T_82583:
7409 sc->sc_flags |= WM_F_HAS_AMT;
7410 /* FALLTHROUGH */
7411 case WM_T_80003:
7412 case WM_T_82541:
7413 case WM_T_82547:
7414 case WM_T_82571:
7415 case WM_T_82572:
7416 case WM_T_82574:
7417 case WM_T_82575:
7418 case WM_T_82576:
7419 #if 0 /* XXX */
7420 case WM_T_82580:
7421 case WM_T_82580ER:
7422 #endif
7423 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
7424 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
7425 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
7426 break;
7427 case WM_T_ICH8:
7428 case WM_T_ICH9:
7429 case WM_T_ICH10:
7430 case WM_T_PCH:
7431 sc->sc_flags |= WM_F_HAS_AMT;
7432 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
7433 break;
7434 default:
7435 break;
7436 }
7437
7438 /* 1: HAS_MANAGE */
7439 if (wm_enable_mng_pass_thru(sc) != 0)
7440 sc->sc_flags |= WM_F_HAS_MANAGE;
7441
7442 #ifdef WM_DEBUG
7443 printf("\n");
7444 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
7445 printf("HAS_AMT,");
7446 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
7447 printf("ARC_SUBSYS_VALID,");
7448 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
7449 printf("ASF_FIRMWARE_PRES,");
7450 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
7451 printf("HAS_MANAGE,");
7452 printf("\n");
7453 #endif
7454 /*
7455 	 * Note that the WOL flag is set only later, after the EEPROM
7456 	 * handling has been reset and read.
7457 */
7458 }
7459
7460 #ifdef WM_WOL
7461 /* WOL in the newer chipset interfaces (pchlan) */
7462 static void
7463 wm_enable_phy_wakeup(struct wm_softc *sc)
7464 {
7465 #if 0
7466 uint16_t preg;
7467
7468 /* Copy MAC RARs to PHY RARs */
7469
7470 /* Copy MAC MTA to PHY MTA */
7471
7472 /* Configure PHY Rx Control register */
7473
7474 /* Enable PHY wakeup in MAC register */
7475
7476 /* Configure and enable PHY wakeup in PHY registers */
7477
7478 /* Activate PHY wakeup */
7479
7480 /* XXX */
7481 #endif
7482 }
7483
7484 static void
7485 wm_enable_wakeup(struct wm_softc *sc)
7486 {
7487 uint32_t reg, pmreg;
7488 pcireg_t pmode;
7489
7490 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
7491 &pmreg, NULL) == 0)
7492 return;
7493
7494 /* Advertise the wakeup capability */
7495 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
7496 | CTRL_SWDPIN(3));
7497 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
7498
7499 /* ICH workaround */
7500 switch (sc->sc_type) {
7501 case WM_T_ICH8:
7502 case WM_T_ICH9:
7503 case WM_T_ICH10:
7504 case WM_T_PCH:
7505 /* Disable gig during WOL */
7506 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7507 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
7508 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7509 if (sc->sc_type == WM_T_PCH)
7510 wm_gmii_reset(sc);
7511
7512 /* Power down workaround */
7513 if (sc->sc_phytype == WMPHY_82577) {
7514 struct mii_softc *child;
7515
7516 /* Assume that the PHY is copper */
7517 child = LIST_FIRST(&sc->sc_mii.mii_phys);
7518 if (child->mii_mpd_rev <= 2)
7519 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
7520 (768 << 5) | 25, 0x0444); /* magic num */
7521 }
7522 break;
7523 default:
7524 break;
7525 }
7526
7527 /* Keep the laser running on fiber adapters */
7528 if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
7529 || (sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0) {
7530 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7531 reg |= CTRL_EXT_SWDPIN(3);
7532 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7533 }
7534
7535 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
7536 #if 0 /* for the multicast packet */
7537 reg |= WUFC_MC;
7538 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
7539 #endif
7540
7541 if (sc->sc_type == WM_T_PCH) {
7542 wm_enable_phy_wakeup(sc);
7543 } else {
7544 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
7545 CSR_WRITE(sc, WMREG_WUFC, reg);
7546 }
7547
7548 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
7549 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
7550 && (sc->sc_phytype == WMPHY_IGP_3))
7551 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
7552
7553 /* Request PME */
7554 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
7555 #if 0
7556 /* Disable WOL */
7557 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
7558 #else
7559 /* For WOL */
7560 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
7561 #endif
7562 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
7563 }
7564 #endif /* WM_WOL */
7565
7566 static bool
7567 wm_suspend(device_t self, const pmf_qual_t *qual)
7568 {
7569 struct wm_softc *sc = device_private(self);
7570
7571 wm_release_manageability(sc);
7572 wm_release_hw_control(sc);
7573 #ifdef WM_WOL
7574 wm_enable_wakeup(sc);
7575 #endif
7576
7577 return true;
7578 }
7579
7580 static bool
7581 wm_resume(device_t self, const pmf_qual_t *qual)
7582 {
7583 struct wm_softc *sc = device_private(self);
7584
7585 wm_init_manageability(sc);
7586
7587 return true;
7588 }
7589