/*	$NetBSD: if_wm.c,v 1.229 2012/07/22 14:33:04 matt Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
37
/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.229 2012/07/22 14:33:04 matt Exp $");
80
81 #include <sys/param.h>
82 #include <sys/systm.h>
83 #include <sys/callout.h>
84 #include <sys/mbuf.h>
85 #include <sys/malloc.h>
86 #include <sys/kernel.h>
87 #include <sys/socket.h>
88 #include <sys/ioctl.h>
89 #include <sys/errno.h>
90 #include <sys/device.h>
91 #include <sys/queue.h>
92 #include <sys/syslog.h>
93
94 #include <sys/rnd.h>
95
96 #include <net/if.h>
97 #include <net/if_dl.h>
98 #include <net/if_media.h>
99 #include <net/if_ether.h>
100
101 #include <net/bpf.h>
102
103 #include <netinet/in.h> /* XXX for struct ip */
104 #include <netinet/in_systm.h> /* XXX for struct ip */
105 #include <netinet/ip.h> /* XXX for struct ip */
106 #include <netinet/ip6.h> /* XXX for struct ip6_hdr */
107 #include <netinet/tcp.h> /* XXX for struct tcphdr */
108
109 #include <sys/bus.h>
110 #include <sys/intr.h>
111 #include <machine/endian.h>
112
113 #include <dev/mii/mii.h>
114 #include <dev/mii/miivar.h>
115 #include <dev/mii/miidevs.h>
116 #include <dev/mii/mii_bitbang.h>
117 #include <dev/mii/ikphyreg.h>
118 #include <dev/mii/igphyreg.h>
119 #include <dev/mii/igphyvar.h>
120 #include <dev/mii/inbmphyreg.h>
121
122 #include <dev/pci/pcireg.h>
123 #include <dev/pci/pcivar.h>
124 #include <dev/pci/pcidevs.h>
125
126 #include <dev/pci/if_wmreg.h>
127 #include <dev/pci/if_wmvar.h>
128
#ifdef WM_DEBUG
/* Debug-category bits tested against the wm_debug mask below. */
#define	WM_DEBUG_LINK		0x01	/* link state changes */
#define	WM_DEBUG_TX		0x02	/* transmit path */
#define	WM_DEBUG_RX		0x04	/* receive path */
#define	WM_DEBUG_GMII		0x08	/* GMII/PHY register access */
#define	WM_DEBUG_MANAGE		0x10	/* management/firmware interaction */
/* Default: every category enabled; patchable at run time or in a debugger. */
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE;

/*
 * DPRINTF(category, (fmt, args...)): print when the category bit is set.
 * The second argument must be a fully parenthesized printf argument list.
 */
#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
142
/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 *
 * Note: the *_MASK macros assume the corresponding counts are powers
 * of two; see the sc_txnum/sc_ntxdesc comments in struct wm_softc.
 */
#define	WM_NTXSEGS		256	/* max DMA segments per packet */
#define	WM_IFQUEUELEN		256	/* if_snd queue length */
#define	WM_TXQUEUELEN_MAX	64	/* Tx jobs managed at once */
#define	WM_TXQUEUELEN_MAX_82547	16	/* ... but fewer on the i82547 */
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

/* Upper bound on the DMA size of one packet (needed for TSO). */
#define	WM_MAXTXDMA		round_page(IP_MAXPACKET)	/* for TSO */
170
/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 *
 * WM_NRXDESC must stay a power of two for the mask arithmetic below.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
181
182 /*
183 * Control structures are DMA'd to the i82542 chip. We allocate them in
184 * a single clump that maps to a single DMA segment to make several things
185 * easier.
186 */
/*
 * DMA-visible control data for chips with the large (4096-entry) Tx
 * ring.  Laid out so the whole structure maps as one DMA segment; the
 * WM_CDTXOFF/WM_CDRXOFF macros compute offsets into this layout.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};
199
/*
 * Same layout for pre-82544 chips, which are limited by errata to a
 * 256-entry Tx ring.  Field names match wm_control_data_82544 so the
 * WM_CDOFF offset macros work for either variant.
 */
struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

/* Byte offsets of descriptor ring entries within the control data. */
#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
208
209 /*
210 * Software state for transmit jobs.
211 */
/*
 * Software state for transmit jobs.  One job tracks a single outgoing
 * packet (mbuf chain) and the range of hardware descriptors it occupies.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};
219
/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
229
230 #define WM_LINKUP_TIMEOUT 50
231
232 static uint16_t swfwphysem[] = {
233 SWFW_PHY0_SM,
234 SWFW_PHY1_SM,
235 SWFW_PHY2_SM,
236 SWFW_PHY3_SM
237 };
238
239 /*
240 * Software state per device.
241 */
/*
 * Software state per device.  One instance per attached i8254x chip;
 * most fields are set up once in wm_attach() / wm_init() and then read
 * by the interrupt and start routines.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;	/* PCI chipset tag */
	pcitag_t sc_pcitag;		/* PCI device tag */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;		/* Rx payload alignment offset;
					   see WM_INIT_RXDESC */

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;		/* ICH8 flash: base of NVM area */
	int sc_ich8_flash_bank_size;	/* ICH8 flash: bank size */
	int sc_nvm_k1_enabled;		/* NVM says K1 power state enabled */

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
	/* Shorthands for the DMA address and the descriptor rings. */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;		/* discarding remainder of a packet */
	int sc_rxlen;			/* bytes accumulated in sc_rxhead */
	struct mbuf *sc_rxhead;		/* in-progress Rx packet: head */
	struct mbuf *sc_rxtail;		/* in-progress Rx packet: last mbuf */
	struct mbuf **sc_rxtailp;	/* -> m_next slot of last mbuf */

	/* Software copies ("prototypes") of chip registers. */
	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */
};
379
/*
 * Reset the in-progress Rx mbuf chain (sc_rxhead/sc_rxtail/sc_rxlen)
 * to empty.  sc_rxtailp always points at the m_next slot where the
 * next mbuf should be linked.
 */
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

/* Append mbuf m to the in-progress Rx chain. */
#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
392
/* Event-counter helpers; compile to nothing without WM_EVENT_COUNTERS. */
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

/* 32-bit access to the memory-mapped chip registers. */
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
/* Force posted writes out by reading an unrelated register (STATUS). */
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

/* Access to the ICH8 flash register window (separate mapping). */
#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
417
/* DMA bus address of Tx/Rx descriptor x within the control data. */
#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

/*
 * Split a descriptor address into the 32-bit low/high halves the chip
 * wants.  On 32-bit bus_addr_t platforms the high half is always 0.
 */
#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
430
/*
 * bus_dmamap_sync n Tx descriptors starting at index x, handling the
 * case where the range wraps past the end of the ring (done as two
 * syncs: tail of the ring, then the remainder from index 0).
 */
#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

/* Sync a single Rx descriptor (Rx never needs the multi-entry form). */
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
457
/*
 * (Re)initialize Rx descriptor x from its wm_rxsoft entry: point the
 * descriptor at the mbuf's buffer, clear the status fields, sync the
 * descriptor for the chip, and advance the hardware RDT tail register
 * to hand the descriptor back to the chip.
 */
#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
491
/* Forward declarations, grouped by purpose. */

/* ifnet/ethercom entry points and power management hooks. */
static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

/* Reset, Rx buffer management, EEPROM access, periodic tick. */
static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

/* Receive filter and VLAN configuration. */
static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

/* Interrupt handler and its Tx/Rx/link sub-handlers. */
static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

/* TBI (ten-bit interface, i.e. fiber/SERDES) media handling. */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

/* GMII (copper PHY) access; one read/write pair per MAC generation. */
static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(struct ifnet *);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

/* Kumeran (MAC-to-PHY interconnect on 80003/ICH) register access. */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

/* Autoconfiguration, NVM helpers, and hardware semaphores. */
static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

/* ICH8 flash access, chip errata workarounds, and management/wakeup. */
static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		     uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);
static void	wm_set_eee_i350(struct wm_softc *);
607
/*
 * Autoconfiguration glue: register match/attach/detach entry points
 * for the "wm" driver.  DVF_DETACH_SHUTDOWN marks the driver as
 * detachable at shutdown time (see autoconf(9)).
 */
CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
610
611 /*
612 * Devices supported by this driver.
613 */
614 static const struct wm_product {
615 pci_vendor_id_t wmp_vendor;
616 pci_product_id_t wmp_product;
617 const char *wmp_name;
618 wm_chip_type wmp_type;
619 int wmp_flags;
620 #define WMP_F_1000X 0x01
621 #define WMP_F_1000T 0x02
622 #define WMP_F_SERDES 0x04
623 } wm_products[] = {
624 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
625 "Intel i82542 1000BASE-X Ethernet",
626 WM_T_82542_2_1, WMP_F_1000X },
627
628 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
629 "Intel i82543GC 1000BASE-X Ethernet",
630 WM_T_82543, WMP_F_1000X },
631
632 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
633 "Intel i82543GC 1000BASE-T Ethernet",
634 WM_T_82543, WMP_F_1000T },
635
636 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
637 "Intel i82544EI 1000BASE-T Ethernet",
638 WM_T_82544, WMP_F_1000T },
639
640 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
641 "Intel i82544EI 1000BASE-X Ethernet",
642 WM_T_82544, WMP_F_1000X },
643
644 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
645 "Intel i82544GC 1000BASE-T Ethernet",
646 WM_T_82544, WMP_F_1000T },
647
648 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
649 "Intel i82544GC (LOM) 1000BASE-T Ethernet",
650 WM_T_82544, WMP_F_1000T },
651
652 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
653 "Intel i82540EM 1000BASE-T Ethernet",
654 WM_T_82540, WMP_F_1000T },
655
656 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
657 "Intel i82540EM (LOM) 1000BASE-T Ethernet",
658 WM_T_82540, WMP_F_1000T },
659
660 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
661 "Intel i82540EP 1000BASE-T Ethernet",
662 WM_T_82540, WMP_F_1000T },
663
664 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
665 "Intel i82540EP 1000BASE-T Ethernet",
666 WM_T_82540, WMP_F_1000T },
667
668 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
669 "Intel i82540EP 1000BASE-T Ethernet",
670 WM_T_82540, WMP_F_1000T },
671
672 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
673 "Intel i82545EM 1000BASE-T Ethernet",
674 WM_T_82545, WMP_F_1000T },
675
676 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
677 "Intel i82545GM 1000BASE-T Ethernet",
678 WM_T_82545_3, WMP_F_1000T },
679
680 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
681 "Intel i82545GM 1000BASE-X Ethernet",
682 WM_T_82545_3, WMP_F_1000X },
683 #if 0
684 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
685 "Intel i82545GM Gigabit Ethernet (SERDES)",
686 WM_T_82545_3, WMP_F_SERDES },
687 #endif
688 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
689 "Intel i82546EB 1000BASE-T Ethernet",
690 WM_T_82546, WMP_F_1000T },
691
692 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
693 "Intel i82546EB 1000BASE-T Ethernet",
694 WM_T_82546, WMP_F_1000T },
695
696 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
697 "Intel i82545EM 1000BASE-X Ethernet",
698 WM_T_82545, WMP_F_1000X },
699
700 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
701 "Intel i82546EB 1000BASE-X Ethernet",
702 WM_T_82546, WMP_F_1000X },
703
704 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
705 "Intel i82546GB 1000BASE-T Ethernet",
706 WM_T_82546_3, WMP_F_1000T },
707
708 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
709 "Intel i82546GB 1000BASE-X Ethernet",
710 WM_T_82546_3, WMP_F_1000X },
711 #if 0
712 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
713 "Intel i82546GB Gigabit Ethernet (SERDES)",
714 WM_T_82546_3, WMP_F_SERDES },
715 #endif
716 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
717 "i82546GB quad-port Gigabit Ethernet",
718 WM_T_82546_3, WMP_F_1000T },
719
720 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
721 "i82546GB quad-port Gigabit Ethernet (KSP3)",
722 WM_T_82546_3, WMP_F_1000T },
723
724 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
725 "Intel PRO/1000MT (82546GB)",
726 WM_T_82546_3, WMP_F_1000T },
727
728 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
729 "Intel i82541EI 1000BASE-T Ethernet",
730 WM_T_82541, WMP_F_1000T },
731
732 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
733 "Intel i82541ER (LOM) 1000BASE-T Ethernet",
734 WM_T_82541, WMP_F_1000T },
735
736 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
737 "Intel i82541EI Mobile 1000BASE-T Ethernet",
738 WM_T_82541, WMP_F_1000T },
739
740 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
741 "Intel i82541ER 1000BASE-T Ethernet",
742 WM_T_82541_2, WMP_F_1000T },
743
744 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
745 "Intel i82541GI 1000BASE-T Ethernet",
746 WM_T_82541_2, WMP_F_1000T },
747
748 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
749 "Intel i82541GI Mobile 1000BASE-T Ethernet",
750 WM_T_82541_2, WMP_F_1000T },
751
752 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
753 "Intel i82541PI 1000BASE-T Ethernet",
754 WM_T_82541_2, WMP_F_1000T },
755
756 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
757 "Intel i82547EI 1000BASE-T Ethernet",
758 WM_T_82547, WMP_F_1000T },
759
760 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
761 "Intel i82547EI Mobile 1000BASE-T Ethernet",
762 WM_T_82547, WMP_F_1000T },
763
764 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
765 "Intel i82547GI 1000BASE-T Ethernet",
766 WM_T_82547_2, WMP_F_1000T },
767
768 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
769 "Intel PRO/1000 PT (82571EB)",
770 WM_T_82571, WMP_F_1000T },
771
772 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
773 "Intel PRO/1000 PF (82571EB)",
774 WM_T_82571, WMP_F_1000X },
775 #if 0
776 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
777 "Intel PRO/1000 PB (82571EB)",
778 WM_T_82571, WMP_F_SERDES },
779 #endif
780 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
781 "Intel PRO/1000 QT (82571EB)",
782 WM_T_82571, WMP_F_1000T },
783
784 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
785 "Intel i82572EI 1000baseT Ethernet",
786 WM_T_82572, WMP_F_1000T },
787
788 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
789 "Intel PRO/1000 PT Quad Port Server Adapter",
790 WM_T_82571, WMP_F_1000T, },
791
792 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
793 "Intel i82572EI 1000baseX Ethernet",
794 WM_T_82572, WMP_F_1000X },
795 #if 0
796 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
797 "Intel i82572EI Gigabit Ethernet (SERDES)",
798 WM_T_82572, WMP_F_SERDES },
799 #endif
800
801 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
802 "Intel i82572EI 1000baseT Ethernet",
803 WM_T_82572, WMP_F_1000T },
804
805 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
806 "Intel i82573E",
807 WM_T_82573, WMP_F_1000T },
808
809 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
810 "Intel i82573E IAMT",
811 WM_T_82573, WMP_F_1000T },
812
813 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
814 "Intel i82573L Gigabit Ethernet",
815 WM_T_82573, WMP_F_1000T },
816
817 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
818 "Intel i82574L",
819 WM_T_82574, WMP_F_1000T },
820
821 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
822 "Intel i82583V",
823 WM_T_82583, WMP_F_1000T },
824
825 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
826 "i80003 dual 1000baseT Ethernet",
827 WM_T_80003, WMP_F_1000T },
828
829 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
830 "i80003 dual 1000baseX Ethernet",
831 WM_T_80003, WMP_F_1000T },
832 #if 0
833 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
834 "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
835 WM_T_80003, WMP_F_SERDES },
836 #endif
837
838 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
839 "Intel i80003 1000baseT Ethernet",
840 WM_T_80003, WMP_F_1000T },
841 #if 0
842 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
843 "Intel i80003 Gigabit Ethernet (SERDES)",
844 WM_T_80003, WMP_F_SERDES },
845 #endif
846 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
847 "Intel i82801H (M_AMT) LAN Controller",
848 WM_T_ICH8, WMP_F_1000T },
849 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
850 "Intel i82801H (AMT) LAN Controller",
851 WM_T_ICH8, WMP_F_1000T },
852 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
853 "Intel i82801H LAN Controller",
854 WM_T_ICH8, WMP_F_1000T },
855 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
856 "Intel i82801H (IFE) LAN Controller",
857 WM_T_ICH8, WMP_F_1000T },
858 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
859 "Intel i82801H (M) LAN Controller",
860 WM_T_ICH8, WMP_F_1000T },
861 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
862 "Intel i82801H IFE (GT) LAN Controller",
863 WM_T_ICH8, WMP_F_1000T },
864 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
865 "Intel i82801H IFE (G) LAN Controller",
866 WM_T_ICH8, WMP_F_1000T },
867 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
868 "82801I (AMT) LAN Controller",
869 WM_T_ICH9, WMP_F_1000T },
870 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
871 "82801I LAN Controller",
872 WM_T_ICH9, WMP_F_1000T },
873 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
874 "82801I (G) LAN Controller",
875 WM_T_ICH9, WMP_F_1000T },
876 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
877 "82801I (GT) LAN Controller",
878 WM_T_ICH9, WMP_F_1000T },
879 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
880 "82801I (C) LAN Controller",
881 WM_T_ICH9, WMP_F_1000T },
882 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
883 "82801I mobile LAN Controller",
884 WM_T_ICH9, WMP_F_1000T },
885 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V,
886 "82801I mobile (V) LAN Controller",
887 WM_T_ICH9, WMP_F_1000T },
888 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
889 "82801I mobile (AMT) LAN Controller",
890 WM_T_ICH9, WMP_F_1000T },
891 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
892 "82567LM-4 LAN Controller",
893 WM_T_ICH9, WMP_F_1000T },
894 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3,
895 "82567V-3 LAN Controller",
896 WM_T_ICH9, WMP_F_1000T },
897 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
898 "82567LM-2 LAN Controller",
899 WM_T_ICH10, WMP_F_1000T },
900 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
901 "82567LF-2 LAN Controller",
902 WM_T_ICH10, WMP_F_1000T },
903 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
904 "82567LM-3 LAN Controller",
905 WM_T_ICH10, WMP_F_1000T },
906 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
907 "82567LF-3 LAN Controller",
908 WM_T_ICH10, WMP_F_1000T },
909 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
910 "82567V-2 LAN Controller",
911 WM_T_ICH10, WMP_F_1000T },
912 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V,
913 "82567V-3? LAN Controller",
914 WM_T_ICH10, WMP_F_1000T },
915 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE,
916 "HANKSVILLE LAN Controller",
917 WM_T_ICH10, WMP_F_1000T },
918 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
919 "PCH LAN (82577LM) Controller",
920 WM_T_PCH, WMP_F_1000T },
921 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
922 "PCH LAN (82577LC) Controller",
923 WM_T_PCH, WMP_F_1000T },
924 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
925 "PCH LAN (82578DM) Controller",
926 WM_T_PCH, WMP_F_1000T },
927 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
928 "PCH LAN (82578DC) Controller",
929 WM_T_PCH2, WMP_F_1000T },
930 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
931 "PCH2 LAN (82579LM) Controller",
932 WM_T_PCH2, WMP_F_1000T },
933 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
934 "PCH2 LAN (82579V) Controller",
935 WM_T_PCH, WMP_F_1000T },
936 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
937 "82575EB dual-1000baseT Ethernet",
938 WM_T_82575, WMP_F_1000T },
939 #if 0
940 /*
941 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
942 * disabled for now ...
943 */
944 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
945 "82575EB dual-1000baseX Ethernet (SERDES)",
946 WM_T_82575, WMP_F_SERDES },
947 #endif
948 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
949 "82575GB quad-1000baseT Ethernet",
950 WM_T_82575, WMP_F_1000T },
951 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
952 "82575GB quad-1000baseT Ethernet (PM)",
953 WM_T_82575, WMP_F_1000T },
954 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
955 "82576 1000BaseT Ethernet",
956 WM_T_82576, WMP_F_1000T },
957 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
958 "82576 1000BaseX Ethernet",
959 WM_T_82576, WMP_F_1000X },
960 #if 0
961 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
962 "82576 gigabit Ethernet (SERDES)",
963 WM_T_82576, WMP_F_SERDES },
964 #endif
965 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
966 "82576 quad-1000BaseT Ethernet",
967 WM_T_82576, WMP_F_1000T },
968 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
969 "82576 gigabit Ethernet",
970 WM_T_82576, WMP_F_1000T },
971 #if 0
972 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
973 "82576 gigabit Ethernet (SERDES)",
974 WM_T_82576, WMP_F_SERDES },
975 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
976 "82576 quad-gigabit Ethernet (SERDES)",
977 WM_T_82576, WMP_F_SERDES },
978 #endif
979 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
980 "82580 1000BaseT Ethernet",
981 WM_T_82580, WMP_F_1000T },
982 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
983 "82580 1000BaseX Ethernet",
984 WM_T_82580, WMP_F_1000X },
985 #if 0
986 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
987 "82580 1000BaseT Ethernet (SERDES)",
988 WM_T_82580, WMP_F_SERDES },
989 #endif
990 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
991 "82580 gigabit Ethernet (SGMII)",
992 WM_T_82580, WMP_F_1000T },
993 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
994 "82580 dual-1000BaseT Ethernet",
995 WM_T_82580, WMP_F_1000T },
996 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER,
997 "82580 1000BaseT Ethernet",
998 WM_T_82580ER, WMP_F_1000T },
999 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER_DUAL,
1000 "82580 dual-1000BaseT Ethernet",
1001 WM_T_82580ER, WMP_F_1000T },
1002 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1003 "82580 quad-1000BaseX Ethernet",
1004 WM_T_82580, WMP_F_1000X },
1005 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
1006 "I350 Gigabit Network Connection",
1007 WM_T_I350, WMP_F_1000T },
1008 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
1009 "I350 Gigabit Fiber Network Connection",
1010 WM_T_I350, WMP_F_1000X },
1011 #if 0
1012 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
1013 "I350 Gigabit Backplane Connection",
1014 WM_T_I350, WMP_F_SERDES },
1015 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
1016 "I350 Gigabit Connection",
1017 WM_T_I350, WMP_F_1000T },
1018 #endif
1019 { 0, 0,
1020 NULL,
1021 0, 0 },
1022 };
1023
#ifdef WM_EVENT_COUNTERS
/*
 * Name buffers for the per-Tx-segment event counters; each slot is
 * sized to hold "txseg" plus up to three digits (see sizeof("txsegXXX")).
 */
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */
1027
#if 0 /* Not currently used */
/*
 * wm_io_read:
 *
 *	Read a device register indirectly through the I/O-mapped BAR:
 *	the register offset is written to the window at I/O offset 0,
 *	then the register value is read back through I/O offset 4.
 */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif
1037
1038 static inline void
1039 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1040 {
1041
1042 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1043 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1044 }
1045
1046 static inline void
1047 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1048 uint32_t data)
1049 {
1050 uint32_t regval;
1051 int i;
1052
1053 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1054
1055 CSR_WRITE(sc, reg, regval);
1056
1057 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1058 delay(5);
1059 if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1060 break;
1061 }
1062 if (i == SCTL_CTL_POLL_TIMEOUT) {
1063 aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
1064 device_xname(sc->sc_dev), reg);
1065 }
1066 }
1067
1068 static inline void
1069 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1070 {
1071 wa->wa_low = htole32(v & 0xffffffffU);
1072 if (sizeof(bus_addr_t) == 8)
1073 wa->wa_high = htole32((uint64_t) v >> 32);
1074 else
1075 wa->wa_high = 0;
1076 }
1077
1078 static void
1079 wm_set_spiaddrbits(struct wm_softc *sc)
1080 {
1081 uint32_t reg;
1082
1083 sc->sc_flags |= WM_F_EEPROM_SPI;
1084 reg = CSR_READ(sc, WMREG_EECD);
1085 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1086 }
1087
1088 static const struct wm_product *
1089 wm_lookup(const struct pci_attach_args *pa)
1090 {
1091 const struct wm_product *wmp;
1092
1093 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1094 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1095 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1096 return wmp;
1097 }
1098 return NULL;
1099 }
1100
1101 static int
1102 wm_match(device_t parent, cfdata_t cf, void *aux)
1103 {
1104 struct pci_attach_args *pa = aux;
1105
1106 if (wm_lookup(pa) != NULL)
1107 return 1;
1108
1109 return 0;
1110 }
1111
1112 static void
1113 wm_attach(device_t parent, device_t self, void *aux)
1114 {
1115 struct wm_softc *sc = device_private(self);
1116 struct pci_attach_args *pa = aux;
1117 prop_dictionary_t dict;
1118 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1119 pci_chipset_tag_t pc = pa->pa_pc;
1120 pci_intr_handle_t ih;
1121 const char *intrstr = NULL;
1122 const char *eetype, *xname;
1123 bus_space_tag_t memt;
1124 bus_space_handle_t memh;
1125 bus_size_t memsize;
1126 int memh_valid;
1127 int i, error;
1128 const struct wm_product *wmp;
1129 prop_data_t ea;
1130 prop_number_t pn;
1131 uint8_t enaddr[ETHER_ADDR_LEN];
1132 uint16_t cfg1, cfg2, swdpin, io3;
1133 pcireg_t preg, memtype;
1134 uint16_t eeprom_data, apme_mask;
1135 uint32_t reg;
1136
1137 sc->sc_dev = self;
1138 callout_init(&sc->sc_tick_ch, 0);
1139
1140 sc->sc_wmp = wmp = wm_lookup(pa);
1141 if (wmp == NULL) {
1142 printf("\n");
1143 panic("wm_attach: impossible");
1144 }
1145
1146 sc->sc_pc = pa->pa_pc;
1147 sc->sc_pcitag = pa->pa_tag;
1148
1149 if (pci_dma64_available(pa))
1150 sc->sc_dmat = pa->pa_dmat64;
1151 else
1152 sc->sc_dmat = pa->pa_dmat;
1153
1154 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
1155 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1156
1157 sc->sc_type = wmp->wmp_type;
1158 if (sc->sc_type < WM_T_82543) {
1159 if (sc->sc_rev < 2) {
1160 aprint_error_dev(sc->sc_dev,
1161 "i82542 must be at least rev. 2\n");
1162 return;
1163 }
1164 if (sc->sc_rev < 3)
1165 sc->sc_type = WM_T_82542_2_0;
1166 }
1167
1168 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1169 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
1170 || (sc->sc_type == WM_T_I350))
1171 sc->sc_flags |= WM_F_NEWQUEUE;
1172
1173 /* Set device properties (mactype) */
1174 dict = device_properties(sc->sc_dev);
1175 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1176
1177 /*
1178 * Map the device. All devices support memory-mapped acccess,
1179 * and it is really required for normal operation.
1180 */
1181 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1182 switch (memtype) {
1183 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1184 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1185 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1186 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1187 break;
1188 default:
1189 memh_valid = 0;
1190 break;
1191 }
1192
1193 if (memh_valid) {
1194 sc->sc_st = memt;
1195 sc->sc_sh = memh;
1196 sc->sc_ss = memsize;
1197 } else {
1198 aprint_error_dev(sc->sc_dev,
1199 "unable to map device registers\n");
1200 return;
1201 }
1202
1203 wm_get_wakeup(sc);
1204
1205 /*
1206 * In addition, i82544 and later support I/O mapped indirect
1207 * register access. It is not desirable (nor supported in
1208 * this driver) to use it for normal operation, though it is
1209 * required to work around bugs in some chip versions.
1210 */
1211 if (sc->sc_type >= WM_T_82544) {
1212 /* First we have to find the I/O BAR. */
1213 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1214 if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
1215 PCI_MAPREG_TYPE_IO)
1216 break;
1217 }
1218 if (i != PCI_MAPREG_END) {
1219 /*
1220 * We found PCI_MAPREG_TYPE_IO. Note that 82580
1221 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
1222 * It's no problem because newer chips has no this
1223 * bug.
1224 *
1225 * The i8254x doesn't apparently respond when the
1226 * I/O BAR is 0, which looks somewhat like it's not
1227 * been configured.
1228 */
1229 preg = pci_conf_read(pc, pa->pa_tag, i);
1230 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1231 aprint_error_dev(sc->sc_dev,
1232 "WARNING: I/O BAR at zero.\n");
1233 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1234 0, &sc->sc_iot, &sc->sc_ioh,
1235 NULL, &sc->sc_ios) == 0) {
1236 sc->sc_flags |= WM_F_IOH_VALID;
1237 } else {
1238 aprint_error_dev(sc->sc_dev,
1239 "WARNING: unable to map I/O space\n");
1240 }
1241 }
1242
1243 }
1244
1245 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1246 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1247 preg |= PCI_COMMAND_MASTER_ENABLE;
1248 if (sc->sc_type < WM_T_82542_2_1)
1249 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1250 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1251
1252 /* power up chip */
1253 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1254 NULL)) && error != EOPNOTSUPP) {
1255 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1256 return;
1257 }
1258
1259 /*
1260 * Map and establish our interrupt.
1261 */
1262 if (pci_intr_map(pa, &ih)) {
1263 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1264 return;
1265 }
1266 intrstr = pci_intr_string(pc, ih);
1267 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
1268 if (sc->sc_ih == NULL) {
1269 aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1270 if (intrstr != NULL)
1271 aprint_error(" at %s", intrstr);
1272 aprint_error("\n");
1273 return;
1274 }
1275 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1276
1277 /*
1278 * Check the function ID (unit number of the chip).
1279 */
1280 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1281 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1282 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1283 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
1284 || (sc->sc_type == WM_T_I350))
1285 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1286 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1287 else
1288 sc->sc_funcid = 0;
1289
1290 /*
1291 * Determine a few things about the bus we're connected to.
1292 */
1293 if (sc->sc_type < WM_T_82543) {
1294 /* We don't really know the bus characteristics here. */
1295 sc->sc_bus_speed = 33;
1296 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1297 /*
1298 * CSA (Communication Streaming Architecture) is about as fast
1299 * a 32-bit 66MHz PCI Bus.
1300 */
1301 sc->sc_flags |= WM_F_CSA;
1302 sc->sc_bus_speed = 66;
1303 aprint_verbose_dev(sc->sc_dev,
1304 "Communication Streaming Architecture\n");
1305 if (sc->sc_type == WM_T_82547) {
1306 callout_init(&sc->sc_txfifo_ch, 0);
1307 callout_setfunc(&sc->sc_txfifo_ch,
1308 wm_82547_txfifo_stall, sc);
1309 aprint_verbose_dev(sc->sc_dev,
1310 "using 82547 Tx FIFO stall work-around\n");
1311 }
1312 } else if (sc->sc_type >= WM_T_82571) {
1313 sc->sc_flags |= WM_F_PCIE;
1314 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1315 && (sc->sc_type != WM_T_ICH10)
1316 && (sc->sc_type != WM_T_PCH)
1317 && (sc->sc_type != WM_T_PCH2)) {
1318 sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
1319 /* ICH* and PCH* have no PCIe capability registers */
1320 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1321 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1322 NULL) == 0)
1323 aprint_error_dev(sc->sc_dev,
1324 "unable to find PCIe capability\n");
1325 }
1326 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1327 } else {
1328 reg = CSR_READ(sc, WMREG_STATUS);
1329 if (reg & STATUS_BUS64)
1330 sc->sc_flags |= WM_F_BUS64;
1331 if ((reg & STATUS_PCIX_MODE) != 0) {
1332 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1333
1334 sc->sc_flags |= WM_F_PCIX;
1335 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1336 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1337 aprint_error_dev(sc->sc_dev,
1338 "unable to find PCIX capability\n");
1339 else if (sc->sc_type != WM_T_82545_3 &&
1340 sc->sc_type != WM_T_82546_3) {
1341 /*
1342 * Work around a problem caused by the BIOS
1343 * setting the max memory read byte count
1344 * incorrectly.
1345 */
1346 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1347 sc->sc_pcixe_capoff + PCI_PCIX_CMD);
1348 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1349 sc->sc_pcixe_capoff + PCI_PCIX_STATUS);
1350
1351 bytecnt =
1352 (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
1353 PCI_PCIX_CMD_BYTECNT_SHIFT;
1354 maxb =
1355 (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
1356 PCI_PCIX_STATUS_MAXB_SHIFT;
1357 if (bytecnt > maxb) {
1358 aprint_verbose_dev(sc->sc_dev,
1359 "resetting PCI-X MMRBC: %d -> %d\n",
1360 512 << bytecnt, 512 << maxb);
1361 pcix_cmd = (pcix_cmd &
1362 ~PCI_PCIX_CMD_BYTECNT_MASK) |
1363 (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
1364 pci_conf_write(pa->pa_pc, pa->pa_tag,
1365 sc->sc_pcixe_capoff + PCI_PCIX_CMD,
1366 pcix_cmd);
1367 }
1368 }
1369 }
1370 /*
1371 * The quad port adapter is special; it has a PCIX-PCIX
1372 * bridge on the board, and can run the secondary bus at
1373 * a higher speed.
1374 */
1375 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1376 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1377 : 66;
1378 } else if (sc->sc_flags & WM_F_PCIX) {
1379 switch (reg & STATUS_PCIXSPD_MASK) {
1380 case STATUS_PCIXSPD_50_66:
1381 sc->sc_bus_speed = 66;
1382 break;
1383 case STATUS_PCIXSPD_66_100:
1384 sc->sc_bus_speed = 100;
1385 break;
1386 case STATUS_PCIXSPD_100_133:
1387 sc->sc_bus_speed = 133;
1388 break;
1389 default:
1390 aprint_error_dev(sc->sc_dev,
1391 "unknown PCIXSPD %d; assuming 66MHz\n",
1392 reg & STATUS_PCIXSPD_MASK);
1393 sc->sc_bus_speed = 66;
1394 break;
1395 }
1396 } else
1397 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1398 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1399 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1400 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1401 }
1402
1403 /*
1404 * Allocate the control data structures, and create and load the
1405 * DMA map for it.
1406 *
1407 * NOTE: All Tx descriptors must be in the same 4G segment of
1408 * memory. So must Rx descriptors. We simplify by allocating
1409 * both sets within the same 4G segment.
1410 */
1411 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1412 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1413 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1414 sizeof(struct wm_control_data_82542) :
1415 sizeof(struct wm_control_data_82544);
1416 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1417 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1418 &sc->sc_cd_rseg, 0)) != 0) {
1419 aprint_error_dev(sc->sc_dev,
1420 "unable to allocate control data, error = %d\n",
1421 error);
1422 goto fail_0;
1423 }
1424
1425 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1426 sc->sc_cd_rseg, sc->sc_cd_size,
1427 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1428 aprint_error_dev(sc->sc_dev,
1429 "unable to map control data, error = %d\n", error);
1430 goto fail_1;
1431 }
1432
1433 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1434 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1435 aprint_error_dev(sc->sc_dev,
1436 "unable to create control data DMA map, error = %d\n",
1437 error);
1438 goto fail_2;
1439 }
1440
1441 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1442 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1443 aprint_error_dev(sc->sc_dev,
1444 "unable to load control data DMA map, error = %d\n",
1445 error);
1446 goto fail_3;
1447 }
1448
1449 /*
1450 * Create the transmit buffer DMA maps.
1451 */
1452 WM_TXQUEUELEN(sc) =
1453 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1454 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1455 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1456 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1457 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1458 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1459 aprint_error_dev(sc->sc_dev,
1460 "unable to create Tx DMA map %d, error = %d\n",
1461 i, error);
1462 goto fail_4;
1463 }
1464 }
1465
1466 /*
1467 * Create the receive buffer DMA maps.
1468 */
1469 for (i = 0; i < WM_NRXDESC; i++) {
1470 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1471 MCLBYTES, 0, 0,
1472 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1473 aprint_error_dev(sc->sc_dev,
1474 "unable to create Rx DMA map %d error = %d\n",
1475 i, error);
1476 goto fail_5;
1477 }
1478 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1479 }
1480
1481 /* clear interesting stat counters */
1482 CSR_READ(sc, WMREG_COLC);
1483 CSR_READ(sc, WMREG_RXERRC);
1484
1485 /* get PHY control from SMBus to PCIe */
1486 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
1487 wm_smbustopci(sc);
1488
1489 /*
1490 * Reset the chip to a known state.
1491 */
1492 wm_reset(sc);
1493
1494 switch (sc->sc_type) {
1495 case WM_T_82571:
1496 case WM_T_82572:
1497 case WM_T_82573:
1498 case WM_T_82574:
1499 case WM_T_82583:
1500 case WM_T_80003:
1501 case WM_T_ICH8:
1502 case WM_T_ICH9:
1503 case WM_T_ICH10:
1504 case WM_T_PCH:
1505 case WM_T_PCH2:
1506 if (wm_check_mng_mode(sc) != 0)
1507 wm_get_hw_control(sc);
1508 break;
1509 default:
1510 break;
1511 }
1512
1513 /*
1514 * Get some information about the EEPROM.
1515 */
1516 switch (sc->sc_type) {
1517 case WM_T_82542_2_0:
1518 case WM_T_82542_2_1:
1519 case WM_T_82543:
1520 case WM_T_82544:
1521 /* Microwire */
1522 sc->sc_ee_addrbits = 6;
1523 break;
1524 case WM_T_82540:
1525 case WM_T_82545:
1526 case WM_T_82545_3:
1527 case WM_T_82546:
1528 case WM_T_82546_3:
1529 /* Microwire */
1530 reg = CSR_READ(sc, WMREG_EECD);
1531 if (reg & EECD_EE_SIZE)
1532 sc->sc_ee_addrbits = 8;
1533 else
1534 sc->sc_ee_addrbits = 6;
1535 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1536 break;
1537 case WM_T_82541:
1538 case WM_T_82541_2:
1539 case WM_T_82547:
1540 case WM_T_82547_2:
1541 reg = CSR_READ(sc, WMREG_EECD);
1542 if (reg & EECD_EE_TYPE) {
1543 /* SPI */
1544 wm_set_spiaddrbits(sc);
1545 } else
1546 /* Microwire */
1547 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1548 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1549 break;
1550 case WM_T_82571:
1551 case WM_T_82572:
1552 /* SPI */
1553 wm_set_spiaddrbits(sc);
1554 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1555 break;
1556 case WM_T_82573:
1557 case WM_T_82574:
1558 case WM_T_82583:
1559 if (wm_is_onboard_nvm_eeprom(sc) == 0)
1560 sc->sc_flags |= WM_F_EEPROM_FLASH;
1561 else {
1562 /* SPI */
1563 wm_set_spiaddrbits(sc);
1564 }
1565 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1566 break;
1567 case WM_T_82575:
1568 case WM_T_82576:
1569 case WM_T_82580:
1570 case WM_T_82580ER:
1571 case WM_T_I350:
1572 case WM_T_80003:
1573 /* SPI */
1574 wm_set_spiaddrbits(sc);
1575 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1576 break;
1577 case WM_T_ICH8:
1578 case WM_T_ICH9:
1579 case WM_T_ICH10:
1580 case WM_T_PCH:
1581 case WM_T_PCH2:
1582 /* FLASH */
1583 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
1584 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1585 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1586 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1587 aprint_error_dev(sc->sc_dev,
1588 "can't map FLASH registers\n");
1589 return;
1590 }
1591 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1592 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1593 ICH_FLASH_SECTOR_SIZE;
1594 sc->sc_ich8_flash_bank_size =
1595 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1596 sc->sc_ich8_flash_bank_size -=
1597 (reg & ICH_GFPREG_BASE_MASK);
1598 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1599 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1600 break;
1601 default:
1602 break;
1603 }
1604
1605 /*
1606 * Defer printing the EEPROM type until after verifying the checksum
1607 * This allows the EEPROM type to be printed correctly in the case
1608 * that no EEPROM is attached.
1609 */
1610 /*
1611 * Validate the EEPROM checksum. If the checksum fails, flag
1612 * this for later, so we can fail future reads from the EEPROM.
1613 */
1614 if (wm_validate_eeprom_checksum(sc)) {
1615 /*
1616 * Read twice again because some PCI-e parts fail the
1617 * first check due to the link being in sleep state.
1618 */
1619 if (wm_validate_eeprom_checksum(sc))
1620 sc->sc_flags |= WM_F_EEPROM_INVALID;
1621 }
1622
1623 /* Set device properties (macflags) */
1624 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1625
1626 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1627 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1628 else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1629 aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1630 } else {
1631 if (sc->sc_flags & WM_F_EEPROM_SPI)
1632 eetype = "SPI";
1633 else
1634 eetype = "MicroWire";
1635 aprint_verbose_dev(sc->sc_dev,
1636 "%u word (%d address bits) %s EEPROM\n",
1637 1U << sc->sc_ee_addrbits,
1638 sc->sc_ee_addrbits, eetype);
1639 }
1640
1641 /*
1642 * Read the Ethernet address from the EEPROM, if not first found
1643 * in device properties.
1644 */
1645 ea = prop_dictionary_get(dict, "mac-address");
1646 if (ea != NULL) {
1647 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1648 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1649 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1650 } else {
1651 if (wm_read_mac_addr(sc, enaddr) != 0) {
1652 aprint_error_dev(sc->sc_dev,
1653 "unable to read Ethernet address\n");
1654 return;
1655 }
1656 }
1657
1658 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1659 ether_sprintf(enaddr));
1660
1661 /*
1662 * Read the config info from the EEPROM, and set up various
1663 * bits in the control registers based on their contents.
1664 */
1665 pn = prop_dictionary_get(dict, "i82543-cfg1");
1666 if (pn != NULL) {
1667 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1668 cfg1 = (uint16_t) prop_number_integer_value(pn);
1669 } else {
1670 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1671 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1672 return;
1673 }
1674 }
1675
1676 pn = prop_dictionary_get(dict, "i82543-cfg2");
1677 if (pn != NULL) {
1678 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1679 cfg2 = (uint16_t) prop_number_integer_value(pn);
1680 } else {
1681 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1682 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1683 return;
1684 }
1685 }
1686
1687 /* check for WM_F_WOL */
1688 switch (sc->sc_type) {
1689 case WM_T_82542_2_0:
1690 case WM_T_82542_2_1:
1691 case WM_T_82543:
1692 /* dummy? */
1693 eeprom_data = 0;
1694 apme_mask = EEPROM_CFG3_APME;
1695 break;
1696 case WM_T_82544:
1697 apme_mask = EEPROM_CFG2_82544_APM_EN;
1698 eeprom_data = cfg2;
1699 break;
1700 case WM_T_82546:
1701 case WM_T_82546_3:
1702 case WM_T_82571:
1703 case WM_T_82572:
1704 case WM_T_82573:
1705 case WM_T_82574:
1706 case WM_T_82583:
1707 case WM_T_80003:
1708 default:
1709 apme_mask = EEPROM_CFG3_APME;
1710 wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1711 : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1712 break;
1713 case WM_T_82575:
1714 case WM_T_82576:
1715 case WM_T_82580:
1716 case WM_T_82580ER:
1717 case WM_T_I350:
1718 case WM_T_ICH8:
1719 case WM_T_ICH9:
1720 case WM_T_ICH10:
1721 case WM_T_PCH:
1722 case WM_T_PCH2:
1723 /* XXX The funcid should be checked on some devices */
1724 apme_mask = WUC_APME;
1725 eeprom_data = CSR_READ(sc, WMREG_WUC);
1726 break;
1727 }
1728
1729 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
1730 if ((eeprom_data & apme_mask) != 0)
1731 sc->sc_flags |= WM_F_WOL;
1732 #ifdef WM_DEBUG
1733 if ((sc->sc_flags & WM_F_WOL) != 0)
1734 printf("WOL\n");
1735 #endif
1736
1737 /*
1738 * XXX need special handling for some multiple port cards
1739 * to disable a paticular port.
1740 */
1741
1742 if (sc->sc_type >= WM_T_82544) {
1743 pn = prop_dictionary_get(dict, "i82543-swdpin");
1744 if (pn != NULL) {
1745 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1746 swdpin = (uint16_t) prop_number_integer_value(pn);
1747 } else {
1748 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1749 aprint_error_dev(sc->sc_dev,
1750 "unable to read SWDPIN\n");
1751 return;
1752 }
1753 }
1754 }
1755
1756 if (cfg1 & EEPROM_CFG1_ILOS)
1757 sc->sc_ctrl |= CTRL_ILOS;
1758 if (sc->sc_type >= WM_T_82544) {
1759 sc->sc_ctrl |=
1760 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1761 CTRL_SWDPIO_SHIFT;
1762 sc->sc_ctrl |=
1763 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1764 CTRL_SWDPINS_SHIFT;
1765 } else {
1766 sc->sc_ctrl |=
1767 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1768 CTRL_SWDPIO_SHIFT;
1769 }
1770
1771 #if 0
1772 if (sc->sc_type >= WM_T_82544) {
1773 if (cfg1 & EEPROM_CFG1_IPS0)
1774 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1775 if (cfg1 & EEPROM_CFG1_IPS1)
1776 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1777 sc->sc_ctrl_ext |=
1778 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1779 CTRL_EXT_SWDPIO_SHIFT;
1780 sc->sc_ctrl_ext |=
1781 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1782 CTRL_EXT_SWDPINS_SHIFT;
1783 } else {
1784 sc->sc_ctrl_ext |=
1785 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1786 CTRL_EXT_SWDPIO_SHIFT;
1787 }
1788 #endif
1789
1790 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1791 #if 0
1792 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1793 #endif
1794
1795 /*
1796 * Set up some register offsets that are different between
1797 * the i82542 and the i82543 and later chips.
1798 */
1799 if (sc->sc_type < WM_T_82543) {
1800 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1801 sc->sc_tdt_reg = WMREG_OLD_TDT;
1802 } else {
1803 sc->sc_rdt_reg = WMREG_RDT;
1804 sc->sc_tdt_reg = WMREG_TDT;
1805 }
1806
1807 if (sc->sc_type == WM_T_PCH) {
1808 uint16_t val;
1809
1810 /* Save the NVM K1 bit setting */
1811 wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1812
1813 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1814 sc->sc_nvm_k1_enabled = 1;
1815 else
1816 sc->sc_nvm_k1_enabled = 0;
1817 }
1818
1819 /*
1820 * Determine if we're TBI,GMII or SGMII mode, and initialize the
1821 * media structures accordingly.
1822 */
1823 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1824 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1825 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_82573
1826 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1827 /* STATUS_TBIMODE reserved/reused, can't rely on it */
1828 wm_gmii_mediainit(sc, wmp->wmp_product);
1829 } else if (sc->sc_type < WM_T_82543 ||
1830 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1831 if (wmp->wmp_flags & WMP_F_1000T)
1832 aprint_error_dev(sc->sc_dev,
1833 "WARNING: TBIMODE set on 1000BASE-T product!\n");
1834 wm_tbi_mediainit(sc);
1835 } else {
1836 switch (sc->sc_type) {
1837 case WM_T_82575:
1838 case WM_T_82576:
1839 case WM_T_82580:
1840 case WM_T_82580ER:
1841 case WM_T_I350:
1842 reg = CSR_READ(sc, WMREG_CTRL_EXT);
1843 switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1844 case CTRL_EXT_LINK_MODE_SGMII:
1845 aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1846 sc->sc_flags |= WM_F_SGMII;
1847 CSR_WRITE(sc, WMREG_CTRL_EXT,
1848 reg | CTRL_EXT_I2C_ENA);
1849 wm_gmii_mediainit(sc, wmp->wmp_product);
1850 break;
1851 case CTRL_EXT_LINK_MODE_1000KX:
1852 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1853 aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1854 CSR_WRITE(sc, WMREG_CTRL_EXT,
1855 reg | CTRL_EXT_I2C_ENA);
1856 panic("not supported yet\n");
1857 break;
1858 case CTRL_EXT_LINK_MODE_GMII:
1859 default:
1860 CSR_WRITE(sc, WMREG_CTRL_EXT,
1861 reg & ~CTRL_EXT_I2C_ENA);
1862 wm_gmii_mediainit(sc, wmp->wmp_product);
1863 break;
1864 }
1865 break;
1866 default:
1867 if (wmp->wmp_flags & WMP_F_1000X)
1868 aprint_error_dev(sc->sc_dev,
1869 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1870 wm_gmii_mediainit(sc, wmp->wmp_product);
1871 }
1872 }
1873
1874 ifp = &sc->sc_ethercom.ec_if;
1875 xname = device_xname(sc->sc_dev);
1876 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1877 ifp->if_softc = sc;
1878 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1879 ifp->if_ioctl = wm_ioctl;
1880 ifp->if_start = wm_start;
1881 ifp->if_watchdog = wm_watchdog;
1882 ifp->if_init = wm_init;
1883 ifp->if_stop = wm_stop;
1884 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1885 IFQ_SET_READY(&ifp->if_snd);
1886
1887 /* Check for jumbo frame */
1888 switch (sc->sc_type) {
1889 case WM_T_82573:
1890 /* XXX limited to 9234 if ASPM is disabled */
1891 wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1892 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1893 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1894 break;
1895 case WM_T_82571:
1896 case WM_T_82572:
1897 case WM_T_82574:
1898 case WM_T_82575:
1899 case WM_T_82576:
1900 case WM_T_82580:
1901 case WM_T_82580ER:
1902 case WM_T_I350:
1903 case WM_T_80003:
1904 case WM_T_ICH9:
1905 case WM_T_ICH10:
1906 case WM_T_PCH2: /* PCH2 supports 9K frame size */
1907 /* XXX limited to 9234 */
1908 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1909 break;
1910 case WM_T_PCH:
1911 /* XXX limited to 4096 */
1912 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1913 break;
1914 case WM_T_82542_2_0:
1915 case WM_T_82542_2_1:
1916 case WM_T_82583:
1917 case WM_T_ICH8:
1918 /* No support for jumbo frame */
1919 break;
1920 default:
1921 /* ETHER_MAX_LEN_JUMBO */
1922 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1923 break;
1924 }
1925
1926 /*
1927 * If we're a i82543 or greater, we can support VLANs.
1928 */
1929 if (sc->sc_type == WM_T_82575 || sc->sc_type == WM_T_82576)
1930 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
1931 else if (sc->sc_type >= WM_T_82543)
1932 sc->sc_ethercom.ec_capabilities |=
1933 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1934
1935 /*
 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
1937 * on i82543 and later.
1938 */
1939 if (sc->sc_type >= WM_T_82543) {
1940 ifp->if_capabilities |=
1941 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1942 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1943 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1944 IFCAP_CSUM_TCPv6_Tx |
1945 IFCAP_CSUM_UDPv6_Tx;
1946 }
1947
1948 /*
1949 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1950 *
1951 * 82541GI (8086:1076) ... no
1952 * 82572EI (8086:10b9) ... yes
1953 */
1954 if (sc->sc_type >= WM_T_82571) {
1955 ifp->if_capabilities |=
1956 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1957 }
1958
1959 /*
1960 * If we're a i82544 or greater (except i82547), we can do
1961 * TCP segmentation offload.
1962 */
1963 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1964 ifp->if_capabilities |= IFCAP_TSOv4;
1965 }
1966
1967 if (sc->sc_type >= WM_T_82571) {
1968 ifp->if_capabilities |= IFCAP_TSOv6;
1969 }
1970
1971 /*
1972 * Attach the interface.
1973 */
1974 if_attach(ifp);
1975 ether_ifattach(ifp, enaddr);
1976 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
1977 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1978
1979 #ifdef WM_EVENT_COUNTERS
1980 /* Attach event counters. */
1981 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1982 NULL, xname, "txsstall");
1983 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1984 NULL, xname, "txdstall");
1985 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1986 NULL, xname, "txfifo_stall");
1987 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1988 NULL, xname, "txdw");
1989 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1990 NULL, xname, "txqe");
1991 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1992 NULL, xname, "rxintr");
1993 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1994 NULL, xname, "linkintr");
1995
1996 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1997 NULL, xname, "rxipsum");
1998 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1999 NULL, xname, "rxtusum");
2000 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2001 NULL, xname, "txipsum");
2002 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2003 NULL, xname, "txtusum");
2004 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2005 NULL, xname, "txtusum6");
2006
2007 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2008 NULL, xname, "txtso");
2009 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2010 NULL, xname, "txtso6");
2011 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2012 NULL, xname, "txtsopain");
2013
2014 for (i = 0; i < WM_NTXSEGS; i++) {
2015 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
2016 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2017 NULL, xname, wm_txseg_evcnt_names[i]);
2018 }
2019
2020 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2021 NULL, xname, "txdrop");
2022
2023 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2024 NULL, xname, "tu");
2025
2026 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2027 NULL, xname, "tx_xoff");
2028 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2029 NULL, xname, "tx_xon");
2030 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2031 NULL, xname, "rx_xoff");
2032 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2033 NULL, xname, "rx_xon");
2034 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2035 NULL, xname, "rx_macctl");
2036 #endif /* WM_EVENT_COUNTERS */
2037
2038 if (pmf_device_register(self, wm_suspend, wm_resume))
2039 pmf_class_network_register(self, ifp);
2040 else
2041 aprint_error_dev(self, "couldn't establish power handler\n");
2042
2043 return;
2044
2045 /*
2046 * Free any resources we've allocated during the failed attach
2047 * attempt. Do this in reverse order and fall through.
2048 */
2049 fail_5:
2050 for (i = 0; i < WM_NRXDESC; i++) {
2051 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2052 bus_dmamap_destroy(sc->sc_dmat,
2053 sc->sc_rxsoft[i].rxs_dmamap);
2054 }
2055 fail_4:
2056 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2057 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2058 bus_dmamap_destroy(sc->sc_dmat,
2059 sc->sc_txsoft[i].txs_dmamap);
2060 }
2061 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2062 fail_3:
2063 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2064 fail_2:
2065 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2066 sc->sc_cd_size);
2067 fail_1:
2068 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2069 fail_0:
2070 return;
2071 }
2072
/*
 * wm_detach:
 *
 *	Autoconfiguration detach hook: stop the interface, detach it
 *	from the network stack, and release every resource acquired in
 *	wm_attach() (DMA maps/memory, interrupt handler, bus-space
 *	mappings), in reverse order of acquisition.
 */
static int
wm_detach(device_t self, int flags __unused)
{
	struct wm_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, s;

	s = splnet();
	/* Stop the interface. Callouts are stopped in it. */
	wm_stop(ifp, 1);
	splx(s);

	pmf_device_deregister(self);

	/* Tell the firmware about the release */
	wm_release_manageability(sc);
	wm_release_hw_control(sc);

	/* Detach all PHYs before tearing down the media structures. */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Unload RX dmamaps and free mbufs */
	wm_rxdrain(sc);

	/* Free dmamap. It's the same as the end of the wm_attach() function */
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	/* Release the control-data (descriptor ring) DMA resources. */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sc->sc_cd_size);
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);

	/* Disestablish the interrupt handler */
	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}

	/* Unmap the registers */
	if (sc->sc_ss) {
		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
		sc->sc_ss = 0;
	}

	/* Unmap the I/O space, if it was mapped. */
	if (sc->sc_ios) {
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
		sc->sc_ios = 0;
	}

	return 0;
}
2139
/*
 * wm_tx_offload:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 *
 *	Writes one Livengood TCP/IP context descriptor into the Tx ring
 *	(consuming one descriptor, advancing sc_txnext and bumping
 *	txs->txs_ndesc) and returns via *cmdp/*fieldsp the command and
 *	option bits the caller must place in the packet's data
 *	descriptors.  Always returns 0.
 */
static int
wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t ipcs, tucs, cmd, cmdlen, seg;
	uint32_t ipcse;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	/* Locate the start of the IP header past the Ethernet header. */
	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return 0;
	}

	/* Recover the IP header length from the csum_data cookie. */
	if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
	} else {
		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
	}
	/* Offset of the last byte of the IP header (inclusive). */
	ipcse = offset + iphl - 1;

	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
	seg = 0;
	fields = 0;

	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
		/* TSO: patch the packet headers before handing it over. */
		int hlen = offset + iphl;
		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;

		if (__predict_false(m0->m_len <
		    (hlen + sizeof(struct tcphdr)))) {
			/*
			 * TCP/IP headers are not in the first mbuf; we need
			 * to do this the slow and painful way. Let's just
			 * hope this doesn't happen very often.
			 */
			struct tcphdr th;

			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);

			m_copydata(m0, hlen, sizeof(th), &th);
			if (v4) {
				struct ip ip;

				/*
				 * Zero the total-length field and write the
				 * pseudo-header checksum into th_sum; both
				 * are copied back into the mbuf chain.
				 */
				m_copydata(m0, offset, sizeof(ip), &ip);
				ip.ip_len = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip, ip_len),
				    sizeof(ip.ip_len), &ip.ip_len);
				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr ip6;

				m_copydata(m0, offset, sizeof(ip6), &ip6);
				ip6.ip6_plen = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip6_hdr, ip6_plen),
				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
			    sizeof(th.th_sum), &th.th_sum);

			/* hlen now covers IP header plus TCP header. */
			hlen += th.th_off << 2;
		} else {
			/*
			 * TCP/IP headers are in the first mbuf; we can do
			 * this the easy way.
			 */
			struct tcphdr *th;

			if (v4) {
				struct ip *ip =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				/* Same edits as above, done in place. */
				ip->ip_len = 0;
				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr *ip6 =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				ip6->ip6_plen = 0;
				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			hlen += th->th_off << 2;
		}

		if (v4) {
			WM_EVCNT_INCR(&sc->sc_ev_txtso);
			cmdlen |= WTX_TCPIP_CMD_IP;
		} else {
			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
			/* No IP checksum end for v6. */
			ipcse = 0;
		}
		cmd |= WTX_TCPIP_CMD_TSE;
		/* Payload length (total minus headers) rides in cmdlen. */
		cmdlen |= WTX_TCPIP_CMD_TSE |
		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
	}

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	ipcs = WTX_TCPIP_IPCSS(offset) |
	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
	    WTX_TCPIP_IPCSE(ipcse);
	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= WTX_IXSM;
	}

	/* offset now points at the TCP/UDP header. */
	offset += iphl;

	if (m0->m_pkthdr.csum_flags &
	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset +
			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset +
			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	}

	/* Fill in the context descriptor. */
	t = (struct livengood_tcpip_ctxdesc *)
	    &sc->sc_txdescs[sc->sc_txnext];
	t->tcpip_ipcs = htole32(ipcs);
	t->tcpip_tucs = htole32(tucs);
	t->tcpip_cmdlen = htole32(cmdlen);
	t->tcpip_seg = htole32(seg);
	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

	/* The context descriptor counts against this packet's total. */
	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
	txs->txs_ndesc++;

	*cmdp = cmd;
	*fieldsp = fields;

	return 0;
}
2334
2335 static void
2336 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2337 {
2338 struct mbuf *m;
2339 int i;
2340
2341 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2342 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2343 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2344 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2345 m->m_data, m->m_len, m->m_flags);
2346 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2347 i, i == 1 ? "" : "s");
2348 }
2349
/*
 * wm_82547_txfifo_stall:
 *
 *	Callout used to wait for the 82547 Tx FIFO to drain,
 *	reset the FIFO pointers, and restart packet transmission.
 *	Runs at splnet; re-arms itself (one tick) until the FIFO
 *	is observed empty.
 */
static void
wm_82547_txfifo_stall(void *arg)
{
	struct wm_softc *sc = arg;
	int s;

	s = splnet();

	if (sc->sc_txfifo_stall) {
		/*
		 * Drained means: descriptor ring empty (TDT == TDH) and
		 * the FIFO tail/head registers (and their saved copies)
		 * have caught up with each other.
		 */
		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
			/*
			 * Packets have drained. Stop transmitter, reset
			 * FIFO pointers, restart transmitter, and kick
			 * the packet queue.
			 */
			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TCTL, tctl);
			CSR_WRITE_FLUSH(sc);

			/* Reset software FIFO accounting and unstall. */
			sc->sc_txfifo_head = 0;
			sc->sc_txfifo_stall = 0;
			wm_start(&sc->sc_ethercom.ec_if);
		} else {
			/*
			 * Still waiting for packets to drain; try again in
			 * another tick.
			 */
			callout_schedule(&sc->sc_txfifo_ch, 1);
		}
	}

	splx(s);
}
2396
2397 static void
2398 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2399 {
2400 uint32_t reg;
2401
2402 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2403
2404 if (on != 0)
2405 reg |= EXTCNFCTR_GATE_PHY_CFG;
2406 else
2407 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2408
2409 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2410 }
2411
2412 /*
2413 * wm_82547_txfifo_bugchk:
2414 *
2415 * Check for bug condition in the 82547 Tx FIFO. We need to
2416 * prevent enqueueing a packet that would wrap around the end
2417 * if the Tx FIFO ring buffer, otherwise the chip will croak.
2418 *
2419 * We do this by checking the amount of space before the end
2420 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2421 * the Tx FIFO, wait for all remaining packets to drain, reset
2422 * the internal FIFO pointers to the beginning, and restart
2423 * transmission on the interface.
2424 */
2425 #define WM_FIFO_HDR 0x10
2426 #define WM_82547_PAD_LEN 0x3e0
2427 static int
2428 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2429 {
2430 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2431 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2432
2433 /* Just return if already stalled. */
2434 if (sc->sc_txfifo_stall)
2435 return 1;
2436
2437 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2438 /* Stall only occurs in half-duplex mode. */
2439 goto send_packet;
2440 }
2441
2442 if (len >= WM_82547_PAD_LEN + space) {
2443 sc->sc_txfifo_stall = 1;
2444 callout_schedule(&sc->sc_txfifo_ch, 1);
2445 return 1;
2446 }
2447
2448 send_packet:
2449 sc->sc_txfifo_head += len;
2450 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2451 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2452
2453 return 0;
2454 }
2455
/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.  Dequeues packets
 *	from the send queue, loads each into a DMA map, emits an
 *	optional checksum/TSO context descriptor plus one data
 *	descriptor per DMA segment, and advances the TDT register to
 *	hand the work to the chip.
 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct m_tag *mtag;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
	bus_addr_t curaddr;
	bus_size_t seglen, curlen;
	uint32_t cksumcmd;
	uint8_t cksumfields;

	/* Nothing to do unless running and not already flow-blocked. */
	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    device_xname(sc->sc_dev), m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
			/* Low on job slots: reap completed jobs first. */
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					device_xname(sc->sc_dev)));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		use_tso = (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;

		/*
		 * So says the Linux driver:
		 * The controller does a simple calculation to make sure
		 * there is enough room in the FIFO before initiating the
		 * DMA for each buffer. The calc is:
		 *	4 = ceil(buffer len / MSS)
		 * To make sure we don't overrun the FIFO, adjust the max
		 * buffer len if the MSS drops.
		 */
		dmamap->dm_maxsegsz =
		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
		    ? m0->m_pkthdr.segsz << 2
		    : WTX_MAX_LEN;

		/*
		 * Load the DMA map. If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources. For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				log(LOG_ERR, "%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				wm_dump_mbuf_chain(sc, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    device_xname(sc->sc_dev), error));
			break;
		}

		segs_needed = dmamap->dm_nsegs;
		if (use_tso) {
			/* For sentinel descriptor; see below. */
			segs_needed++;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet. Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to load offload context.
		 */
		if (segs_needed > sc->sc_txfree - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet. We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d (%d) descriptors, have %d\n",
			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
			    segs_needed, sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		/*
		 * Check for 82547 Tx FIFO bug. We need to do this
		 * once we know we can transmit the packet, since we
		 * do some internal FIFO space accounting here.
		 */
		if (sc->sc_type == WM_T_82547 &&
		    wm_82547_txfifo_bugchk(sc, m0)) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: 82547 Tx FIFO bug detected\n",
			    device_xname(sc->sc_dev)));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d (%d) DMA segments\n",
		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments. This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = segs_needed;

		/* Set up offload parameters for this packet. */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
			if (wm_tx_offload(sc, txs, &cksumcmd,
					  &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		/* Delay interrupt; insert CRC on every descriptor. */
		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs; seg++) {
			for (seglen = dmamap->dm_segs[seg].ds_len,
			     curaddr = dmamap->dm_segs[seg].ds_addr;
			     seglen != 0;
			     curaddr += curlen, seglen -= curlen,
			     nexttx = WM_NEXTTX(sc, nexttx)) {
				curlen = seglen;

				/*
				 * So says the Linux driver:
				 * Work around for premature descriptor
				 * write-backs in TSO mode. Append a
				 * 4-byte sentinel descriptor.
				 */
				if (use_tso &&
				    seg == dmamap->dm_nsegs - 1 &&
				    curlen > 8)
					curlen -= 4;

				wm_set_dma_addr(
				    &sc->sc_txdescs[nexttx].wtx_addr,
				    curaddr);
				sc->sc_txdescs[nexttx].wtx_cmdlen =
				    htole32(cksumcmd | curlen);
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
				    0;
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
				    cksumfields;
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
				lasttx = nexttx;

				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: desc %d: low %#" PRIxPADDR ", "
				     "len %#04zx\n",
				    device_xname(sc->sc_dev), nexttx,
				    curaddr & 0xffffffffUL, curlen));
			}
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet. If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_RS);

		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
		}

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
		    device_xname(sc->sc_dev),
		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    device_xname(sc->sc_dev), sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);

		/* Pass the packet to any BPF listeners. */
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
2762
/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.  Reaps completed transmissions first;
 *	if descriptors are still outstanding, logs a device timeout,
 *	counts an output error and re-initializes the interface, then
 *	restarts transmission either way.
 */
static void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	/* All descriptors free means nothing was actually stuck. */
	if (sc->sc_txfree != WM_NTXDESC(sc)) {
		log(LOG_ERR,
		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}
2793
2794 static int
2795 wm_ifflags_cb(struct ethercom *ec)
2796 {
2797 struct ifnet *ifp = &ec->ec_if;
2798 struct wm_softc *sc = ifp->if_softc;
2799 int change = ifp->if_flags ^ sc->sc_if_flags;
2800
2801 if (change != 0)
2802 sc->sc_if_flags = ifp->if_flags;
2803
2804 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
2805 return ENETRESET;
2806
2807 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2808 wm_set_filter(sc);
2809
2810 wm_set_vlan(sc);
2811
2812 return 0;
2813 }
2814
/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.  Runs at splnet;
 *	media ioctls get flow-control sanitizing, AF_LINK address
 *	initialization updates the station address, and everything
 *	else is delegated to ether_ioctl() with ENETRESET handled
 *	in place.  Always kicks wm_start() before returning.
 */
static int
wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct sockaddr_dl *sdl;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			/* Remember the requested flow-control bits. */
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	case SIOCINITIFADDR:
		if (ifa->ifa_addr->sa_family == AF_LINK) {
			/* Copy the new link-level address into if_dl. */
			sdl = satosdl(ifp->if_dl->ifa_addr);
			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
			/* unicast address is first multicast entry */
			wm_set_filter(sc);
			error = 0;
			break;
		}
		/*FALLTHROUGH*/
	default:
		/* Let the common Ethernet code handle the request. */
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		/* ENETRESET: apply the change without a full reset. */
		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return error;
}
2885
/*
 * wm_intr:
 *
 *	Interrupt service routine.  Loops reading ICR until no bits we
 *	care about remain set, dispatching to the Rx, Tx and link
 *	helpers each pass.  Returns non-zero iff the interrupt was ours.
 */
static int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int handled = 0;

	while (1 /* CONSTCOND */) {
		/* Reading ICR tells us what work is pending. */
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;
		/* Feed interrupt timing into the entropy pool. */
		rnd_add_uint32(&sc->rnd_source, icr);

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    device_xname(sc->sc_dev),
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		/* Rx and Tx are serviced unconditionally each pass. */
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    device_xname(sc->sc_dev)));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		/* Link-state changes get their own handler. */
		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
#if defined(WM_DEBUG)
			log(LOG_WARNING, "%s: Receive overrun\n",
			    device_xname(sc->sc_dev));
#endif /* defined(WM_DEBUG) */
		}
	}

	if (handled) {
		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return handled;
}
2948
2949 /*
2950 * wm_txintr:
2951 *
2952 * Helper; handle transmit interrupts.
2953 */
static void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	/* We are reclaiming descriptors, so transmit can resume. */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));

		/* Pull the descriptor(s) for this job in from memory. */
		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/*
		 * The chip sets Descriptor Done on the last descriptor
		 * of a job once the whole frame has been fetched.
		 */
		status =
		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
		if ((status & WTX_ST_DD) == 0) {
			/* Not done yet; give the descriptor back to DMA. */
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		/* Excessive or late collision: count as output error. */
		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				log(LOG_WARNING, "%s: late collision\n",
				    device_xname(sc->sc_dev));
			else if (status & WTX_ST_EC) {
				/* Excessive collisions == 16 attempts. */
				ifp->if_collisions += 16;
				log(LOG_WARNING, "%s: excessive collisions\n",
				    device_xname(sc->sc_dev));
			}
		} else
			ifp->if_opackets++;

		/* Release the descriptors and the DMA map, free the mbuf. */
		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
		ifp->if_timer = 0;
}
3035
3036 /*
3037 * wm_rxintr:
3038 *
3039 * Helper; handle receive interrupts.
3040 */
static void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;
	uint16_t vlantag;

	/* Walk the ring from where we left off until a not-done slot. */
	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    device_xname(sc->sc_dev), i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* Snapshot the descriptor fields after the POSTREAD sync. */
		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);
		vlantag = sc->sc_rxdescs[i].wrx_special;

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		/*
		 * A previous buffer-replacement failure mid-packet means
		 * the remainder of that frame must be thrown away until
		 * we see its end-of-packet descriptor.
		 */
		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    device_xname(sc->sc_dev), i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    device_xname(sc->sc_dev)));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring, unless of
		 * course the length is zero.  Treat the latter as a
		 * failed mapping.
		 */
		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			/* Discard the rest of a multi-descriptor frame. */
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", device_xname(sc->sc_dev),
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		m->m_len = len;
		sc->sc_rxlen += len;
		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    device_xname(sc->sc_dev), m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			WM_RXCHAIN_LINK(sc, m);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    device_xname(sc->sc_dev), sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS except I350
		 * (not all chips can be configured to strip it),
		 * so we need to trim it.
		 * May need to adjust length of previous mbuf in the
		 * chain if the current mbuf is too short.
		 * Due to an errata, the RCTL_SECRC bit in the RCTL
		 * register is always set on I350, so we don't trim it.
		 */
		if (sc->sc_type != WM_T_I350) {
			if (m->m_len < ETHER_CRC_LEN) {
				/* CRC straddles mbufs: shorten previous one. */
				sc->sc_rxtail->m_len
				    -= (ETHER_CRC_LEN - m->m_len);
				m->m_len = 0;
			} else
				m->m_len -= ETHER_CRC_LEN;
			len = sc->sc_rxlen - ETHER_CRC_LEN;
		} else
			len = sc->sc_rxlen;

		WM_RXCHAIN_LINK(sc, m);

		/* Terminate the chain and take ownership of it. */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    device_xname(sc->sc_dev), len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			if (errors & WRX_ER_SE)
				log(LOG_WARNING, "%s: symbol error\n",
				    device_xname(sc->sc_dev));
			else if (errors & WRX_ER_SEQ)
				log(LOG_WARNING, "%s: receive sequence error\n",
				    device_xname(sc->sc_dev));
			else if (errors & WRX_ER_CE)
				log(LOG_WARNING, "%s: CRC error\n",
				    device_xname(sc->sc_dev));
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if ((status & WRX_ST_VP) != 0) {
			VLAN_INPUT_TAG(ifp, m,
			    le16toh(vlantag),
			    continue);
		}

		/*
		 * Set up checksum info for this packet.  IXSM means the
		 * chip ignored the checksum fields; otherwise report what
		 * the hardware verified.
		 */
		if ((status & WRX_ST_IXSM) == 0) {
			if (status & WRX_ST_IPCS) {
				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				if (errors & WRX_ER_IPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
			}
			if (status & WRX_ST_TCPCS) {
				/*
				 * Note: we don't know if this was TCP or UDP,
				 * so we just set both bits, and expect the
				 * upper layers to deal.
				 */
				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
				m->m_pkthdr.csum_flags |=
				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
				if (errors & WRX_ER_TCPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			}
		}

		ifp->if_ipackets++;

		/* Pass this up to any BPF listeners. */
		bpf_mtap(ifp, m);

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
}
3245
3246 /*
3247 * wm_linkintr_gmii:
3248 *
3249 * Helper; handle link interrupts for GMII.
3250 */
static void
wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
{

	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
		__func__));

	if (icr & ICR_LSC) {
		/* Link status changed: let the MII layer re-evaluate. */
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: LSC -> mii_tick\n",
			device_xname(sc->sc_dev)));
		mii_tick(&sc->sc_mii);
		if (sc->sc_type == WM_T_82543) {
			int miistatus, active;

			/*
			 * With 82543, we need to force speed and
			 * duplex on the MAC equal to what the PHY
			 * speed and duplex configuration is.
			 */
			miistatus = sc->sc_mii.mii_media_status;

			if (miistatus & IFM_ACTIVE) {
				active = sc->sc_mii.mii_media_active;
				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
				switch (IFM_SUBTYPE(active)) {
				case IFM_10_T:
					sc->sc_ctrl |= CTRL_SPEED_10;
					break;
				case IFM_100_TX:
					sc->sc_ctrl |= CTRL_SPEED_100;
					break;
				case IFM_1000_T:
					sc->sc_ctrl |= CTRL_SPEED_1000;
					break;
				default:
					/*
					 * fiber?
					 * Should not enter here.
					 */
					printf("unknown media (%x)\n",
					    active);
					break;
				}
				if (active & IFM_FDX)
					sc->sc_ctrl |= CTRL_FD;
				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			}
		} else if ((sc->sc_type == WM_T_ICH8)
		    && (sc->sc_phytype == WMPHY_IGP_3)) {
			/* ICH8 + IGP3 PHY: work around Kumeran lock loss. */
			wm_kmrn_lock_loss_workaround_ich8lan(sc);
		} else if (sc->sc_type == WM_T_PCH) {
			/* PCH: K1 must be configured per link state. */
			wm_k1_gig_workaround_hv(sc,
			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
		}

		/* 82578 PHY at gigabit: link stall workaround on link up. */
		if ((sc->sc_phytype == WMPHY_82578)
		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
			== IFM_1000_T)) {

			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
				delay(200*1000); /* XXX too big */

				/* Link stall fix for link up */
				wm_gmii_hv_writereg(sc->sc_dev, 1,
				    HV_MUX_DATA_CTRL,
				    HV_MUX_DATA_CTRL_GEN_TO_MAC
				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
				wm_gmii_hv_writereg(sc->sc_dev, 1,
				    HV_MUX_DATA_CTRL,
				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
			}
		}
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK Receive sequence error\n",
			device_xname(sc->sc_dev)));
	}
}
3330
3331 /*
3332 * wm_linkintr_tbi:
3333 *
3334 * Helper; handle link interrupts for TBI mode.
3335 */
static void
wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
		__func__));

	status = CSR_READ(sc, WMREG_STATUS);
	if (icr & ICR_LSC) {
		if (status & STATUS_LU) {
			/* Link came up; adjust collision distance and
			 * flow-control to the negotiated duplex. */
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    device_xname(sc->sc_dev),
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */

			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (sc->sc_ctrl & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			/* Pre-82543 chips use the old FCRTL register. */
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
				      WMREG_OLD_FCRTL : WMREG_FCRTL,
				      sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
		}
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXCFG) {
		/* Receiving /C/ ordered sets from the link partner. */
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    device_xname(sc->sc_dev)));
		sc->sc_tbi_nrxcfg++;
		wm_check_for_link(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    device_xname(sc->sc_dev)));
	}
}
3388
3389 /*
3390 * wm_linkintr:
3391 *
3392 * Helper; handle link interrupts.
3393 */
3394 static void
3395 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3396 {
3397
3398 if (sc->sc_flags & WM_F_HAS_MII)
3399 wm_linkintr_gmii(sc, icr);
3400 else
3401 wm_linkintr_tbi(sc, icr);
3402 }
3403
3404 /*
3405 * wm_tick:
3406 *
3407 * One second timer, used to check link status, sweep up
3408 * completed transmit jobs, etc.
3409 */
3410 static void
3411 wm_tick(void *arg)
3412 {
3413 struct wm_softc *sc = arg;
3414 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3415 int s;
3416
3417 s = splnet();
3418
3419 if (sc->sc_type >= WM_T_82542_2_1) {
3420 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3421 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3422 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3423 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3424 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3425 }
3426
3427 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3428 ifp->if_ierrors += 0ULL + /* ensure quad_t */
3429 + CSR_READ(sc, WMREG_CRCERRS)
3430 + CSR_READ(sc, WMREG_ALGNERRC)
3431 + CSR_READ(sc, WMREG_SYMERRC)
3432 + CSR_READ(sc, WMREG_RXERRC)
3433 + CSR_READ(sc, WMREG_SEC)
3434 + CSR_READ(sc, WMREG_CEXTERR)
3435 + CSR_READ(sc, WMREG_RLEC);
3436 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
3437
3438 if (sc->sc_flags & WM_F_HAS_MII)
3439 mii_tick(&sc->sc_mii);
3440 else
3441 wm_tbi_check_link(sc);
3442
3443 splx(s);
3444
3445 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3446 }
3447
3448 /*
3449 * wm_reset:
3450 *
3451 * Reset the i82542 chip.
3452 */
static void
wm_reset(struct wm_softc *sc)
{
	int phy_reset = 0;	/* set when the reset also resets the PHY */
	uint32_t reg, mask;
	int i;

	/*
	 * Allocate on-chip memory according to the MTU size.
	 * The Packet Buffer Allocation register must be written
	 * before the chip is reset.
	 */
	switch (sc->sc_type) {
	case WM_T_82547:
	case WM_T_82547_2:
		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
		    PBA_22K : PBA_30K;
		/* These chips need the Tx-FIFO stall workaround state. */
		sc->sc_txfifo_head = 0;
		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
		sc->sc_txfifo_size =
		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
		sc->sc_txfifo_stall = 0;
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82575:	/* XXX need special handing for jumbo frames */
	case WM_T_I350:
	case WM_T_80003:
		sc->sc_pba = PBA_32K;
		break;
	case WM_T_82580:
	case WM_T_82580ER:
		sc->sc_pba = PBA_35K;
		break;
	case WM_T_82576:
		sc->sc_pba = PBA_64K;
		break;
	case WM_T_82573:
		sc->sc_pba = PBA_12K;
		break;
	case WM_T_82574:
	case WM_T_82583:
		sc->sc_pba = PBA_20K;
		break;
	case WM_T_ICH8:
		sc->sc_pba = PBA_8K;
		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
		break;
	case WM_T_ICH9:
	case WM_T_ICH10:
		sc->sc_pba = PBA_10K;
		break;
	case WM_T_PCH:
	case WM_T_PCH2:
		sc->sc_pba = PBA_26K;
		break;
	default:
		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
		    PBA_40K : PBA_48K;
		break;
	}
	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);

	/*
	 * Prevent the PCI-E bus from sticking: disable GIO master
	 * accesses and wait (up to ~80ms) for pending ones to drain.
	 */
	if (sc->sc_flags & WM_F_PCIE) {
		int timeout = 800;

		sc->sc_ctrl |= CTRL_GIO_M_DIS;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

		while (timeout--) {
			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
				break;
			delay(100);
		}
	}

	/* Set the completion timeout for interface */
	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_I350))
		wm_set_pcie_completion_timeout(sc);

	/* Clear interrupt */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_RCTL, 0);
	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
	sc->sc_rctl &= ~RCTL_EN;

	/* XXX set_tbi_sbp_82543() */

	/* Let outstanding DMA settle before resetting. */
	delay(10*1000);

	/* Must acquire the MDIO ownership before MAC reset */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* Poll for software ownership of the MDIO interface. */
		i = 0;
		reg = CSR_READ(sc, WMREG_EXTCNFCTR)
		    | EXTCNFCTR_MDIO_SW_OWNERSHIP;
		do {
			CSR_WRITE(sc, WMREG_EXTCNFCTR,
			    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
			reg = CSR_READ(sc, WMREG_EXTCNFCTR);
			if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
				break;
			reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
			delay(2*1000);
			i++;
		} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
		break;
	default:
		break;
	}

	/*
	 * 82541 Errata 29? & 82547 Errata 28?
	 * See also the description about PHY_RST bit in CTRL register
	 * in 8254x_GBe_SDM.pdf.
	 */
	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
		CSR_WRITE(sc, WMREG_CTRL,
		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
		delay(5000);
	}

	/* Issue the MAC reset, using the per-chip required method. */
	switch (sc->sc_type) {
	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		/*
		 * On some chipsets, a reset through a memory-mapped write
		 * cycle can cause the chip to reset before completing the
		 * write cycle.  This causes major headache that can be
		 * avoided by issuing the reset via indirect register writes
		 * through I/O space.
		 *
		 * So, if we successfully mapped the I/O BAR at attach time,
		 * use that.  Otherwise, try our luck with a memory-mapped
		 * reset.
		 */
		if (sc->sc_flags & WM_F_IOH_VALID)
			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
		else
			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
		break;
	case WM_T_82545_3:
	case WM_T_82546_3:
		/* Use the shadow control register on these chips. */
		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
		break;
	case WM_T_80003:
		/* Reset must be done under the SW/FW PHY semaphore. */
		mask = swfwphysem[sc->sc_funcid];
		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
		wm_get_swfw_semaphore(sc, mask);
		CSR_WRITE(sc, WMREG_CTRL, reg);
		wm_put_swfw_semaphore(sc, mask);
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
		/* Reset the PHY too, unless firmware blocks PHY resets. */
		if (wm_check_reset_block(sc) == 0) {
			/*
			 * Gate automatic PHY configuration by hardware on
			 * managed 82579
			 */
			if ((sc->sc_type == WM_T_PCH2)
			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
				!= 0))
				wm_gate_hw_phy_config_ich8lan(sc, 1);


			reg |= CTRL_PHY_RESET;
			phy_reset = 1;
		}
		wm_get_swfwhw_semaphore(sc);
		CSR_WRITE(sc, WMREG_CTRL, reg);
		delay(20*1000);
		wm_put_swfwhw_semaphore(sc);
		break;
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82546:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_82583:
	case WM_T_I350:
	default:
		/* Everything else can safely use the documented method. */
		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
		break;
	}

	/* If we reset the PHY, wait for its configuration to complete. */
	if (phy_reset != 0)
		wm_get_cfg_done(sc);

	/* reload EEPROM */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		delay(10);
		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(2000);
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		delay(5*1000);
		/* XXX Disable HW ARPs on ASF enabled adapters */
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		delay(20000);
		/* XXX Disable HW ARPs on ASF enabled adapters */
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
			delay(10);
			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
		/* check EECD_EE_AUTORD */
		wm_get_auto_rd_done(sc);
		/*
		 * Phy configuration from NVM just starts after EECD_AUTO_RD
		 * is set.
		 */
		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
		    || (sc->sc_type == WM_T_82583))
			delay(25*1000);
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
		/* check EECD_EE_AUTORD */
		wm_get_auto_rd_done(sc);
		break;
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		wm_lan_init_done(sc);
		break;
	default:
		panic("%s: unknown type\n", __func__);
	}

	/* Check whether EEPROM is present or not */
	switch (sc->sc_type) {
	case WM_T_82575:
	case WM_T_82576:
#if 0 /* XXX */
	case WM_T_82580:
	case WM_T_82580ER:
#endif
	case WM_T_I350:
	case WM_T_ICH8:
	case WM_T_ICH9:
		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
			/* Not found */
			sc->sc_flags |= WM_F_EEPROM_INVALID;
			if ((sc->sc_type == WM_T_82575)
			    || (sc->sc_type == WM_T_82576)
			    || (sc->sc_type == WM_T_82580)
			    || (sc->sc_type == WM_T_82580ER)
			    || (sc->sc_type == WM_T_I350))
				wm_reset_init_script_82575(sc);
		}
		break;
	default:
		break;
	}

	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350)) {
		/* clear global device reset status bit */
		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
	}

	/* Clear any pending interrupt events. */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	reg = CSR_READ(sc, WMREG_ICR);	/* ICR read clears pending causes */

	/* reload sc_ctrl */
	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);

	if (sc->sc_type == WM_T_I350)
		wm_set_eee_i350(sc);

	/* dummy read from WUC */
	if (sc->sc_type == WM_T_PCH)
		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
	/*
	 * For PCH, this write will make sure that any noise will be detected
	 * as a CRC error and be dropped rather than show up as a bad packet
	 * to the DMA engine
	 */
	if (sc->sc_type == WM_T_PCH)
		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);

	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
		CSR_WRITE(sc, WMREG_WUC, 0);

	/* XXX need special handling for 82580 */
}
3789
3790 static void
3791 wm_set_vlan(struct wm_softc *sc)
3792 {
3793 /* Deal with VLAN enables. */
3794 if (VLAN_ATTACHED(&sc->sc_ethercom))
3795 sc->sc_ctrl |= CTRL_VME;
3796 else
3797 sc->sc_ctrl &= ~CTRL_VME;
3798
3799 /* Write the control registers. */
3800 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3801 }
3802
3803 /*
3804 * wm_init: [ifnet interface function]
3805 *
3806 * Initialize the interface. Must be called at splnet().
3807 */
3808 static int
3809 wm_init(struct ifnet *ifp)
3810 {
3811 struct wm_softc *sc = ifp->if_softc;
3812 struct wm_rxsoft *rxs;
3813 int i, j, trynum, error = 0;
3814 uint32_t reg;
3815
3816 /*
3817 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
3818 * There is a small but measurable benefit to avoiding the adjusment
3819 * of the descriptor so that the headers are aligned, for normal mtu,
3820 * on such platforms. One possibility is that the DMA itself is
3821 * slightly more efficient if the front of the entire packet (instead
3822 * of the front of the headers) is aligned.
3823 *
3824 * Note we must always set align_tweak to 0 if we are using
3825 * jumbo frames.
3826 */
3827 #ifdef __NO_STRICT_ALIGNMENT
3828 sc->sc_align_tweak = 0;
3829 #else
3830 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3831 sc->sc_align_tweak = 0;
3832 else
3833 sc->sc_align_tweak = 2;
3834 #endif /* __NO_STRICT_ALIGNMENT */
3835
3836 /* Cancel any pending I/O. */
3837 wm_stop(ifp, 0);
3838
3839 /* update statistics before reset */
3840 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3841 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3842
3843 /* Reset the chip to a known state. */
3844 wm_reset(sc);
3845
3846 switch (sc->sc_type) {
3847 case WM_T_82571:
3848 case WM_T_82572:
3849 case WM_T_82573:
3850 case WM_T_82574:
3851 case WM_T_82583:
3852 case WM_T_80003:
3853 case WM_T_ICH8:
3854 case WM_T_ICH9:
3855 case WM_T_ICH10:
3856 case WM_T_PCH:
3857 case WM_T_PCH2:
3858 if (wm_check_mng_mode(sc) != 0)
3859 wm_get_hw_control(sc);
3860 break;
3861 default:
3862 break;
3863 }
3864
3865 /* Reset the PHY. */
3866 if (sc->sc_flags & WM_F_HAS_MII)
3867 wm_gmii_reset(sc);
3868
3869 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3870 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3871 if ((sc->sc_type == WM_T_PCH) && (sc->sc_type == WM_T_PCH2))
3872 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3873
3874 /* Initialize the transmit descriptor ring. */
3875 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3876 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3877 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3878 sc->sc_txfree = WM_NTXDESC(sc);
3879 sc->sc_txnext = 0;
3880
3881 if (sc->sc_type < WM_T_82543) {
3882 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
3883 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
3884 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3885 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3886 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3887 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3888 } else {
3889 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
3890 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
3891 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3892 CSR_WRITE(sc, WMREG_TDH, 0);
3893 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3894 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3895
3896 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3897 /*
3898 * Don't write TDT before TCTL.EN is set.
3899 * See the document.
3900 */
3901 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3902 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3903 | TXDCTL_WTHRESH(0));
3904 else {
3905 CSR_WRITE(sc, WMREG_TDT, 0);
3906 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3907 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3908 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3909 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3910 }
3911 }
3912 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3913 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3914
3915 /* Initialize the transmit job descriptors. */
3916 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3917 sc->sc_txsoft[i].txs_mbuf = NULL;
3918 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3919 sc->sc_txsnext = 0;
3920 sc->sc_txsdirty = 0;
3921
3922 /*
3923 * Initialize the receive descriptor and receive job
3924 * descriptor rings.
3925 */
3926 if (sc->sc_type < WM_T_82543) {
3927 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3928 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3929 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3930 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3931 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3932 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3933
3934 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3935 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3936 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3937 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3938 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3939 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3940 } else {
3941 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3942 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3943 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3944 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3945 CSR_WRITE(sc, WMREG_EITR(0), 450);
3946 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
3947 panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES);
3948 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3949 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
3950 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3951 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3952 | RXDCTL_WTHRESH(1));
3953 } else {
3954 CSR_WRITE(sc, WMREG_RDH, 0);
3955 CSR_WRITE(sc, WMREG_RDT, 0);
3956 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3957 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3958 }
3959 }
3960 for (i = 0; i < WM_NRXDESC; i++) {
3961 rxs = &sc->sc_rxsoft[i];
3962 if (rxs->rxs_mbuf == NULL) {
3963 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3964 log(LOG_ERR, "%s: unable to allocate or map rx "
3965 "buffer %d, error = %d\n",
3966 device_xname(sc->sc_dev), i, error);
3967 /*
3968 * XXX Should attempt to run with fewer receive
3969 * XXX buffers instead of just failing.
3970 */
3971 wm_rxdrain(sc);
3972 goto out;
3973 }
3974 } else {
3975 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3976 WM_INIT_RXDESC(sc, i);
3977 /*
3978 * For 82575 and newer device, the RX descriptors
3979 * must be initialized after the setting of RCTL.EN in
3980 * wm_set_filter()
3981 */
3982 }
3983 }
3984 sc->sc_rxptr = 0;
3985 sc->sc_rxdiscard = 0;
3986 WM_RXCHAIN_RESET(sc);
3987
3988 /*
3989 * Clear out the VLAN table -- we don't use it (yet).
3990 */
3991 CSR_WRITE(sc, WMREG_VET, 0);
3992 if (sc->sc_type == WM_T_I350)
3993 trynum = 10; /* Due to hw errata */
3994 else
3995 trynum = 1;
3996 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3997 for (j = 0; j < trynum; j++)
3998 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3999
4000 /*
4001 * Set up flow-control parameters.
4002 *
4003 * XXX Values could probably stand some tuning.
4004 */
4005 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4006 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4007 && (sc->sc_type != WM_T_PCH2)) {
4008 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4009 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4010 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4011 }
4012
4013 sc->sc_fcrtl = FCRTL_DFLT;
4014 if (sc->sc_type < WM_T_82543) {
4015 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4016 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4017 } else {
4018 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4019 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4020 }
4021
4022 if (sc->sc_type == WM_T_80003)
4023 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4024 else
4025 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4026
4027 /* Writes the control register. */
4028 wm_set_vlan(sc);
4029
4030 if (sc->sc_flags & WM_F_HAS_MII) {
4031 int val;
4032
4033 switch (sc->sc_type) {
4034 case WM_T_80003:
4035 case WM_T_ICH8:
4036 case WM_T_ICH9:
4037 case WM_T_ICH10:
4038 case WM_T_PCH:
4039 case WM_T_PCH2:
4040 /*
4041 * Set the mac to wait the maximum time between each
4042 * iteration and increase the max iterations when
4043 * polling the phy; this fixes erroneous timeouts at
4044 * 10Mbps.
4045 */
4046 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4047 0xFFFF);
4048 val = wm_kmrn_readreg(sc,
4049 KUMCTRLSTA_OFFSET_INB_PARAM);
4050 val |= 0x3F;
4051 wm_kmrn_writereg(sc,
4052 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4053 break;
4054 default:
4055 break;
4056 }
4057
4058 if (sc->sc_type == WM_T_80003) {
4059 val = CSR_READ(sc, WMREG_CTRL_EXT);
4060 val &= ~CTRL_EXT_LINK_MODE_MASK;
4061 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4062
4063 /* Bypass RX and TX FIFO's */
4064 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4065 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4066 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4067 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4068 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4069 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4070 }
4071 }
4072 #if 0
4073 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4074 #endif
4075
4076 /*
4077 * Set up checksum offload parameters.
4078 */
4079 reg = CSR_READ(sc, WMREG_RXCSUM);
4080 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4081 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4082 reg |= RXCSUM_IPOFL;
4083 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4084 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4085 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4086 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4087 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4088
4089 /* Reset TBI's RXCFG count */
4090 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4091
4092 /*
4093 * Set up the interrupt registers.
4094 */
4095 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4096 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4097 ICR_RXO | ICR_RXT0;
4098 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4099 sc->sc_icr |= ICR_RXCFG;
4100 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4101
4102 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4103 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4104 || (sc->sc_type == WM_T_PCH2)) {
4105 reg = CSR_READ(sc, WMREG_KABGTXD);
4106 reg |= KABGTXD_BGSQLBIAS;
4107 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4108 }
4109
4110 /* Set up the inter-packet gap. */
4111 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4112
4113 if (sc->sc_type >= WM_T_82543) {
4114 /*
4115 * Set up the interrupt throttling register (units of 256ns)
4116 * Note that a footnote in Intel's documentation says this
4117 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4118 * or 10Mbit mode. Empirically, it appears to be the case
4119 * that that is also true for the 1024ns units of the other
4120 * interrupt-related timer registers -- so, really, we ought
4121 * to divide this value by 4 when the link speed is low.
4122 *
4123 * XXX implement this division at link speed change!
4124 */
4125
4126 /*
4127 * For N interrupts/sec, set this value to:
4128 * 1000000000 / (N * 256). Note that we set the
4129 * absolute and packet timer values to this value
4130 * divided by 4 to get "simple timer" behavior.
4131 */
4132
4133 sc->sc_itr = 1500; /* 2604 ints/sec */
4134 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4135 }
4136
4137 /* Set the VLAN ethernetype. */
4138 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4139
4140 /*
4141 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
4143 * we resolve the media type.
4144 */
4145 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4146 | TCTL_CT(TX_COLLISION_THRESHOLD)
4147 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4148 if (sc->sc_type >= WM_T_82571)
4149 sc->sc_tctl |= TCTL_MULR;
4150 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4151
4152 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4153 /*
4154 * Write TDT after TCTL.EN is set.
4155 * See the document.
4156 */
4157 CSR_WRITE(sc, WMREG_TDT, 0);
4158 }
4159
4160 if (sc->sc_type == WM_T_80003) {
4161 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4162 reg &= ~TCTL_EXT_GCEX_MASK;
4163 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4164 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4165 }
4166
4167 /* Set the media. */
4168 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4169 goto out;
4170
4171 /* Configure for OS presence */
4172 wm_init_manageability(sc);
4173
4174 /*
4175 * Set up the receive control register; we actually program
4176 * the register when we set the receive filter. Use multicast
4177 * address offset type 0.
4178 *
4179 * Only the i82544 has the ability to strip the incoming
4180 * CRC, so we don't enable that feature.
4181 */
4182 sc->sc_mchash_type = 0;
4183 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4184 | RCTL_MO(sc->sc_mchash_type);
4185
4186 /*
4187 * The I350 has a bug where it always strips the CRC whether
4188 * asked to or not. So ask for stripped CRC here and cope in rxeof
4189 */
4190 if (sc->sc_type == WM_T_I350)
4191 sc->sc_rctl |= RCTL_SECRC;
4192
4193 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4194 && (ifp->if_mtu > ETHERMTU)) {
4195 sc->sc_rctl |= RCTL_LPE;
4196 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4197 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4198 }
4199
4200 if (MCLBYTES == 2048) {
4201 sc->sc_rctl |= RCTL_2k;
4202 } else {
4203 if (sc->sc_type >= WM_T_82543) {
4204 switch (MCLBYTES) {
4205 case 4096:
4206 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4207 break;
4208 case 8192:
4209 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4210 break;
4211 case 16384:
4212 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4213 break;
4214 default:
4215 panic("wm_init: MCLBYTES %d unsupported",
4216 MCLBYTES);
4217 break;
4218 }
4219 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4220 }
4221
4222 /* Set the receive filter. */
4223 wm_set_filter(sc);
4224
4225 /* On 575 and later set RDT only if RX enabled */
4226 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4227 for (i = 0; i < WM_NRXDESC; i++)
4228 WM_INIT_RXDESC(sc, i);
4229
4230 /* Start the one second link check clock. */
4231 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4232
4233 /* ...all done! */
4234 ifp->if_flags |= IFF_RUNNING;
4235 ifp->if_flags &= ~IFF_OACTIVE;
4236
4237 out:
4238 sc->sc_if_flags = ifp->if_flags;
4239 if (error)
4240 log(LOG_ERR, "%s: interface not running\n",
4241 device_xname(sc->sc_dev));
4242 return error;
4243 }
4244
4245 /*
4246 * wm_rxdrain:
4247 *
4248 * Drain the receive queue.
4249 */
4250 static void
4251 wm_rxdrain(struct wm_softc *sc)
4252 {
4253 struct wm_rxsoft *rxs;
4254 int i;
4255
4256 for (i = 0; i < WM_NRXDESC; i++) {
4257 rxs = &sc->sc_rxsoft[i];
4258 if (rxs->rxs_mbuf != NULL) {
4259 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4260 m_freem(rxs->rxs_mbuf);
4261 rxs->rxs_mbuf = NULL;
4262 }
4263 }
4264 }
4265
4266 /*
4267 * wm_stop: [ifnet interface function]
4268 *
4269 * Stop transmission on the interface.
4270 */
static void
wm_stop(struct ifnet *ifp, int disable)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_txsoft *txs;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_ch);

	/* Stop the 82547 Tx FIFO stall check timer. */
	if (sc->sc_type == WM_T_82547)
		callout_stop(&sc->sc_txfifo_ch);

	if (sc->sc_flags & WM_F_HAS_MII) {
		/* Down the MII. */
		mii_down(&sc->sc_mii);
	} else {
#if 0
		/* Should we clear PHY's status properly? */
		wm_reset(sc);
#endif
	}

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_TCTL, 0);
	CSR_WRITE(sc, WMREG_RCTL, 0);
	/* Remember RX is disabled; wm_add_rxbuf() checks this on NEWQUEUE. */
	sc->sc_rctl &= ~RCTL_EN;

	/*
	 * Clear the interrupt mask to ensure the device cannot assert its
	 * interrupt line.
	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
	 * any currently pending or shared interrupt.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = 0;

	/* Release any queued transmit buffers. */
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/* Free receive buffers too, but only if the caller asked for it. */
	if (disable)
		wm_rxdrain(sc);

#if 0 /* notyet */
	if (sc->sc_type >= WM_T_82544)
		CSR_WRITE(sc, WMREG_WUC, 0);
#endif
}
4331
/*
 * wm_get_auto_rd_done:
 *
 *	Wait for the EEPROM auto-read to complete after a reset
 *	(chip types that report it via EECD_EE_AUTORD).
 */
void
wm_get_auto_rd_done(struct wm_softc *sc)
{
	int i;

	/* wait for eeprom to reload */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
		/* Poll EECD up to 10 times, 1ms apart. */
		for (i = 0; i < 10; i++) {
			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
				break;
			delay(1000);
		}
		if (i == 10) {
			/* Timed out; log and continue (non-fatal). */
			log(LOG_ERR, "%s: auto read from eeprom failed to "
			    "complete\n", device_xname(sc->sc_dev));
		}
		break;
	default:
		/* Other chip types need no explicit wait here. */
		break;
	}
}
4366
/*
 * wm_lan_init_done:
 *
 *	Wait for the LAN init to complete after a reset, then
 *	acknowledge the status bit (ICH10/PCH/PCH2 only).
 */
void
wm_lan_init_done(struct wm_softc *sc)
{
	uint32_t reg = 0;
	int i;

	/* wait for eeprom to reload */
	switch (sc->sc_type) {
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		/* Poll STATUS every 100us until the bit sets or we time out. */
		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
			reg = CSR_READ(sc, WMREG_STATUS);
			if ((reg & STATUS_LAN_INIT_DONE) != 0)
				break;
			delay(100);
		}
		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
			/* Timed out; log and fall through to the ack below. */
			log(LOG_ERR, "%s: %s: lan_init_done failed to "
			    "complete\n", device_xname(sc->sc_dev), __func__);
		}
		break;
	default:
		/* Calling this on any other chip type is a driver bug. */
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}

	/* Acknowledge: write back the last STATUS value with the bit clear. */
	reg &= ~STATUS_LAN_INIT_DONE;
	CSR_WRITE(sc, WMREG_STATUS, reg);
}
4398
/*
 * wm_get_cfg_done:
 *
 *	Wait for PHY/EEPROM configuration to complete after a reset.
 *	The wait strategy depends on the chip generation.
 */
void
wm_get_cfg_done(struct wm_softc *sc)
{
	int mask;
	uint32_t reg;
	int i;

	/* wait for eeprom to reload */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
		/* null */
		break;
	case WM_T_82543:
	case WM_T_82544:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* generic: fixed 10ms settle time */
		delay(10*1000);
		break;
	case WM_T_80003:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
		/* Poll the per-function CFGDONE bit in EEMNGCTL. */
		if (sc->sc_type == WM_T_82571) {
			/* Only 82571 shares port 0 */
			mask = EEMNGCTL_CFGDONE_0;
		} else
			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
				break;
			delay(1000);
		}
		if (i >= WM_PHY_CFG_TIMEOUT) {
			/* Timed out; only visible with GMII debugging on. */
			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
				device_xname(sc->sc_dev), __func__));
		}
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		/* On PCH and newer, first acknowledge a pending PHY reset
		 * assertion by clearing STATUS_PHYRA. */
		if (sc->sc_type >= WM_T_PCH) {
			reg = CSR_READ(sc, WMREG_STATUS);
			if ((reg & STATUS_PHYRA) != 0)
				CSR_WRITE(sc, WMREG_STATUS,
				    reg & ~STATUS_PHYRA);
		}
		delay(10*1000);
		break;
	default:
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}
}
4471
4472 /*
4473 * wm_acquire_eeprom:
4474 *
4475 * Perform the EEPROM handshake required on some chips.
4476 */
static int
wm_acquire_eeprom(struct wm_softc *sc)
{
	uint32_t reg;
	int x;
	int ret = 0;

	/* always success */
	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
		return 0;

	/* Take whichever software/firmware semaphore this chip requires. */
	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
		ret = wm_get_swfwhw_semaphore(sc);
	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
		/* this will also do wm_get_swsm_semaphore() if needed */
		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
		ret = wm_get_swsm_semaphore(sc);
	}

	if (ret) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return 1;
	}

	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
		reg = CSR_READ(sc, WMREG_EECD);

		/* Request EEPROM access. */
		reg |= EECD_EE_REQ;
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* ..and wait for it to be granted. */
		for (x = 0; x < 1000; x++) {
			reg = CSR_READ(sc, WMREG_EECD);
			if (reg & EECD_EE_GNT)
				break;
			delay(5);
		}
		if ((reg & EECD_EE_GNT) == 0) {
			/* Grant never arrived: withdraw the request and
			 * release whichever semaphore was taken above. */
			aprint_error_dev(sc->sc_dev,
			    "could not acquire EEPROM GNT\n");
			reg &= ~EECD_EE_REQ;
			CSR_WRITE(sc, WMREG_EECD, reg);
			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
				wm_put_swfwhw_semaphore(sc);
			if (sc->sc_flags & WM_F_SWFW_SYNC)
				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
				wm_put_swsm_semaphore(sc);
			return 1;
		}
	}

	return 0;
}
4534
4535 /*
4536 * wm_release_eeprom:
4537 *
4538 * Release the EEPROM mutex.
4539 */
static void
wm_release_eeprom(struct wm_softc *sc)
{
	uint32_t reg;

	/* always success */
	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
		return;

	/* Drop the hardware access request first... */
	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
		reg = CSR_READ(sc, WMREG_EECD);
		reg &= ~EECD_EE_REQ;
		CSR_WRITE(sc, WMREG_EECD, reg);
	}

	/* ...then release the semaphore taken by wm_acquire_eeprom(). */
	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
		wm_put_swfwhw_semaphore(sc);
	if (sc->sc_flags & WM_F_SWFW_SYNC)
		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
		wm_put_swsm_semaphore(sc);
}
4562
4563 /*
4564 * wm_eeprom_sendbits:
4565 *
4566 * Send a series of bits to the EEPROM.
4567 */
4568 static void
4569 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
4570 {
4571 uint32_t reg;
4572 int x;
4573
4574 reg = CSR_READ(sc, WMREG_EECD);
4575
4576 for (x = nbits; x > 0; x--) {
4577 if (bits & (1U << (x - 1)))
4578 reg |= EECD_DI;
4579 else
4580 reg &= ~EECD_DI;
4581 CSR_WRITE(sc, WMREG_EECD, reg);
4582 delay(2);
4583 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4584 delay(2);
4585 CSR_WRITE(sc, WMREG_EECD, reg);
4586 delay(2);
4587 }
4588 }
4589
4590 /*
4591 * wm_eeprom_recvbits:
4592 *
4593 * Receive a series of bits from the EEPROM.
4594 */
4595 static void
4596 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
4597 {
4598 uint32_t reg, val;
4599 int x;
4600
4601 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
4602
4603 val = 0;
4604 for (x = nbits; x > 0; x--) {
4605 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4606 delay(2);
4607 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
4608 val |= (1U << (x - 1));
4609 CSR_WRITE(sc, WMREG_EECD, reg);
4610 delay(2);
4611 }
4612 *valp = val;
4613 }
4614
4615 /*
4616 * wm_read_eeprom_uwire:
4617 *
4618 * Read a word from the EEPROM using the MicroWire protocol.
4619 */
static int
wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;

	/* Each word is a separate CS-framed READ transaction. */
	for (i = 0; i < wordcnt; i++) {
		/* Clear SK and DI. */
		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* Set CHIP SELECT. */
		reg |= EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);

		/* Shift in the READ command. */
		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);

		/* Shift in address. */
		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);

		/* Shift out the data. */
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = val & 0xffff;

		/* Clear CHIP SELECT. */
		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}

	return 0;
}
4654
4655 /*
4656 * wm_spi_eeprom_ready:
4657 *
4658 * Wait for a SPI EEPROM to be ready for commands.
4659 */
4660 static int
4661 wm_spi_eeprom_ready(struct wm_softc *sc)
4662 {
4663 uint32_t val;
4664 int usec;
4665
4666 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
4667 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
4668 wm_eeprom_recvbits(sc, &val, 8);
4669 if ((val & SPI_SR_RDY) == 0)
4670 break;
4671 }
4672 if (usec >= SPI_MAX_RETRIES) {
4673 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
4674 return 1;
4675 }
4676 return 0;
4677 }
4678
4679 /*
4680 * wm_read_eeprom_spi:
4681 *
 *	Read a word from the EEPROM using the SPI protocol.
4683 */
static int
wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;
	uint8_t opc;

	/* Clear SK and CS. */
	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	if (wm_spi_eeprom_ready(sc))
		return 1;

	/* Toggle CS to flush commands. */
	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
	delay(2);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	/* Small 8-bit-address parts carry address bit 8 in the opcode. */
	opc = SPI_OPC_READ;
	if (sc->sc_ee_addrbits == 8 && word >= 128)
		opc |= SPI_OPC_A8;

	wm_eeprom_sendbits(sc, opc, 8);
	/* SPI addressing is by byte; the word index is shifted left one. */
	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);

	/* Sequential read; byte-swap each 16-bit value to host order. */
	for (i = 0; i < wordcnt; i++) {
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
	}

	/* Raise CS and clear SK. */
	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	return 0;
}
4724
4725 #define EEPROM_CHECKSUM 0xBABA
4726 #define EEPROM_SIZE 0x0040
4727
4728 /*
4729 * wm_validate_eeprom_checksum
4730 *
4731 * The checksum is defined as the sum of the first 64 (16 bit) words.
4732 */
4733 static int
4734 wm_validate_eeprom_checksum(struct wm_softc *sc)
4735 {
4736 uint16_t checksum;
4737 uint16_t eeprom_data;
4738 int i;
4739
4740 checksum = 0;
4741
4742 for (i = 0; i < EEPROM_SIZE; i++) {
4743 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
4744 return 1;
4745 checksum += eeprom_data;
4746 }
4747
4748 if (checksum != (uint16_t) EEPROM_CHECKSUM)
4749 return 1;
4750
4751 return 0;
4752 }
4753
4754 /*
4755 * wm_read_eeprom:
4756 *
4757 * Read data from the serial EEPROM.
4758 */
static int
wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	int rv;

	/* Attach time determined the EEPROM content is unusable. */
	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		return 1;

	if (wm_acquire_eeprom(sc))
		return 1;

	/* Dispatch on access method: ICH flash, EERD register,
	 * SPI bit-bang, or MicroWire bit-bang. */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2))
		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
	else if (sc->sc_flags & WM_F_EEPROM_SPI)
		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
	else
		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);

	wm_release_eeprom(sc);
	return rv;
}
4784
4785 static int
4786 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
4787 uint16_t *data)
4788 {
4789 int i, eerd = 0;
4790 int error = 0;
4791
4792 for (i = 0; i < wordcnt; i++) {
4793 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
4794
4795 CSR_WRITE(sc, WMREG_EERD, eerd);
4796 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
4797 if (error != 0)
4798 break;
4799
4800 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
4801 }
4802
4803 return error;
4804 }
4805
4806 static int
4807 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
4808 {
4809 uint32_t attempts = 100000;
4810 uint32_t i, reg = 0;
4811 int32_t done = -1;
4812
4813 for (i = 0; i < attempts; i++) {
4814 reg = CSR_READ(sc, rw);
4815
4816 if (reg & EERD_DONE) {
4817 done = 0;
4818 break;
4819 }
4820 delay(5);
4821 }
4822
4823 return done;
4824 }
4825
/*
 * wm_check_alt_mac_addr:
 *
 *	Check whether the EEPROM holds a usable alternative MAC
 *	address.  Returns 0 when one is present, -1 otherwise.
 */
static int
wm_check_alt_mac_addr(struct wm_softc *sc)
{
	uint16_t myea[ETHER_ADDR_LEN / 2];
	uint16_t offset = EEPROM_OFF_MACADDR;

	/* Try to read alternative MAC address pointer */
	if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
		return -1;

	/* Check pointer */
	if (offset == 0xffff)
		return -1;

	/*
	 * Check whether alternative MAC address is valid or not.
	 * Some cards have non 0xffff pointer but those don't use
	 * alternative MAC address in reality.
	 *
	 * Check whether the broadcast bit is set or not.
	 */
	if (wm_read_eeprom(sc, offset, 1, myea) == 0)
		if (((myea[0] & 0xff) & 0x01) == 0)
			return 0; /* found! */

	/* not found */
	return -1;
}
4854
/*
 * wm_read_mac_addr:
 *
 *	Read the Ethernet address for this function out of the
 *	EEPROM into enaddr[].  Returns 0 on success, -1 on failure.
 */
static int
wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
{
	uint16_t myea[ETHER_ADDR_LEN / 2];
	uint16_t offset = EEPROM_OFF_MACADDR;
	int do_invert = 0;

	/* Pick the EEPROM offset of the address for this PCI function. */
	switch (sc->sc_type) {
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
		/* These parts keep a separate address per LAN port. */
		switch (sc->sc_funcid) {
		case 0:
			/* default value (== EEPROM_OFF_MACADDR) */
			break;
		case 1:
			offset = EEPROM_OFF_LAN1;
			break;
		case 2:
			offset = EEPROM_OFF_LAN2;
			break;
		case 3:
			offset = EEPROM_OFF_LAN3;
			break;
		default:
			goto bad;
			/* NOTREACHED */
			break;
		}
		break;
	case WM_T_82571:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_80003:
		/* Prefer the alternative MAC address block if present. */
		if (wm_check_alt_mac_addr(sc) != 0) {
			/* reset the offset to LAN0 */
			offset = EEPROM_OFF_MACADDR;
			if ((sc->sc_funcid & 0x01) == 1)
				do_invert = 1;
			goto do_read;
		}
		/* The alternative block is indexed per function too;
		 * note "offset" currently holds the pointer value read
		 * by wm_check_alt_mac_addr()'s successful probe path. */
		switch (sc->sc_funcid) {
		case 0:
			/*
			 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
			 * itself.
			 */
			break;
		case 1:
			offset += EEPROM_OFF_MACADDR_LAN1;
			break;
		case 2:
			offset += EEPROM_OFF_MACADDR_LAN2;
			break;
		case 3:
			offset += EEPROM_OFF_MACADDR_LAN3;
			break;
		default:
			goto bad;
			/* NOTREACHED */
			break;
		}
		break;
	default:
		/* Single shared address; odd functions flip the LSB below. */
		if ((sc->sc_funcid & 0x01) == 1)
			do_invert = 1;
		break;
	}

 do_read:
	if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
		myea) != 0) {
		goto bad;
	}

	/* Unpack the three little-endian EEPROM words into six bytes. */
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of some dual port cards.
	 */
	if (do_invert != 0)
		enaddr[5] ^= 1;

	return 0;

 bad:
	aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");

	return -1;
}
4951
4952 /*
4953 * wm_add_rxbuf:
4954 *
 *	Add a receive buffer to the indicated descriptor.
4956 */
static int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	/* Allocate an mbuf header plus a cluster for the frame data. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/* Unmap whatever mbuf previously occupied this slot. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	/* Offer the whole cluster to the chip. */
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		/* XXX XXX XXX */
		aprint_error_dev(sc->sc_dev,
		    "unable to load rx DMA map %d, error = %d\n",
		    idx, error);
		panic("wm_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* On NEWQUEUE chips, only touch the descriptor while RX is
	 * enabled (sc_rctl tracks RCTL_EN). */
	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		if ((sc->sc_rctl & RCTL_EN) != 0)
			WM_INIT_RXDESC(sc, idx);
	} else
		WM_INIT_RXDESC(sc, idx);

	return 0;
}
5001
5002 /*
5003 * wm_set_ral:
5004 *
 *	Set an entry in the receive address list.
5006 */
5007 static void
5008 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
5009 {
5010 uint32_t ral_lo, ral_hi;
5011
5012 if (enaddr != NULL) {
5013 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
5014 (enaddr[3] << 24);
5015 ral_hi = enaddr[4] | (enaddr[5] << 8);
5016 ral_hi |= RAL_AV;
5017 } else {
5018 ral_lo = 0;
5019 ral_hi = 0;
5020 }
5021
5022 if (sc->sc_type >= WM_T_82544) {
5023 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
5024 ral_lo);
5025 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
5026 ral_hi);
5027 } else {
5028 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
5029 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
5030 }
5031 }
5032
5033 /*
5034 * wm_mchash:
5035 *
5036 * Compute the hash of the multicast address for the 4096-bit
5037 * multicast filter.
5038 */
5039 static uint32_t
5040 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
5041 {
5042 static const int lo_shift[4] = { 4, 3, 2, 0 };
5043 static const int hi_shift[4] = { 4, 5, 6, 8 };
5044 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
5045 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
5046 uint32_t hash;
5047
5048 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5049 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5050 || (sc->sc_type == WM_T_PCH2)) {
5051 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
5052 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5053 return (hash & 0x3ff);
5054 }
5055 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5056 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5057
5058 return (hash & 0xfff);
5059 }
5060
5061 /*
5062 * wm_set_filter:
5063 *
5064 * Set up the receive filter.
5065 */
static void
wm_set_filter(struct wm_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_addr_t mta_reg;
	uint32_t hash, reg, bit;
	int i, size;

	/* The multicast table moved on 82544 and later. */
	if (sc->sc_type >= WM_T_82544)
		mta_reg = WMREG_CORDOVA_MTA;
	else
		mta_reg = WMREG_MTA;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2))
		size = WM_ICH8_RAL_TABSIZE;
	else
		size = WM_RAL_TABSIZE;
	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
	for (i = 1; i < size; i++)
		wm_set_ral(sc, NULL, i);

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2))
		size = WM_ICH8_MC_TABSIZE;
	else
		size = WM_MC_TABSIZE;
	/* Clear out the multicast table. */
	for (i = 0; i < size; i++)
		CSR_WRITE(sc, mta_reg + (i << 2), 0);

	/* Walk the multicast list, setting one hash bit per address. */
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = wm_mchash(sc, enm->enm_addrlo);

		/* Upper bits select the 32-bit table word, low 5 the bit. */
		reg = (hash >> 5);
		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
		    || (sc->sc_type == WM_T_PCH2))
			reg &= 0x1f;
		else
			reg &= 0x7f;
		bit = hash & 0x1f;

		hash = CSR_READ(sc, mta_reg + (reg << 2));
		hash |= 1U << bit;

		/* XXX Hardware bug?? */
		/* On 82544, writing certain MTA words clobbers the previous
		 * word, so re-write it afterwards. */
		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
}
5164
5165 /*
5166 * wm_tbi_mediainit:
5167 *
5168 * Initialize media for use on 1000BASE-X devices.
5169 */
static void
wm_tbi_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	const char *sep = "";

	/* Older chips use a different default inter-packet gap. */
	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	/* Autonegotiation retry interval, in seconds. */
	sc->sc_tbi_anegticks = 5;

	/* Initialize our media structures */
	sc->sc_mii.mii_ifp = ifp;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

/* Add one media type and print it on the attach line. */
#define	ADD(ss, mm, dd)							\
do {									\
	aprint_normal("%s%s", sep, ss);					\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	aprint_normal_dev(sc->sc_dev, "");
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	aprint_normal("\n");

#undef ADD

	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
}
5218
5219 /*
5220 * wm_tbi_mediastatus: [ifmedia interface function]
5221 *
5222 * Get the current interface media status on a 1000BASE-X device.
5223 */
static void
wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct wm_softc *sc = ifp->if_softc;
	uint32_t ctrl, status;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	status = CSR_READ(sc, WMREG_STATUS);
	if ((status & STATUS_LU) == 0) {
		/* No link. */
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* 1000BASE-X is the only media these devices support. */
	ifmr->ifm_active |= IFM_1000_SX;
	/* NOTE(review): STATUS is re-read here instead of reusing the
	 * value cached above; presumably equivalent -- confirm before
	 * simplifying. */
	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
		ifmr->ifm_active |= IFM_FDX;
	/* Report resolved flow-control state from CTRL. */
	ctrl = CSR_READ(sc, WMREG_CTRL);
	if (ctrl & CTRL_RFCE)
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
	if (ctrl & CTRL_TFCE)
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
}
5249
5250 /*
5251 * wm_tbi_mediachange: [ifmedia interface function]
5252 *
5253 * Set hardware to newly-selected media on a 1000BASE-X device.
5254 */
static int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	/* Build the TXCW (transmit configuration word) for the new media. */
	sc->sc_txcw = 0;
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
		sc->sc_txcw |= TXCW_ANE;
	} else {
		/*
		 * If autonegotiation is turned off, force link up and turn on
		 * full duplex
		 */
		sc->sc_txcw &= ~TXCW_ANE;
		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(1000);
	}

	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
		    device_xname(sc->sc_dev),sc->sc_txcw));
	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	/* Sample the Loss Of Signal input pin. */
	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));

	/*
	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
	 * optics detect a signal, 0 if they don't.
	 */
	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
		/* Have signal; wait for the link to come up. */

		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/*
			 * Reset the link, and let autonegotiation do its thing
			 */
			sc->sc_ctrl |= CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			delay(1000);
			sc->sc_ctrl &= ~CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			delay(1000);
		}

		/* Poll for link-up, 10ms per attempt. */
		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
			    device_xname(sc->sc_dev),i));

		status = CSR_READ(sc, WMREG_STATUS);
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
			device_xname(sc->sc_dev),status, STATUS_LU));
		if (status & STATUS_LU) {
			/* Link is up. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    device_xname(sc->sc_dev),
			    (status & STATUS_FD) ? "FDX" : "HDX"));

			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */
			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			/* Adjust collision distance and XON to the
			 * negotiated duplex/flow-control state. */
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
				      WMREG_OLD_FCRTL : WMREG_FCRTL,
				      sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			if (i == WM_LINKUP_TIMEOUT)
				wm_check_for_link(sc);
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    device_xname(sc->sc_dev)));
		sc->sc_tbi_linkup = 0;
	}

	/* Reflect the new state on the link LED. */
	wm_tbi_set_linkled(sc);

	return 0;
}
5367
5368 /*
5369 * wm_tbi_set_linkled:
5370 *
5371 * Update the link LED on 1000BASE-X devices.
5372 */
5373 static void
5374 wm_tbi_set_linkled(struct wm_softc *sc)
5375 {
5376
5377 if (sc->sc_tbi_linkup)
5378 sc->sc_ctrl |= CTRL_SWDPIN(0);
5379 else
5380 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
5381
5382 /* 82540 or newer devices are active low */
5383 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
5384
5385 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5386 }
5387
5388 /*
5389 * wm_tbi_check_link:
5390 *
5391 * Check the link on 1000BASE-X devices.
5392 */
static void
wm_tbi_check_link(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw, ctrl, status;

	status = CSR_READ(sc, WMREG_STATUS);

	/*
	 * NOTE(review): rxcw and ctrl are read here but their values are
	 * never used below; the reads may be kept for a register-read side
	 * effect -- confirm before removing.
	 */
	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);

	/* set link status */
	if ((status & STATUS_LU) == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
		sc->sc_tbi_linkup = 0;
	} else if (sc->sc_tbi_linkup == 0) {
		/* Link transitioned from down to up. */
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		sc->sc_tbi_linkup = 1;
	}

	/* Interface is up but we have no link: try to recover. */
	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
	    && ((status & STATUS_LU) == 0)) {
		sc->sc_tbi_linkup = 0;
		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
			/* RXCFG storm! */
			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
			    sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
			/* Reinitialize and restart the interface. */
			wm_init(ifp);
			wm_start(ifp);
		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/* If the timer expired, retry autonegotiation */
			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
				sc->sc_tbi_ticks = 0;
				/*
				 * Reset the link, and let autonegotiation do
				 * its thing
				 */
				sc->sc_ctrl |= CTRL_LRST;
				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
				delay(1000);
				sc->sc_ctrl &= ~CTRL_LRST;
				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
				delay(1000);
				/* Toggle ANE off/on to restart negotiation. */
				CSR_WRITE(sc, WMREG_TXCW,
				    sc->sc_txcw & ~TXCW_ANE);
				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
			}
		}
	}

	/* Reflect the (possibly new) link state on the link LED. */
	wm_tbi_set_linkled(sc);
}
5450
5451 /*
5452 * wm_gmii_reset:
5453 *
5454 * Reset the PHY.
5455 */
static void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;
	int rv;

	/*
	 * Get the PHY semaphore first; which mechanism serializes
	 * PHY access depends on the chip generation.
	 */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* XXX should get sw semaphore, too */
		rv = wm_get_swsm_semaphore(sc);
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
	case WM_T_80003:
		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		rv = wm_get_swfwhw_semaphore(sc);
		break;
	default:
		/* No semaphore needed on older chips. */
		rv = 0;
		break;
	}
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}

	/* Perform the chip-specific PHY reset sequence. */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
		/* null */
		break;
	case WM_T_82543:
		/*
		 * With 82543, we need to force speed and duplex on the MAC
		 * equal to what the PHY speed and duplex configuration is.
		 * In addition, we need to perform a hardware reset on the PHY
		 * to take it out of reset.
		 */
		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

		/* The PHY reset pin is active-low. */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);

		/* Assert reset (pin low), then release it. */
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10*1000);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(150);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
		delay(20*1000);	/* XXX extra delay to get PHY ID? */
		break;
	case WM_T_82544:	/* reset 10000us */
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
	case WM_T_82571:	/* reset 100us */
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
	case WM_T_82583:
	case WM_T_80003:
		/* generic reset: pulse CTRL_PHY_RESET */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(20000);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(20000);

		if ((sc->sc_type == WM_T_82541)
		    || (sc->sc_type == WM_T_82541_2)
		    || (sc->sc_type == WM_T_82547)
		    || (sc->sc_type == WM_T_82547_2)) {
			/* workaround for igp are done in igp_reset() */
			/* XXX add code to set LED after phy reset */
		}
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		/* generic reset (shorter delays than above) */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(100);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(150);
		break;
	default:
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}

	/* release PHY semaphore */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* XXX should put sw semaphore, too */
		wm_put_swsm_semaphore(sc);
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
	case WM_T_80003:
		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		wm_put_swfwhw_semaphore(sc);
		break;
	default:
		/* nothing to do */
		rv = 0;
		break;
	}

	/* Wait for the configuration cycle to complete. */
	wm_get_cfg_done(sc);

	/* extra, chip-specific post-reset setup */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541_2:
	case WM_T_82547_2:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
	case WM_T_82583:
	case WM_T_80003:
		/* null */
		break;
	case WM_T_82541:
	case WM_T_82547:
		/* XXX Configure actively LED after PHY reset */
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		/* Allow time for h/w to get to a quiescent state after reset */
		delay(10*1000);

		if (sc->sc_type == WM_T_PCH)
			wm_hv_phy_workaround_ich8lan(sc);

		if (sc->sc_type == WM_T_PCH2)
			wm_lv_phy_workaround_ich8lan(sc);

		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
			/*
			 * dummy read to clear the phy wakeup bit after lcd
			 * reset
			 */
			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
		}

		/*
		 * XXX Configure the LCD with the extended configuration region
		 * in NVM
		 */

		/* Configure the LCD with the OEM bits in NVM */
		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
			/*
			 * Disable LPLU.
			 * XXX It seems that 82567 has LPLU, too.
			 */
			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
			reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
			reg |= HV_OEM_BITS_ANEGNOW;
			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
		}
		break;
	default:
		panic("%s: unknown type\n", __func__);
		break;
	}
}
5689
5690 /*
5691 * wm_gmii_mediainit:
5692 *
5693 * Initialize media for use on 1000BASE-T devices.
5694 */
static void
wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* We have MII. */
	sc->sc_flags |= WM_F_HAS_MII;

	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
	else
		sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 * XXXbouyer - I'm not sure this is right for the 80003,
	 * the em driver only sets CTRL_SLU here - but it seems to work.
	 */
	sc->sc_ctrl |= CTRL_SLU;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	/* Select MDIO access routines based on the PHY behind the MAC. */
	switch (prodid) {
	case PCI_PRODUCT_INTEL_PCH_M_LM:
	case PCI_PRODUCT_INTEL_PCH_M_LC:
		/* 82577 */
		sc->sc_phytype = WMPHY_82577;
		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
		break;
	case PCI_PRODUCT_INTEL_PCH_D_DM:
	case PCI_PRODUCT_INTEL_PCH_D_DC:
		/* 82578 */
		sc->sc_phytype = WMPHY_82578;
		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
		break;
	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
	case PCI_PRODUCT_INTEL_PCH2_LV_V:
		/* 82579 */
		sc->sc_phytype = WMPHY_82579;
		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
		break;
	case PCI_PRODUCT_INTEL_82801I_BM:
	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
		/* 82567 */
		sc->sc_phytype = WMPHY_BM;
		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
		break;
	default:
		/* Fall back on the access method implied by the MAC type. */
		if ((sc->sc_flags & WM_F_SGMII) != 0) {
			sc->sc_mii.mii_readreg = wm_sgmii_readreg;
			sc->sc_mii.mii_writereg = wm_sgmii_writereg;
		} else if (sc->sc_type >= WM_T_80003) {
			sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
			sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
		} else if (sc->sc_type >= WM_T_82544) {
			sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
			sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
		} else {
			sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
			sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
		}
		break;
	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	wm_gmii_reset(sc);

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350)) {
		if ((sc->sc_flags & WM_F_SGMII) == 0) {
			/* Attach only one port */
			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
			    MII_OFFSET_ANY, MIIF_DOPAUSE);
		} else {
			int i;
			uint32_t ctrl_ext;

			/* Power on sgmii phy if it is disabled */
			ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
			CSR_WRITE(sc, WMREG_CTRL_EXT,
			    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
			CSR_WRITE_FLUSH(sc);
			delay(300*1000); /* XXX too long */

			/* from 1 to 8 */
			for (i = 1; i < 8; i++)
				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
				    i, MII_OFFSET_ANY, MIIF_DOPAUSE);

			/* restore previous sfp cage power state */
			CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
		}
	} else {
		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
	}

	/* PCH2 may need slow MDIO mode before the PHY can be found. */
	if ((sc->sc_type == WM_T_PCH2) &&
	    (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL)) {
		wm_set_mdio_slow_mode_hv(sc);
		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
	}

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* if failed, retry with *_bm_* */
		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;

		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
	}
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* Still no PHY: register a "none" medium so ifmedia works. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
		sc->sc_phytype = WMPHY_NONE;
	} else {
		/* Check PHY type */
		uint32_t model;
		struct mii_softc *child;

		child = LIST_FIRST(&sc->sc_mii.mii_phys);
		if (device_is_a(child->mii_dev, "igphy")) {
			struct igphy_softc *isc = (struct igphy_softc *)child;

			model = isc->sc_mii.mii_mpd_model;
			if (model == MII_MODEL_yyINTEL_I82566)
				sc->sc_phytype = WMPHY_IGP_3;
		}

		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
	}
}
5844
5845 /*
5846 * wm_gmii_mediastatus: [ifmedia interface function]
5847 *
5848 * Get the current interface media status on a 1000BASE-T device.
5849 */
5850 static void
5851 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5852 {
5853 struct wm_softc *sc = ifp->if_softc;
5854
5855 ether_mediastatus(ifp, ifmr);
5856 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
5857 | sc->sc_flowflags;
5858 }
5859
5860 /*
5861 * wm_gmii_mediachange: [ifmedia interface function]
5862 *
5863 * Set hardware to newly-selected media on a 1000BASE-T device.
5864 */
5865 static int
5866 wm_gmii_mediachange(struct ifnet *ifp)
5867 {
5868 struct wm_softc *sc = ifp->if_softc;
5869 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5870 int rc;
5871
5872 if ((ifp->if_flags & IFF_UP) == 0)
5873 return 0;
5874
5875 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5876 sc->sc_ctrl |= CTRL_SLU;
5877 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
5878 || (sc->sc_type > WM_T_82543)) {
5879 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
5880 } else {
5881 sc->sc_ctrl &= ~CTRL_ASDE;
5882 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5883 if (ife->ifm_media & IFM_FDX)
5884 sc->sc_ctrl |= CTRL_FD;
5885 switch (IFM_SUBTYPE(ife->ifm_media)) {
5886 case IFM_10_T:
5887 sc->sc_ctrl |= CTRL_SPEED_10;
5888 break;
5889 case IFM_100_TX:
5890 sc->sc_ctrl |= CTRL_SPEED_100;
5891 break;
5892 case IFM_1000_T:
5893 sc->sc_ctrl |= CTRL_SPEED_1000;
5894 break;
5895 default:
5896 panic("wm_gmii_mediachange: bad media 0x%x",
5897 ife->ifm_media);
5898 }
5899 }
5900 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5901 if (sc->sc_type <= WM_T_82543)
5902 wm_gmii_reset(sc);
5903
5904 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
5905 return 0;
5906 return rc;
5907 }
5908
/* Software-controlled pins used to bit-bang MII on the 82543. */
#define	MDI_IO		CTRL_SWDPIN(2)
#define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
#define	MDI_CLK		CTRL_SWDPIN(3)

/*
 * i82543_mii_sendbits:
 *
 *	Clock the top `nbits' bits of `data' out to the PHY, MSB first,
 *	by toggling the software-controlled MDIO/MDC pins in CTRL.
 */
static void
i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	/* Configure both data and clock pins as outputs. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		/* Present the data bit, then pulse the clock high and low. */
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}

/*
 * i82543_mii_recvbits:
 *
 *	Clock a 16-bit value in from the PHY, MSB first, after a
 *	turnaround clock cycle; finishes with one idle clock cycle.
 */
static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	/* Data pin becomes an input; keep driving the clock. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	/* Turnaround cycle before the PHY starts driving data. */
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	for (i = 0; i < 16; i++) {
		data <<= 1;
		/* Raise the clock, sample the data pin, lower the clock. */
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	/* One trailing idle clock cycle. */
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return data;
}

#undef MDI_IO
#undef MDI_DIR
#undef MDI_CLK
5973
5974 /*
5975 * wm_gmii_i82543_readreg: [mii interface function]
5976 *
5977 * Read a PHY register on the GMII (i82543 version).
5978 */
5979 static int
5980 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
5981 {
5982 struct wm_softc *sc = device_private(self);
5983 int rv;
5984
5985 i82543_mii_sendbits(sc, 0xffffffffU, 32);
5986 i82543_mii_sendbits(sc, reg | (phy << 5) |
5987 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
5988 rv = i82543_mii_recvbits(sc) & 0xffff;
5989
5990 DPRINTF(WM_DEBUG_GMII,
5991 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
5992 device_xname(sc->sc_dev), phy, reg, rv));
5993
5994 return rv;
5995 }
5996
5997 /*
5998 * wm_gmii_i82543_writereg: [mii interface function]
5999 *
6000 * Write a PHY register on the GMII (i82543 version).
6001 */
6002 static void
6003 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6004 {
6005 struct wm_softc *sc = device_private(self);
6006
6007 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6008 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6009 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6010 (MII_COMMAND_START << 30), 32);
6011 }
6012
6013 /*
6014 * wm_gmii_i82544_readreg: [mii interface function]
6015 *
6016 * Read a PHY register on the GMII.
6017 */
6018 static int
6019 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6020 {
6021 struct wm_softc *sc = device_private(self);
6022 uint32_t mdic = 0;
6023 int i, rv;
6024
6025 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6026 MDIC_REGADD(reg));
6027
6028 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6029 mdic = CSR_READ(sc, WMREG_MDIC);
6030 if (mdic & MDIC_READY)
6031 break;
6032 delay(50);
6033 }
6034
6035 if ((mdic & MDIC_READY) == 0) {
6036 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6037 device_xname(sc->sc_dev), phy, reg);
6038 rv = 0;
6039 } else if (mdic & MDIC_E) {
6040 #if 0 /* This is normal if no PHY is present. */
6041 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6042 device_xname(sc->sc_dev), phy, reg);
6043 #endif
6044 rv = 0;
6045 } else {
6046 rv = MDIC_DATA(mdic);
6047 if (rv == 0xffff)
6048 rv = 0;
6049 }
6050
6051 return rv;
6052 }
6053
6054 /*
6055 * wm_gmii_i82544_writereg: [mii interface function]
6056 *
6057 * Write a PHY register on the GMII.
6058 */
6059 static void
6060 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6061 {
6062 struct wm_softc *sc = device_private(self);
6063 uint32_t mdic = 0;
6064 int i;
6065
6066 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6067 MDIC_REGADD(reg) | MDIC_DATA(val));
6068
6069 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6070 mdic = CSR_READ(sc, WMREG_MDIC);
6071 if (mdic & MDIC_READY)
6072 break;
6073 delay(50);
6074 }
6075
6076 if ((mdic & MDIC_READY) == 0)
6077 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6078 device_xname(sc->sc_dev), phy, reg);
6079 else if (mdic & MDIC_E)
6080 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6081 device_xname(sc->sc_dev), phy, reg);
6082 }
6083
6084 /*
6085 * wm_gmii_i80003_readreg: [mii interface function]
6086 *
6087 * Read a PHY register on the kumeran
6088 * This could be handled by the PHY layer if we didn't have to lock the
6089 * ressource ...
6090 */
static int
wm_gmii_i80003_readreg(device_t self, int phy, int reg)
{
	struct wm_softc *sc = device_private(self);
	int sem;
	int rv;

	if (phy != 1) /* only one PHY on kumeran bus */
		return 0;

	/* Serialize PHY access against firmware. */
	sem = swfwphysem[sc->sc_funcid];
	if (wm_get_swfw_semaphore(sc, sem)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return 0;
	}

	/*
	 * Select the register page first; high register addresses use
	 * the alternate page-select register.
	 */
	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
		    reg >> GG82563_PAGE_SHIFT);
	} else {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
		    reg >> GG82563_PAGE_SHIFT);
	}
	/* Wait more 200us for a bug of the ready bit in the MDIC register */
	delay(200);
	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
	delay(200);

	wm_put_swfw_semaphore(sc, sem);
	return rv;
}
6123
6124 /*
6125 * wm_gmii_i80003_writereg: [mii interface function]
6126 *
6127 * Write a PHY register on the kumeran.
6128 * This could be handled by the PHY layer if we didn't have to lock the
6129 * ressource ...
6130 */
static void
wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
{
	struct wm_softc *sc = device_private(self);
	int sem;

	if (phy != 1) /* only one PHY on kumeran bus */
		return;

	/* Serialize PHY access against firmware. */
	sem = swfwphysem[sc->sc_funcid];
	if (wm_get_swfw_semaphore(sc, sem)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}

	/*
	 * Select the register page first; high register addresses use
	 * the alternate page-select register.
	 */
	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
		    reg >> GG82563_PAGE_SHIFT);
	} else {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
		    reg >> GG82563_PAGE_SHIFT);
	}
	/* Wait more 200us for a bug of the ready bit in the MDIC register */
	delay(200);
	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
	delay(200);

	wm_put_swfw_semaphore(sc, sem);
}
6161
6162 /*
6163 * wm_gmii_bm_readreg: [mii interface function]
6164 *
6165 * Read a PHY register on the kumeran
6166 * This could be handled by the PHY layer if we didn't have to lock the
6167 * ressource ...
6168 */
static int
wm_gmii_bm_readreg(device_t self, int phy, int reg)
{
	struct wm_softc *sc = device_private(self);
	int sem;
	int rv;

	/* Serialize PHY access against firmware. */
	sem = swfwphysem[sc->sc_funcid];
	if (wm_get_swfw_semaphore(sc, sem)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return 0;
	}

	/* Select the page for registers beyond the multi-page range. */
	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
		if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, 0x1f,
			    reg);
		else
			wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);

	}

	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
	wm_put_swfw_semaphore(sc, sem);
	return rv;
}
6197
6198 /*
6199 * wm_gmii_bm_writereg: [mii interface function]
6200 *
6201 * Write a PHY register on the kumeran.
6202 * This could be handled by the PHY layer if we didn't have to lock the
6203 * ressource ...
6204 */
static void
wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
{
	struct wm_softc *sc = device_private(self);
	int sem;

	/* Serialize PHY access against firmware. */
	sem = swfwphysem[sc->sc_funcid];
	if (wm_get_swfw_semaphore(sc, sem)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}

	/* Select the page for registers beyond the multi-page range. */
	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
		if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, 0x1f,
			    reg);
		else
			wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);

	}

	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
	wm_put_swfw_semaphore(sc, sem);
}
6231
/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Access a PHY wakeup register (page 800) on BM/HV PHYs.  This
 *	requires switching to page 769 to enable wakeup-register access,
 *	then to page 800 for the actual read (rd != 0) or write (rd == 0)
 *	of *val, then restoring the original page-769 enable bits.
 */
static void
wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
{
	struct wm_softc *sc = device_private(self);
	uint16_t regnum = BM_PHY_REG_NUM(offset);
	uint16_t wuce;

	/* XXX Gig must be disabled for MDIO accesses to page 800 */
	if (sc->sc_type == WM_T_PCH) {
		/* XXX e1000 driver do nothing... why? */
	}

	/* Set page 769 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);

	/* Enable wakeup-register access, saving the old enable bits. */
	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);

	wuce &= ~BM_WUC_HOST_WU_BIT;
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
	    wuce | BM_WUC_ENABLE_BIT);

	/* Select page 800 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);

	/* Write page 800 */
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);

	if (rd)
		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
	else
		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);

	/* Set page 769 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);

	/* Restore the saved wakeup-enable bits. */
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
}
6272
6273 /*
6274 * wm_gmii_hv_readreg: [mii interface function]
6275 *
6276 * Read a PHY register on the kumeran
6277 * This could be handled by the PHY layer if we didn't have to lock the
6278 * ressource ...
6279 */
6280 static int
6281 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6282 {
6283 struct wm_softc *sc = device_private(self);
6284 uint16_t page = BM_PHY_REG_PAGE(reg);
6285 uint16_t regnum = BM_PHY_REG_NUM(reg);
6286 uint16_t val;
6287 int rv;
6288
6289 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6290 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6291 __func__);
6292 return 0;
6293 }
6294
6295 /* XXX Workaround failure in MDIO access while cable is disconnected */
6296 if (sc->sc_phytype == WMPHY_82577) {
6297 /* XXX must write */
6298 }
6299
6300 /* Page 800 works differently than the rest so it has its own func */
6301 if (page == BM_WUC_PAGE) {
6302 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
6303 return val;
6304 }
6305
6306 /*
6307 * Lower than page 768 works differently than the rest so it has its
6308 * own func
6309 */
6310 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6311 printf("gmii_hv_readreg!!!\n");
6312 return 0;
6313 }
6314
6315 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6316 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6317 page << BME1000_PAGE_SHIFT);
6318 }
6319
6320 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6321 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6322 return rv;
6323 }
6324
6325 /*
6326 * wm_gmii_hv_writereg: [mii interface function]
6327 *
6328 * Write a PHY register on the kumeran.
6329 * This could be handled by the PHY layer if we didn't have to lock the
6330 * ressource ...
6331 */
6332 static void
6333 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6334 {
6335 struct wm_softc *sc = device_private(self);
6336 uint16_t page = BM_PHY_REG_PAGE(reg);
6337 uint16_t regnum = BM_PHY_REG_NUM(reg);
6338
6339 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6340 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6341 __func__);
6342 return;
6343 }
6344
6345 /* XXX Workaround failure in MDIO access while cable is disconnected */
6346
6347 /* Page 800 works differently than the rest so it has its own func */
6348 if (page == BM_WUC_PAGE) {
6349 uint16_t tmp;
6350
6351 tmp = val;
6352 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
6353 return;
6354 }
6355
6356 /*
6357 * Lower than page 768 works differently than the rest so it has its
6358 * own func
6359 */
6360 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6361 printf("gmii_hv_writereg!!!\n");
6362 return;
6363 }
6364
6365 /*
6366 * XXX Workaround MDIO accesses being disabled after entering IEEE
6367 * Power Down (whenever bit 11 of the PHY control register is set)
6368 */
6369
6370 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6371 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6372 page << BME1000_PAGE_SHIFT);
6373 }
6374
6375 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6376 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6377 }
6378
6379 /*
6380 * wm_gmii_hv_readreg: [mii interface function]
6381 *
6382 * Read a PHY register on the kumeran
6383 * This could be handled by the PHY layer if we didn't have to lock the
6384 * ressource ...
6385 */
static int
wm_sgmii_readreg(device_t self, int phy, int reg)
{
	struct wm_softc *sc = device_private(self);
	uint32_t i2ccmd;
	int i, rv;

	/* Serialize PHY access against firmware. */
	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return 0;
	}

	/* Issue the read through the I2C command register. */
	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
	    | I2CCMD_OPCODE_READ;
	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);

	/* Poll the ready bit */
	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
		delay(50);
		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
		if (i2ccmd & I2CCMD_READY)
			break;
	}
	if ((i2ccmd & I2CCMD_READY) == 0)
		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
	if ((i2ccmd & I2CCMD_ERROR) != 0)
		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");

	/* The I2C interface returns the data byte-swapped. */
	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);

	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
	return rv;
}
6421
6422 /*
6423 * wm_gmii_hv_writereg: [mii interface function]
6424 *
6425 * Write a PHY register on the kumeran.
6426 * This could be handled by the PHY layer if we didn't have to lock the
6427 * ressource ...
6428 */
6429 static void
6430 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
6431 {
6432 struct wm_softc *sc = device_private(self);
6433 uint32_t i2ccmd;
6434 int i;
6435
6436 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6437 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6438 __func__);
6439 return;
6440 }
6441
6442 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6443 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6444 | I2CCMD_OPCODE_WRITE;
6445 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6446
6447 /* Poll the ready bit */
6448 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6449 delay(50);
6450 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6451 if (i2ccmd & I2CCMD_READY)
6452 break;
6453 }
6454 if ((i2ccmd & I2CCMD_READY) == 0)
6455 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
6456 if ((i2ccmd & I2CCMD_ERROR) != 0)
6457 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6458
6459 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6460 }
6461
6462 /*
6463 * wm_gmii_statchg: [mii interface function]
6464 *
6465 * Callback from MII layer when media changes.
6466 */
static void
wm_gmii_statchg(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	/* Start from a clean slate: clear flow-control enables, the
	 * collision-distance field and the XON enable bit. */
	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
	sc->sc_fcrtl &= ~FCRTL_XONE;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		/* Latch the newly negotiated pause flags and strip them
		 * from the active media word. */
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	/* Translate the negotiated pause flags into MAC register bits. */
	if (sc->sc_flowflags & IFM_FLOW) {
		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
			sc->sc_ctrl |= CTRL_TFCE;
			sc->sc_fcrtl |= FCRTL_XONE;
		}
		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
			sc->sc_ctrl |= CTRL_RFCE;
	}

	/* Collision distance depends on the duplex setting. */
	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
	}

	/* Push the updated shadow values out to the hardware. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
						 : WMREG_FCRTL, sc->sc_fcrtl);
	if (sc->sc_type == WM_T_80003) {
		/* 80003 also needs Kumeran half-duplex control and IPG
		 * tuned per link speed. */
		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
		case IFM_1000_T:
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
			break;
		default:
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
			break;
		}
		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
	}
}
6525
6526 /*
6527 * wm_kmrn_readreg:
6528 *
6529 * Read a kumeran register
6530 */
6531 static int
6532 wm_kmrn_readreg(struct wm_softc *sc, int reg)
6533 {
6534 int rv;
6535
6536 if (sc->sc_flags == WM_F_SWFW_SYNC) {
6537 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6538 aprint_error_dev(sc->sc_dev,
6539 "%s: failed to get semaphore\n", __func__);
6540 return 0;
6541 }
6542 } else if (sc->sc_flags == WM_F_SWFWHW_SYNC) {
6543 if (wm_get_swfwhw_semaphore(sc)) {
6544 aprint_error_dev(sc->sc_dev,
6545 "%s: failed to get semaphore\n", __func__);
6546 return 0;
6547 }
6548 }
6549
6550 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6551 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6552 KUMCTRLSTA_REN);
6553 delay(2);
6554
6555 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
6556
6557 if (sc->sc_flags == WM_F_SWFW_SYNC)
6558 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
6559 else if (sc->sc_flags == WM_F_SWFWHW_SYNC)
6560 wm_put_swfwhw_semaphore(sc);
6561
6562 return rv;
6563 }
6564
6565 /*
6566 * wm_kmrn_writereg:
6567 *
6568 * Write a kumeran register
6569 */
6570 static void
6571 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
6572 {
6573
6574 if (sc->sc_flags == WM_F_SWFW_SYNC) {
6575 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6576 aprint_error_dev(sc->sc_dev,
6577 "%s: failed to get semaphore\n", __func__);
6578 return;
6579 }
6580 } else if (sc->sc_flags == WM_F_SWFWHW_SYNC) {
6581 if (wm_get_swfwhw_semaphore(sc)) {
6582 aprint_error_dev(sc->sc_dev,
6583 "%s: failed to get semaphore\n", __func__);
6584 return;
6585 }
6586 }
6587
6588 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6589 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6590 (val & KUMCTRLSTA_MASK));
6591
6592 if (sc->sc_flags == WM_F_SWFW_SYNC)
6593 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
6594 else if (sc->sc_flags == WM_F_SWFWHW_SYNC)
6595 wm_put_swfwhw_semaphore(sc);
6596 }
6597
6598 static int
6599 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
6600 {
6601 uint32_t eecd = 0;
6602
6603 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
6604 || sc->sc_type == WM_T_82583) {
6605 eecd = CSR_READ(sc, WMREG_EECD);
6606
6607 /* Isolate bits 15 & 16 */
6608 eecd = ((eecd >> 15) & 0x03);
6609
6610 /* If both bits are set, device is Flash type */
6611 if (eecd == 0x03)
6612 return 0;
6613 }
6614 return 1;
6615 }
6616
6617 static int
6618 wm_get_swsm_semaphore(struct wm_softc *sc)
6619 {
6620 int32_t timeout;
6621 uint32_t swsm;
6622
6623 /* Get the FW semaphore. */
6624 timeout = 1000 + 1; /* XXX */
6625 while (timeout) {
6626 swsm = CSR_READ(sc, WMREG_SWSM);
6627 swsm |= SWSM_SWESMBI;
6628 CSR_WRITE(sc, WMREG_SWSM, swsm);
6629 /* if we managed to set the bit we got the semaphore. */
6630 swsm = CSR_READ(sc, WMREG_SWSM);
6631 if (swsm & SWSM_SWESMBI)
6632 break;
6633
6634 delay(50);
6635 timeout--;
6636 }
6637
6638 if (timeout == 0) {
6639 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
6640 /* Release semaphores */
6641 wm_put_swsm_semaphore(sc);
6642 return 1;
6643 }
6644 return 0;
6645 }
6646
6647 static void
6648 wm_put_swsm_semaphore(struct wm_softc *sc)
6649 {
6650 uint32_t swsm;
6651
6652 swsm = CSR_READ(sc, WMREG_SWSM);
6653 swsm &= ~(SWSM_SWESMBI);
6654 CSR_WRITE(sc, WMREG_SWSM, swsm);
6655 }
6656
6657 static int
6658 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6659 {
6660 uint32_t swfw_sync;
6661 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
6662 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
6663 int timeout = 200;
6664
6665 for (timeout = 0; timeout < 200; timeout++) {
6666 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6667 if (wm_get_swsm_semaphore(sc)) {
6668 aprint_error_dev(sc->sc_dev,
6669 "%s: failed to get semaphore\n",
6670 __func__);
6671 return 1;
6672 }
6673 }
6674 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6675 if ((swfw_sync & (swmask | fwmask)) == 0) {
6676 swfw_sync |= swmask;
6677 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6678 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6679 wm_put_swsm_semaphore(sc);
6680 return 0;
6681 }
6682 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6683 wm_put_swsm_semaphore(sc);
6684 delay(5000);
6685 }
6686 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
6687 device_xname(sc->sc_dev), mask, swfw_sync);
6688 return 1;
6689 }
6690
6691 static void
6692 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6693 {
6694 uint32_t swfw_sync;
6695
6696 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6697 while (wm_get_swsm_semaphore(sc) != 0)
6698 continue;
6699 }
6700 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6701 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
6702 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6703 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6704 wm_put_swsm_semaphore(sc);
6705 }
6706
6707 static int
6708 wm_get_swfwhw_semaphore(struct wm_softc *sc)
6709 {
6710 uint32_t ext_ctrl;
6711 int timeout = 200;
6712
6713 for (timeout = 0; timeout < 200; timeout++) {
6714 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6715 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
6716 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6717
6718 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6719 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
6720 return 0;
6721 delay(5000);
6722 }
6723 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
6724 device_xname(sc->sc_dev), ext_ctrl);
6725 return 1;
6726 }
6727
6728 static void
6729 wm_put_swfwhw_semaphore(struct wm_softc *sc)
6730 {
6731 uint32_t ext_ctrl;
6732 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6733 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
6734 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6735 }
6736
/*
 * Determine which flash bank (0 or 1) holds the valid NVM image.
 * Returns 0 and stores the bank number in *bank on success, or -1 if
 * neither bank carries a valid signature.
 */
static int
wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
{
	/* Byte address of the high byte of the NVM signature word. */
	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
	/* Distance, in bytes, from bank 0 to bank 1. */
	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);

	if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
		/* Value of bit 22 corresponds to the flash bank we're on. */
		*bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
	} else {
		uint8_t bank_high_byte;
		/*
		 * Probe each bank's signature byte; a 0b10xxxxxx pattern
		 * marks the valid bank.
		 * NOTE(review): the wm_read_ich8_byte() return value is not
		 * checked; on failure it stores 0, which fails the signature
		 * test below -- confirm this fallback is intended.
		 */
		wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
		if ((bank_high_byte & 0xc0) == 0x80)
			*bank = 0;
		else {
			wm_read_ich8_byte(sc, act_offset + bank1_offset,
			    &bank_high_byte);
			if ((bank_high_byte & 0xc0) == 0x80)
				*bank = 1;
			else {
				/* Neither bank has a valid signature. */
				aprint_error_dev(sc->sc_dev,
				    "EEPROM not present\n");
				return -1;
			}
		}
	}

	return 0;
}
6766
6767 /******************************************************************************
6768 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
6769 * register.
6770 *
6771 * sc - Struct containing variables accessed by shared code
6772 * offset - offset of word in the EEPROM to read
6773 * data - word read from the EEPROM
6774 * words - number of words to read
6775 *****************************************************************************/
6776 static int
6777 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
6778 {
6779 int32_t error = 0;
6780 uint32_t flash_bank = 0;
6781 uint32_t act_offset = 0;
6782 uint32_t bank_offset = 0;
6783 uint16_t word = 0;
6784 uint16_t i = 0;
6785
6786 /* We need to know which is the valid flash bank. In the event
6787 * that we didn't allocate eeprom_shadow_ram, we may not be
6788 * managing flash_bank. So it cannot be trusted and needs
6789 * to be updated with each read.
6790 */
6791 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
6792 if (error) {
6793 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
6794 __func__);
6795 return error;
6796 }
6797
6798 /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
6799 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
6800
6801 error = wm_get_swfwhw_semaphore(sc);
6802 if (error) {
6803 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6804 __func__);
6805 return error;
6806 }
6807
6808 for (i = 0; i < words; i++) {
6809 /* The NVM part needs a byte offset, hence * 2 */
6810 act_offset = bank_offset + ((offset + i) * 2);
6811 error = wm_read_ich8_word(sc, act_offset, &word);
6812 if (error) {
6813 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
6814 __func__);
6815 break;
6816 }
6817 data[i] = word;
6818 }
6819
6820 wm_put_swfwhw_semaphore(sc);
6821 return error;
6822 }
6823
6824 /******************************************************************************
6825 * This function does initial flash setup so that a new read/write/erase cycle
6826 * can be started.
6827 *
6828 * sc - The pointer to the hw structure
6829 ****************************************************************************/
/*
 * Prepare the ICH8 flash controller for a new read cycle.
 * Returns 0 when a new cycle may be started, non-zero on failure.
 */
static int32_t
wm_ich8_cycle_init(struct wm_softc *sc)
{
	uint16_t hsfsts;
	int32_t error = 1;	/* assume failure until proven otherwise */
	int32_t i = 0;

	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);

	/* May be check the Flash Des Valid bit in Hw status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
		/* Flash descriptor invalid: no flash cycle is possible. */
		return error;
	}

	/* Clear FCERR in Hw status by writing 1 */
	/* Clear DAEL in Hw status by writing a 1 */
	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;

	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);

	/*
	 * Either we should have a hardware SPI cycle in progress bit to check
	 * against, in order to start a new cycle or FDONE bit should be
	 * changed in the hardware so that it is 1 after harware reset, which
	 * can then be used as an indication whether a cycle is in progress or
	 * has been completed .. we should also have some software semaphore
	 * mechanism to guard FDONE or the cycle in progress bit so that two
	 * threads access to those bits can be sequentiallized or a way so that
	 * 2 threads dont start the cycle at the same time
	 */

	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
		/*
		 * There is no cycle running at present, so we can start a
		 * cycle
		 */

		/* Begin by setting Flash Cycle Done. */
		hsfsts |= HSFSTS_DONE;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		error = 0;
	} else {
		/*
		 * otherwise poll for sometime so the current cycle has a
		 * chance to end before giving up.
		 */
		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
				error = 0;
				break;
			}
			delay(1);
		}
		if (error == 0) {
			/*
			 * Successful in waiting for previous cycle to timeout,
			 * now set the Flash Cycle Done.
			 */
			hsfsts |= HSFSTS_DONE;
			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		}
	}
	return error;
}
6895
6896 /******************************************************************************
6897 * This function starts a flash cycle and waits for its completion
6898 *
6899 * sc - The pointer to the hw structure
6900 ****************************************************************************/
6901 static int32_t
6902 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
6903 {
6904 uint16_t hsflctl;
6905 uint16_t hsfsts;
6906 int32_t error = 1;
6907 uint32_t i = 0;
6908
6909 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
6910 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6911 hsflctl |= HSFCTL_GO;
6912 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6913
6914 /* wait till FDONE bit is set to 1 */
6915 do {
6916 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6917 if (hsfsts & HSFSTS_DONE)
6918 break;
6919 delay(1);
6920 i++;
6921 } while (i < timeout);
6922 if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
6923 error = 0;
6924
6925 return error;
6926 }
6927
6928 /******************************************************************************
6929 * Reads a byte or word from the NVM using the ICH8 flash access registers.
6930 *
6931 * sc - The pointer to the hw structure
6932 * index - The index of the byte or word to read.
6933 * size - Size of data to read, 1=byte 2=word
6934 * data - Pointer to the word to store the value read.
6935 *****************************************************************************/
static int32_t
wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
    uint32_t size, uint16_t* data)
{
	uint16_t hsfsts;
	uint16_t hsflctl;
	uint32_t flash_linear_address;
	uint32_t flash_data = 0;
	int32_t error = 1;	/* non-zero until a cycle succeeds */
	int32_t count = 0;

	/* Reject bad sizes, a NULL output pointer, or an out-of-range index. */
	if (size < 1 || size > 2 || data == 0x0 ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
		return error;

	/* Translate the NVM index into a linear flash address. */
	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
	    sc->sc_ich8_flash_base;

	do {
		delay(1);
		/* Steps */
		error = wm_ich8_cycle_init(sc);
		if (error)
			break;

		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
		    & HSFCTL_BCOUNT_MASK;
		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);

		/*
		 * Write the last 24 bits of index into Flash Linear address
		 * field in Flash Address
		 */
		/* TODO: TBD maybe check the index against the size of flash */

		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);

		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);

		/*
		 * Check if FCERR is set to 1, if set to 1, clear it and try
		 * the whole sequence a few more times, else read in (shift in)
		 * the Flash Data0, the order is least significant byte first
		 * msb to lsb
		 */
		if (error == 0) {
			/* Cycle completed cleanly; extract the result. */
			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
			if (size == 1)
				*data = (uint8_t)(flash_data & 0x000000FF);
			else if (size == 2)
				*data = (uint16_t)(flash_data & 0x0000FFFF);
			break;
		} else {
			/*
			 * If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
			if (hsfsts & HSFSTS_ERR) {
				/* Repeat for some time before giving up. */
				continue;
			} else if ((hsfsts & HSFSTS_DONE) == 0)
				break;
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return error;
}
7009
7010 /******************************************************************************
7011 * Reads a single byte from the NVM using the ICH8 flash access registers.
7012 *
7013 * sc - pointer to wm_hw structure
7014 * index - The index of the byte to read.
7015 * data - Pointer to a byte to store the value read.
7016 *****************************************************************************/
7017 static int32_t
7018 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
7019 {
7020 int32_t status;
7021 uint16_t word = 0;
7022
7023 status = wm_read_ich8_data(sc, index, 1, &word);
7024 if (status == 0)
7025 *data = (uint8_t)word;
7026 else
7027 *data = 0;
7028
7029 return status;
7030 }
7031
7032 /******************************************************************************
7033 * Reads a word from the NVM using the ICH8 flash access registers.
7034 *
7035 * sc - pointer to wm_hw structure
7036 * index - The starting byte index of the word to read.
7037 * data - Pointer to a word to store the value read.
7038 *****************************************************************************/
7039 static int32_t
7040 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
7041 {
7042 int32_t status;
7043
7044 status = wm_read_ich8_data(sc, index, 2, data);
7045 return status;
7046 }
7047
7048 static int
7049 wm_check_mng_mode(struct wm_softc *sc)
7050 {
7051 int rv;
7052
7053 switch (sc->sc_type) {
7054 case WM_T_ICH8:
7055 case WM_T_ICH9:
7056 case WM_T_ICH10:
7057 case WM_T_PCH:
7058 case WM_T_PCH2:
7059 rv = wm_check_mng_mode_ich8lan(sc);
7060 break;
7061 case WM_T_82574:
7062 case WM_T_82583:
7063 rv = wm_check_mng_mode_82574(sc);
7064 break;
7065 case WM_T_82571:
7066 case WM_T_82572:
7067 case WM_T_82573:
7068 case WM_T_80003:
7069 rv = wm_check_mng_mode_generic(sc);
7070 break;
7071 default:
7072 /* noting to do */
7073 rv = 0;
7074 break;
7075 }
7076
7077 return rv;
7078 }
7079
7080 static int
7081 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
7082 {
7083 uint32_t fwsm;
7084
7085 fwsm = CSR_READ(sc, WMREG_FWSM);
7086
7087 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
7088 return 1;
7089
7090 return 0;
7091 }
7092
7093 static int
7094 wm_check_mng_mode_82574(struct wm_softc *sc)
7095 {
7096 uint16_t data;
7097
7098 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
7099
7100 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
7101 return 1;
7102
7103 return 0;
7104 }
7105
7106 static int
7107 wm_check_mng_mode_generic(struct wm_softc *sc)
7108 {
7109 uint32_t fwsm;
7110
7111 fwsm = CSR_READ(sc, WMREG_FWSM);
7112
7113 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
7114 return 1;
7115
7116 return 0;
7117 }
7118
7119 static int
7120 wm_enable_mng_pass_thru(struct wm_softc *sc)
7121 {
7122 uint32_t manc, fwsm, factps;
7123
7124 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
7125 return 0;
7126
7127 manc = CSR_READ(sc, WMREG_MANC);
7128
7129 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
7130 device_xname(sc->sc_dev), manc));
7131 if (((manc & MANC_RECV_TCO_EN) == 0)
7132 || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
7133 return 0;
7134
7135 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
7136 fwsm = CSR_READ(sc, WMREG_FWSM);
7137 factps = CSR_READ(sc, WMREG_FACTPS);
7138 if (((factps & FACTPS_MNGCG) == 0)
7139 && ((fwsm & FWSM_MODE_MASK)
7140 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
7141 return 1;
7142 } else if (((manc & MANC_SMBUS_EN) != 0)
7143 && ((manc & MANC_ASF_EN) == 0))
7144 return 1;
7145
7146 return 0;
7147 }
7148
7149 static int
7150 wm_check_reset_block(struct wm_softc *sc)
7151 {
7152 uint32_t reg;
7153
7154 switch (sc->sc_type) {
7155 case WM_T_ICH8:
7156 case WM_T_ICH9:
7157 case WM_T_ICH10:
7158 case WM_T_PCH:
7159 case WM_T_PCH2:
7160 reg = CSR_READ(sc, WMREG_FWSM);
7161 if ((reg & FWSM_RSPCIPHY) != 0)
7162 return 0;
7163 else
7164 return -1;
7165 break;
7166 case WM_T_82571:
7167 case WM_T_82572:
7168 case WM_T_82573:
7169 case WM_T_82574:
7170 case WM_T_82583:
7171 case WM_T_80003:
7172 reg = CSR_READ(sc, WMREG_MANC);
7173 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
7174 return -1;
7175 else
7176 return 0;
7177 break;
7178 default:
7179 /* no problem */
7180 break;
7181 }
7182
7183 return 0;
7184 }
7185
7186 static void
7187 wm_get_hw_control(struct wm_softc *sc)
7188 {
7189 uint32_t reg;
7190
7191 switch (sc->sc_type) {
7192 case WM_T_82573:
7193 reg = CSR_READ(sc, WMREG_SWSM);
7194 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
7195 break;
7196 case WM_T_82571:
7197 case WM_T_82572:
7198 case WM_T_82574:
7199 case WM_T_82583:
7200 case WM_T_80003:
7201 case WM_T_ICH8:
7202 case WM_T_ICH9:
7203 case WM_T_ICH10:
7204 case WM_T_PCH:
7205 case WM_T_PCH2:
7206 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7207 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
7208 break;
7209 default:
7210 break;
7211 }
7212 }
7213
7214 static void
7215 wm_release_hw_control(struct wm_softc *sc)
7216 {
7217 uint32_t reg;
7218
7219 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
7220 return;
7221
7222 if (sc->sc_type == WM_T_82573) {
7223 reg = CSR_READ(sc, WMREG_SWSM);
7224 reg &= ~SWSM_DRV_LOAD;
7225 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
7226 } else {
7227 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7228 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
7229 }
7230 }
7231
7232 /* XXX Currently TBI only */
static int
wm_check_for_link(struct wm_softc *sc)
{
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw;
	uint32_t ctrl;
	uint32_t status;
	uint32_t sig;

	/* Snapshot the link-related registers. */
	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	/* Signal-detect pin polarity differs on the older chips. */
	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;

	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
		device_xname(sc->sc_dev), __func__,
		((ctrl & CTRL_SWDPIN(1)) == sig),
		((status & STATUS_LU) != 0),
		((rxcw & RXCW_C) != 0)
		    ));

	/*
	 * SWDPIN   LU  RXCW
	 *      0    0     0
	 *      0    0     1	(should not happen)
	 *      0    1     0	(should not happen)
	 *      0    1     1	(should not happen)
	 *      1    0     0	Disable autonego and force linkup
	 *      1    0     1	got /C/ but not linkup yet
	 *      1    1     0	(linkup)
	 *      1    1     1	If IFM_AUTO, back to autonego
	 *
	 */
	if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((status & STATUS_LU) == 0)
	    && ((rxcw & RXCW_C) == 0)) {
		/*
		 * Signal is present but there is no link and no /C/
		 * ordered sets: the partner is not autonegotiating, so
		 * force the link up ourselves.
		 */
		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
			__func__));
		sc->sc_tbi_linkup = 0;
		/* Disable auto-negotiation in the TXCW register */
		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));

		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: CTRL was updated TFCE and RFCE automatically,
		 * so we should update sc->sc_ctrl
		 */
		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	} else if (((status & STATUS_LU) != 0)
	    && ((rxcw & RXCW_C) != 0)
	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
		/* Partner is sending /C/ again: resume autonegotiation. */
		sc->sc_tbi_linkup = 1;
		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
			__func__));
		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((rxcw & RXCW_C) != 0)) {
		/* Receiving /C/ but link not up yet; keep waiting. */
		DPRINTF(WM_DEBUG_LINK, ("/C/"));
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
			status));
	}

	return 0;
}
7302
7303 /* Work-around for 82566 Kumeran PCS lock loss */
7304 static void
7305 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
7306 {
7307 int miistatus, active, i;
7308 int reg;
7309
7310 miistatus = sc->sc_mii.mii_media_status;
7311
7312 /* If the link is not up, do nothing */
7313 if ((miistatus & IFM_ACTIVE) != 0)
7314 return;
7315
7316 active = sc->sc_mii.mii_media_active;
7317
7318 /* Nothing to do if the link is other than 1Gbps */
7319 if (IFM_SUBTYPE(active) != IFM_1000_T)
7320 return;
7321
7322 for (i = 0; i < 10; i++) {
7323 /* read twice */
7324 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7325 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7326 if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) != 0)
7327 goto out; /* GOOD! */
7328
7329 /* Reset the PHY */
7330 wm_gmii_reset(sc);
7331 delay(5*1000);
7332 }
7333
7334 /* Disable GigE link negotiation */
7335 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7336 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7337 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7338
7339 /*
7340 * Call gig speed drop workaround on Gig disable before accessing
7341 * any PHY registers.
7342 */
7343 wm_gig_downshift_workaround_ich8lan(sc);
7344
7345 out:
7346 return;
7347 }
7348
7349 /* WOL from S5 stops working */
7350 static void
7351 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
7352 {
7353 uint16_t kmrn_reg;
7354
7355 /* Only for igp3 */
7356 if (sc->sc_phytype == WMPHY_IGP_3) {
7357 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
7358 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
7359 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7360 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
7361 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7362 }
7363 }
7364
7365 #ifdef WM_WOL
/* Power down workaround on D3 */
static void
wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Try the sequence at most twice; retry after a PHY reset. */
	for (i = 0; i < 2; i++) {
		/* Disable link */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (sc->sc_type == WM_T_ICH8)
			wm_gig_downshift_workaround_ich8lan(sc);

		/* Write VR power-down enable */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);

		/* Read it back and test */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		/* Done if the shutdown mode stuck, or this was the retry. */
		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
			break;

		/* Issue PHY reset and repeat at most one more time */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	}
}
7402 #endif /* WM_WOL */
7403
7404 /*
7405 * Workaround for pch's PHYs
7406 * XXX should be moved to new PHY driver?
7407 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{
	/* The 82577 needs slow MDIO cycles before any other PHY access. */
	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */

	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		/* PCH rev. < 3 */
		if (sc->sc_rev < 3) {
			/* XXX 6 bit shift? Why? Is it page2? */
			/* NOTE(review): magic register/value pairs taken
			 * from the vendor workaround; meaning unverified. */
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
			    0x66c0);
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
			    0xffff);
		}

		/* XXX phy rev. < 2 */
	}

	/* Select page 0 */

	/* XXX acquire semaphore */
	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	/* XXX release semaphore */

	/*
	 * Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}
7444
static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	/* The 82579 (LV) only requires the slow-MDIO mode workaround. */
	wm_set_mdio_slow_mode_hv(sc);
}
7451
7452 static void
7453 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
7454 {
7455 int k1_enable = sc->sc_nvm_k1_enabled;
7456
7457 /* XXX acquire semaphore */
7458
7459 if (link) {
7460 k1_enable = 0;
7461
7462 /* Link stall fix for link up */
7463 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
7464 } else {
7465 /* Link stall fix for link down */
7466 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
7467 }
7468
7469 wm_configure_k1_ich8lan(sc, k1_enable);
7470
7471 /* XXX release semaphore */
7472 }
7473
7474 static void
7475 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
7476 {
7477 uint32_t reg;
7478
7479 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
7480 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
7481 reg | HV_KMRN_MDIO_SLOW);
7482 }
7483
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	/* Update the K1 (low-power idle) enable bit in the Kumeran config. */
	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

	/*
	 * Briefly force the MAC speed with speed-bypass so the K1 change
	 * takes effect, then restore the original CTRL/CTRL_EXT values.
	 * The delays between the register writes are required.
	 */
	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	delay(20);
}
7515
/*
 * Switch the PHY from SMBus mode to PCIe mode by toggling the
 * LANPHYPC value override.  Only done when firmware is not in control
 * of the PHY and PHY resets are not blocked.
 */
static void
wm_smbustopci(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);
	if (((fwsm & FWSM_FW_VALID) == 0)
	    && ((wm_check_reset_block(sc) == 0))) {
		/* Drive LANPHYPC low under override, hold it briefly ... */
		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(10);
		/* ... then release the override and let the PHY come up. */
		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(50*1000);

		/*
		 * Gate automatic PHY configuration by hardware on non-managed
		 * 82579
		 */
		if (sc->sc_type == WM_T_PCH2)
			wm_gate_hw_phy_config_ich8lan(sc, 1);
	}
}
7540
7541 static void
7542 wm_set_pcie_completion_timeout(struct wm_softc *sc)
7543 {
7544 uint32_t gcr;
7545 pcireg_t ctrl2;
7546
7547 gcr = CSR_READ(sc, WMREG_GCR);
7548
7549 /* Only take action if timeout value is defaulted to 0 */
7550 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
7551 goto out;
7552
7553 if ((gcr & GCR_CAP_VER2) == 0) {
7554 gcr |= GCR_CMPL_TMOUT_10MS;
7555 goto out;
7556 }
7557
7558 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
7559 sc->sc_pcixe_capoff + PCI_PCIE_DCSR2);
7560 ctrl2 |= WM_PCI_PCIE_DCSR2_16MS;
7561 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
7562 sc->sc_pcixe_capoff + PCI_PCIE_DCSR2, ctrl2);
7563
7564 out:
7565 /* Disable completion timeout resend */
7566 gcr &= ~GCR_CMPL_TMOUT_RESEND;
7567
7568 CSR_WRITE(sc, WMREG_GCR, gcr);
7569 }
7570
/* special case - for 82575 - need to do manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * remark: this is untested code - we have no board without EEPROM
	 *  same setup as mentioned int the freeBSD driver for the i82575
	 *
	 * NOTE(review): the register/value pairs below are vendor magic
	 * numbers carried over from that driver; their individual meanings
	 * are undocumented here.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}
7601
7602 static void
7603 wm_init_manageability(struct wm_softc *sc)
7604 {
7605
7606 if (sc->sc_flags & WM_F_HAS_MANAGE) {
7607 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
7608 uint32_t manc = CSR_READ(sc, WMREG_MANC);
7609
7610 /* disabl hardware interception of ARP */
7611 manc &= ~MANC_ARP_EN;
7612
7613 /* enable receiving management packets to the host */
7614 if (sc->sc_type >= WM_T_82571) {
7615 manc |= MANC_EN_MNG2HOST;
7616 manc2h |= MANC2H_PORT_623| MANC2H_PORT_624;
7617 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
7618
7619 }
7620
7621 CSR_WRITE(sc, WMREG_MANC, manc);
7622 }
7623 }
7624
7625 static void
7626 wm_release_manageability(struct wm_softc *sc)
7627 {
7628
7629 if (sc->sc_flags & WM_F_HAS_MANAGE) {
7630 uint32_t manc = CSR_READ(sc, WMREG_MANC);
7631
7632 if (sc->sc_type >= WM_T_82571)
7633 manc &= ~MANC_EN_MNG2HOST;
7634
7635 CSR_WRITE(sc, WMREG_MANC, manc);
7636 }
7637 }
7638
/*
 * Detect which wakeup/management facilities this chip variant provides
 * and record them in sc->sc_flags (WM_F_HAS_AMT, WM_F_ARC_SUBSYS_VALID,
 * WM_F_ASF_FIRMWARE_PRES, WM_F_HAS_MANAGE).
 */
static void
wm_get_wakeup(struct wm_softc *sc)
{

	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82583:
		/* 82573/82583 have AMT in addition to the flags below. */
		sc->sc_flags |= WM_F_HAS_AMT;
		/* FALLTHROUGH */
	case WM_T_80003:
	case WM_T_82541:
	case WM_T_82547:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
#if 0 /* XXX */
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
#endif
		/* ARC subsystem is only valid when firmware reports a mode. */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		/* ICH/PCH parts always have AMT and ASF firmware. */
		sc->sc_flags |= WM_F_HAS_AMT;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	default:
		break;
	}

	/* 1: HAS_MANAGE */
	if (wm_enable_mng_pass_thru(sc) != 0)
		sc->sc_flags |= WM_F_HAS_MANAGE;

#ifdef WM_DEBUG
	/* Dump the detected capability flags. */
	printf("\n");
	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
		printf("HAS_AMT,");
	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
		printf("ARC_SUBSYS_VALID,");
	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
		printf("ASF_FIRMWARE_PRES,");
	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
		printf("HAS_MANAGE,");
	printf("\n");
#endif
	/*
	 * Note that the WOL flag is set after the resetting of the eeprom
	 * stuff
	 */
}
7699
7700 #ifdef WM_WOL
7701 /* WOL in the newer chipset interfaces (pchlan) */
/*
 * Configure PHY-based wakeup for PCH (pchlan) parts.
 *
 * NOTE(review): this is an unimplemented stub -- the whole body is
 * compiled out with "#if 0" and the function currently does nothing.
 * The comments inside sketch the intended sequence (mirror MAC RARs
 * and MTA into the PHY, then configure and activate PHY wakeup).
 */
static void
wm_enable_phy_wakeup(struct wm_softc *sc)
{
#if 0
	uint16_t preg;

	/* Copy MAC RARs to PHY RARs */

	/* Copy MAC MTA to PHY MTA */

	/* Configure PHY Rx Control register */

	/* Enable PHY wakeup in MAC register */

	/* Configure and enable PHY wakeup in PHY registers */

	/* Activate PHY wakeup */

	/* XXX */
#endif
}
7723
7724 static void
7725 wm_enable_wakeup(struct wm_softc *sc)
7726 {
7727 uint32_t reg, pmreg;
7728 pcireg_t pmode;
7729
7730 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
7731 &pmreg, NULL) == 0)
7732 return;
7733
7734 /* Advertise the wakeup capability */
7735 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
7736 | CTRL_SWDPIN(3));
7737 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
7738
7739 /* ICH workaround */
7740 switch (sc->sc_type) {
7741 case WM_T_ICH8:
7742 case WM_T_ICH9:
7743 case WM_T_ICH10:
7744 case WM_T_PCH:
7745 case WM_T_PCH2:
7746 /* Disable gig during WOL */
7747 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7748 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
7749 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7750 if (sc->sc_type == WM_T_PCH)
7751 wm_gmii_reset(sc);
7752
7753 /* Power down workaround */
7754 if (sc->sc_phytype == WMPHY_82577) {
7755 struct mii_softc *child;
7756
7757 /* Assume that the PHY is copper */
7758 child = LIST_FIRST(&sc->sc_mii.mii_phys);
7759 if (child->mii_mpd_rev <= 2)
7760 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
7761 (768 << 5) | 25, 0x0444); /* magic num */
7762 }
7763 break;
7764 default:
7765 break;
7766 }
7767
7768 /* Keep the laser running on fiber adapters */
7769 if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
7770 || (sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0) {
7771 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7772 reg |= CTRL_EXT_SWDPIN(3);
7773 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7774 }
7775
7776 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
7777 #if 0 /* for the multicast packet */
7778 reg |= WUFC_MC;
7779 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
7780 #endif
7781
7782 if (sc->sc_type == WM_T_PCH) {
7783 wm_enable_phy_wakeup(sc);
7784 } else {
7785 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
7786 CSR_WRITE(sc, WMREG_WUFC, reg);
7787 }
7788
7789 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
7790 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
7791 || (sc->sc_type == WM_T_PCH2))
7792 && (sc->sc_phytype == WMPHY_IGP_3))
7793 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
7794
7795 /* Request PME */
7796 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
7797 #if 0
7798 /* Disable WOL */
7799 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
7800 #else
7801 /* For WOL */
7802 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
7803 #endif
7804 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
7805 }
7806 #endif /* WM_WOL */
7807
7808 static bool
7809 wm_suspend(device_t self, const pmf_qual_t *qual)
7810 {
7811 struct wm_softc *sc = device_private(self);
7812
7813 wm_release_manageability(sc);
7814 wm_release_hw_control(sc);
7815 #ifdef WM_WOL
7816 wm_enable_wakeup(sc);
7817 #endif
7818
7819 return true;
7820 }
7821
7822 static bool
7823 wm_resume(device_t self, const pmf_qual_t *qual)
7824 {
7825 struct wm_softc *sc = device_private(self);
7826
7827 wm_init_manageability(sc);
7828
7829 return true;
7830 }
7831
7832 static void
7833 wm_set_eee_i350(struct wm_softc * sc)
7834 {
7835 uint32_t ipcnfg, eeer;
7836
7837 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
7838 eeer = CSR_READ(sc, WMREG_EEER);
7839
7840 if ((sc->sc_flags & WM_F_EEE) != 0) {
7841 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
7842 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
7843 | EEER_LPI_FC);
7844 } else {
7845 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
7846 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
7847 | EEER_LPI_FC);
7848 }
7849
7850 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
7851 CSR_WRITE(sc, WMREG_EEER, eeer);
7852 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
7853 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
7854 }
7855