/*	$NetBSD: if_wm.c,v 1.227 2012/02/02 19:43:05 tls Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.227 2012/02/02 19:43:05 tls Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define WM_DEBUG_LINK	0x01
#define WM_DEBUG_TX	0x02
#define WM_DEBUG_RX	0x04
#define WM_DEBUG_GMII	0x08
#define WM_DEBUG_MANAGE	0x10
int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE;

#define DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define WM_NTXSEGS		256
#define WM_IFQUEUELEN		256
#define WM_TXQUEUELEN_MAX	64
#define WM_TXQUEUELEN_MAX_82547	16
#define WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define WM_NTXDESC_82542	256
#define WM_NTXDESC_82544	4096
#define WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
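
/*
 * Illustration (editor's note, not original driver code): because the
 * ring and queue sizes above are powers of two, index arithmetic wraps
 * with a simple mask instead of a modulo.  With WM_NTXDESC(sc) == 4096:
 *
 *	WM_NEXTTX(sc, 4094) == 4095
 *	WM_NEXTTX(sc, 4095) == ((4095 + 1) & 4095) == 0	(wraps to start)
 */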

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define WM_NRXDESC		256
#define WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
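
/*
 * Arithmetic behind the comment above (editor's note): a full-sized
 * ~9KB jumbo frame split across 2KB (MCLBYTES) buffers needs five
 * descriptors, and 256 / 5 = 51, so 256 descriptors leave room for
 * roughly 50 in-flight jumbo packets.
 */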

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
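
/*
 * Example (editor's note): WM_CDTXOFF(5) expands to
 * offsetof(struct wm_control_data_82544, wcd_txdescs[5]), the byte
 * offset of Tx descriptor 5 within the control-data clump.  Added to
 * the clump's single DMA segment address (WM_CDTXADDR() below), it
 * yields the device-visible address of that descriptor.
 */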

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
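
/*
 * Editor's note: this table appears to be indexed by the chip
 * function ID (sc_funcid, 0 to 3) to select the software/firmware
 * semaphore bit guarding the corresponding PHY.
 */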

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define sc_txdescs	sc_control_data->wcd_txdescs
#define sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall; /* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */
};

#define WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
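
/*
 * Sketch (editor's note, not driver code): sc_rxtailp is the classic
 * "pointer to the tail pointer" idiom; it lets WM_RXCHAIN_LINK()
 * append in O(1) with no empty-list special case:
 */
#if 0
	WM_RXCHAIN_RESET(sc);		/* sc_rxtailp = &sc_rxhead */
	WM_RXCHAIN_LINK(sc, m1);	/* sc_rxhead = m1 */
	WM_RXCHAIN_LINK(sc, m2);	/* m1->m_next = m2 */
	/* now sc_rxhead -> m1 -> m2, and sc_rxtail == m2 */
#endif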

#ifdef WM_EVENT_COUNTERS
#define WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define WM_EVCNT_INCR(ev)	/* nothing */
#define WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
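
/*
 * Worked example (editor's note): with a 64-bit bus_addr_t, a
 * descriptor at DMA address 0x123456000 splits into
 * WM_CDTXADDR_LO == 0x23456000 and WM_CDTXADDR_HI == 0x1, matching
 * the chip's pair of 32-bit descriptor base address registers.  On
 * 32-bit platforms the high half constant-folds to 0.
 */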

#define WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
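
/*
 * Example of the wrap case (editor's note): with WM_NTXDESC(sc) == 256,
 * WM_CDTXSYNC(sc, 250, 10, ops) makes two bus_dmamap_sync() calls --
 * one covering descriptors 250-255 (the 6 up to the end of the ring)
 * and one covering descriptors 0-3 (the remaining 4).
 */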

#define WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
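
/*
 * Why the 2-byte "scoot" works (editor's note): an Ethernet header is
 * 14 bytes, so a frame received into a 4-byte-aligned buffer would
 * leave the IP header only 2-byte aligned.  With sc_align_tweak == 2:
 *
 *	ext_buf + 0:  2 bytes of padding
 *	ext_buf + 2:  14-byte Ethernet header
 *	ext_buf + 16: IP header, now on a 4-byte boundary
 */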

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(device_t);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t wmp_vendor;
	pci_product_id_t wmp_product;
	const char *wmp_name;
	wm_chip_type wmp_type;
	int wmp_flags;
#define WMP_F_1000X	0x01
#define WMP_F_1000T	0x02
#define WMP_F_SERDES	0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571, WMP_F_1000T, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572, WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003, WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003, WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575, WMP_F_1000T },
#if 0
	/*
	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
	 * disabled for now ...
	 */
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576, WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580, WMP_F_1000X },
	{ 0, 0,
	  NULL,
	  0, 0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
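
/*
 * Editor's note: the pair of bus_space writes above implements the
 * chip's indirect I/O window -- offset 0 in I/O space (IOADDR) selects
 * a CSR, and offset 4 (IODATA) carries the data.  wm_io_read(),
 * disabled above, is the read-side twin of the same pattern.
 */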

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

static void
wm_set_spiaddrbits(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	wm_get_wakeup(sc);

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i != PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
			 * That is not a problem, because those newer chips
			 * don't have the bugs that require I/O access.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit, 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)
		    && (sc->sc_type != WM_T_PCH2)) {
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
			/* ICH* and PCH* have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
			    NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcixe_capoff + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
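		/*
		 * Editor's note on the arithmetic above: the PCI-X command
		 * register's byte-count field encodes the maximum memory
		 * read byte count as 512 << n (n = 0..3, i.e. 512 to 4096
		 * bytes), hence the "512 << bytecnt" in the diagnostic.
		 */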
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
	    &sc->sc_cd_rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
	    sc->sc_cd_rseg, sc->sc_cd_size,
	    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
	    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/* get PHY control from SMBus to PCIe */
	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
		wm_smbustopci(sc);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/*
	 * Get some information about the EEPROM.
	 */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Microwire */
		sc->sc_ee_addrbits = 6;
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		/* Microwire */
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			/* SPI */
			wm_set_spiaddrbits(sc);
		} else
			/* Microwire */
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82571:
	case WM_T_82572:
		/* SPI */
		wm_set_spiaddrbits(sc);
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (wm_is_onboard_nvm_eeprom(sc) == 0)
			sc->sc_flags |= WM_F_EEPROM_FLASH;
		else {
			/* SPI */
			wm_set_spiaddrbits(sc);
		}
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_80003:
		/* SPI */
		wm_set_spiaddrbits(sc);
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		/* FLASH */
		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (reg & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
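		/*
		 * Editor's note: GFPREG holds the flash region's first
		 * sector number in its low half and its last sector number
		 * in bits 16 and up, so the lines above compute
		 * (limit + 1 - base) sectors, convert that to bytes, then
		 * divide by two banks and two bytes per word to get the
		 * bank size in 16-bit words.
		 */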
		break;
	default:
		break;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */
	/*
	 * Validate the EEPROM checksum. If the checksum fails, flag
	 * this for later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc)) {
		/*
		 * Validate the checksum a second time, because some PCIe
		 * parts fail the first check while the link is still in a
		 * sleep state.
		 */
1601 if (wm_validate_eeprom_checksum(sc))
1602 sc->sc_flags |= WM_F_EEPROM_INVALID;
1603 }
1604
1605 /* Set device properties (macflags) */
1606 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1607
1608 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1609 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1610 else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1611 aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1612 } else {
1613 if (sc->sc_flags & WM_F_EEPROM_SPI)
1614 eetype = "SPI";
1615 else
1616 eetype = "MicroWire";
1617 aprint_verbose_dev(sc->sc_dev,
1618 "%u word (%d address bits) %s EEPROM\n",
1619 1U << sc->sc_ee_addrbits,
1620 sc->sc_ee_addrbits, eetype);
1621 }
1622
1623 /*
1624 	 * Read the Ethernet address from the EEPROM, unless it was
1625 	 * already found in the device properties.
1626 */
1627 ea = prop_dictionary_get(dict, "mac-address");
1628 if (ea != NULL) {
1629 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1630 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1631 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1632 } else {
1633 if (wm_read_mac_addr(sc, enaddr) != 0) {
1634 aprint_error_dev(sc->sc_dev,
1635 "unable to read Ethernet address\n");
1636 return;
1637 }
1638 }
1639
1640 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1641 ether_sprintf(enaddr));
1642
1643 /*
1644 * Read the config info from the EEPROM, and set up various
1645 * bits in the control registers based on their contents.
1646 */
1647 pn = prop_dictionary_get(dict, "i82543-cfg1");
1648 if (pn != NULL) {
1649 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1650 cfg1 = (uint16_t) prop_number_integer_value(pn);
1651 } else {
1652 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1653 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1654 return;
1655 }
1656 }
1657
1658 pn = prop_dictionary_get(dict, "i82543-cfg2");
1659 if (pn != NULL) {
1660 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1661 cfg2 = (uint16_t) prop_number_integer_value(pn);
1662 } else {
1663 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1664 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1665 return;
1666 }
1667 }
1668
1669 /* check for WM_F_WOL */
1670 switch (sc->sc_type) {
1671 case WM_T_82542_2_0:
1672 case WM_T_82542_2_1:
1673 case WM_T_82543:
1674 /* dummy? */
1675 eeprom_data = 0;
1676 apme_mask = EEPROM_CFG3_APME;
1677 break;
1678 case WM_T_82544:
1679 apme_mask = EEPROM_CFG2_82544_APM_EN;
1680 eeprom_data = cfg2;
1681 break;
1682 case WM_T_82546:
1683 case WM_T_82546_3:
1684 case WM_T_82571:
1685 case WM_T_82572:
1686 case WM_T_82573:
1687 case WM_T_82574:
1688 case WM_T_82583:
1689 case WM_T_80003:
1690 default:
1691 apme_mask = EEPROM_CFG3_APME;
1692 wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1693 : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1694 break;
1695 case WM_T_82575:
1696 case WM_T_82576:
1697 case WM_T_82580:
1698 case WM_T_82580ER:
1699 case WM_T_ICH8:
1700 case WM_T_ICH9:
1701 case WM_T_ICH10:
1702 case WM_T_PCH:
1703 case WM_T_PCH2:
1704 apme_mask = WUC_APME;
1705 eeprom_data = CSR_READ(sc, WMREG_WUC);
1706 break;
1707 }
1708
1709 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
1710 if ((eeprom_data & apme_mask) != 0)
1711 sc->sc_flags |= WM_F_WOL;
1712 #ifdef WM_DEBUG
1713 if ((sc->sc_flags & WM_F_WOL) != 0)
1714 printf("WOL\n");
1715 #endif
1716
1717 /*
1718 * XXX need special handling for some multiple port cards
1719 	 * to disable a particular port.
1720 */
1721
1722 if (sc->sc_type >= WM_T_82544) {
1723 pn = prop_dictionary_get(dict, "i82543-swdpin");
1724 if (pn != NULL) {
1725 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1726 swdpin = (uint16_t) prop_number_integer_value(pn);
1727 } else {
1728 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1729 aprint_error_dev(sc->sc_dev,
1730 "unable to read SWDPIN\n");
1731 return;
1732 }
1733 }
1734 }
1735
1736 if (cfg1 & EEPROM_CFG1_ILOS)
1737 sc->sc_ctrl |= CTRL_ILOS;
1738 if (sc->sc_type >= WM_T_82544) {
1739 sc->sc_ctrl |=
1740 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1741 CTRL_SWDPIO_SHIFT;
1742 sc->sc_ctrl |=
1743 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1744 CTRL_SWDPINS_SHIFT;
1745 } else {
1746 sc->sc_ctrl |=
1747 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1748 CTRL_SWDPIO_SHIFT;
1749 }
1750
1751 #if 0
1752 if (sc->sc_type >= WM_T_82544) {
1753 if (cfg1 & EEPROM_CFG1_IPS0)
1754 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1755 if (cfg1 & EEPROM_CFG1_IPS1)
1756 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1757 sc->sc_ctrl_ext |=
1758 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1759 CTRL_EXT_SWDPIO_SHIFT;
1760 sc->sc_ctrl_ext |=
1761 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1762 CTRL_EXT_SWDPINS_SHIFT;
1763 } else {
1764 sc->sc_ctrl_ext |=
1765 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1766 CTRL_EXT_SWDPIO_SHIFT;
1767 }
1768 #endif
1769
1770 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1771 #if 0
1772 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1773 #endif
1774
1775 /*
1776 * Set up some register offsets that are different between
1777 * the i82542 and the i82543 and later chips.
1778 */
1779 if (sc->sc_type < WM_T_82543) {
1780 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1781 sc->sc_tdt_reg = WMREG_OLD_TDT;
1782 } else {
1783 sc->sc_rdt_reg = WMREG_RDT;
1784 sc->sc_tdt_reg = WMREG_TDT;
1785 }
1786
1787 if (sc->sc_type == WM_T_PCH) {
1788 uint16_t val;
1789
1790 /* Save the NVM K1 bit setting */
1791 wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1792
1793 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1794 sc->sc_nvm_k1_enabled = 1;
1795 else
1796 sc->sc_nvm_k1_enabled = 0;
1797 }
1798
1799 /*
1800 	 * Determine if we're TBI, GMII, or SGMII mode, and initialize the
1801 * media structures accordingly.
1802 */
1803 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1804 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1805 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_82573
1806 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1807 /* STATUS_TBIMODE reserved/reused, can't rely on it */
1808 wm_gmii_mediainit(sc, wmp->wmp_product);
1809 } else if (sc->sc_type < WM_T_82543 ||
1810 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1811 if (wmp->wmp_flags & WMP_F_1000T)
1812 aprint_error_dev(sc->sc_dev,
1813 "WARNING: TBIMODE set on 1000BASE-T product!\n");
1814 wm_tbi_mediainit(sc);
1815 } else {
1816 switch (sc->sc_type) {
1817 case WM_T_82575:
1818 case WM_T_82576:
1819 case WM_T_82580:
1820 case WM_T_82580ER:
1821 reg = CSR_READ(sc, WMREG_CTRL_EXT);
1822 switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1823 case CTRL_EXT_LINK_MODE_SGMII:
1824 aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1825 sc->sc_flags |= WM_F_SGMII;
1826 CSR_WRITE(sc, WMREG_CTRL_EXT,
1827 reg | CTRL_EXT_I2C_ENA);
1828 wm_gmii_mediainit(sc, wmp->wmp_product);
1829 break;
1830 case CTRL_EXT_LINK_MODE_1000KX:
1831 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1832 aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1833 CSR_WRITE(sc, WMREG_CTRL_EXT,
1834 reg | CTRL_EXT_I2C_ENA);
1835 panic("not supported yet\n");
1836 break;
1837 case CTRL_EXT_LINK_MODE_GMII:
1838 default:
1839 CSR_WRITE(sc, WMREG_CTRL_EXT,
1840 reg & ~CTRL_EXT_I2C_ENA);
1841 wm_gmii_mediainit(sc, wmp->wmp_product);
1842 break;
1843 }
1844 break;
1845 default:
1846 if (wmp->wmp_flags & WMP_F_1000X)
1847 aprint_error_dev(sc->sc_dev,
1848 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1849 wm_gmii_mediainit(sc, wmp->wmp_product);
1850 }
1851 }
1852
1853 ifp = &sc->sc_ethercom.ec_if;
1854 xname = device_xname(sc->sc_dev);
1855 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1856 ifp->if_softc = sc;
1857 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1858 ifp->if_ioctl = wm_ioctl;
1859 ifp->if_start = wm_start;
1860 ifp->if_watchdog = wm_watchdog;
1861 ifp->if_init = wm_init;
1862 ifp->if_stop = wm_stop;
1863 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1864 IFQ_SET_READY(&ifp->if_snd);
1865
1866 /* Check for jumbo frame */
1867 switch (sc->sc_type) {
1868 case WM_T_82573:
1869 /* XXX limited to 9234 if ASPM is disabled */
1870 wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1871 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1872 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1873 break;
1874 case WM_T_82571:
1875 case WM_T_82572:
1876 case WM_T_82574:
1877 case WM_T_82575:
1878 case WM_T_82576:
1879 case WM_T_82580:
1880 case WM_T_82580ER:
1881 case WM_T_80003:
1882 case WM_T_ICH9:
1883 case WM_T_ICH10:
1884 case WM_T_PCH2: /* PCH2 supports 9K frame size */
1885 /* XXX limited to 9234 */
1886 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1887 break;
1888 case WM_T_PCH:
1889 /* XXX limited to 4096 */
1890 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1891 break;
1892 case WM_T_82542_2_0:
1893 case WM_T_82542_2_1:
1894 case WM_T_82583:
1895 case WM_T_ICH8:
1896 /* No support for jumbo frame */
1897 break;
1898 default:
1899 /* ETHER_MAX_LEN_JUMBO */
1900 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1901 break;
1902 }
1903
1904 /*
1905 	 * If we're an i82543 or greater, we can support VLANs.
1906 */
1907 if (sc->sc_type == WM_T_82575 || sc->sc_type == WM_T_82576)
1908 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
1909 else if (sc->sc_type >= WM_T_82543)
1910 sc->sc_ethercom.ec_capabilities |=
1911 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1912
1913 /*
1914 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
1915 	 * on the i82543 and later.
1916 */
1917 if (sc->sc_type >= WM_T_82543) {
1918 ifp->if_capabilities |=
1919 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1920 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1921 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1922 IFCAP_CSUM_TCPv6_Tx |
1923 IFCAP_CSUM_UDPv6_Tx;
1924 }
1925
1926 /*
1927 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1928 *
1929 * 82541GI (8086:1076) ... no
1930 * 82572EI (8086:10b9) ... yes
1931 */
1932 if (sc->sc_type >= WM_T_82571) {
1933 ifp->if_capabilities |=
1934 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1935 }
1936
1937 /*
1938 	 * If we're an i82544 or greater (except i82547), we can do
1939 * TCP segmentation offload.
1940 */
1941 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1942 ifp->if_capabilities |= IFCAP_TSOv4;
1943 }
1944
1945 if (sc->sc_type >= WM_T_82571) {
1946 ifp->if_capabilities |= IFCAP_TSOv6;
1947 }
1948
1949 /*
1950 * Attach the interface.
1951 */
1952 if_attach(ifp);
1953 ether_ifattach(ifp, enaddr);
1954 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
1955 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1956
1957 #ifdef WM_EVENT_COUNTERS
1958 /* Attach event counters. */
1959 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1960 NULL, xname, "txsstall");
1961 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1962 NULL, xname, "txdstall");
1963 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1964 NULL, xname, "txfifo_stall");
1965 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1966 NULL, xname, "txdw");
1967 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1968 NULL, xname, "txqe");
1969 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1970 NULL, xname, "rxintr");
1971 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1972 NULL, xname, "linkintr");
1973
1974 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1975 NULL, xname, "rxipsum");
1976 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1977 NULL, xname, "rxtusum");
1978 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1979 NULL, xname, "txipsum");
1980 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1981 NULL, xname, "txtusum");
1982 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1983 NULL, xname, "txtusum6");
1984
1985 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1986 NULL, xname, "txtso");
1987 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1988 NULL, xname, "txtso6");
1989 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1990 NULL, xname, "txtsopain");
1991
1992 for (i = 0; i < WM_NTXSEGS; i++) {
1993 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1994 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1995 NULL, xname, wm_txseg_evcnt_names[i]);
1996 }
1997
1998 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1999 NULL, xname, "txdrop");
2000
2001 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2002 NULL, xname, "tu");
2003
2004 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2005 NULL, xname, "tx_xoff");
2006 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2007 NULL, xname, "tx_xon");
2008 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2009 NULL, xname, "rx_xoff");
2010 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2011 NULL, xname, "rx_xon");
2012 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2013 NULL, xname, "rx_macctl");
2014 #endif /* WM_EVENT_COUNTERS */
2015
2016 if (pmf_device_register(self, wm_suspend, wm_resume))
2017 pmf_class_network_register(self, ifp);
2018 else
2019 aprint_error_dev(self, "couldn't establish power handler\n");
2020
2021 return;
2022
2023 /*
2024 * Free any resources we've allocated during the failed attach
2025 * attempt. Do this in reverse order and fall through.
2026 */
2027 fail_5:
2028 for (i = 0; i < WM_NRXDESC; i++) {
2029 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2030 bus_dmamap_destroy(sc->sc_dmat,
2031 sc->sc_rxsoft[i].rxs_dmamap);
2032 }
2033 fail_4:
2034 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2035 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2036 bus_dmamap_destroy(sc->sc_dmat,
2037 sc->sc_txsoft[i].txs_dmamap);
2038 }
2039 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2040 fail_3:
2041 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2042 fail_2:
2043 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2044 sc->sc_cd_size);
2045 fail_1:
2046 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2047 fail_0:
2048 return;
2049 }
2050
2051 static int
2052 wm_detach(device_t self, int flags __unused)
2053 {
2054 struct wm_softc *sc = device_private(self);
2055 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2056 int i, s;
2057
2058 s = splnet();
2059 	/* Stop the interface; callouts are stopped inside wm_stop(). */
2060 wm_stop(ifp, 1);
2061 splx(s);
2062
2063 pmf_device_deregister(self);
2064
2065 /* Tell the firmware about the release */
2066 wm_release_manageability(sc);
2067 wm_release_hw_control(sc);
2068
2069 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2070
2071 /* Delete all remaining media. */
2072 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2073
2074 ether_ifdetach(ifp);
2075 if_detach(ifp);
2076
2077
2078 /* Unload RX dmamaps and free mbufs */
2079 wm_rxdrain(sc);
2080
2081 	/* Free the DMA maps; this mirrors the cleanup at the end of wm_attach() */
2082 for (i = 0; i < WM_NRXDESC; i++) {
2083 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2084 bus_dmamap_destroy(sc->sc_dmat,
2085 sc->sc_rxsoft[i].rxs_dmamap);
2086 }
2087 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2088 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2089 bus_dmamap_destroy(sc->sc_dmat,
2090 sc->sc_txsoft[i].txs_dmamap);
2091 }
2092 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2093 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2094 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2095 sc->sc_cd_size);
2096 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2097
2098 /* Disestablish the interrupt handler */
2099 if (sc->sc_ih != NULL) {
2100 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2101 sc->sc_ih = NULL;
2102 }
2103
2104 /* Unmap the registers */
2105 if (sc->sc_ss) {
2106 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2107 sc->sc_ss = 0;
2108 }
2109
2110 if (sc->sc_ios) {
2111 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2112 sc->sc_ios = 0;
2113 }
2114
2115 return 0;
2116 }
2117
2118 /*
2119 * wm_tx_offload:
2120 *
2121 * Set up TCP/IP checksumming parameters for the
2122 * specified packet.
2123 */
2124 static int
2125 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2126 uint8_t *fieldsp)
2127 {
2128 struct mbuf *m0 = txs->txs_mbuf;
2129 struct livengood_tcpip_ctxdesc *t;
2130 uint32_t ipcs, tucs, cmd, cmdlen, seg;
2131 uint32_t ipcse;
2132 struct ether_header *eh;
2133 int offset, iphl;
2134 uint8_t fields;
2135
2136 /*
2137 * XXX It would be nice if the mbuf pkthdr had offset
2138 * fields for the protocol headers.
2139 */
2140
2141 eh = mtod(m0, struct ether_header *);
2142 switch (htons(eh->ether_type)) {
2143 case ETHERTYPE_IP:
2144 case ETHERTYPE_IPV6:
2145 offset = ETHER_HDR_LEN;
2146 break;
2147
2148 case ETHERTYPE_VLAN:
2149 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2150 break;
2151
2152 default:
2153 /*
2154 * Don't support this protocol or encapsulation.
2155 */
2156 *fieldsp = 0;
2157 *cmdp = 0;
2158 return 0;
2159 }
2160
2161 if ((m0->m_pkthdr.csum_flags &
2162 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2163 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2164 } else {
2165 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2166 }
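	/*
	 * IPCSE is the inclusive offset of the last byte of the IP
	 * header; 0 means "through the end of the packet" (used by the
	 * TSOv6 case below).
	 */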
2167 ipcse = offset + iphl - 1;
2168
2169 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2170 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2171 seg = 0;
2172 fields = 0;
2173
2174 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2175 int hlen = offset + iphl;
2176 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2177
2178 if (__predict_false(m0->m_len <
2179 (hlen + sizeof(struct tcphdr)))) {
2180 /*
2181 * TCP/IP headers are not in the first mbuf; we need
2182 * to do this the slow and painful way. Let's just
2183 * hope this doesn't happen very often.
2184 */
2185 struct tcphdr th;
2186
2187 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2188
2189 m_copydata(m0, hlen, sizeof(th), &th);
2190 if (v4) {
2191 struct ip ip;
2192
2193 m_copydata(m0, offset, sizeof(ip), &ip);
2194 ip.ip_len = 0;
2195 m_copyback(m0,
2196 offset + offsetof(struct ip, ip_len),
2197 sizeof(ip.ip_len), &ip.ip_len);
2198 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2199 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2200 } else {
2201 struct ip6_hdr ip6;
2202
2203 m_copydata(m0, offset, sizeof(ip6), &ip6);
2204 ip6.ip6_plen = 0;
2205 m_copyback(m0,
2206 offset + offsetof(struct ip6_hdr, ip6_plen),
2207 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2208 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2209 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2210 }
2211 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2212 sizeof(th.th_sum), &th.th_sum);
2213
2214 hlen += th.th_off << 2;
2215 } else {
2216 /*
2217 * TCP/IP headers are in the first mbuf; we can do
2218 * this the easy way.
2219 */
2220 struct tcphdr *th;
2221
2222 if (v4) {
2223 struct ip *ip =
2224 (void *)(mtod(m0, char *) + offset);
2225 th = (void *)(mtod(m0, char *) + hlen);
2226
2227 ip->ip_len = 0;
2228 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2229 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2230 } else {
2231 struct ip6_hdr *ip6 =
2232 (void *)(mtod(m0, char *) + offset);
2233 th = (void *)(mtod(m0, char *) + hlen);
2234
2235 ip6->ip6_plen = 0;
2236 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2237 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2238 }
2239 hlen += th->th_off << 2;
2240 }
2241
2242 if (v4) {
2243 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2244 cmdlen |= WTX_TCPIP_CMD_IP;
2245 } else {
2246 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2247 ipcse = 0;
2248 }
2249 cmd |= WTX_TCPIP_CMD_TSE;
2250 cmdlen |= WTX_TCPIP_CMD_TSE |
2251 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2252 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2253 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2254 }
2255
2256 /*
2257 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2258 * offload feature, if we load the context descriptor, we
2259 * MUST provide valid values for IPCSS and TUCSS fields.
2260 */
2261
2262 ipcs = WTX_TCPIP_IPCSS(offset) |
2263 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2264 WTX_TCPIP_IPCSE(ipcse);
2265 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2266 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2267 fields |= WTX_IXSM;
2268 }
2269
2270 offset += iphl;
2271
2272 if (m0->m_pkthdr.csum_flags &
2273 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2274 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2275 fields |= WTX_TXSM;
2276 tucs = WTX_TCPIP_TUCSS(offset) |
2277 WTX_TCPIP_TUCSO(offset +
2278 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2279 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2280 } else if ((m0->m_pkthdr.csum_flags &
2281 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2282 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2283 fields |= WTX_TXSM;
2284 tucs = WTX_TCPIP_TUCSS(offset) |
2285 WTX_TCPIP_TUCSO(offset +
2286 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2287 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2288 } else {
2289 /* Just initialize it to a valid TCP context. */
2290 tucs = WTX_TCPIP_TUCSS(offset) |
2291 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2292 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2293 }
2294
2295 /* Fill in the context descriptor. */
2296 t = (struct livengood_tcpip_ctxdesc *)
2297 &sc->sc_txdescs[sc->sc_txnext];
2298 t->tcpip_ipcs = htole32(ipcs);
2299 t->tcpip_tucs = htole32(tucs);
2300 t->tcpip_cmdlen = htole32(cmdlen);
2301 t->tcpip_seg = htole32(seg);
2302 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2303
2304 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2305 txs->txs_ndesc++;
2306
2307 *cmdp = cmd;
2308 *fieldsp = fields;
2309
2310 return 0;
2311 }
2312
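/*
 * wm_dump_mbuf_chain:
 *
 *	Dump an mbuf chain to the log; used when a Tx packet consumes
 *	too many DMA segments and has to be dropped.
 */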
2313 static void
2314 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2315 {
2316 struct mbuf *m;
2317 int i;
2318
2319 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2320 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2321 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2322 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2323 m->m_data, m->m_len, m->m_flags);
2324 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2325 i, i == 1 ? "" : "s");
2326 }
2327
2328 /*
2329 * wm_82547_txfifo_stall:
2330 *
2331 * Callout used to wait for the 82547 Tx FIFO to drain,
2332 * reset the FIFO pointers, and restart packet transmission.
2333 */
2334 static void
2335 wm_82547_txfifo_stall(void *arg)
2336 {
2337 struct wm_softc *sc = arg;
2338 int s;
2339
2340 s = splnet();
2341
2342 if (sc->sc_txfifo_stall) {
2343 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2344 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2345 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2346 /*
2347 * Packets have drained. Stop transmitter, reset
2348 * FIFO pointers, restart transmitter, and kick
2349 * the packet queue.
2350 */
2351 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2352 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2353 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2354 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2355 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2356 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2357 CSR_WRITE(sc, WMREG_TCTL, tctl);
2358 CSR_WRITE_FLUSH(sc);
2359
2360 sc->sc_txfifo_head = 0;
2361 sc->sc_txfifo_stall = 0;
2362 wm_start(&sc->sc_ethercom.ec_if);
2363 } else {
2364 /*
2365 * Still waiting for packets to drain; try again in
2366 * another tick.
2367 */
2368 callout_schedule(&sc->sc_txfifo_ch, 1);
2369 }
2370 }
2371
2372 splx(s);
2373 }
2374
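/*
 * wm_gate_hw_phy_config_ich8lan:
 *
 *	Gate (or ungate) automatic PHY configuration by hardware;
 *	used around reset on managed PCH2 (82579) parts, see wm_reset().
 */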
2375 static void
2376 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2377 {
2378 uint32_t reg;
2379
2380 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2381
2382 if (on != 0)
2383 reg |= EXTCNFCTR_GATE_PHY_CFG;
2384 else
2385 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2386
2387 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2388 }
2389
2390 /*
2391 * wm_82547_txfifo_bugchk:
2392 *
2393 * Check for bug condition in the 82547 Tx FIFO. We need to
2394 * prevent enqueueing a packet that would wrap around the end
2395 	 * of the Tx FIFO ring buffer; otherwise the chip will croak.
2396 *
2397 * We do this by checking the amount of space before the end
2398 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2399 * the Tx FIFO, wait for all remaining packets to drain, reset
2400 * the internal FIFO pointers to the beginning, and restart
2401 * transmission on the interface.
2402 */
2403 #define WM_FIFO_HDR 0x10
2404 #define WM_82547_PAD_LEN 0x3e0
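/*
 * The chip stores a 16-byte header (WM_FIFO_HDR) with each packet in
 * the Tx FIFO and rounds the total up to that granularity; the
 * WM_82547_PAD_LEN threshold is taken as-is from Intel's reference
 * driver.
 */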
2405 static int
2406 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2407 {
2408 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2409 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2410
2411 /* Just return if already stalled. */
2412 if (sc->sc_txfifo_stall)
2413 return 1;
2414
2415 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2416 /* Stall only occurs in half-duplex mode. */
2417 goto send_packet;
2418 }
2419
2420 if (len >= WM_82547_PAD_LEN + space) {
2421 sc->sc_txfifo_stall = 1;
2422 callout_schedule(&sc->sc_txfifo_ch, 1);
2423 return 1;
2424 }
2425
2426 send_packet:
2427 sc->sc_txfifo_head += len;
2428 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2429 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2430
2431 return 0;
2432 }
2433
2434 /*
2435 * wm_start: [ifnet interface function]
2436 *
2437 * Start packet transmission on the interface.
2438 */
2439 static void
2440 wm_start(struct ifnet *ifp)
2441 {
2442 struct wm_softc *sc = ifp->if_softc;
2443 struct mbuf *m0;
2444 struct m_tag *mtag;
2445 struct wm_txsoft *txs;
2446 bus_dmamap_t dmamap;
2447 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2448 bus_addr_t curaddr;
2449 bus_size_t seglen, curlen;
2450 uint32_t cksumcmd;
2451 uint8_t cksumfields;
2452
2453 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2454 return;
2455
2456 /*
2457 * Remember the previous number of free descriptors.
2458 */
2459 ofree = sc->sc_txfree;
2460
2461 /*
2462 * Loop through the send queue, setting up transmit descriptors
2463 * until we drain the queue, or use up all available transmit
2464 * descriptors.
2465 */
2466 for (;;) {
2467 /* Grab a packet off the queue. */
2468 IFQ_POLL(&ifp->if_snd, m0);
2469 if (m0 == NULL)
2470 break;
2471
2472 DPRINTF(WM_DEBUG_TX,
2473 ("%s: TX: have packet to transmit: %p\n",
2474 device_xname(sc->sc_dev), m0));
2475
2476 /* Get a work queue entry. */
2477 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2478 wm_txintr(sc);
2479 if (sc->sc_txsfree == 0) {
2480 DPRINTF(WM_DEBUG_TX,
2481 ("%s: TX: no free job descriptors\n",
2482 device_xname(sc->sc_dev)));
2483 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2484 break;
2485 }
2486 }
2487
2488 txs = &sc->sc_txsoft[sc->sc_txsnext];
2489 dmamap = txs->txs_dmamap;
2490
2491 use_tso = (m0->m_pkthdr.csum_flags &
2492 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2493
2494 /*
2495 * So says the Linux driver:
2496 * The controller does a simple calculation to make sure
2497 * there is enough room in the FIFO before initiating the
2498 * DMA for each buffer. The calc is:
2499 * 4 = ceil(buffer len / MSS)
2500 * To make sure we don't overrun the FIFO, adjust the max
2501 * buffer len if the MSS drops.
2502 */
2503 dmamap->dm_maxsegsz =
2504 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2505 ? m0->m_pkthdr.segsz << 2
2506 : WTX_MAX_LEN;
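		/* I.e., cap each DMA segment at four times the MSS when doing TSO. */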
2507
2508 /*
2509 * Load the DMA map. If this fails, the packet either
2510 * didn't fit in the allotted number of segments, or we
2511 * were short on resources. For the too-many-segments
2512 * case, we simply report an error and drop the packet,
2513 * since we can't sanely copy a jumbo packet to a single
2514 * buffer.
2515 */
2516 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2517 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2518 if (error) {
2519 if (error == EFBIG) {
2520 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2521 log(LOG_ERR, "%s: Tx packet consumes too many "
2522 "DMA segments, dropping...\n",
2523 device_xname(sc->sc_dev));
2524 IFQ_DEQUEUE(&ifp->if_snd, m0);
2525 wm_dump_mbuf_chain(sc, m0);
2526 m_freem(m0);
2527 continue;
2528 }
2529 /*
2530 * Short on resources, just stop for now.
2531 */
2532 DPRINTF(WM_DEBUG_TX,
2533 ("%s: TX: dmamap load failed: %d\n",
2534 device_xname(sc->sc_dev), error));
2535 break;
2536 }
2537
2538 segs_needed = dmamap->dm_nsegs;
2539 if (use_tso) {
2540 /* For sentinel descriptor; see below. */
2541 segs_needed++;
2542 }
2543
2544 /*
2545 * Ensure we have enough descriptors free to describe
2546 * the packet. Note, we always reserve one descriptor
2547 * at the end of the ring due to the semantics of the
2548 * TDT register, plus one more in the event we need
2549 * to load offload context.
2550 */
2551 if (segs_needed > sc->sc_txfree - 2) {
2552 /*
2553 * Not enough free descriptors to transmit this
2554 * packet. We haven't committed anything yet,
2555 * so just unload the DMA map, put the packet
2556 			 * back on the queue, and punt. Notify the upper
2557 * layer that there are no more slots left.
2558 */
2559 DPRINTF(WM_DEBUG_TX,
2560 ("%s: TX: need %d (%d) descriptors, have %d\n",
2561 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2562 segs_needed, sc->sc_txfree - 1));
2563 ifp->if_flags |= IFF_OACTIVE;
2564 bus_dmamap_unload(sc->sc_dmat, dmamap);
2565 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2566 break;
2567 }
2568
2569 /*
2570 * Check for 82547 Tx FIFO bug. We need to do this
2571 * once we know we can transmit the packet, since we
2572 * do some internal FIFO space accounting here.
2573 */
2574 if (sc->sc_type == WM_T_82547 &&
2575 wm_82547_txfifo_bugchk(sc, m0)) {
2576 DPRINTF(WM_DEBUG_TX,
2577 ("%s: TX: 82547 Tx FIFO bug detected\n",
2578 device_xname(sc->sc_dev)));
2579 ifp->if_flags |= IFF_OACTIVE;
2580 bus_dmamap_unload(sc->sc_dmat, dmamap);
2581 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2582 break;
2583 }
2584
2585 IFQ_DEQUEUE(&ifp->if_snd, m0);
2586
2587 /*
2588 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2589 */
2590
2591 DPRINTF(WM_DEBUG_TX,
2592 ("%s: TX: packet has %d (%d) DMA segments\n",
2593 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2594
2595 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2596
2597 /*
2598 * Store a pointer to the packet so that we can free it
2599 * later.
2600 *
2601 		 * Initially, we consider the number of descriptors the
2602 		 * packet uses to be the number of DMA segments. This may be
2603 * incremented by 1 if we do checksum offload (a descriptor
2604 * is used to set the checksum context).
2605 */
2606 txs->txs_mbuf = m0;
2607 txs->txs_firstdesc = sc->sc_txnext;
2608 txs->txs_ndesc = segs_needed;
2609
2610 /* Set up offload parameters for this packet. */
2611 if (m0->m_pkthdr.csum_flags &
2612 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2613 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2614 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2615 if (wm_tx_offload(sc, txs, &cksumcmd,
2616 &cksumfields) != 0) {
2617 /* Error message already displayed. */
2618 bus_dmamap_unload(sc->sc_dmat, dmamap);
2619 continue;
2620 }
2621 } else {
2622 cksumcmd = 0;
2623 cksumfields = 0;
2624 }
2625
2626 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2627
2628 /* Sync the DMA map. */
2629 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2630 BUS_DMASYNC_PREWRITE);
2631
2632 /*
2633 * Initialize the transmit descriptor.
2634 */
2635 for (nexttx = sc->sc_txnext, seg = 0;
2636 seg < dmamap->dm_nsegs; seg++) {
2637 for (seglen = dmamap->dm_segs[seg].ds_len,
2638 curaddr = dmamap->dm_segs[seg].ds_addr;
2639 seglen != 0;
2640 curaddr += curlen, seglen -= curlen,
2641 nexttx = WM_NEXTTX(sc, nexttx)) {
2642 curlen = seglen;
2643
2644 /*
2645 * So says the Linux driver:
2646 * Work around for premature descriptor
2647 * write-backs in TSO mode. Append a
2648 * 4-byte sentinel descriptor.
2649 */
2650 if (use_tso &&
2651 seg == dmamap->dm_nsegs - 1 &&
2652 curlen > 8)
2653 curlen -= 4;
2654
2655 wm_set_dma_addr(
2656 &sc->sc_txdescs[nexttx].wtx_addr,
2657 curaddr);
2658 sc->sc_txdescs[nexttx].wtx_cmdlen =
2659 htole32(cksumcmd | curlen);
2660 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2661 0;
2662 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2663 cksumfields;
2664 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2665 lasttx = nexttx;
2666
2667 DPRINTF(WM_DEBUG_TX,
2668 ("%s: TX: desc %d: low %#" PRIxPADDR ", "
2669 "len %#04zx\n",
2670 device_xname(sc->sc_dev), nexttx,
2671 curaddr & 0xffffffffUL, curlen));
2672 }
2673 }
2674
2675 KASSERT(lasttx != -1);
2676
2677 /*
2678 * Set up the command byte on the last descriptor of
2679 * the packet. If we're in the interrupt delay window,
2680 * delay the interrupt.
2681 */
2682 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2683 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2684
2685 /*
2686 * If VLANs are enabled and the packet has a VLAN tag, set
2687 * up the descriptor to encapsulate the packet for us.
2688 *
2689 * This is only valid on the last descriptor of the packet.
2690 */
2691 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2692 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2693 htole32(WTX_CMD_VLE);
2694 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2695 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2696 }
2697
2698 txs->txs_lastdesc = lasttx;
2699
2700 DPRINTF(WM_DEBUG_TX,
2701 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2702 device_xname(sc->sc_dev),
2703 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2704
2705 /* Sync the descriptors we're using. */
2706 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2707 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2708
2709 /* Give the packet to the chip. */
2710 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2711
2712 DPRINTF(WM_DEBUG_TX,
2713 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2714
2715 DPRINTF(WM_DEBUG_TX,
2716 ("%s: TX: finished transmitting packet, job %d\n",
2717 device_xname(sc->sc_dev), sc->sc_txsnext));
2718
2719 /* Advance the tx pointer. */
2720 sc->sc_txfree -= txs->txs_ndesc;
2721 sc->sc_txnext = nexttx;
2722
2723 sc->sc_txsfree--;
2724 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2725
2726 /* Pass the packet to any BPF listeners. */
2727 bpf_mtap(ifp, m0);
2728 }
2729
2730 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2731 /* No more slots; notify upper layer. */
2732 ifp->if_flags |= IFF_OACTIVE;
2733 }
2734
2735 if (sc->sc_txfree != ofree) {
2736 /* Set a watchdog timer in case the chip flakes out. */
2737 ifp->if_timer = 5;
2738 }
2739 }
2740
2741 /*
2742 * wm_watchdog: [ifnet interface function]
2743 *
2744 * Watchdog timer handler.
2745 */
2746 static void
2747 wm_watchdog(struct ifnet *ifp)
2748 {
2749 struct wm_softc *sc = ifp->if_softc;
2750
2751 /*
2752 * Since we're using delayed interrupts, sweep up
2753 * before we report an error.
2754 */
2755 wm_txintr(sc);
2756
2757 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2758 log(LOG_ERR,
2759 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2760 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2761 sc->sc_txnext);
2762 ifp->if_oerrors++;
2763
2764 /* Reset the interface. */
2765 (void) wm_init(ifp);
2766 }
2767
2768 /* Try to get more packets going. */
2769 wm_start(ifp);
2770 }
2771
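/*
 * wm_ifflags_cb:
 *
 *	Callback invoked when the interface flags change: requests a
 *	reinit (ENETRESET) for flags we can't handle in place, and
 *	otherwise updates the Rx filter and VLAN state.
 */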
2772 static int
2773 wm_ifflags_cb(struct ethercom *ec)
2774 {
2775 struct ifnet *ifp = &ec->ec_if;
2776 struct wm_softc *sc = ifp->if_softc;
2777 int change = ifp->if_flags ^ sc->sc_if_flags;
2778
2779 if (change != 0)
2780 sc->sc_if_flags = ifp->if_flags;
2781
2782 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
2783 return ENETRESET;
2784
2785 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2786 wm_set_filter(sc);
2787
2788 wm_set_vlan(sc);
2789
2790 return 0;
2791 }
2792
2793 /*
2794 * wm_ioctl: [ifnet interface function]
2795 *
2796 * Handle control requests from the operator.
2797 */
2798 static int
2799 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2800 {
2801 struct wm_softc *sc = ifp->if_softc;
2802 struct ifreq *ifr = (struct ifreq *) data;
2803 struct ifaddr *ifa = (struct ifaddr *)data;
2804 struct sockaddr_dl *sdl;
2805 int s, error;
2806
2807 s = splnet();
2808
2809 switch (cmd) {
2810 case SIOCSIFMEDIA:
2811 case SIOCGIFMEDIA:
2812 /* Flow control requires full-duplex mode. */
2813 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2814 (ifr->ifr_media & IFM_FDX) == 0)
2815 ifr->ifr_media &= ~IFM_ETH_FMASK;
2816 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2817 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2818 /* We can do both TXPAUSE and RXPAUSE. */
2819 ifr->ifr_media |=
2820 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2821 }
2822 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2823 }
2824 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2825 break;
2826 case SIOCINITIFADDR:
2827 if (ifa->ifa_addr->sa_family == AF_LINK) {
2828 sdl = satosdl(ifp->if_dl->ifa_addr);
2829 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2830 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2831 /* unicast address is first multicast entry */
2832 wm_set_filter(sc);
2833 error = 0;
2834 break;
2835 }
2836 /*FALLTHROUGH*/
2837 default:
2838 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2839 break;
2840
2841 error = 0;
2842
2843 if (cmd == SIOCSIFCAP)
2844 error = (*ifp->if_init)(ifp);
2845 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2846 ;
2847 else if (ifp->if_flags & IFF_RUNNING) {
2848 /*
2849 * Multicast list has changed; set the hardware filter
2850 * accordingly.
2851 */
2852 wm_set_filter(sc);
2853 }
2854 break;
2855 }
2856
2857 /* Try to get more packets going. */
2858 wm_start(ifp);
2859
2860 splx(s);
2861 return error;
2862 }
2863
2864 /*
2865 * wm_intr:
2866 *
2867 * Interrupt service routine.
2868 */
2869 static int
2870 wm_intr(void *arg)
2871 {
2872 struct wm_softc *sc = arg;
2873 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2874 uint32_t icr;
2875 int handled = 0;
2876
2877 while (1 /* CONSTCOND */) {
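		/* Reading ICR acknowledges (clears) the asserted causes. */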
2878 icr = CSR_READ(sc, WMREG_ICR);
2879 if ((icr & sc->sc_icr) == 0)
2880 break;
2881 rnd_add_uint32(&sc->rnd_source, icr);
2882
2883 handled = 1;
2884
2885 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2886 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2887 DPRINTF(WM_DEBUG_RX,
2888 ("%s: RX: got Rx intr 0x%08x\n",
2889 device_xname(sc->sc_dev),
2890 icr & (ICR_RXDMT0|ICR_RXT0)));
2891 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2892 }
2893 #endif
2894 wm_rxintr(sc);
2895
2896 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2897 if (icr & ICR_TXDW) {
2898 DPRINTF(WM_DEBUG_TX,
2899 ("%s: TX: got TXDW interrupt\n",
2900 device_xname(sc->sc_dev)));
2901 WM_EVCNT_INCR(&sc->sc_ev_txdw);
2902 }
2903 #endif
2904 wm_txintr(sc);
2905
2906 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2907 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2908 wm_linkintr(sc, icr);
2909 }
2910
2911 if (icr & ICR_RXO) {
2912 #if defined(WM_DEBUG)
2913 log(LOG_WARNING, "%s: Receive overrun\n",
2914 device_xname(sc->sc_dev));
2915 #endif /* defined(WM_DEBUG) */
2916 }
2917 }
2918
2919 if (handled) {
2920 /* Try to get more packets going. */
2921 wm_start(ifp);
2922 }
2923
2924 return handled;
2925 }
2926
2927 /*
2928 * wm_txintr:
2929 *
2930 * Helper; handle transmit interrupts.
2931 */
2932 static void
2933 wm_txintr(struct wm_softc *sc)
2934 {
2935 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2936 struct wm_txsoft *txs;
2937 uint8_t status;
2938 int i;
2939
2940 ifp->if_flags &= ~IFF_OACTIVE;
2941
2942 /*
2943 * Go through the Tx list and free mbufs for those
2944 * frames which have been transmitted.
2945 */
2946 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2947 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2948 txs = &sc->sc_txsoft[i];
2949
2950 DPRINTF(WM_DEBUG_TX,
2951 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2952
2953 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2954 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2955
2956 status =
2957 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2958 if ((status & WTX_ST_DD) == 0) {
2959 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2960 BUS_DMASYNC_PREREAD);
2961 break;
2962 }
2963
2964 DPRINTF(WM_DEBUG_TX,
2965 ("%s: TX: job %d done: descs %d..%d\n",
2966 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2967 txs->txs_lastdesc));
2968
2969 /*
2970 * XXX We should probably be using the statistics
2971 * XXX registers, but I don't know if they exist
2972 * XXX on chips before the i82544.
2973 */
2974
2975 #ifdef WM_EVENT_COUNTERS
2976 if (status & WTX_ST_TU)
2977 WM_EVCNT_INCR(&sc->sc_ev_tu);
2978 #endif /* WM_EVENT_COUNTERS */
2979
2980 if (status & (WTX_ST_EC|WTX_ST_LC)) {
2981 ifp->if_oerrors++;
2982 if (status & WTX_ST_LC)
2983 log(LOG_WARNING, "%s: late collision\n",
2984 device_xname(sc->sc_dev));
2985 else if (status & WTX_ST_EC) {
2986 ifp->if_collisions += 16;
2987 log(LOG_WARNING, "%s: excessive collisions\n",
2988 device_xname(sc->sc_dev));
2989 }
2990 } else
2991 ifp->if_opackets++;
2992
2993 sc->sc_txfree += txs->txs_ndesc;
2994 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2995 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2996 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2997 m_freem(txs->txs_mbuf);
2998 txs->txs_mbuf = NULL;
2999 }
3000
3001 /* Update the dirty transmit buffer pointer. */
3002 sc->sc_txsdirty = i;
3003 DPRINTF(WM_DEBUG_TX,
3004 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
3005
3006 /*
3007 * If there are no more pending transmissions, cancel the watchdog
3008 * timer.
3009 */
3010 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
3011 ifp->if_timer = 0;
3012 }
3013
3014 /*
3015 * wm_rxintr:
3016 *
3017 * Helper; handle receive interrupts.
3018 */
3019 static void
3020 wm_rxintr(struct wm_softc *sc)
3021 {
3022 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3023 struct wm_rxsoft *rxs;
3024 struct mbuf *m;
3025 int i, len;
3026 uint8_t status, errors;
3027 uint16_t vlantag;
3028
3029 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3030 rxs = &sc->sc_rxsoft[i];
3031
3032 DPRINTF(WM_DEBUG_RX,
3033 ("%s: RX: checking descriptor %d\n",
3034 device_xname(sc->sc_dev), i));
3035
3036 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3037
3038 status = sc->sc_rxdescs[i].wrx_status;
3039 errors = sc->sc_rxdescs[i].wrx_errors;
3040 len = le16toh(sc->sc_rxdescs[i].wrx_len);
3041 vlantag = sc->sc_rxdescs[i].wrx_special;
3042
3043 if ((status & WRX_ST_DD) == 0) {
3044 /*
3045 * We have processed all of the receive descriptors.
3046 */
3047 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3048 break;
3049 }
3050
3051 if (__predict_false(sc->sc_rxdiscard)) {
3052 DPRINTF(WM_DEBUG_RX,
3053 ("%s: RX: discarding contents of descriptor %d\n",
3054 device_xname(sc->sc_dev), i));
3055 WM_INIT_RXDESC(sc, i);
3056 if (status & WRX_ST_EOP) {
3057 /* Reset our state. */
3058 DPRINTF(WM_DEBUG_RX,
3059 ("%s: RX: resetting rxdiscard -> 0\n",
3060 device_xname(sc->sc_dev)));
3061 sc->sc_rxdiscard = 0;
3062 }
3063 continue;
3064 }
3065
3066 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3067 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3068
3069 m = rxs->rxs_mbuf;
3070
3071 /*
3072 * Add a new receive buffer to the ring, unless of
3073 * course the length is zero. Treat the latter as a
3074 * failed mapping.
3075 */
3076 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3077 /*
3078 * Failed, throw away what we've done so
3079 * far, and discard the rest of the packet.
3080 */
3081 ifp->if_ierrors++;
3082 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3083 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3084 WM_INIT_RXDESC(sc, i);
3085 if ((status & WRX_ST_EOP) == 0)
3086 sc->sc_rxdiscard = 1;
3087 if (sc->sc_rxhead != NULL)
3088 m_freem(sc->sc_rxhead);
3089 WM_RXCHAIN_RESET(sc);
3090 DPRINTF(WM_DEBUG_RX,
3091 ("%s: RX: Rx buffer allocation failed, "
3092 "dropping packet%s\n", device_xname(sc->sc_dev),
3093 sc->sc_rxdiscard ? " (discard)" : ""));
3094 continue;
3095 }
3096
3097 m->m_len = len;
3098 sc->sc_rxlen += len;
3099 DPRINTF(WM_DEBUG_RX,
3100 ("%s: RX: buffer at %p len %d\n",
3101 device_xname(sc->sc_dev), m->m_data, len));
3102
3103 /*
3104 * If this is not the end of the packet, keep
3105 * looking.
3106 */
3107 if ((status & WRX_ST_EOP) == 0) {
3108 WM_RXCHAIN_LINK(sc, m);
3109 DPRINTF(WM_DEBUG_RX,
3110 ("%s: RX: not yet EOP, rxlen -> %d\n",
3111 device_xname(sc->sc_dev), sc->sc_rxlen));
3112 continue;
3113 }
3114
3115 /*
3116 * Okay, we have the entire packet now. The chip is
3117 * configured to include the FCS (not all chips can
3118 * be configured to strip it), so we need to trim it.
3119 * May need to adjust length of previous mbuf in the
3120 * chain if the current mbuf is too short.
3121 */
3122 if (m->m_len < ETHER_CRC_LEN) {
3123 sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
3124 m->m_len = 0;
3125 } else {
3126 m->m_len -= ETHER_CRC_LEN;
3127 }
3128 len = sc->sc_rxlen - ETHER_CRC_LEN;
3129
3130 WM_RXCHAIN_LINK(sc, m);
3131
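		/*
		 * Terminate the chain, take the completed packet from
		 * sc_rxhead, and reset the chain pointers for the next
		 * packet.
		 */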
3132 *sc->sc_rxtailp = NULL;
3133 m = sc->sc_rxhead;
3134
3135 WM_RXCHAIN_RESET(sc);
3136
3137 DPRINTF(WM_DEBUG_RX,
3138 ("%s: RX: have entire packet, len -> %d\n",
3139 device_xname(sc->sc_dev), len));
3140
3141 /*
3142 * If an error occurred, update stats and drop the packet.
3143 */
3144 if (errors &
3145 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3146 if (errors & WRX_ER_SE)
3147 log(LOG_WARNING, "%s: symbol error\n",
3148 device_xname(sc->sc_dev));
3149 else if (errors & WRX_ER_SEQ)
3150 log(LOG_WARNING, "%s: receive sequence error\n",
3151 device_xname(sc->sc_dev));
3152 else if (errors & WRX_ER_CE)
3153 log(LOG_WARNING, "%s: CRC error\n",
3154 device_xname(sc->sc_dev));
3155 m_freem(m);
3156 continue;
3157 }
3158
3159 /*
3160 * No errors. Receive the packet.
3161 */
3162 m->m_pkthdr.rcvif = ifp;
3163 m->m_pkthdr.len = len;
3164
3165 /*
3166 * If VLANs are enabled, VLAN packets have been unwrapped
3167 * for us. Associate the tag with the packet.
3168 */
3169 if ((status & WRX_ST_VP) != 0) {
3170 VLAN_INPUT_TAG(ifp, m,
3171 le16toh(vlantag),
3172 continue);
3173 }
3174
3175 /*
3176 * Set up checksum info for this packet.
3177 */
3178 if ((status & WRX_ST_IXSM) == 0) {
3179 if (status & WRX_ST_IPCS) {
3180 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3181 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3182 if (errors & WRX_ER_IPE)
3183 m->m_pkthdr.csum_flags |=
3184 M_CSUM_IPv4_BAD;
3185 }
3186 if (status & WRX_ST_TCPCS) {
3187 /*
3188 * Note: we don't know if this was TCP or UDP,
3189 * so we just set both bits, and expect the
3190 * upper layers to deal.
3191 */
3192 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3193 m->m_pkthdr.csum_flags |=
3194 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3195 M_CSUM_TCPv6 | M_CSUM_UDPv6;
3196 if (errors & WRX_ER_TCPE)
3197 m->m_pkthdr.csum_flags |=
3198 M_CSUM_TCP_UDP_BAD;
3199 }
3200 }
3201
3202 ifp->if_ipackets++;
3203
3204 /* Pass this up to any BPF listeners. */
3205 bpf_mtap(ifp, m);
3206
3207 /* Pass it on. */
3208 (*ifp->if_input)(ifp, m);
3209 }
3210
3211 /* Update the receive pointer. */
3212 sc->sc_rxptr = i;
3213
3214 DPRINTF(WM_DEBUG_RX,
3215 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3216 }
3217
3218 /*
3219 * wm_linkintr_gmii:
3220 *
3221 * Helper; handle link interrupts for GMII.
3222 */
3223 static void
3224 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3225 {
3226
3227 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3228 __func__));
3229
3230 if (icr & ICR_LSC) {
3231 DPRINTF(WM_DEBUG_LINK,
3232 ("%s: LINK: LSC -> mii_tick\n",
3233 device_xname(sc->sc_dev)));
3234 mii_tick(&sc->sc_mii);
3235 if (sc->sc_type == WM_T_82543) {
3236 int miistatus, active;
3237
3238 /*
3239 * With 82543, we need to force speed and
3240 * duplex on the MAC equal to what the PHY
3241 * speed and duplex configuration is.
3242 */
3243 miistatus = sc->sc_mii.mii_media_status;
3244
3245 if (miistatus & IFM_ACTIVE) {
3246 active = sc->sc_mii.mii_media_active;
3247 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3248 switch (IFM_SUBTYPE(active)) {
3249 case IFM_10_T:
3250 sc->sc_ctrl |= CTRL_SPEED_10;
3251 break;
3252 case IFM_100_TX:
3253 sc->sc_ctrl |= CTRL_SPEED_100;
3254 break;
3255 case IFM_1000_T:
3256 sc->sc_ctrl |= CTRL_SPEED_1000;
3257 break;
3258 default:
3259 /*
3260 * fiber?
3261 					 * Should not enter here.
3262 */
3263 printf("unknown media (%x)\n",
3264 active);
3265 break;
3266 }
3267 if (active & IFM_FDX)
3268 sc->sc_ctrl |= CTRL_FD;
3269 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3270 }
3271 } else if ((sc->sc_type == WM_T_ICH8)
3272 && (sc->sc_phytype == WMPHY_IGP_3)) {
3273 wm_kmrn_lock_loss_workaround_ich8lan(sc);
3274 } else if (sc->sc_type == WM_T_PCH) {
3275 wm_k1_gig_workaround_hv(sc,
3276 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3277 }
3278
3279 if ((sc->sc_phytype == WMPHY_82578)
3280 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3281 == IFM_1000_T)) {
3282
3283 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3284 delay(200*1000); /* XXX too big */
3285
3286 /* Link stall fix for link up */
3287 wm_gmii_hv_writereg(sc->sc_dev, 1,
3288 HV_MUX_DATA_CTRL,
3289 HV_MUX_DATA_CTRL_GEN_TO_MAC
3290 | HV_MUX_DATA_CTRL_FORCE_SPEED);
3291 wm_gmii_hv_writereg(sc->sc_dev, 1,
3292 HV_MUX_DATA_CTRL,
3293 HV_MUX_DATA_CTRL_GEN_TO_MAC);
3294 }
3295 }
3296 } else if (icr & ICR_RXSEQ) {
3297 DPRINTF(WM_DEBUG_LINK,
3298 ("%s: LINK Receive sequence error\n",
3299 device_xname(sc->sc_dev)));
3300 }
3301 }
3302
3303 /*
3304 * wm_linkintr_tbi:
3305 *
3306 * Helper; handle link interrupts for TBI mode.
3307 */
3308 static void
3309 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3310 {
3311 uint32_t status;
3312
3313 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3314 __func__));
3315
3316 status = CSR_READ(sc, WMREG_STATUS);
3317 if (icr & ICR_LSC) {
3318 if (status & STATUS_LU) {
3319 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3320 device_xname(sc->sc_dev),
3321 (status & STATUS_FD) ? "FDX" : "HDX"));
3322 /*
3323 * NOTE: CTRL will update TFCE and RFCE automatically,
3324 * so we should update sc->sc_ctrl
3325 */
3326
3327 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3328 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3329 sc->sc_fcrtl &= ~FCRTL_XONE;
3330 if (status & STATUS_FD)
3331 sc->sc_tctl |=
3332 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3333 else
3334 sc->sc_tctl |=
3335 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3336 if (sc->sc_ctrl & CTRL_TFCE)
3337 sc->sc_fcrtl |= FCRTL_XONE;
3338 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3339 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3340 WMREG_OLD_FCRTL : WMREG_FCRTL,
3341 sc->sc_fcrtl);
3342 sc->sc_tbi_linkup = 1;
3343 } else {
3344 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3345 device_xname(sc->sc_dev)));
3346 sc->sc_tbi_linkup = 0;
3347 }
3348 wm_tbi_set_linkled(sc);
3349 } else if (icr & ICR_RXCFG) {
3350 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3351 device_xname(sc->sc_dev)));
3352 sc->sc_tbi_nrxcfg++;
3353 wm_check_for_link(sc);
3354 } else if (icr & ICR_RXSEQ) {
3355 DPRINTF(WM_DEBUG_LINK,
3356 ("%s: LINK: Receive sequence error\n",
3357 device_xname(sc->sc_dev)));
3358 }
3359 }
3360
3361 /*
3362 * wm_linkintr:
3363 *
3364 * Helper; handle link interrupts.
3365 */
3366 static void
3367 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3368 {
3369
3370 if (sc->sc_flags & WM_F_HAS_MII)
3371 wm_linkintr_gmii(sc, icr);
3372 else
3373 wm_linkintr_tbi(sc, icr);
3374 }
3375
3376 /*
3377 * wm_tick:
3378 *
3379 * One second timer, used to check link status, sweep up
3380 * completed transmit jobs, etc.
3381 */
3382 static void
3383 wm_tick(void *arg)
3384 {
3385 struct wm_softc *sc = arg;
3386 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3387 int s;
3388
3389 s = splnet();
3390
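	/* The statistics registers are clear-on-read; accumulate them. */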
3391 if (sc->sc_type >= WM_T_82542_2_1) {
3392 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3393 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3394 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3395 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3396 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3397 }
3398
3399 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3400 ifp->if_ierrors += 0ULL + /* ensure quad_t */
3401 + CSR_READ(sc, WMREG_CRCERRS)
3402 + CSR_READ(sc, WMREG_ALGNERRC)
3403 + CSR_READ(sc, WMREG_SYMERRC)
3404 + CSR_READ(sc, WMREG_RXERRC)
3405 + CSR_READ(sc, WMREG_SEC)
3406 + CSR_READ(sc, WMREG_CEXTERR)
3407 + CSR_READ(sc, WMREG_RLEC);
3408 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
3409
3410 if (sc->sc_flags & WM_F_HAS_MII)
3411 mii_tick(&sc->sc_mii);
3412 else
3413 wm_tbi_check_link(sc);
3414
3415 splx(s);
3416
3417 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3418 }
3419
3420 /*
3421 * wm_reset:
3422 *
3423 * Reset the i82542 chip.
3424 */
3425 static void
3426 wm_reset(struct wm_softc *sc)
3427 {
3428 int phy_reset = 0;
3429 uint32_t reg, mask;
3430 int i;
3431
3432 /*
3433 * Allocate on-chip memory according to the MTU size.
3434 * The Packet Buffer Allocation register must be written
3435 * before the chip is reset.
3436 */
3437 switch (sc->sc_type) {
3438 case WM_T_82547:
3439 case WM_T_82547_2:
3440 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3441 PBA_22K : PBA_30K;
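		/*
		 * On the 82547 the Tx FIFO shares the 40KB on-chip packet
		 * buffer with Rx; sc_pba marks the boundary, so the Tx
		 * FIFO starts there and runs to the end of the buffer.
		 */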
3442 sc->sc_txfifo_head = 0;
3443 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3444 sc->sc_txfifo_size =
3445 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3446 sc->sc_txfifo_stall = 0;
3447 break;
3448 case WM_T_82571:
3449 case WM_T_82572:
3450 	case WM_T_82575: /* XXX need special handling for jumbo frames */
3451 case WM_T_80003:
3452 sc->sc_pba = PBA_32K;
3453 break;
3454 case WM_T_82580:
3455 case WM_T_82580ER:
3456 sc->sc_pba = PBA_35K;
3457 break;
3458 case WM_T_82576:
3459 sc->sc_pba = PBA_64K;
3460 break;
3461 case WM_T_82573:
3462 sc->sc_pba = PBA_12K;
3463 break;
3464 case WM_T_82574:
3465 case WM_T_82583:
3466 sc->sc_pba = PBA_20K;
3467 break;
3468 case WM_T_ICH8:
3469 sc->sc_pba = PBA_8K;
3470 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3471 break;
3472 case WM_T_ICH9:
3473 case WM_T_ICH10:
3474 sc->sc_pba = PBA_10K;
3475 break;
3476 case WM_T_PCH:
3477 case WM_T_PCH2:
3478 sc->sc_pba = PBA_26K;
3479 break;
3480 default:
3481 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3482 PBA_40K : PBA_48K;
3483 break;
3484 }
3485 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3486
3487 /* Prevent the PCI-E bus from sticking */
3488 if (sc->sc_flags & WM_F_PCIE) {
3489 int timeout = 800;
3490
3491 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3492 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3493
3494 while (timeout--) {
3495 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
3496 break;
3497 delay(100);
3498 }
3499 }
3500
3501 /* Set the completion timeout for interface */
3502 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
3503 wm_set_pcie_completion_timeout(sc);
3504
3505 /* Clear interrupt */
3506 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3507
3508 /* Stop the transmit and receive processes. */
3509 CSR_WRITE(sc, WMREG_RCTL, 0);
3510 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3511 sc->sc_rctl &= ~RCTL_EN;
3512
3513 /* XXX set_tbi_sbp_82543() */
3514
3515 delay(10*1000);
3516
3517 /* Must acquire the MDIO ownership before MAC reset */
3518 switch (sc->sc_type) {
3519 case WM_T_82573:
3520 case WM_T_82574:
3521 case WM_T_82583:
3522 i = 0;
3523 reg = CSR_READ(sc, WMREG_EXTCNFCTR)
3524 | EXTCNFCTR_MDIO_SW_OWNERSHIP;
3525 do {
3526 CSR_WRITE(sc, WMREG_EXTCNFCTR,
3527 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
3528 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
3529 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
3530 break;
3531 reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
3532 delay(2*1000);
3533 i++;
3534 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
3535 break;
3536 default:
3537 break;
3538 }
3539
3540 /*
3541 * 82541 Errata 29? & 82547 Errata 28?
3542 * See also the description about PHY_RST bit in CTRL register
3543 * in 8254x_GBe_SDM.pdf.
3544 */
3545 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3546 CSR_WRITE(sc, WMREG_CTRL,
3547 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3548 delay(5000);
3549 }
3550
3551 switch (sc->sc_type) {
3552 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3553 case WM_T_82541:
3554 case WM_T_82541_2:
3555 case WM_T_82547:
3556 case WM_T_82547_2:
3557 /*
3558 * On some chipsets, a reset through a memory-mapped write
3559 * cycle can cause the chip to reset before completing the
3560 		 * write cycle. This causes a major headache that can be
3561 * avoided by issuing the reset via indirect register writes
3562 * through I/O space.
3563 *
3564 * So, if we successfully mapped the I/O BAR at attach time,
3565 * use that. Otherwise, try our luck with a memory-mapped
3566 * reset.
3567 */
3568 if (sc->sc_flags & WM_F_IOH_VALID)
3569 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3570 else
3571 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3572 break;
3573 case WM_T_82545_3:
3574 case WM_T_82546_3:
3575 /* Use the shadow control register on these chips. */
3576 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3577 break;
3578 case WM_T_80003:
3579 mask = swfwphysem[sc->sc_funcid];
3580 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3581 wm_get_swfw_semaphore(sc, mask);
3582 CSR_WRITE(sc, WMREG_CTRL, reg);
3583 wm_put_swfw_semaphore(sc, mask);
3584 break;
3585 case WM_T_ICH8:
3586 case WM_T_ICH9:
3587 case WM_T_ICH10:
3588 case WM_T_PCH:
3589 case WM_T_PCH2:
3590 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3591 if (wm_check_reset_block(sc) == 0) {
3592 /*
3593 * Gate automatic PHY configuration by hardware on
3594 			 * managed 82579
3595 */
3596 if ((sc->sc_type == WM_T_PCH2)
3597 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3598 != 0))
3599 wm_gate_hw_phy_config_ich8lan(sc, 1);
3600
3601
3602 reg |= CTRL_PHY_RESET;
3603 phy_reset = 1;
3604 }
3605 wm_get_swfwhw_semaphore(sc);
3606 CSR_WRITE(sc, WMREG_CTRL, reg);
3607 delay(20*1000);
3608 wm_put_swfwhw_semaphore(sc);
3609 break;
3610 case WM_T_82542_2_0:
3611 case WM_T_82542_2_1:
3612 case WM_T_82543:
3613 case WM_T_82540:
3614 case WM_T_82545:
3615 case WM_T_82546:
3616 case WM_T_82571:
3617 case WM_T_82572:
3618 case WM_T_82573:
3619 case WM_T_82574:
3620 case WM_T_82575:
3621 case WM_T_82576:
3622 case WM_T_82580:
3623 case WM_T_82580ER:
3624 case WM_T_82583:
3625 default:
3626 /* Everything else can safely use the documented method. */
3627 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3628 break;
3629 }
3630
3631 if (phy_reset != 0)
3632 wm_get_cfg_done(sc);
3633
3634 /* reload EEPROM */
3635 switch (sc->sc_type) {
3636 case WM_T_82542_2_0:
3637 case WM_T_82542_2_1:
3638 case WM_T_82543:
3639 case WM_T_82544:
3640 delay(10);
3641 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3642 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3643 delay(2000);
3644 break;
3645 case WM_T_82540:
3646 case WM_T_82545:
3647 case WM_T_82545_3:
3648 case WM_T_82546:
3649 case WM_T_82546_3:
3650 delay(5*1000);
3651 /* XXX Disable HW ARPs on ASF enabled adapters */
3652 break;
3653 case WM_T_82541:
3654 case WM_T_82541_2:
3655 case WM_T_82547:
3656 case WM_T_82547_2:
3657 delay(20000);
3658 /* XXX Disable HW ARPs on ASF enabled adapters */
3659 break;
3660 case WM_T_82571:
3661 case WM_T_82572:
3662 case WM_T_82573:
3663 case WM_T_82574:
3664 case WM_T_82583:
3665 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3666 delay(10);
3667 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3668 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3669 }
3670 /* check EECD_EE_AUTORD */
3671 wm_get_auto_rd_done(sc);
3672 /*
3673 		 * PHY configuration from the NVM starts only after
3674 		 * EECD_AUTO_RD is set.
3675 */
3676 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3677 || (sc->sc_type == WM_T_82583))
3678 delay(25*1000);
3679 break;
3680 case WM_T_82575:
3681 case WM_T_82576:
3682 case WM_T_82580:
3683 case WM_T_82580ER:
3684 case WM_T_80003:
3685 case WM_T_ICH8:
3686 case WM_T_ICH9:
3687 /* check EECD_EE_AUTORD */
3688 wm_get_auto_rd_done(sc);
3689 break;
3690 case WM_T_ICH10:
3691 case WM_T_PCH:
3692 case WM_T_PCH2:
3693 wm_lan_init_done(sc);
3694 break;
3695 default:
3696 panic("%s: unknown type\n", __func__);
3697 }
3698
3699 /* Check whether EEPROM is present or not */
3700 switch (sc->sc_type) {
3701 case WM_T_82575:
3702 case WM_T_82576:
3703 #if 0 /* XXX */
3704 case WM_T_82580:
3705 case WM_T_82580ER:
3706 #endif
3707 case WM_T_ICH8:
3708 case WM_T_ICH9:
3709 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3710 /* Not found */
3711 sc->sc_flags |= WM_F_EEPROM_INVALID;
3712 if ((sc->sc_type == WM_T_82575)
3713 || (sc->sc_type == WM_T_82576)
3714 || (sc->sc_type == WM_T_82580)
3715 || (sc->sc_type == WM_T_82580ER))
3716 wm_reset_init_script_82575(sc);
3717 }
3718 break;
3719 default:
3720 break;
3721 }
3722
3723 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)) {
3724 /* clear global device reset status bit */
3725 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3726 }
3727
3728 /* Clear any pending interrupt events. */
3729 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3730 reg = CSR_READ(sc, WMREG_ICR);
3731
3732 /* reload sc_ctrl */
3733 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3734
3735 /* dummy read from WUC */
3736 if (sc->sc_type == WM_T_PCH)
3737 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3738 /*
3739 * For PCH, this write will make sure that any noise will be detected
3740 * as a CRC error and be dropped rather than show up as a bad packet
3741 * to the DMA engine
3742 */
3743 if (sc->sc_type == WM_T_PCH)
3744 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3745
3746 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3747 CSR_WRITE(sc, WMREG_WUC, 0);
3748
3749 /* XXX need special handling for 82580 */
3750 }
3751
3752 static void
3753 wm_set_vlan(struct wm_softc *sc)
3754 {
3755 /* Deal with VLAN enables. */
3756 if (VLAN_ATTACHED(&sc->sc_ethercom))
3757 sc->sc_ctrl |= CTRL_VME;
3758 else
3759 sc->sc_ctrl &= ~CTRL_VME;
3760
3761 /* Write the control registers. */
3762 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3763 }
3764
3765 /*
3766 * wm_init: [ifnet interface function]
3767 *
3768 * Initialize the interface. Must be called at splnet().
3769 */
3770 static int
3771 wm_init(struct ifnet *ifp)
3772 {
3773 struct wm_softc *sc = ifp->if_softc;
3774 struct wm_rxsoft *rxs;
3775 int i, error = 0;
3776 uint32_t reg;
3777
3778 /*
3779 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3780 	 * There is a small but measurable benefit to avoiding the adjustment
3781 * of the descriptor so that the headers are aligned, for normal mtu,
3782 * on such platforms. One possibility is that the DMA itself is
3783 * slightly more efficient if the front of the entire packet (instead
3784 * of the front of the headers) is aligned.
3785 *
3786 * Note we must always set align_tweak to 0 if we are using
3787 * jumbo frames.
3788 */
3789 #ifdef __NO_STRICT_ALIGNMENT
3790 sc->sc_align_tweak = 0;
3791 #else
3792 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3793 sc->sc_align_tweak = 0;
3794 else
3795 sc->sc_align_tweak = 2;
3796 #endif /* __NO_STRICT_ALIGNMENT */
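	/*
	 * Illustrative arithmetic, assuming the usual MCLBYTES of 2048: for a
	 * 1500-byte MTU, 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 1518
	 * <= 2046, so align_tweak is 2; the 2-byte offset plus the 14-byte
	 * Ethernet header leaves the IP header 4-byte aligned.  Jumbo frames
	 * fail the test and get a tweak of 0, as required above.
	 */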
3797
3798 /* Cancel any pending I/O. */
3799 wm_stop(ifp, 0);
3800
3801 /* update statistics before reset */
3802 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3803 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3804
3805 /* Reset the chip to a known state. */
3806 wm_reset(sc);
3807
3808 switch (sc->sc_type) {
3809 case WM_T_82571:
3810 case WM_T_82572:
3811 case WM_T_82573:
3812 case WM_T_82574:
3813 case WM_T_82583:
3814 case WM_T_80003:
3815 case WM_T_ICH8:
3816 case WM_T_ICH9:
3817 case WM_T_ICH10:
3818 case WM_T_PCH:
3819 case WM_T_PCH2:
3820 if (wm_check_mng_mode(sc) != 0)
3821 wm_get_hw_control(sc);
3822 break;
3823 default:
3824 break;
3825 }
3826
3827 /* Reset the PHY. */
3828 if (sc->sc_flags & WM_F_HAS_MII)
3829 wm_gmii_reset(sc);
3830
3831 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3832 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3833 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
3834 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3835
3836 /* Initialize the transmit descriptor ring. */
3837 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3838 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3839 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3840 sc->sc_txfree = WM_NTXDESC(sc);
3841 sc->sc_txnext = 0;
3842
3843 if (sc->sc_type < WM_T_82543) {
3844 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
3845 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
3846 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3847 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3848 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3849 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3850 } else {
3851 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
3852 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
3853 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3854 CSR_WRITE(sc, WMREG_TDH, 0);
3855 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3856 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
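		/*
		 * 375 here is sc_itr (1500, programmed below) / 4: these delay
		 * timers tick in 1024 ns units, four times the 256 ns ITR
		 * granularity, so the absolute delay matches the ITR setting.
		 */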
3857
3858 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3859 /*
3860 * Don't write TDT before TCTL.EN is set.
3861 			 * See the documentation.
3862 */
3863 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3864 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3865 | TXDCTL_WTHRESH(0));
3866 else {
3867 CSR_WRITE(sc, WMREG_TDT, 0);
3868 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3869 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3870 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3871 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3872 }
3873 }
3874 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3875 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3876
3877 /* Initialize the transmit job descriptors. */
3878 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3879 sc->sc_txsoft[i].txs_mbuf = NULL;
3880 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3881 sc->sc_txsnext = 0;
3882 sc->sc_txsdirty = 0;
3883
3884 /*
3885 * Initialize the receive descriptor and receive job
3886 * descriptor rings.
3887 */
3888 if (sc->sc_type < WM_T_82543) {
3889 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3890 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3891 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3892 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3893 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3894 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3895
3896 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3897 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3898 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3899 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3900 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3901 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3902 } else {
3903 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3904 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3905 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3906 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3907 CSR_WRITE(sc, WMREG_EITR(0), 450);
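			/*
			 * SRRCTL expresses the receive buffer size in units of
			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, so MCLBYTES must
			 * be a multiple of that unit; the check below panics
			 * otherwise.
			 */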
3908 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
3909 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
3910 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3911 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
3912 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3913 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3914 | RXDCTL_WTHRESH(1));
3915 } else {
3916 CSR_WRITE(sc, WMREG_RDH, 0);
3917 CSR_WRITE(sc, WMREG_RDT, 0);
3918 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3919 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3920 }
3921 }
3922 for (i = 0; i < WM_NRXDESC; i++) {
3923 rxs = &sc->sc_rxsoft[i];
3924 if (rxs->rxs_mbuf == NULL) {
3925 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3926 log(LOG_ERR, "%s: unable to allocate or map rx "
3927 "buffer %d, error = %d\n",
3928 device_xname(sc->sc_dev), i, error);
3929 /*
3930 * XXX Should attempt to run with fewer receive
3931 * XXX buffers instead of just failing.
3932 */
3933 wm_rxdrain(sc);
3934 goto out;
3935 }
3936 } else {
3937 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3938 WM_INIT_RXDESC(sc, i);
3939 /*
3940 			 * For 82575 and newer devices, the RX descriptors
3941 * must be initialized after the setting of RCTL.EN in
3942 * wm_set_filter()
3943 */
3944 }
3945 }
3946 sc->sc_rxptr = 0;
3947 sc->sc_rxdiscard = 0;
3948 WM_RXCHAIN_RESET(sc);
3949
3950 /*
3951 * Clear out the VLAN table -- we don't use it (yet).
3952 */
3953 CSR_WRITE(sc, WMREG_VET, 0);
3954 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3955 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3956
3957 /*
3958 * Set up flow-control parameters.
3959 *
3960 * XXX Values could probably stand some tuning.
3961 */
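	/*
	 * FCRTH/FCRTL below program the receive FIFO high/low water marks at
	 * which XOFF/XON pause frames are generated; FCTTV is the timer value
	 * carried in transmitted XOFF frames.
	 */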
3962 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3963 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
3964 && (sc->sc_type != WM_T_PCH2)) {
3965 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3966 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3967 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3968 }
3969
3970 sc->sc_fcrtl = FCRTL_DFLT;
3971 if (sc->sc_type < WM_T_82543) {
3972 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3973 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3974 } else {
3975 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3976 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3977 }
3978
3979 if (sc->sc_type == WM_T_80003)
3980 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3981 else
3982 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3983
3984 /* Writes the control register. */
3985 wm_set_vlan(sc);
3986
3987 if (sc->sc_flags & WM_F_HAS_MII) {
3988 int val;
3989
3990 switch (sc->sc_type) {
3991 case WM_T_80003:
3992 case WM_T_ICH8:
3993 case WM_T_ICH9:
3994 case WM_T_ICH10:
3995 case WM_T_PCH:
3996 case WM_T_PCH2:
3997 /*
3998 * Set the mac to wait the maximum time between each
3999 * iteration and increase the max iterations when
4000 * polling the phy; this fixes erroneous timeouts at
4001 * 10Mbps.
4002 */
4003 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4004 0xFFFF);
4005 val = wm_kmrn_readreg(sc,
4006 KUMCTRLSTA_OFFSET_INB_PARAM);
4007 val |= 0x3F;
4008 wm_kmrn_writereg(sc,
4009 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4010 break;
4011 default:
4012 break;
4013 }
4014
4015 if (sc->sc_type == WM_T_80003) {
4016 val = CSR_READ(sc, WMREG_CTRL_EXT);
4017 val &= ~CTRL_EXT_LINK_MODE_MASK;
4018 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4019
4020 			/* Bypass RX and TX FIFOs */
4021 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4022 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4023 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4024 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4025 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4026 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4027 }
4028 }
4029 #if 0
4030 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4031 #endif
4032
4033 /*
4034 * Set up checksum offload parameters.
4035 */
4036 reg = CSR_READ(sc, WMREG_RXCSUM);
4037 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4038 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4039 reg |= RXCSUM_IPOFL;
4040 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4041 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4042 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4043 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4044 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4045
4046 /* Reset TBI's RXCFG count */
4047 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4048
4049 /*
4050 * Set up the interrupt registers.
4051 */
4052 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4053 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4054 ICR_RXO | ICR_RXT0;
4055 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4056 sc->sc_icr |= ICR_RXCFG;
4057 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4058
4059 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4060 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4061 || (sc->sc_type == WM_T_PCH2)) {
4062 reg = CSR_READ(sc, WMREG_KABGTXD);
4063 reg |= KABGTXD_BGSQLBIAS;
4064 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4065 }
4066
4067 /* Set up the inter-packet gap. */
4068 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4069
4070 if (sc->sc_type >= WM_T_82543) {
4071 /*
4072 * Set up the interrupt throttling register (units of 256ns)
4073 * Note that a footnote in Intel's documentation says this
4074 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4075 * or 10Mbit mode. Empirically, it appears to be the case
4076 		 * that this is also true for the 1024ns units of the other
4077 * interrupt-related timer registers -- so, really, we ought
4078 * to divide this value by 4 when the link speed is low.
4079 *
4080 * XXX implement this division at link speed change!
4081 */
4082
4083 /*
4084 * For N interrupts/sec, set this value to:
4085 * 1000000000 / (N * 256). Note that we set the
4086 * absolute and packet timer values to this value
4087 * divided by 4 to get "simple timer" behavior.
4088 */
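		/*
		 * Worked example: sc_itr = 1500 below gives
		 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec.
		 */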
4089
4090 sc->sc_itr = 1500; /* 2604 ints/sec */
4091 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4092 }
4093
4094 /* Set the VLAN ethernetype. */
4095 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4096
4097 /*
4098 * Set up the transmit control register; we start out with
4099 	 * a collision distance suitable for FDX, but update it when
4100 * we resolve the media type.
4101 */
4102 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4103 | TCTL_CT(TX_COLLISION_THRESHOLD)
4104 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4105 if (sc->sc_type >= WM_T_82571)
4106 sc->sc_tctl |= TCTL_MULR;
4107 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4108
4109 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4110 /*
4111 * Write TDT after TCTL.EN is set.
4112 		 * See the documentation.
4113 */
4114 CSR_WRITE(sc, WMREG_TDT, 0);
4115 }
4116
4117 if (sc->sc_type == WM_T_80003) {
4118 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4119 reg &= ~TCTL_EXT_GCEX_MASK;
4120 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4121 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4122 }
4123
4124 /* Set the media. */
4125 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4126 goto out;
4127
4128 /* Configure for OS presence */
4129 wm_init_manageability(sc);
4130
4131 /*
4132 * Set up the receive control register; we actually program
4133 * the register when we set the receive filter. Use multicast
4134 * address offset type 0.
4135 *
4136 * Only the i82544 has the ability to strip the incoming
4137 * CRC, so we don't enable that feature.
4138 */
4139 sc->sc_mchash_type = 0;
4140 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4141 | RCTL_MO(sc->sc_mchash_type);
4142
4143 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4144 && (ifp->if_mtu > ETHERMTU)) {
4145 sc->sc_rctl |= RCTL_LPE;
4146 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4147 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4148 }
4149
4150 if (MCLBYTES == 2048) {
4151 sc->sc_rctl |= RCTL_2k;
4152 } else {
4153 if (sc->sc_type >= WM_T_82543) {
4154 switch (MCLBYTES) {
4155 case 4096:
4156 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4157 break;
4158 case 8192:
4159 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4160 break;
4161 case 16384:
4162 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4163 break;
4164 default:
4165 panic("wm_init: MCLBYTES %d unsupported",
4166 MCLBYTES);
4167 break;
4168 }
4169 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4170 }
4171
4172 /* Set the receive filter. */
4173 wm_set_filter(sc);
4174
4175 	/* On 82575 and later, set RDT only if RX is enabled */
4176 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4177 for (i = 0; i < WM_NRXDESC; i++)
4178 WM_INIT_RXDESC(sc, i);
4179
4180 /* Start the one second link check clock. */
4181 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4182
4183 /* ...all done! */
4184 ifp->if_flags |= IFF_RUNNING;
4185 ifp->if_flags &= ~IFF_OACTIVE;
4186
4187 out:
4188 sc->sc_if_flags = ifp->if_flags;
4189 if (error)
4190 log(LOG_ERR, "%s: interface not running\n",
4191 device_xname(sc->sc_dev));
4192 return error;
4193 }
4194
4195 /*
4196 * wm_rxdrain:
4197 *
4198 * Drain the receive queue.
4199 */
4200 static void
4201 wm_rxdrain(struct wm_softc *sc)
4202 {
4203 struct wm_rxsoft *rxs;
4204 int i;
4205
4206 for (i = 0; i < WM_NRXDESC; i++) {
4207 rxs = &sc->sc_rxsoft[i];
4208 if (rxs->rxs_mbuf != NULL) {
4209 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4210 m_freem(rxs->rxs_mbuf);
4211 rxs->rxs_mbuf = NULL;
4212 }
4213 }
4214 }
4215
4216 /*
4217 * wm_stop: [ifnet interface function]
4218 *
4219 * Stop transmission on the interface.
4220 */
4221 static void
4222 wm_stop(struct ifnet *ifp, int disable)
4223 {
4224 struct wm_softc *sc = ifp->if_softc;
4225 struct wm_txsoft *txs;
4226 int i;
4227
4228 /* Stop the one second clock. */
4229 callout_stop(&sc->sc_tick_ch);
4230
4231 /* Stop the 82547 Tx FIFO stall check timer. */
4232 if (sc->sc_type == WM_T_82547)
4233 callout_stop(&sc->sc_txfifo_ch);
4234
4235 if (sc->sc_flags & WM_F_HAS_MII) {
4236 /* Down the MII. */
4237 mii_down(&sc->sc_mii);
4238 } else {
4239 #if 0
4240 /* Should we clear PHY's status properly? */
4241 wm_reset(sc);
4242 #endif
4243 }
4244
4245 /* Stop the transmit and receive processes. */
4246 CSR_WRITE(sc, WMREG_TCTL, 0);
4247 CSR_WRITE(sc, WMREG_RCTL, 0);
4248 sc->sc_rctl &= ~RCTL_EN;
4249
4250 /*
4251 * Clear the interrupt mask to ensure the device cannot assert its
4252 * interrupt line.
4253 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4254 * any currently pending or shared interrupt.
4255 */
4256 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4257 sc->sc_icr = 0;
4258
4259 /* Release any queued transmit buffers. */
4260 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4261 txs = &sc->sc_txsoft[i];
4262 if (txs->txs_mbuf != NULL) {
4263 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4264 m_freem(txs->txs_mbuf);
4265 txs->txs_mbuf = NULL;
4266 }
4267 }
4268
4269 /* Mark the interface as down and cancel the watchdog timer. */
4270 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4271 ifp->if_timer = 0;
4272
4273 if (disable)
4274 wm_rxdrain(sc);
4275
4276 #if 0 /* notyet */
4277 if (sc->sc_type >= WM_T_82544)
4278 CSR_WRITE(sc, WMREG_WUC, 0);
4279 #endif
4280 }
4281
4282 void
4283 wm_get_auto_rd_done(struct wm_softc *sc)
4284 {
4285 int i;
4286
4287 /* wait for eeprom to reload */
4288 switch (sc->sc_type) {
4289 case WM_T_82571:
4290 case WM_T_82572:
4291 case WM_T_82573:
4292 case WM_T_82574:
4293 case WM_T_82583:
4294 case WM_T_82575:
4295 case WM_T_82576:
4296 case WM_T_82580:
4297 case WM_T_82580ER:
4298 case WM_T_80003:
4299 case WM_T_ICH8:
4300 case WM_T_ICH9:
4301 for (i = 0; i < 10; i++) {
4302 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4303 break;
4304 delay(1000);
4305 }
4306 if (i == 10) {
4307 log(LOG_ERR, "%s: auto read from eeprom failed to "
4308 "complete\n", device_xname(sc->sc_dev));
4309 }
4310 break;
4311 default:
4312 break;
4313 }
4314 }
4315
4316 void
4317 wm_lan_init_done(struct wm_softc *sc)
4318 {
4319 uint32_t reg = 0;
4320 int i;
4321
4322 /* wait for eeprom to reload */
4323 switch (sc->sc_type) {
4324 case WM_T_ICH10:
4325 case WM_T_PCH:
4326 case WM_T_PCH2:
4327 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4328 reg = CSR_READ(sc, WMREG_STATUS);
4329 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4330 break;
4331 delay(100);
4332 }
4333 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4334 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4335 "complete\n", device_xname(sc->sc_dev), __func__);
4336 }
4337 break;
4338 default:
4339 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4340 __func__);
4341 break;
4342 }
4343
4344 reg &= ~STATUS_LAN_INIT_DONE;
4345 CSR_WRITE(sc, WMREG_STATUS, reg);
4346 }
4347
4348 void
4349 wm_get_cfg_done(struct wm_softc *sc)
4350 {
4351 int mask;
4352 uint32_t reg;
4353 int i;
4354
4355 /* wait for eeprom to reload */
4356 switch (sc->sc_type) {
4357 case WM_T_82542_2_0:
4358 case WM_T_82542_2_1:
4359 /* null */
4360 break;
4361 case WM_T_82543:
4362 case WM_T_82544:
4363 case WM_T_82540:
4364 case WM_T_82545:
4365 case WM_T_82545_3:
4366 case WM_T_82546:
4367 case WM_T_82546_3:
4368 case WM_T_82541:
4369 case WM_T_82541_2:
4370 case WM_T_82547:
4371 case WM_T_82547_2:
4372 case WM_T_82573:
4373 case WM_T_82574:
4374 case WM_T_82583:
4375 /* generic */
4376 delay(10*1000);
4377 break;
4378 case WM_T_80003:
4379 case WM_T_82571:
4380 case WM_T_82572:
4381 case WM_T_82575:
4382 case WM_T_82576:
4383 case WM_T_82580:
4384 case WM_T_82580ER:
4385 if (sc->sc_type == WM_T_82571) {
4386 /* Only 82571 shares port 0 */
4387 mask = EEMNGCTL_CFGDONE_0;
4388 } else
4389 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4390 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4391 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4392 break;
4393 delay(1000);
4394 }
4395 if (i >= WM_PHY_CFG_TIMEOUT) {
4396 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
4397 device_xname(sc->sc_dev), __func__));
4398 }
4399 break;
4400 case WM_T_ICH8:
4401 case WM_T_ICH9:
4402 case WM_T_ICH10:
4403 case WM_T_PCH:
4404 case WM_T_PCH2:
4405 if (sc->sc_type >= WM_T_PCH) {
4406 reg = CSR_READ(sc, WMREG_STATUS);
4407 if ((reg & STATUS_PHYRA) != 0)
4408 CSR_WRITE(sc, WMREG_STATUS,
4409 reg & ~STATUS_PHYRA);
4410 }
4411 delay(10*1000);
4412 break;
4413 default:
4414 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4415 __func__);
4416 break;
4417 }
4418 }
4419
4420 /*
4421 * wm_acquire_eeprom:
4422 *
4423 * Perform the EEPROM handshake required on some chips.
4424 */
4425 static int
4426 wm_acquire_eeprom(struct wm_softc *sc)
4427 {
4428 uint32_t reg;
4429 int x;
4430 int ret = 0;
4431
4432 	/* Flash-type EEPROMs need no handshake; always succeed. */
4433 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4434 return 0;
4435
4436 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
4437 ret = wm_get_swfwhw_semaphore(sc);
4438 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
4439 /* this will also do wm_get_swsm_semaphore() if needed */
4440 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
4441 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4442 ret = wm_get_swsm_semaphore(sc);
4443 }
4444
4445 if (ret) {
4446 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4447 __func__);
4448 return 1;
4449 }
4450
4451 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4452 reg = CSR_READ(sc, WMREG_EECD);
4453
4454 /* Request EEPROM access. */
4455 reg |= EECD_EE_REQ;
4456 CSR_WRITE(sc, WMREG_EECD, reg);
4457
4458 		/* ... and wait for it to be granted. */
4459 for (x = 0; x < 1000; x++) {
4460 reg = CSR_READ(sc, WMREG_EECD);
4461 if (reg & EECD_EE_GNT)
4462 break;
4463 delay(5);
4464 }
4465 if ((reg & EECD_EE_GNT) == 0) {
4466 aprint_error_dev(sc->sc_dev,
4467 "could not acquire EEPROM GNT\n");
4468 reg &= ~EECD_EE_REQ;
4469 CSR_WRITE(sc, WMREG_EECD, reg);
4470 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4471 wm_put_swfwhw_semaphore(sc);
4472 if (sc->sc_flags & WM_F_SWFW_SYNC)
4473 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4474 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4475 wm_put_swsm_semaphore(sc);
4476 return 1;
4477 }
4478 }
4479
4480 return 0;
4481 }
4482
4483 /*
4484 * wm_release_eeprom:
4485 *
4486 * Release the EEPROM mutex.
4487 */
4488 static void
4489 wm_release_eeprom(struct wm_softc *sc)
4490 {
4491 uint32_t reg;
4492
4493 	/* Flash-type EEPROMs need no handshake; nothing to release. */
4494 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4495 return;
4496
4497 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4498 reg = CSR_READ(sc, WMREG_EECD);
4499 reg &= ~EECD_EE_REQ;
4500 CSR_WRITE(sc, WMREG_EECD, reg);
4501 }
4502
4503 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4504 wm_put_swfwhw_semaphore(sc);
4505 if (sc->sc_flags & WM_F_SWFW_SYNC)
4506 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4507 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4508 wm_put_swsm_semaphore(sc);
4509 }
4510
4511 /*
4512 * wm_eeprom_sendbits:
4513 *
4514 * Send a series of bits to the EEPROM.
4515 */
4516 static void
4517 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
4518 {
4519 uint32_t reg;
4520 int x;
4521
4522 reg = CSR_READ(sc, WMREG_EECD);
4523
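	/* Clock each bit out MSB-first: present it on DI, then pulse SK. */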
4524 for (x = nbits; x > 0; x--) {
4525 if (bits & (1U << (x - 1)))
4526 reg |= EECD_DI;
4527 else
4528 reg &= ~EECD_DI;
4529 CSR_WRITE(sc, WMREG_EECD, reg);
4530 delay(2);
4531 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4532 delay(2);
4533 CSR_WRITE(sc, WMREG_EECD, reg);
4534 delay(2);
4535 }
4536 }
4537
4538 /*
4539 * wm_eeprom_recvbits:
4540 *
4541 * Receive a series of bits from the EEPROM.
4542 */
4543 static void
4544 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
4545 {
4546 uint32_t reg, val;
4547 int x;
4548
4549 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
4550
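	/* Clock each bit in MSB-first: raise SK, then sample DO. */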
4551 val = 0;
4552 for (x = nbits; x > 0; x--) {
4553 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4554 delay(2);
4555 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
4556 val |= (1U << (x - 1));
4557 CSR_WRITE(sc, WMREG_EECD, reg);
4558 delay(2);
4559 }
4560 *valp = val;
4561 }
4562
4563 /*
4564 * wm_read_eeprom_uwire:
4565 *
4566 * Read a word from the EEPROM using the MicroWire protocol.
4567 */
4568 static int
4569 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4570 {
4571 uint32_t reg, val;
4572 int i;
4573
4574 for (i = 0; i < wordcnt; i++) {
4575 /* Clear SK and DI. */
4576 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
4577 CSR_WRITE(sc, WMREG_EECD, reg);
4578
4579 /* Set CHIP SELECT. */
4580 reg |= EECD_CS;
4581 CSR_WRITE(sc, WMREG_EECD, reg);
4582 delay(2);
4583
4584 /* Shift in the READ command. */
4585 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
4586
4587 /* Shift in address. */
4588 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
4589
4590 /* Shift out the data. */
4591 wm_eeprom_recvbits(sc, &val, 16);
4592 data[i] = val & 0xffff;
4593
4594 /* Clear CHIP SELECT. */
4595 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
4596 CSR_WRITE(sc, WMREG_EECD, reg);
4597 delay(2);
4598 }
4599
4600 return 0;
4601 }
4602
4603 /*
4604 * wm_spi_eeprom_ready:
4605 *
4606 * Wait for a SPI EEPROM to be ready for commands.
4607 */
4608 static int
4609 wm_spi_eeprom_ready(struct wm_softc *sc)
4610 {
4611 uint32_t val;
4612 int usec;
4613
4614 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
4615 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
4616 wm_eeprom_recvbits(sc, &val, 8);
4617 if ((val & SPI_SR_RDY) == 0)
4618 break;
4619 }
4620 if (usec >= SPI_MAX_RETRIES) {
4621 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
4622 return 1;
4623 }
4624 return 0;
4625 }
4626
4627 /*
4628 * wm_read_eeprom_spi:
4629 *
4630  * Read a word from the EEPROM using the SPI protocol.
4631 */
4632 static int
4633 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4634 {
4635 uint32_t reg, val;
4636 int i;
4637 uint8_t opc;
4638
4639 /* Clear SK and CS. */
4640 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
4641 CSR_WRITE(sc, WMREG_EECD, reg);
4642 delay(2);
4643
4644 if (wm_spi_eeprom_ready(sc))
4645 return 1;
4646
4647 /* Toggle CS to flush commands. */
4648 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
4649 delay(2);
4650 CSR_WRITE(sc, WMREG_EECD, reg);
4651 delay(2);
4652
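	/*
	 * SPI parts with 8 address bits encode a 9th address bit (A8) in the
	 * opcode for words at offset 128 and above; the device is
	 * byte-addressed, hence the word index is shifted left by one below.
	 */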
4653 opc = SPI_OPC_READ;
4654 if (sc->sc_ee_addrbits == 8 && word >= 128)
4655 opc |= SPI_OPC_A8;
4656
4657 wm_eeprom_sendbits(sc, opc, 8);
4658 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
4659
4660 for (i = 0; i < wordcnt; i++) {
4661 wm_eeprom_recvbits(sc, &val, 16);
4662 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
4663 }
4664
4665 /* Raise CS and clear SK. */
4666 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
4667 CSR_WRITE(sc, WMREG_EECD, reg);
4668 delay(2);
4669
4670 return 0;
4671 }
4672
4673 #define EEPROM_CHECKSUM 0xBABA
4674 #define EEPROM_SIZE 0x0040
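/*
 * The checksum word (conventionally the last of the 64, at offset 0x3f) is
 * written so that the 16-bit sum of words 0x00 through 0x3f equals
 * EEPROM_CHECKSUM.
 */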
4675
4676 /*
4677 * wm_validate_eeprom_checksum
4678 *
4679 * The checksum is defined as the sum of the first 64 (16 bit) words.
4680 */
4681 static int
4682 wm_validate_eeprom_checksum(struct wm_softc *sc)
4683 {
4684 uint16_t checksum;
4685 uint16_t eeprom_data;
4686 int i;
4687
4688 checksum = 0;
4689
4690 for (i = 0; i < EEPROM_SIZE; i++) {
4691 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
4692 return 1;
4693 checksum += eeprom_data;
4694 }
4695
4696 if (checksum != (uint16_t) EEPROM_CHECKSUM)
4697 return 1;
4698
4699 return 0;
4700 }
4701
4702 /*
4703 * wm_read_eeprom:
4704 *
4705 * Read data from the serial EEPROM.
4706 */
4707 static int
4708 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4709 {
4710 int rv;
4711
4712 if (sc->sc_flags & WM_F_EEPROM_INVALID)
4713 return 1;
4714
4715 if (wm_acquire_eeprom(sc))
4716 return 1;
4717
4718 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4719 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4720 || (sc->sc_type == WM_T_PCH2))
4721 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
4722 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
4723 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
4724 else if (sc->sc_flags & WM_F_EEPROM_SPI)
4725 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
4726 else
4727 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
4728
4729 wm_release_eeprom(sc);
4730 return rv;
4731 }
4732
4733 static int
4734 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
4735 uint16_t *data)
4736 {
4737 int i, eerd = 0;
4738 int error = 0;
4739
4740 for (i = 0; i < wordcnt; i++) {
4741 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
4742
4743 CSR_WRITE(sc, WMREG_EERD, eerd);
4744 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
4745 if (error != 0)
4746 break;
4747
4748 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
4749 }
4750
4751 return error;
4752 }
4753
4754 static int
4755 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
4756 {
4757 uint32_t attempts = 100000;
4758 uint32_t i, reg = 0;
4759 int32_t done = -1;
4760
4761 for (i = 0; i < attempts; i++) {
4762 reg = CSR_READ(sc, rw);
4763
4764 if (reg & EERD_DONE) {
4765 done = 0;
4766 break;
4767 }
4768 delay(5);
4769 }
4770
4771 return done;
4772 }
4773
4774 static int
4775 wm_check_alt_mac_addr(struct wm_softc *sc)
4776 {
4777 uint16_t myea[ETHER_ADDR_LEN / 2];
4778 uint16_t offset = EEPROM_OFF_MACADDR;
4779
4780 /* Try to read alternative MAC address pointer */
4781 if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
4782 return -1;
4783
4784 /* Check pointer */
4785 if (offset == 0xffff)
4786 return -1;
4787
4788 /*
4789 	 * Check whether the alternative MAC address is valid or not.
4790 	 * Some cards have a non-0xffff pointer but don't actually use an
4791 	 * alternative MAC address.
4792 	 *
4793 	 * Check whether the multicast (group) bit is set; a valid unicast address must have it clear.
4794 */
4795 if (wm_read_eeprom(sc, offset, 1, myea) == 0)
4796 if (((myea[0] & 0xff) & 0x01) == 0)
4797 return 0; /* found! */
4798
4799 /* not found */
4800 return -1;
4801 }
4802
4803 static int
4804 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
4805 {
4806 uint16_t myea[ETHER_ADDR_LEN / 2];
4807 uint16_t offset = EEPROM_OFF_MACADDR;
4808 int do_invert = 0;
4809
4810 switch (sc->sc_type) {
4811 case WM_T_82580:
4812 case WM_T_82580ER:
4813 switch (sc->sc_funcid) {
4814 case 0:
4815 /* default value (== EEPROM_OFF_MACADDR) */
4816 break;
4817 case 1:
4818 offset = EEPROM_OFF_LAN1;
4819 break;
4820 case 2:
4821 offset = EEPROM_OFF_LAN2;
4822 break;
4823 case 3:
4824 offset = EEPROM_OFF_LAN3;
4825 break;
4826 default:
4827 goto bad;
4828 /* NOTREACHED */
4829 break;
4830 }
4831 break;
4832 case WM_T_82571:
4833 case WM_T_82575:
4834 case WM_T_82576:
4835 case WM_T_80003:
4836 if (wm_check_alt_mac_addr(sc) != 0) {
4837 /* reset the offset to LAN0 */
4838 offset = EEPROM_OFF_MACADDR;
4839 if ((sc->sc_funcid & 0x01) == 1)
4840 do_invert = 1;
4841 goto do_read;
4842 }
4843 switch (sc->sc_funcid) {
4844 case 0:
4845 /*
4846 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
4847 * itself.
4848 */
4849 break;
4850 case 1:
4851 offset += EEPROM_OFF_MACADDR_LAN1;
4852 break;
4853 case 2:
4854 offset += EEPROM_OFF_MACADDR_LAN2;
4855 break;
4856 case 3:
4857 offset += EEPROM_OFF_MACADDR_LAN3;
4858 break;
4859 default:
4860 goto bad;
4861 /* NOTREACHED */
4862 break;
4863 }
4864 break;
4865 default:
4866 if ((sc->sc_funcid & 0x01) == 1)
4867 do_invert = 1;
4868 break;
4869 }
4870
4871 do_read:
4872 if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
4873 myea) != 0) {
4874 goto bad;
4875 }
4876
4877 enaddr[0] = myea[0] & 0xff;
4878 enaddr[1] = myea[0] >> 8;
4879 enaddr[2] = myea[1] & 0xff;
4880 enaddr[3] = myea[1] >> 8;
4881 enaddr[4] = myea[2] & 0xff;
4882 enaddr[5] = myea[2] >> 8;
4883
4884 /*
4885 * Toggle the LSB of the MAC address on the second port
4886 * of some dual port cards.
4887 */
4888 if (do_invert != 0)
4889 enaddr[5] ^= 1;
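	/*
	 * Illustrative example: if port 0 reads 00:1b:21:aa:bb:c0, port 1
	 * derives 00:1b:21:aa:bb:c1.
	 */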
4890
4891 return 0;
4892
4893 bad:
4894 aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
4895
4896 return -1;
4897 }
4898
4899 /*
4900 * wm_add_rxbuf:
4901 *
4902  *	Add a receive buffer to the indicated descriptor.
4903 */
4904 static int
4905 wm_add_rxbuf(struct wm_softc *sc, int idx)
4906 {
4907 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4908 struct mbuf *m;
4909 int error;
4910
4911 MGETHDR(m, M_DONTWAIT, MT_DATA);
4912 if (m == NULL)
4913 return ENOBUFS;
4914
4915 MCLGET(m, M_DONTWAIT);
4916 if ((m->m_flags & M_EXT) == 0) {
4917 m_freem(m);
4918 return ENOBUFS;
4919 }
4920
4921 if (rxs->rxs_mbuf != NULL)
4922 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4923
4924 rxs->rxs_mbuf = m;
4925
4926 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4927 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4928 BUS_DMA_READ|BUS_DMA_NOWAIT);
4929 if (error) {
4930 /* XXX XXX XXX */
4931 aprint_error_dev(sc->sc_dev,
4932 "unable to load rx DMA map %d, error = %d\n",
4933 idx, error);
4934 panic("wm_add_rxbuf");
4935 }
4936
4937 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4938 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4939
4940 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4941 if ((sc->sc_rctl & RCTL_EN) != 0)
4942 WM_INIT_RXDESC(sc, idx);
4943 } else
4944 WM_INIT_RXDESC(sc, idx);
4945
4946 return 0;
4947 }
4948
4949 /*
4950 * wm_set_ral:
4951 *
4952  *	Set an entry in the receive address list.
4953 */
4954 static void
4955 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4956 {
4957 uint32_t ral_lo, ral_hi;
4958
4959 if (enaddr != NULL) {
4960 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
4961 (enaddr[3] << 24);
4962 ral_hi = enaddr[4] | (enaddr[5] << 8);
4963 ral_hi |= RAL_AV;
4964 } else {
4965 ral_lo = 0;
4966 ral_hi = 0;
4967 }
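	/*
	 * Illustrative packing example: 00:a0:c9:12:34:56 is stored
	 * little-endian as ral_lo = 0x12c9a000, ral_hi = 0x5634 | RAL_AV.
	 */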
4968
4969 if (sc->sc_type >= WM_T_82544) {
4970 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
4971 ral_lo);
4972 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
4973 ral_hi);
4974 } else {
4975 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
4976 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
4977 }
4978 }
4979
4980 /*
4981 * wm_mchash:
4982 *
4983  *	Compute the hash of the multicast address for the 4096-bit
4984  *	(1024-bit on ICH/PCH) multicast filter.
4985 */
4986 static uint32_t
4987 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4988 {
4989 static const int lo_shift[4] = { 4, 3, 2, 0 };
4990 static const int hi_shift[4] = { 4, 5, 6, 8 };
4991 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4992 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
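	/*
	 * Each shift pair selects a 12-bit (10-bit on ICH/PCH) field of the
	 * 48-bit address; e.g. sc_mchash_type 0 takes address bits [47:36],
	 * matching multicast offset type 0 in RCTL.MO.
	 */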
4993 uint32_t hash;
4994
4995 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4996 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4997 || (sc->sc_type == WM_T_PCH2)) {
4998 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4999 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5000 return (hash & 0x3ff);
5001 }
5002 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5003 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5004
5005 return (hash & 0xfff);
5006 }
5007
5008 /*
5009 * wm_set_filter:
5010 *
5011 * Set up the receive filter.
5012 */
5013 static void
5014 wm_set_filter(struct wm_softc *sc)
5015 {
5016 struct ethercom *ec = &sc->sc_ethercom;
5017 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5018 struct ether_multi *enm;
5019 struct ether_multistep step;
5020 bus_addr_t mta_reg;
5021 uint32_t hash, reg, bit;
5022 int i, size;
5023
5024 if (sc->sc_type >= WM_T_82544)
5025 mta_reg = WMREG_CORDOVA_MTA;
5026 else
5027 mta_reg = WMREG_MTA;
5028
5029 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
5030
5031 if (ifp->if_flags & IFF_BROADCAST)
5032 sc->sc_rctl |= RCTL_BAM;
5033 if (ifp->if_flags & IFF_PROMISC) {
5034 sc->sc_rctl |= RCTL_UPE;
5035 goto allmulti;
5036 }
5037
5038 /*
5039 * Set the station address in the first RAL slot, and
5040 * clear the remaining slots.
5041 */
5042 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5043 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5044 || (sc->sc_type == WM_T_PCH2))
5045 size = WM_ICH8_RAL_TABSIZE;
5046 else
5047 size = WM_RAL_TABSIZE;
5048 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
5049 for (i = 1; i < size; i++)
5050 wm_set_ral(sc, NULL, i);
5051
5052 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5053 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5054 || (sc->sc_type == WM_T_PCH2))
5055 size = WM_ICH8_MC_TABSIZE;
5056 else
5057 size = WM_MC_TABSIZE;
5058 /* Clear out the multicast table. */
5059 for (i = 0; i < size; i++)
5060 CSR_WRITE(sc, mta_reg + (i << 2), 0);
5061
5062 ETHER_FIRST_MULTI(step, ec, enm);
5063 while (enm != NULL) {
5064 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
5065 /*
5066 * We must listen to a range of multicast addresses.
5067 * For now, just accept all multicasts, rather than
5068 * trying to set only those filter bits needed to match
5069 * the range. (At this time, the only use of address
5070 * ranges is for IP multicast routing, for which the
5071 * range is big enough to require all bits set.)
5072 */
5073 goto allmulti;
5074 }
5075
5076 hash = wm_mchash(sc, enm->enm_addrlo);
5077
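		/*
		 * The hash indexes a bit array spread across 32-bit MTA
		 * words: bits [4:0] select the bit within a word, the
		 * remaining bits select the word itself.
		 */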
5078 reg = (hash >> 5);
5079 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5080 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5081 || (sc->sc_type == WM_T_PCH2))
5082 reg &= 0x1f;
5083 else
5084 reg &= 0x7f;
5085 bit = hash & 0x1f;
5086
5087 hash = CSR_READ(sc, mta_reg + (reg << 2));
5088 hash |= 1U << bit;
5089
5090 /* XXX Hardware bug?? */
5091 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
5092 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
5093 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5094 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
5095 } else
5096 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5097
5098 ETHER_NEXT_MULTI(step, enm);
5099 }
5100
5101 ifp->if_flags &= ~IFF_ALLMULTI;
5102 goto setit;
5103
5104 allmulti:
5105 ifp->if_flags |= IFF_ALLMULTI;
5106 sc->sc_rctl |= RCTL_MPE;
5107
5108 setit:
5109 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
5110 }
5111
5112 /*
5113 * wm_tbi_mediainit:
5114 *
5115 * Initialize media for use on 1000BASE-X devices.
5116 */
5117 static void
5118 wm_tbi_mediainit(struct wm_softc *sc)
5119 {
5120 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5121 const char *sep = "";
5122
5123 if (sc->sc_type < WM_T_82543)
5124 sc->sc_tipg = TIPG_WM_DFLT;
5125 else
5126 sc->sc_tipg = TIPG_LG_DFLT;
5127
5128 sc->sc_tbi_anegticks = 5;
5129
5130 /* Initialize our media structures */
5131 sc->sc_mii.mii_ifp = ifp;
5132
5133 sc->sc_ethercom.ec_mii = &sc->sc_mii;
5134 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5135 wm_tbi_mediastatus);
5136
5137 /*
5138 * SWD Pins:
5139 *
5140 * 0 = Link LED (output)
5141 * 1 = Loss Of Signal (input)
5142 */
5143 sc->sc_ctrl |= CTRL_SWDPIO(0);
5144 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5145
5146 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5147
5148 #define ADD(ss, mm, dd) \
5149 do { \
5150 aprint_normal("%s%s", sep, ss); \
5151 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
5152 sep = ", "; \
5153 } while (/*CONSTCOND*/0)
5154
5155 aprint_normal_dev(sc->sc_dev, "");
5156 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5157 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5158 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5159 aprint_normal("\n");
5160
5161 #undef ADD
5162
5163 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5164 }
5165
5166 /*
5167 * wm_tbi_mediastatus: [ifmedia interface function]
5168 *
5169 * Get the current interface media status on a 1000BASE-X device.
5170 */
5171 static void
5172 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5173 {
5174 struct wm_softc *sc = ifp->if_softc;
5175 uint32_t ctrl, status;
5176
5177 ifmr->ifm_status = IFM_AVALID;
5178 ifmr->ifm_active = IFM_ETHER;
5179
5180 status = CSR_READ(sc, WMREG_STATUS);
5181 if ((status & STATUS_LU) == 0) {
5182 ifmr->ifm_active |= IFM_NONE;
5183 return;
5184 }
5185
5186 ifmr->ifm_status |= IFM_ACTIVE;
5187 ifmr->ifm_active |= IFM_1000_SX;
5188 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5189 ifmr->ifm_active |= IFM_FDX;
5190 ctrl = CSR_READ(sc, WMREG_CTRL);
5191 if (ctrl & CTRL_RFCE)
5192 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5193 if (ctrl & CTRL_TFCE)
5194 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5195 }
5196
5197 /*
5198 * wm_tbi_mediachange: [ifmedia interface function]
5199 *
5200 * Set hardware to newly-selected media on a 1000BASE-X device.
5201 */
5202 static int
5203 wm_tbi_mediachange(struct ifnet *ifp)
5204 {
5205 struct wm_softc *sc = ifp->if_softc;
5206 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5207 uint32_t status;
5208 int i;
5209
5210 sc->sc_txcw = 0;
5211 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5212 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5213 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5214 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5215 sc->sc_txcw |= TXCW_ANE;
5216 } else {
5217 /*
5218 * If autonegotiation is turned off, force link up and turn on
5219 * full duplex
5220 */
5221 sc->sc_txcw &= ~TXCW_ANE;
5222 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5223 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5224 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5225 delay(1000);
5226 }
5227
5228 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
5229 device_xname(sc->sc_dev),sc->sc_txcw));
5230 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5231 delay(10000);
5232
5233 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
5234 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
5235
5236 /*
5237 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
5238 * optics detect a signal, 0 if they don't.
5239 */
5240 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
5241 /* Have signal; wait for the link to come up. */
5242
5243 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5244 /*
5245 * Reset the link, and let autonegotiation do its thing
5246 */
5247 sc->sc_ctrl |= CTRL_LRST;
5248 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5249 delay(1000);
5250 sc->sc_ctrl &= ~CTRL_LRST;
5251 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5252 delay(1000);
5253 }
5254
5255 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
5256 delay(10000);
5257 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
5258 break;
5259 }
5260
5261 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
5262 device_xname(sc->sc_dev),i));
5263
5264 status = CSR_READ(sc, WMREG_STATUS);
5265 DPRINTF(WM_DEBUG_LINK,
5266 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
5267 device_xname(sc->sc_dev),status, STATUS_LU));
5268 if (status & STATUS_LU) {
5269 /* Link is up. */
5270 DPRINTF(WM_DEBUG_LINK,
5271 ("%s: LINK: set media -> link up %s\n",
5272 device_xname(sc->sc_dev),
5273 (status & STATUS_FD) ? "FDX" : "HDX"));
5274
5275 /*
5276 * NOTE: CTRL will update TFCE and RFCE automatically,
5277 * so we should update sc->sc_ctrl
5278 */
5279 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5280 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5281 sc->sc_fcrtl &= ~FCRTL_XONE;
5282 if (status & STATUS_FD)
5283 sc->sc_tctl |=
5284 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5285 else
5286 sc->sc_tctl |=
5287 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5288 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
5289 sc->sc_fcrtl |= FCRTL_XONE;
5290 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5291 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5292 WMREG_OLD_FCRTL : WMREG_FCRTL,
5293 sc->sc_fcrtl);
5294 sc->sc_tbi_linkup = 1;
5295 } else {
5296 if (i == WM_LINKUP_TIMEOUT)
5297 wm_check_for_link(sc);
5298 /* Link is down. */
5299 DPRINTF(WM_DEBUG_LINK,
5300 ("%s: LINK: set media -> link down\n",
5301 device_xname(sc->sc_dev)));
5302 sc->sc_tbi_linkup = 0;
5303 }
5304 } else {
5305 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
5306 device_xname(sc->sc_dev)));
5307 sc->sc_tbi_linkup = 0;
5308 }
5309
5310 wm_tbi_set_linkled(sc);
5311
5312 return 0;
5313 }
5314
5315 /*
5316 * wm_tbi_set_linkled:
5317 *
5318 * Update the link LED on 1000BASE-X devices.
5319 */
5320 static void
5321 wm_tbi_set_linkled(struct wm_softc *sc)
5322 {
5323
5324 if (sc->sc_tbi_linkup)
5325 sc->sc_ctrl |= CTRL_SWDPIN(0);
5326 else
5327 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
5328
5329 /* 82540 or newer devices are active low */
5330 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
5331
5332 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5333 }
5334
5335 /*
5336 * wm_tbi_check_link:
5337 *
5338 * Check the link on 1000BASE-X devices.
5339 */
5340 static void
5341 wm_tbi_check_link(struct wm_softc *sc)
5342 {
5343 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5344 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5345 uint32_t rxcw, ctrl, status;
5346
5347 status = CSR_READ(sc, WMREG_STATUS);
5348
5349 rxcw = CSR_READ(sc, WMREG_RXCW);
5350 ctrl = CSR_READ(sc, WMREG_CTRL);
5351
5352 /* set link status */
5353 if ((status & STATUS_LU) == 0) {
5354 DPRINTF(WM_DEBUG_LINK,
5355 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
5356 sc->sc_tbi_linkup = 0;
5357 } else if (sc->sc_tbi_linkup == 0) {
5358 DPRINTF(WM_DEBUG_LINK,
5359 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
5360 (status & STATUS_FD) ? "FDX" : "HDX"));
5361 sc->sc_tbi_linkup = 1;
5362 }
5363
5364 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
5365 && ((status & STATUS_LU) == 0)) {
5366 sc->sc_tbi_linkup = 0;
5367 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
5368 /* RXCFG storm! */
5369 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
5370 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
5371 wm_init(ifp);
5372 wm_start(ifp);
5373 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5374 /* If the timer expired, retry autonegotiation */
5375 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
5376 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
5377 sc->sc_tbi_ticks = 0;
5378 /*
5379 * Reset the link, and let autonegotiation do
5380 * its thing
5381 */
5382 sc->sc_ctrl |= CTRL_LRST;
5383 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5384 delay(1000);
5385 sc->sc_ctrl &= ~CTRL_LRST;
5386 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5387 delay(1000);
5388 CSR_WRITE(sc, WMREG_TXCW,
5389 sc->sc_txcw & ~TXCW_ANE);
5390 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5391 }
5392 }
5393 }
5394
5395 wm_tbi_set_linkled(sc);
5396 }
5397
5398 /*
5399 * wm_gmii_reset:
5400 *
5401 * Reset the PHY.
5402 */
5403 static void
5404 wm_gmii_reset(struct wm_softc *sc)
5405 {
5406 uint32_t reg;
5407 int rv;
5408
5409 /* get phy semaphore */
5410 switch (sc->sc_type) {
5411 case WM_T_82571:
5412 case WM_T_82572:
5413 case WM_T_82573:
5414 case WM_T_82574:
5415 case WM_T_82583:
5416 /* XXX should get sw semaphore, too */
5417 rv = wm_get_swsm_semaphore(sc);
5418 break;
5419 case WM_T_82575:
5420 case WM_T_82576:
5421 case WM_T_82580:
5422 case WM_T_82580ER:
5423 case WM_T_80003:
5424 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5425 break;
5426 case WM_T_ICH8:
5427 case WM_T_ICH9:
5428 case WM_T_ICH10:
5429 case WM_T_PCH:
5430 case WM_T_PCH2:
5431 rv = wm_get_swfwhw_semaphore(sc);
5432 break;
5433 default:
5434 		/* nothing to do */
5435 rv = 0;
5436 break;
5437 }
5438 if (rv != 0) {
5439 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5440 __func__);
5441 return;
5442 }
5443
5444 switch (sc->sc_type) {
5445 case WM_T_82542_2_0:
5446 case WM_T_82542_2_1:
5447 /* null */
5448 break;
5449 case WM_T_82543:
5450 /*
5451 * With 82543, we need to force speed and duplex on the MAC
5452 * equal to what the PHY speed and duplex configuration is.
5453 * In addition, we need to perform a hardware reset on the PHY
5454 * to take it out of reset.
5455 */
5456 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5457 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5458
5459 /* The PHY reset pin is active-low. */
5460 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5461 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
5462 CTRL_EXT_SWDPIN(4));
5463 reg |= CTRL_EXT_SWDPIO(4);
5464
5465 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5466 delay(10*1000);
5467
5468 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
5469 delay(150);
5470 #if 0
5471 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
5472 #endif
5473 delay(20*1000); /* XXX extra delay to get PHY ID? */
5474 break;
5475 case WM_T_82544: /* reset 10000us */
5476 case WM_T_82540:
5477 case WM_T_82545:
5478 case WM_T_82545_3:
5479 case WM_T_82546:
5480 case WM_T_82546_3:
5481 case WM_T_82541:
5482 case WM_T_82541_2:
5483 case WM_T_82547:
5484 case WM_T_82547_2:
5485 case WM_T_82571: /* reset 100us */
5486 case WM_T_82572:
5487 case WM_T_82573:
5488 case WM_T_82574:
5489 case WM_T_82575:
5490 case WM_T_82576:
5491 case WM_T_82580:
5492 case WM_T_82580ER:
5493 case WM_T_82583:
5494 case WM_T_80003:
5495 /* generic reset */
5496 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5497 delay(20000);
5498 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5499 delay(20000);
5500
5501 if ((sc->sc_type == WM_T_82541)
5502 || (sc->sc_type == WM_T_82541_2)
5503 || (sc->sc_type == WM_T_82547)
5504 || (sc->sc_type == WM_T_82547_2)) {
5505 			/* workarounds for IGP PHYs are done in igp_reset() */
5506 /* XXX add code to set LED after phy reset */
5507 }
5508 break;
5509 case WM_T_ICH8:
5510 case WM_T_ICH9:
5511 case WM_T_ICH10:
5512 case WM_T_PCH:
5513 case WM_T_PCH2:
5514 /* generic reset */
5515 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5516 delay(100);
5517 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5518 delay(150);
5519 break;
5520 default:
5521 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5522 __func__);
5523 break;
5524 }
5525
5526 /* release PHY semaphore */
5527 switch (sc->sc_type) {
5528 case WM_T_82571:
5529 case WM_T_82572:
5530 case WM_T_82573:
5531 case WM_T_82574:
5532 case WM_T_82583:
5533 /* XXX should put sw semaphore, too */
5534 wm_put_swsm_semaphore(sc);
5535 break;
5536 case WM_T_82575:
5537 case WM_T_82576:
5538 case WM_T_82580:
5539 case WM_T_82580ER:
5540 case WM_T_80003:
5541 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5542 break;
5543 case WM_T_ICH8:
5544 case WM_T_ICH9:
5545 case WM_T_ICH10:
5546 case WM_T_PCH:
5547 case WM_T_PCH2:
5548 wm_put_swfwhw_semaphore(sc);
5549 break;
5550 default:
5551 		/* nothing to do */
5552 rv = 0;
5553 break;
5554 }
5555
5556 /* get_cfg_done */
5557 wm_get_cfg_done(sc);
5558
5559 /* extra setup */
5560 switch (sc->sc_type) {
5561 case WM_T_82542_2_0:
5562 case WM_T_82542_2_1:
5563 case WM_T_82543:
5564 case WM_T_82544:
5565 case WM_T_82540:
5566 case WM_T_82545:
5567 case WM_T_82545_3:
5568 case WM_T_82546:
5569 case WM_T_82546_3:
5570 case WM_T_82541_2:
5571 case WM_T_82547_2:
5572 case WM_T_82571:
5573 case WM_T_82572:
5574 case WM_T_82573:
5575 case WM_T_82574:
5576 case WM_T_82575:
5577 case WM_T_82576:
5578 case WM_T_82580:
5579 case WM_T_82580ER:
5580 case WM_T_82583:
5581 case WM_T_80003:
5582 /* null */
5583 break;
5584 case WM_T_82541:
5585 case WM_T_82547:
5586 		/* XXX Actively configure the LED after PHY reset */
5587 break;
5588 case WM_T_ICH8:
5589 case WM_T_ICH9:
5590 case WM_T_ICH10:
5591 case WM_T_PCH:
5592 case WM_T_PCH2:
5593 		/* Allow time for h/w to get to a quiescent state after reset */
5594 delay(10*1000);
5595
5596 if (sc->sc_type == WM_T_PCH)
5597 wm_hv_phy_workaround_ich8lan(sc);
5598
5599 if (sc->sc_type == WM_T_PCH2)
5600 wm_lv_phy_workaround_ich8lan(sc);
5601
5602 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
5603 /*
5604 * dummy read to clear the phy wakeup bit after lcd
5605 * reset
5606 */
5607 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
5608 }
5609
5610 /*
5611 		 * XXX Configure the LCD with the extended configuration region
5612 * in NVM
5613 */
5614
5615 /* Configure the LCD with the OEM bits in NVM */
5616 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
5617 /*
5618 * Disable LPLU.
5619 * XXX It seems that 82567 has LPLU, too.
5620 */
5621 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
5622 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
5623 reg |= HV_OEM_BITS_ANEGNOW;
5624 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
5625 }
5626 break;
5627 default:
5628 panic("%s: unknown type\n", __func__);
5629 break;
5630 }
5631 }
5632
5633 /*
5634 * wm_gmii_mediainit:
5635 *
5636 * Initialize media for use on 1000BASE-T devices.
5637 */
5638 static void
5639 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
5640 {
5641 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5642
5643 /* We have MII. */
5644 sc->sc_flags |= WM_F_HAS_MII;
5645
5646 if (sc->sc_type == WM_T_80003)
5647 sc->sc_tipg = TIPG_1000T_80003_DFLT;
5648 else
5649 sc->sc_tipg = TIPG_1000T_DFLT;
5650
5651 /*
5652 * Let the chip set speed/duplex on its own based on
5653 * signals from the PHY.
5654 * XXXbouyer - I'm not sure this is right for the 80003,
5655 * the em driver only sets CTRL_SLU here - but it seems to work.
5656 */
5657 sc->sc_ctrl |= CTRL_SLU;
5658 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5659
5660 /* Initialize our media structures and probe the GMII. */
5661 sc->sc_mii.mii_ifp = ifp;
5662
5663 switch (prodid) {
5664 case PCI_PRODUCT_INTEL_PCH_M_LM:
5665 case PCI_PRODUCT_INTEL_PCH_M_LC:
5666 /* 82577 */
5667 sc->sc_phytype = WMPHY_82577;
5668 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5669 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5670 break;
5671 case PCI_PRODUCT_INTEL_PCH_D_DM:
5672 case PCI_PRODUCT_INTEL_PCH_D_DC:
5673 /* 82578 */
5674 sc->sc_phytype = WMPHY_82578;
5675 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5676 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5677 break;
5678 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
5679 case PCI_PRODUCT_INTEL_PCH2_LV_V:
5680 		/* 82579 */
5681 sc->sc_phytype = WMPHY_82579;
5682 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5683 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5684 break;
5685 case PCI_PRODUCT_INTEL_82801I_BM:
5686 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
5687 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
5688 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
5689 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
5690 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
5691 /* 82567 */
5692 sc->sc_phytype = WMPHY_BM;
5693 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5694 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5695 break;
5696 default:
5697 if ((sc->sc_flags & WM_F_SGMII) != 0) {
5698 sc->sc_mii.mii_readreg = wm_sgmii_readreg;
5699 sc->sc_mii.mii_writereg = wm_sgmii_writereg;
5700 } else if (sc->sc_type >= WM_T_80003) {
5701 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
5702 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
5703 } else if (sc->sc_type >= WM_T_82544) {
5704 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
5705 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
5706 } else {
5707 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
5708 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
5709 }
5710 break;
5711 }
5712 sc->sc_mii.mii_statchg = wm_gmii_statchg;
5713
5714 wm_gmii_reset(sc);
5715
5716 sc->sc_ethercom.ec_mii = &sc->sc_mii;
5717 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
5718 wm_gmii_mediastatus);
5719
5720 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
5721 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)) {
5722 if ((sc->sc_flags & WM_F_SGMII) == 0) {
5723 /* Attach only one port */
5724 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
5725 MII_OFFSET_ANY, MIIF_DOPAUSE);
5726 } else {
5727 int i;
5728 uint32_t ctrl_ext;
5729
			/* Power on the SGMII PHY if it is disabled */
5731 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
5732 CSR_WRITE(sc, WMREG_CTRL_EXT,
5733 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
5734 CSR_WRITE_FLUSH(sc);
5735 delay(300*1000); /* XXX too long */
5736
			/* Try PHY addresses 1 through 7 */
5738 for (i = 1; i < 8; i++)
5739 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
5740 i, MII_OFFSET_ANY, MIIF_DOPAUSE);
5741
			/* Restore the previous SFP cage power state */
5743 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
5744 }
5745 } else {
5746 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5747 MII_OFFSET_ANY, MIIF_DOPAUSE);
5748 }
5749
5750 if ((sc->sc_type == WM_T_PCH2) &&
5751 (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL)) {
5752 wm_set_mdio_slow_mode_hv(sc);
5753 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5754 MII_OFFSET_ANY, MIIF_DOPAUSE);
5755 }
5756
5757 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* If that failed, retry with the *_bm_* accessors */
5759 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5760 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5761
5762 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5763 MII_OFFSET_ANY, MIIF_DOPAUSE);
5764 }
5765 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5766 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
5767 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
5768 sc->sc_phytype = WMPHY_NONE;
5769 } else {
5770 /* Check PHY type */
5771 uint32_t model;
5772 struct mii_softc *child;
5773
5774 child = LIST_FIRST(&sc->sc_mii.mii_phys);
5775 if (device_is_a(child->mii_dev, "igphy")) {
5776 struct igphy_softc *isc = (struct igphy_softc *)child;
5777
5778 model = isc->sc_mii.mii_mpd_model;
5779 if (model == MII_MODEL_yyINTEL_I82566)
5780 sc->sc_phytype = WMPHY_IGP_3;
5781 }
5782
5783 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5784 }
5785 }
5786
5787 /*
5788 * wm_gmii_mediastatus: [ifmedia interface function]
5789 *
5790 * Get the current interface media status on a 1000BASE-T device.
5791 */
5792 static void
5793 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5794 {
5795 struct wm_softc *sc = ifp->if_softc;
5796
5797 ether_mediastatus(ifp, ifmr);
5798 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
5799 | sc->sc_flowflags;
5800 }
5801
5802 /*
5803 * wm_gmii_mediachange: [ifmedia interface function]
5804 *
5805 * Set hardware to newly-selected media on a 1000BASE-T device.
5806 */
5807 static int
5808 wm_gmii_mediachange(struct ifnet *ifp)
5809 {
5810 struct wm_softc *sc = ifp->if_softc;
5811 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5812 int rc;
5813
5814 if ((ifp->if_flags & IFF_UP) == 0)
5815 return 0;
5816
5817 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5818 sc->sc_ctrl |= CTRL_SLU;
5819 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
5820 || (sc->sc_type > WM_T_82543)) {
5821 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
5822 } else {
5823 sc->sc_ctrl &= ~CTRL_ASDE;
5824 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5825 if (ife->ifm_media & IFM_FDX)
5826 sc->sc_ctrl |= CTRL_FD;
5827 switch (IFM_SUBTYPE(ife->ifm_media)) {
5828 case IFM_10_T:
5829 sc->sc_ctrl |= CTRL_SPEED_10;
5830 break;
5831 case IFM_100_TX:
5832 sc->sc_ctrl |= CTRL_SPEED_100;
5833 break;
5834 case IFM_1000_T:
5835 sc->sc_ctrl |= CTRL_SPEED_1000;
5836 break;
5837 default:
5838 panic("wm_gmii_mediachange: bad media 0x%x",
5839 ife->ifm_media);
5840 }
5841 }
5842 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5843 if (sc->sc_type <= WM_T_82543)
5844 wm_gmii_reset(sc);
5845
5846 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
5847 return 0;
5848 return rc;
5849 }
5850
5851 #define MDI_IO CTRL_SWDPIN(2)
5852 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
5853 #define MDI_CLK CTRL_SWDPIN(3)
5854
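/*
 * Bit-banged MDIO support for the i82543.  The MDIO data and clock
 * lines are wired to software-controllable pins (SWDPIN 2 and 3 in the
 * CTRL register), so MII management frames are shifted out and in by
 * hand with ~10us clock phases.  For example, the read frame built in
 * wm_gmii_i82543_readreg() below is a 32-bit preamble of ones followed
 * by 14 bits of start/opcode/phy/reg.
 */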
5855 static void
5856 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
5857 {
5858 uint32_t i, v;
5859
5860 v = CSR_READ(sc, WMREG_CTRL);
5861 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5862 v |= MDI_DIR | CTRL_SWDPIO(3);
5863
5864 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
5865 if (data & i)
5866 v |= MDI_IO;
5867 else
5868 v &= ~MDI_IO;
5869 CSR_WRITE(sc, WMREG_CTRL, v);
5870 delay(10);
5871 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5872 delay(10);
5873 CSR_WRITE(sc, WMREG_CTRL, v);
5874 delay(10);
5875 }
5876 }
5877
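/*
 * i82543_mii_recvbits:
 *
 *	Clock in a 16-bit value from the PHY on the bit-banged MDIO
 *	interface, with a turnaround clock before and an idle clock
 *	after the data bits.
 */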
5878 static uint32_t
5879 i82543_mii_recvbits(struct wm_softc *sc)
5880 {
5881 uint32_t v, i, data = 0;
5882
5883 v = CSR_READ(sc, WMREG_CTRL);
5884 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5885 v |= CTRL_SWDPIO(3);
5886
5887 CSR_WRITE(sc, WMREG_CTRL, v);
5888 delay(10);
5889 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5890 delay(10);
5891 CSR_WRITE(sc, WMREG_CTRL, v);
5892 delay(10);
5893
5894 for (i = 0; i < 16; i++) {
5895 data <<= 1;
5896 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5897 delay(10);
5898 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
5899 data |= 1;
5900 CSR_WRITE(sc, WMREG_CTRL, v);
5901 delay(10);
5902 }
5903
5904 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5905 delay(10);
5906 CSR_WRITE(sc, WMREG_CTRL, v);
5907 delay(10);
5908
5909 return data;
5910 }
5911
5912 #undef MDI_IO
5913 #undef MDI_DIR
5914 #undef MDI_CLK
5915
5916 /*
5917 * wm_gmii_i82543_readreg: [mii interface function]
5918 *
5919 * Read a PHY register on the GMII (i82543 version).
5920 */
5921 static int
5922 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
5923 {
5924 struct wm_softc *sc = device_private(self);
5925 int rv;
5926
5927 i82543_mii_sendbits(sc, 0xffffffffU, 32);
5928 i82543_mii_sendbits(sc, reg | (phy << 5) |
5929 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
5930 rv = i82543_mii_recvbits(sc) & 0xffff;
5931
5932 DPRINTF(WM_DEBUG_GMII,
5933 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
5934 device_xname(sc->sc_dev), phy, reg, rv));
5935
5936 return rv;
5937 }
5938
5939 /*
5940 * wm_gmii_i82543_writereg: [mii interface function]
5941 *
5942 * Write a PHY register on the GMII (i82543 version).
5943 */
5944 static void
5945 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
5946 {
5947 struct wm_softc *sc = device_private(self);
5948
5949 i82543_mii_sendbits(sc, 0xffffffffU, 32);
5950 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
5951 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
5952 (MII_COMMAND_START << 30), 32);
5953 }
5954
5955 /*
5956 * wm_gmii_i82544_readreg: [mii interface function]
5957 *
5958 * Read a PHY register on the GMII.
5959 */
5960 static int
5961 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
5962 {
5963 struct wm_softc *sc = device_private(self);
5964 uint32_t mdic = 0;
5965 int i, rv;
5966
5967 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
5968 MDIC_REGADD(reg));
5969
5970 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
5971 mdic = CSR_READ(sc, WMREG_MDIC);
5972 if (mdic & MDIC_READY)
5973 break;
5974 delay(50);
5975 }
5976
5977 if ((mdic & MDIC_READY) == 0) {
5978 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
5979 device_xname(sc->sc_dev), phy, reg);
5980 rv = 0;
5981 } else if (mdic & MDIC_E) {
5982 #if 0 /* This is normal if no PHY is present. */
5983 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
5984 device_xname(sc->sc_dev), phy, reg);
5985 #endif
5986 rv = 0;
5987 } else {
5988 rv = MDIC_DATA(mdic);
5989 if (rv == 0xffff)
5990 rv = 0;
5991 }
5992
5993 return rv;
5994 }
5995
5996 /*
5997 * wm_gmii_i82544_writereg: [mii interface function]
5998 *
5999 * Write a PHY register on the GMII.
6000 */
6001 static void
6002 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6003 {
6004 struct wm_softc *sc = device_private(self);
6005 uint32_t mdic = 0;
6006 int i;
6007
6008 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6009 MDIC_REGADD(reg) | MDIC_DATA(val));
6010
6011 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6012 mdic = CSR_READ(sc, WMREG_MDIC);
6013 if (mdic & MDIC_READY)
6014 break;
6015 delay(50);
6016 }
6017
6018 if ((mdic & MDIC_READY) == 0)
6019 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6020 device_xname(sc->sc_dev), phy, reg);
6021 else if (mdic & MDIC_E)
6022 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6023 device_xname(sc->sc_dev), phy, reg);
6024 }
6025
6026 /*
6027 * wm_gmii_i80003_readreg: [mii interface function]
6028 *
 *	Read a PHY register on the Kumeran bus.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6032 */
6033 static int
6034 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6035 {
6036 struct wm_softc *sc = device_private(self);
6037 int sem;
6038 int rv;
6039
6040 if (phy != 1) /* only one PHY on kumeran bus */
6041 return 0;
6042
6043 sem = swfwphysem[sc->sc_funcid];
6044 if (wm_get_swfw_semaphore(sc, sem)) {
6045 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6046 __func__);
6047 return 0;
6048 }
6049
6050 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6051 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6052 reg >> GG82563_PAGE_SHIFT);
6053 } else {
6054 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6055 reg >> GG82563_PAGE_SHIFT);
6056 }
	/* Wait an extra 200us to work around the MDIC ready-bit bug */
6058 delay(200);
6059 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6060 delay(200);
6061
6062 wm_put_swfw_semaphore(sc, sem);
6063 return rv;
6064 }
6065
6066 /*
6067 * wm_gmii_i80003_writereg: [mii interface function]
6068 *
 *	Write a PHY register on the Kumeran bus.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6072 */
6073 static void
6074 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6075 {
6076 struct wm_softc *sc = device_private(self);
6077 int sem;
6078
6079 if (phy != 1) /* only one PHY on kumeran bus */
6080 return;
6081
6082 sem = swfwphysem[sc->sc_funcid];
6083 if (wm_get_swfw_semaphore(sc, sem)) {
6084 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6085 __func__);
6086 return;
6087 }
6088
6089 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6090 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6091 reg >> GG82563_PAGE_SHIFT);
6092 } else {
6093 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6094 reg >> GG82563_PAGE_SHIFT);
6095 }
	/* Wait an extra 200us to work around the MDIC ready-bit bug */
6097 delay(200);
6098 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6099 delay(200);
6100
6101 wm_put_swfw_semaphore(sc, sem);
6102 }
6103
6104 /*
6105 * wm_gmii_bm_readreg: [mii interface function]
6106 *
 *	Read a PHY register on the BM PHY (82567).
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6110 */
6111 static int
6112 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6113 {
6114 struct wm_softc *sc = device_private(self);
6115 int sem;
6116 int rv;
6117
6118 sem = swfwphysem[sc->sc_funcid];
6119 if (wm_get_swfw_semaphore(sc, sem)) {
6120 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6121 __func__);
6122 return 0;
6123 }
6124
	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
		if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
		else
			wm_gmii_i82544_writereg(self, phy,
			    GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);
	}
6134
6135 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6136 wm_put_swfw_semaphore(sc, sem);
6137 return rv;
6138 }
6139
6140 /*
6141 * wm_gmii_bm_writereg: [mii interface function]
6142 *
 *	Write a PHY register on the BM PHY (82567).
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6146 */
6147 static void
6148 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6149 {
6150 struct wm_softc *sc = device_private(self);
6151 int sem;
6152
6153 sem = swfwphysem[sc->sc_funcid];
6154 if (wm_get_swfw_semaphore(sc, sem)) {
6155 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6156 __func__);
6157 return;
6158 }
6159
	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
		if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
		else
			wm_gmii_i82544_writereg(self, phy,
			    GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);
	}
6169
6170 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6171 wm_put_swfw_semaphore(sc, sem);
6172 }
6173
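/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register (page 800).  The sequence
 *	is: select page 769, set the wakeup-register enable bit (with the
 *	host wakeup bit cleared), select page 800, write the register
 *	number to the address opcode register, read or write the data
 *	opcode register, then restore the original enable register on
 *	page 769.
 */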
6174 static void
6175 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6176 {
6177 struct wm_softc *sc = device_private(self);
6178 uint16_t regnum = BM_PHY_REG_NUM(offset);
6179 uint16_t wuce;
6180
6181 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6182 if (sc->sc_type == WM_T_PCH) {
		/* XXX the e1000 driver does nothing here... why? */
6184 }
6185
6186 /* Set page 769 */
6187 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6188 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6189
6190 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6191
6192 wuce &= ~BM_WUC_HOST_WU_BIT;
6193 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6194 wuce | BM_WUC_ENABLE_BIT);
6195
6196 /* Select page 800 */
6197 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6198 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6199
	/* Write the wakeup register address (page 800) */
6201 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6202
6203 if (rd)
6204 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6205 else
6206 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6207
6208 /* Set page 769 */
6209 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6210 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6211
6212 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6213 }
6214
6215 /*
6216 * wm_gmii_hv_readreg: [mii interface function]
6217 *
 *	Read a PHY register on the HV (PCH) PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6221 */
6222 static int
6223 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6224 {
6225 struct wm_softc *sc = device_private(self);
6226 uint16_t page = BM_PHY_REG_PAGE(reg);
6227 uint16_t regnum = BM_PHY_REG_NUM(reg);
6228 uint16_t val;
6229 int rv;
6230
6231 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6232 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6233 __func__);
6234 return 0;
6235 }
6236
6237 /* XXX Workaround failure in MDIO access while cable is disconnected */
6238 if (sc->sc_phytype == WMPHY_82577) {
6239 /* XXX must write */
6240 }
6241
6242 /* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return val;
	}
6247
	/*
	 * Pages below 768 (other than page 0) work differently than the
	 * rest and are not handled here.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_readreg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return 0;
	}
6256
6257 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6258 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6259 page << BME1000_PAGE_SHIFT);
6260 }
6261
6262 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6263 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6264 return rv;
6265 }
6266
6267 /*
6268 * wm_gmii_hv_writereg: [mii interface function]
6269 *
 *	Write a PHY register on the HV (PCH) PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6273 */
6274 static void
6275 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6276 {
6277 struct wm_softc *sc = device_private(self);
6278 uint16_t page = BM_PHY_REG_PAGE(reg);
6279 uint16_t regnum = BM_PHY_REG_NUM(reg);
6280
6281 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6282 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6283 __func__);
6284 return;
6285 }
6286
6287 /* XXX Workaround failure in MDIO access while cable is disconnected */
6288
6289 /* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		uint16_t tmp;

		tmp = val;
		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return;
	}
6297
	/*
	 * Pages below 768 (other than page 0) work differently than the
	 * rest and are not handled here.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_writereg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return;
	}
6306
6307 /*
6308 * XXX Workaround MDIO accesses being disabled after entering IEEE
6309 * Power Down (whenever bit 11 of the PHY control register is set)
6310 */
6311
6312 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6313 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6314 page << BME1000_PAGE_SHIFT);
6315 }
6316
6317 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6318 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6319 }
6320
6321 /*
 * wm_sgmii_readreg:	[mii interface function]
 *
 *	Read a PHY register on the SGMII interface via the I2CCMD
 *	register.  This could be handled by the PHY layer if we didn't
 *	have to lock the resource ...
6327 */
6328 static int
6329 wm_sgmii_readreg(device_t self, int phy, int reg)
6330 {
6331 struct wm_softc *sc = device_private(self);
6332 uint32_t i2ccmd;
6333 int i, rv;
6334
6335 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6336 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6337 __func__);
6338 return 0;
6339 }
6340
6341 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6342 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6343 | I2CCMD_OPCODE_READ;
6344 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6345
6346 /* Poll the ready bit */
6347 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6348 delay(50);
6349 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6350 if (i2ccmd & I2CCMD_READY)
6351 break;
6352 }
6353 if ((i2ccmd & I2CCMD_READY) == 0)
6354 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
6355 if ((i2ccmd & I2CCMD_ERROR) != 0)
6356 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6357
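	/* The 16-bit data in I2CCMD is byte-swapped; swap it back */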
6358 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
6359
6360 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6361 return rv;
6362 }
6363
6364 /*
 * wm_sgmii_writereg:	[mii interface function]
 *
 *	Write a PHY register on the SGMII interface via the I2CCMD
 *	register.  This could be handled by the PHY layer if we didn't
 *	have to lock the resource ...
6370 */
6371 static void
6372 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
6373 {
6374 struct wm_softc *sc = device_private(self);
6375 uint32_t i2ccmd;
6376 int i;
6377
6378 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6379 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6380 __func__);
6381 return;
6382 }
6383
6384 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6385 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6386 | I2CCMD_OPCODE_WRITE;
6387 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6388
6389 /* Poll the ready bit */
6390 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6391 delay(50);
6392 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6393 if (i2ccmd & I2CCMD_READY)
6394 break;
6395 }
6396 if ((i2ccmd & I2CCMD_READY) == 0)
6397 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
6398 if ((i2ccmd & I2CCMD_ERROR) != 0)
6399 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6400
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6402 }
6403
6404 /*
6405 * wm_gmii_statchg: [mii interface function]
6406 *
6407 * Callback from MII layer when media changes.
6408 */
6409 static void
6410 wm_gmii_statchg(device_t self)
6411 {
6412 struct wm_softc *sc = device_private(self);
6413 struct mii_data *mii = &sc->sc_mii;
6414
6415 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6416 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6417 sc->sc_fcrtl &= ~FCRTL_XONE;
6418
6419 /*
6420 * Get flow control negotiation result.
6421 */
6422 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
6423 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
6424 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
6425 mii->mii_media_active &= ~IFM_ETH_FMASK;
6426 }
6427
6428 if (sc->sc_flowflags & IFM_FLOW) {
6429 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
6430 sc->sc_ctrl |= CTRL_TFCE;
6431 sc->sc_fcrtl |= FCRTL_XONE;
6432 }
6433 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
6434 sc->sc_ctrl |= CTRL_RFCE;
6435 }
6436
6437 if (sc->sc_mii.mii_media_active & IFM_FDX) {
6438 DPRINTF(WM_DEBUG_LINK,
6439 ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
6440 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6441 } else {
6442 DPRINTF(WM_DEBUG_LINK,
6443 ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
6444 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6445 }
6446
6447 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6448 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6449 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
6450 : WMREG_FCRTL, sc->sc_fcrtl);
6451 if (sc->sc_type == WM_T_80003) {
6452 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
6453 case IFM_1000_T:
6454 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6455 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
6456 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6457 break;
6458 default:
6459 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6460 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
6461 sc->sc_tipg = TIPG_10_100_80003_DFLT;
6462 break;
6463 }
6464 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6465 }
6466 }
6467
6468 /*
6469 * wm_kmrn_readreg:
6470 *
6471 * Read a kumeran register
6472 */
6473 static int
6474 wm_kmrn_readreg(struct wm_softc *sc, int reg)
6475 {
6476 int rv;
6477
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
6479 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6480 aprint_error_dev(sc->sc_dev,
6481 "%s: failed to get semaphore\n", __func__);
6482 return 0;
6483 }
	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
6485 if (wm_get_swfwhw_semaphore(sc)) {
6486 aprint_error_dev(sc->sc_dev,
6487 "%s: failed to get semaphore\n", __func__);
6488 return 0;
6489 }
6490 }
6491
6492 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6493 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6494 KUMCTRLSTA_REN);
6495 delay(2);
6496
6497 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
6498
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
6500 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
6502 wm_put_swfwhw_semaphore(sc);
6503
6504 return rv;
6505 }
6506
6507 /*
6508 * wm_kmrn_writereg:
6509 *
6510 * Write a kumeran register
6511 */
6512 static void
6513 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
6514 {
6515
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
6517 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6518 aprint_error_dev(sc->sc_dev,
6519 "%s: failed to get semaphore\n", __func__);
6520 return;
6521 }
	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
6523 if (wm_get_swfwhw_semaphore(sc)) {
6524 aprint_error_dev(sc->sc_dev,
6525 "%s: failed to get semaphore\n", __func__);
6526 return;
6527 }
6528 }
6529
6530 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6531 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6532 (val & KUMCTRLSTA_MASK));
6533
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
6535 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
6537 wm_put_swfwhw_semaphore(sc);
6538 }
6539
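/*
 * wm_is_onboard_nvm_eeprom:
 *
 *	Return 1 if the NVM is an on-board EEPROM, 0 if it is Flash.
 *	On 82573/82574/82583, EECD bits 15 and 16 both set indicate a
 *	Flash device.
 */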
6540 static int
6541 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
6542 {
6543 uint32_t eecd = 0;
6544
6545 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
6546 || sc->sc_type == WM_T_82583) {
6547 eecd = CSR_READ(sc, WMREG_EECD);
6548
6549 /* Isolate bits 15 & 16 */
6550 eecd = ((eecd >> 15) & 0x03);
6551
6552 /* If both bits are set, device is Flash type */
6553 if (eecd == 0x03)
6554 return 0;
6555 }
6556 return 1;
6557 }
6558
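/*
 * wm_get_swsm_semaphore:
 *
 *	Acquire the software/firmware semaphore: set SWSM.SWESMBI and
 *	read it back; if the bit stuck, we own the semaphore.  Returns
 *	0 on success and 1 on timeout.
 */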
6559 static int
6560 wm_get_swsm_semaphore(struct wm_softc *sc)
6561 {
6562 int32_t timeout;
6563 uint32_t swsm;
6564
6565 /* Get the FW semaphore. */
6566 timeout = 1000 + 1; /* XXX */
6567 while (timeout) {
6568 swsm = CSR_READ(sc, WMREG_SWSM);
6569 swsm |= SWSM_SWESMBI;
6570 CSR_WRITE(sc, WMREG_SWSM, swsm);
6571 /* if we managed to set the bit we got the semaphore. */
6572 swsm = CSR_READ(sc, WMREG_SWSM);
6573 if (swsm & SWSM_SWESMBI)
6574 break;
6575
6576 delay(50);
6577 timeout--;
6578 }
6579
6580 if (timeout == 0) {
6581 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
6582 /* Release semaphores */
6583 wm_put_swsm_semaphore(sc);
6584 return 1;
6585 }
6586 return 0;
6587 }
6588
6589 static void
6590 wm_put_swsm_semaphore(struct wm_softc *sc)
6591 {
6592 uint32_t swsm;
6593
6594 swsm = CSR_READ(sc, WMREG_SWSM);
6595 swsm &= ~(SWSM_SWESMBI);
6596 CSR_WRITE(sc, WMREG_SWSM, swsm);
6597 }
6598
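/*
 * wm_get_swfw_semaphore:
 *
 *	Acquire one of the per-resource software/firmware semaphores in
 *	the SW_FW_SYNC register; `mask' selects the resource.  Access to
 *	SW_FW_SYNC itself is guarded by the SWSM semaphore where present,
 *	and both the software and firmware ownership bits for the
 *	resource must be clear before we may claim the software bit.
 */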
6599 static int
6600 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6601 {
6602 uint32_t swfw_sync;
6603 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
6604 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
6606
6607 for (timeout = 0; timeout < 200; timeout++) {
6608 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6609 if (wm_get_swsm_semaphore(sc)) {
6610 aprint_error_dev(sc->sc_dev,
6611 "%s: failed to get semaphore\n",
6612 __func__);
6613 return 1;
6614 }
6615 }
6616 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6617 if ((swfw_sync & (swmask | fwmask)) == 0) {
6618 swfw_sync |= swmask;
6619 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6620 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6621 wm_put_swsm_semaphore(sc);
6622 return 0;
6623 }
6624 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6625 wm_put_swsm_semaphore(sc);
6626 delay(5000);
6627 }
6628 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
6629 device_xname(sc->sc_dev), mask, swfw_sync);
6630 return 1;
6631 }
6632
6633 static void
6634 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6635 {
6636 uint32_t swfw_sync;
6637
6638 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6639 while (wm_get_swsm_semaphore(sc) != 0)
6640 continue;
6641 }
6642 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6643 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
6644 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6645 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6646 wm_put_swsm_semaphore(sc);
6647 }
6648
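/*
 * wm_get_swfwhw_semaphore:
 *
 *	Acquire the software flag used on ICH8-family parts: set the
 *	SWFLAG bit in EXTCNFCTR and read it back until it sticks,
 *	retrying for up to about a second.
 */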
6649 static int
6650 wm_get_swfwhw_semaphore(struct wm_softc *sc)
6651 {
6652 uint32_t ext_ctrl;
	int timeout;
6654
6655 for (timeout = 0; timeout < 200; timeout++) {
6656 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6657 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
6658 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6659
6660 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6661 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
6662 return 0;
6663 delay(5000);
6664 }
6665 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
6666 device_xname(sc->sc_dev), ext_ctrl);
6667 return 1;
6668 }
6669
6670 static void
6671 wm_put_swfwhw_semaphore(struct wm_softc *sc)
6672 {
6673 uint32_t ext_ctrl;
6674 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6675 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
6676 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6677 }
6678
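/*
 * wm_valid_nvm_bank_detect_ich8lan:
 *
 *	Determine which of the two NVM banks is valid.  Most parts report
 *	this directly via EECD_SEC1VAL; on ICH10/PCH we read the high
 *	byte of each bank's signature word and look for the valid pattern
 *	(0x80 in the top two bits).
 */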
6679 static int
6680 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
6681 {
6682 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
6683 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
6684
6685 if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
6686 /* Value of bit 22 corresponds to the flash bank we're on. */
6687 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
6688 } else {
6689 uint8_t bank_high_byte;
6690 wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
6691 if ((bank_high_byte & 0xc0) == 0x80)
6692 *bank = 0;
6693 else {
6694 wm_read_ich8_byte(sc, act_offset + bank1_offset,
6695 &bank_high_byte);
6696 if ((bank_high_byte & 0xc0) == 0x80)
6697 *bank = 1;
6698 else {
6699 aprint_error_dev(sc->sc_dev,
6700 "EEPROM not present\n");
6701 return -1;
6702 }
6703 }
6704 }
6705
6706 return 0;
6707 }
6708
6709 /******************************************************************************
6710 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
6711 * register.
6712 *
6713 * sc - Struct containing variables accessed by shared code
6714 * offset - offset of word in the EEPROM to read
6715 * data - word read from the EEPROM
6716 * words - number of words to read
6717 *****************************************************************************/
6718 static int
6719 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
6720 {
6721 int32_t error = 0;
6722 uint32_t flash_bank = 0;
6723 uint32_t act_offset = 0;
6724 uint32_t bank_offset = 0;
6725 uint16_t word = 0;
6726 uint16_t i = 0;
6727
6728 /* We need to know which is the valid flash bank. In the event
6729 * that we didn't allocate eeprom_shadow_ram, we may not be
6730 * managing flash_bank. So it cannot be trusted and needs
6731 * to be updated with each read.
6732 */
6733 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
6734 if (error) {
6735 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
6736 __func__);
6737 return error;
6738 }
6739
6740 /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
6741 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
6742
6743 error = wm_get_swfwhw_semaphore(sc);
6744 if (error) {
6745 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6746 __func__);
6747 return error;
6748 }
6749
6750 for (i = 0; i < words; i++) {
6751 /* The NVM part needs a byte offset, hence * 2 */
6752 act_offset = bank_offset + ((offset + i) * 2);
6753 error = wm_read_ich8_word(sc, act_offset, &word);
6754 if (error) {
6755 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
6756 __func__);
6757 break;
6758 }
6759 data[i] = word;
6760 }
6761
6762 wm_put_swfwhw_semaphore(sc);
6763 return error;
6764 }
6765
6766 /******************************************************************************
6767 * This function does initial flash setup so that a new read/write/erase cycle
6768 * can be started.
6769 *
6770 * sc - The pointer to the hw structure
6771 ****************************************************************************/
6772 static int32_t
6773 wm_ich8_cycle_init(struct wm_softc *sc)
6774 {
6775 uint16_t hsfsts;
6776 int32_t error = 1;
6777 int32_t i = 0;
6778
6779 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6780
	/* Maybe check the Flash Descriptor Valid bit in HW status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0)
		return error;
6785
	/* Clear FCERR and DAEL in HW status by writing 1s */
	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
6789
6790 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6791
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against, in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after hardware
	 * reset, which can then be used as an indication whether a cycle
	 * is in progress or has been completed.  We should also have some
	 * software semaphore mechanism to guard FDONE or the
	 * cycle-in-progress bit so that two threads' accesses to those
	 * bits can be serialized, or a way so that two threads don't start
	 * the cycle at the same time.
	 */
6802
6803 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6804 /*
6805 * There is no cycle running at present, so we can start a
6806 * cycle
6807 */
6808
6809 /* Begin by setting Flash Cycle Done. */
6810 hsfsts |= HSFSTS_DONE;
6811 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6812 error = 0;
6813 } else {
		/*
		 * Otherwise poll for some time so the current cycle has a
		 * chance to end before giving up.
		 */
6818 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
6819 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6820 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6821 error = 0;
6822 break;
6823 }
6824 delay(1);
6825 }
6826 if (error == 0) {
			/*
			 * The previous cycle ended while we waited; now set
			 * the Flash Cycle Done bit.
			 */
6831 hsfsts |= HSFSTS_DONE;
6832 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6833 }
6834 }
6835 return error;
6836 }
6837
6838 /******************************************************************************
6839 * This function starts a flash cycle and waits for its completion
6840 *
6841 * sc - The pointer to the hw structure
6842 ****************************************************************************/
6843 static int32_t
6844 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
6845 {
6846 uint16_t hsflctl;
6847 uint16_t hsfsts;
6848 int32_t error = 1;
6849 uint32_t i = 0;
6850
6851 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
6852 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6853 hsflctl |= HSFCTL_GO;
6854 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6855
	/* Wait until the FDONE bit is set */
6857 do {
6858 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6859 if (hsfsts & HSFSTS_DONE)
6860 break;
6861 delay(1);
6862 i++;
6863 } while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
6865 error = 0;
6866
6867 return error;
6868 }
6869
6870 /******************************************************************************
6871 * Reads a byte or word from the NVM using the ICH8 flash access registers.
6872 *
6873 * sc - The pointer to the hw structure
6874 * index - The index of the byte or word to read.
6875 * size - Size of data to read, 1=byte 2=word
6876 * data - Pointer to the word to store the value read.
6877 *****************************************************************************/
6878 static int32_t
6879 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
6880 uint32_t size, uint16_t* data)
6881 {
6882 uint16_t hsfsts;
6883 uint16_t hsflctl;
6884 uint32_t flash_linear_address;
6885 uint32_t flash_data = 0;
6886 int32_t error = 1;
6887 int32_t count = 0;
6888
	if (size < 1 || size > 2 || data == NULL ||
6890 index > ICH_FLASH_LINEAR_ADDR_MASK)
6891 return error;
6892
6893 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
6894 sc->sc_ich8_flash_base;
6895
6896 do {
6897 delay(1);
6898 /* Steps */
6899 error = wm_ich8_cycle_init(sc);
6900 if (error)
6901 break;
6902
6903 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6904 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
6905 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
6906 & HSFCTL_BCOUNT_MASK;
6907 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
6908 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6909
6910 /*
6911 * Write the last 24 bits of index into Flash Linear address
6912 * field in Flash Address
6913 */
6914 /* TODO: TBD maybe check the index against the size of flash */
6915
6916 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
6917
6918 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
6919
6920 /*
		 * Check if FCERR is set to 1.  If it is, clear it and try
		 * the whole sequence a few more times; otherwise read in
		 * (shift in) the data from Flash Data0, least significant
		 * byte first.
6925 */
6926 if (error == 0) {
6927 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
6928 if (size == 1)
6929 *data = (uint8_t)(flash_data & 0x000000FF);
6930 else if (size == 2)
6931 *data = (uint16_t)(flash_data & 0x0000FFFF);
6932 break;
6933 } else {
6934 /*
6935 * If we've gotten here, then things are probably
6936 * completely hosed, but if the error condition is
6937 * detected, it won't hurt to give it another try...
6938 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
6939 */
6940 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6941 if (hsfsts & HSFSTS_ERR) {
6942 /* Repeat for some time before giving up. */
6943 continue;
6944 } else if ((hsfsts & HSFSTS_DONE) == 0)
6945 break;
6946 }
6947 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
6948
6949 return error;
6950 }
6951
6952 /******************************************************************************
6953 * Reads a single byte from the NVM using the ICH8 flash access registers.
6954 *
6955 * sc - pointer to wm_hw structure
6956 * index - The index of the byte to read.
6957 * data - Pointer to a byte to store the value read.
6958 *****************************************************************************/
6959 static int32_t
6960 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
6961 {
6962 int32_t status;
6963 uint16_t word = 0;
6964
6965 status = wm_read_ich8_data(sc, index, 1, &word);
6966 if (status == 0)
6967 *data = (uint8_t)word;
6968 else
6969 *data = 0;
6970
6971 return status;
6972 }
6973
6974 /******************************************************************************
6975 * Reads a word from the NVM using the ICH8 flash access registers.
6976 *
6977 * sc - pointer to wm_hw structure
6978 * index - The starting byte index of the word to read.
6979 * data - Pointer to a word to store the value read.
6980 *****************************************************************************/
6981 static int32_t
6982 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
6983 {
6984 int32_t status;
6985
6986 status = wm_read_ich8_data(sc, index, 2, data);
6987 return status;
6988 }
6989
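/*
 * wm_check_mng_mode:
 *
 *	Check whether management firmware mode is enabled, using the
 *	chip-specific test: the FWSM mode bits on ICH/PCH and the
 *	82571-class parts, or an EEPROM config word on 82574/82583.
 */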
6990 static int
6991 wm_check_mng_mode(struct wm_softc *sc)
6992 {
6993 int rv;
6994
6995 switch (sc->sc_type) {
6996 case WM_T_ICH8:
6997 case WM_T_ICH9:
6998 case WM_T_ICH10:
6999 case WM_T_PCH:
7000 case WM_T_PCH2:
7001 rv = wm_check_mng_mode_ich8lan(sc);
7002 break;
7003 case WM_T_82574:
7004 case WM_T_82583:
7005 rv = wm_check_mng_mode_82574(sc);
7006 break;
7007 case WM_T_82571:
7008 case WM_T_82572:
7009 case WM_T_82573:
7010 case WM_T_80003:
7011 rv = wm_check_mng_mode_generic(sc);
7012 break;
7013 default:
		/* nothing to do */
7015 rv = 0;
7016 break;
7017 }
7018
7019 return rv;
7020 }
7021
7022 static int
7023 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
7024 {
7025 uint32_t fwsm;
7026
7027 fwsm = CSR_READ(sc, WMREG_FWSM);
7028
7029 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
7030 return 1;
7031
7032 return 0;
7033 }
7034
7035 static int
7036 wm_check_mng_mode_82574(struct wm_softc *sc)
7037 {
7038 uint16_t data;
7039
7040 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
7041
7042 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
7043 return 1;
7044
7045 return 0;
7046 }
7047
7048 static int
7049 wm_check_mng_mode_generic(struct wm_softc *sc)
7050 {
7051 uint32_t fwsm;
7052
7053 fwsm = CSR_READ(sc, WMREG_FWSM);
7054
7055 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
7056 return 1;
7057
7058 return 0;
7059 }
7060
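/*
 * wm_enable_mng_pass_thru:
 *
 *	Decide whether management packets must be passed to the host:
 *	ASF firmware must be present and MANC must have TCO receive and
 *	MAC address filtering enabled; parts with a valid ARC subsystem
 *	additionally check the FACTPS and FWSM mode bits.
 */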
7061 static int
7062 wm_enable_mng_pass_thru(struct wm_softc *sc)
7063 {
7064 uint32_t manc, fwsm, factps;
7065
7066 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
7067 return 0;
7068
7069 manc = CSR_READ(sc, WMREG_MANC);
7070
7071 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
7072 device_xname(sc->sc_dev), manc));
7073 if (((manc & MANC_RECV_TCO_EN) == 0)
7074 || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
7075 return 0;
7076
7077 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
7078 fwsm = CSR_READ(sc, WMREG_FWSM);
7079 factps = CSR_READ(sc, WMREG_FACTPS);
7080 if (((factps & FACTPS_MNGCG) == 0)
7081 && ((fwsm & FWSM_MODE_MASK)
7082 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
7083 return 1;
7084 } else if (((manc & MANC_SMBUS_EN) != 0)
7085 && ((manc & MANC_ASF_EN) == 0))
7086 return 1;
7087
7088 return 0;
7089 }
7090
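/*
 * wm_check_reset_block:
 *
 *	Return 0 if a PHY reset is currently permitted and -1 if the
 *	manageability firmware is blocking it.
 */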
7091 static int
7092 wm_check_reset_block(struct wm_softc *sc)
7093 {
7094 uint32_t reg;
7095
7096 switch (sc->sc_type) {
7097 case WM_T_ICH8:
7098 case WM_T_ICH9:
7099 case WM_T_ICH10:
7100 case WM_T_PCH:
7101 case WM_T_PCH2:
7102 reg = CSR_READ(sc, WMREG_FWSM);
7103 if ((reg & FWSM_RSPCIPHY) != 0)
7104 return 0;
7105 else
7106 return -1;
7107 break;
7108 case WM_T_82571:
7109 case WM_T_82572:
7110 case WM_T_82573:
7111 case WM_T_82574:
7112 case WM_T_82583:
7113 case WM_T_80003:
7114 reg = CSR_READ(sc, WMREG_MANC);
7115 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
7116 return -1;
7117 else
7118 return 0;
7119 break;
7120 default:
7121 /* no problem */
7122 break;
7123 }
7124
7125 return 0;
7126 }
7127
7128 static void
7129 wm_get_hw_control(struct wm_softc *sc)
7130 {
7131 uint32_t reg;
7132
7133 switch (sc->sc_type) {
7134 case WM_T_82573:
7135 reg = CSR_READ(sc, WMREG_SWSM);
7136 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
7137 break;
7138 case WM_T_82571:
7139 case WM_T_82572:
7140 case WM_T_82574:
7141 case WM_T_82583:
7142 case WM_T_80003:
7143 case WM_T_ICH8:
7144 case WM_T_ICH9:
7145 case WM_T_ICH10:
7146 case WM_T_PCH:
7147 case WM_T_PCH2:
7148 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7149 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
7150 break;
7151 default:
7152 break;
7153 }
7154 }
7155
7156 static void
7157 wm_release_hw_control(struct wm_softc *sc)
7158 {
7159 uint32_t reg;
7160
7161 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
7162 return;
7163
7164 if (sc->sc_type == WM_T_82573) {
7165 reg = CSR_READ(sc, WMREG_SWSM);
7166 reg &= ~SWSM_DRV_LOAD;
		CSR_WRITE(sc, WMREG_SWSM, reg);
7168 } else {
7169 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7170 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
7171 }
7172 }
7173
7174 /* XXX Currently TBI only */
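/*
 * wm_check_for_link:
 *
 *	Check the link state on TBI (fiber) interfaces and decide, from
 *	the signal-detect pin, STATUS.LU and the RXCW /C/ ordered sets,
 *	whether to force the link up or fall back to autonegotiation;
 *	see the truth table below.
 */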
7175 static int
7176 wm_check_for_link(struct wm_softc *sc)
7177 {
7178 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7179 uint32_t rxcw;
7180 uint32_t ctrl;
7181 uint32_t status;
7182 uint32_t sig;
7183
7184 rxcw = CSR_READ(sc, WMREG_RXCW);
7185 ctrl = CSR_READ(sc, WMREG_CTRL);
7186 status = CSR_READ(sc, WMREG_STATUS);
7187
7188 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7189
7190 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7191 device_xname(sc->sc_dev), __func__,
7192 ((ctrl & CTRL_SWDPIN(1)) == sig),
7193 ((status & STATUS_LU) != 0),
7194 ((rxcw & RXCW_C) != 0)
7195 ));
7196
7197 /*
7198 * SWDPIN LU RXCW
7199 * 0 0 0
7200 * 0 0 1 (should not happen)
7201 * 0 1 0 (should not happen)
7202 * 0 1 1 (should not happen)
7203 * 1 0 0 Disable autonego and force linkup
7204 * 1 0 1 got /C/ but not linkup yet
7205 * 1 1 0 (linkup)
7206 * 1 1 1 If IFM_AUTO, back to autonego
7207 *
7208 */
7209 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7210 && ((status & STATUS_LU) == 0)
7211 && ((rxcw & RXCW_C) == 0)) {
7212 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7213 __func__));
7214 sc->sc_tbi_linkup = 0;
7215 /* Disable auto-negotiation in the TXCW register */
7216 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7217
7218 /*
7219 * Force link-up and also force full-duplex.
7220 *
		 * NOTE: the hardware has updated TFCE and RFCE in CTRL
		 * automatically, so we should update sc->sc_ctrl from it.
7223 */
7224 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7225 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7226 } else if (((status & STATUS_LU) != 0)
7227 && ((rxcw & RXCW_C) != 0)
7228 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7229 sc->sc_tbi_linkup = 1;
7230 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7231 __func__));
7232 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7233 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7234 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7235 && ((rxcw & RXCW_C) != 0)) {
7236 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7237 } else {
7238 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7239 status));
7240 }
7241
7242 return 0;
7243 }
7244
7245 /* Work-around for 82566 Kumeran PCS lock loss */
7246 static void
7247 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
7248 {
7249 int miistatus, active, i;
7250 int reg;
7251
7252 miistatus = sc->sc_mii.mii_media_status;
7253
7254 /* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
7256 return;
7257
7258 active = sc->sc_mii.mii_media_active;
7259
7260 /* Nothing to do if the link is other than 1Gbps */
7261 if (IFM_SUBTYPE(active) != IFM_1000_T)
7262 return;
7263
7264 for (i = 0; i < 10; i++) {
7265 /* read twice */
7266 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7267 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
7269 goto out; /* GOOD! */
7270
7271 /* Reset the PHY */
7272 wm_gmii_reset(sc);
7273 delay(5*1000);
7274 }
7275
7276 /* Disable GigE link negotiation */
7277 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7278 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7279 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7280
7281 /*
7282 * Call gig speed drop workaround on Gig disable before accessing
7283 * any PHY registers.
7284 */
7285 wm_gig_downshift_workaround_ich8lan(sc);
7286
7287 out:
7288 return;
7289 }
7290
7291 /* WOL from S5 stops working */
7292 static void
7293 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
7294 {
7295 uint16_t kmrn_reg;
7296
7297 /* Only for igp3 */
7298 if (sc->sc_phytype == WMPHY_IGP_3) {
7299 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
7300 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
7301 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7302 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
7303 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7304 }
7305 }
7306
7307 #ifdef WM_WOL
7308 /* Power down workaround on D3 */
7309 static void
7310 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
7311 {
7312 uint32_t reg;
7313 int i;
7314
7315 for (i = 0; i < 2; i++) {
7316 /* Disable link */
7317 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7318 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7319 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7320
7321 /*
7322 * Call gig speed drop workaround on Gig disable before
7323 * accessing any PHY registers
7324 */
7325 if (sc->sc_type == WM_T_ICH8)
7326 wm_gig_downshift_workaround_ich8lan(sc);
7327
7328 /* Write VR power-down enable */
7329 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7330 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7331 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
7332 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
7333
7334 /* Read it back and test */
7335 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7336 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7337 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
7338 break;
7339
7340 /* Issue PHY reset and repeat at most one more time */
7341 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7342 }
7343 }
7344 #endif /* WM_WOL */
7345
7346 /*
 * Workaround for the PCH PHYs
7348 * XXX should be moved to new PHY driver?
7349 */
7350 static void
7351 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
7352 {
7353 if (sc->sc_phytype == WMPHY_82577)
7354 wm_set_mdio_slow_mode_hv(sc);
7355
7356 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
7357
7358 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
7359
7360 /* 82578 */
7361 if (sc->sc_phytype == WMPHY_82578) {
7362 /* PCH rev. < 3 */
7363 if (sc->sc_rev < 3) {
7364 /* XXX 6 bit shift? Why? Is it page2? */
7365 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
7366 0x66c0);
7367 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
7368 0xffff);
7369 }
7370
7371 /* XXX phy rev. < 2 */
7372 }
7373
7374 /* Select page 0 */
7375
7376 /* XXX acquire semaphore */
7377 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
7378 /* XXX release semaphore */
7379
7380 /*
7381 * Configure the K1 Si workaround during phy reset assuming there is
7382 * link so that it disables K1 if link is in 1Gbps.
7383 */
7384 wm_k1_gig_workaround_hv(sc, 1);
7385 }
7386
7387 static void
7388 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
7389 {
7390
7391 wm_set_mdio_slow_mode_hv(sc);
7392 }
7393
7394 static void
7395 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
7396 {
7397 int k1_enable = sc->sc_nvm_k1_enabled;
7398
7399 /* XXX acquire semaphore */
7400
7401 if (link) {
7402 k1_enable = 0;
7403
7404 /* Link stall fix for link up */
7405 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
7406 } else {
7407 /* Link stall fix for link down */
7408 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
7409 }
7410
7411 wm_configure_k1_ich8lan(sc, k1_enable);
7412
7413 /* XXX release semaphore */
7414 }
7415
7416 static void
7417 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
7418 {
7419 uint32_t reg;
7420
7421 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
7422 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
7423 reg | HV_KMRN_MDIO_SLOW);
7424 }
7425
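/*
 * wm_configure_k1_ich8lan:
 *
 *	Enable or disable the K1 power state via the Kumeran K1 config
 *	register, then briefly assert forced speed (CTRL_FRCSPD) and
 *	speed bypass (CTRL_EXT_SPD_BYPS) before restoring the original
 *	CTRL/CTRL_EXT values.
 */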
7426 static void
7427 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
7428 {
7429 uint32_t ctrl, ctrl_ext, tmp;
7430 uint16_t kmrn_reg;
7431
7432 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
7433
7434 if (k1_enable)
7435 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
7436 else
7437 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
7438
7439 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
7440
7441 delay(20);
7442
7443 ctrl = CSR_READ(sc, WMREG_CTRL);
7444 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7445
7446 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
7447 tmp |= CTRL_FRCSPD;
7448
7449 CSR_WRITE(sc, WMREG_CTRL, tmp);
7450 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
7451 delay(20);
7452
7453 CSR_WRITE(sc, WMREG_CTRL, ctrl);
7454 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7455 delay(20);
7456 }
7457
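/*
 * wm_smbustopci:
 *
 *	Switch the PHY from SMBus to PCIe mode when no valid firmware is
 *	present, by pulsing the LANPHYPC value/override bits in CTRL.
 */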
7458 static void
7459 wm_smbustopci(struct wm_softc *sc)
7460 {
7461 uint32_t fwsm;
7462
7463 fwsm = CSR_READ(sc, WMREG_FWSM);
7464 if (((fwsm & FWSM_FW_VALID) == 0)
7465 && ((wm_check_reset_block(sc) == 0))) {
7466 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
7467 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
7468 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7469 delay(10);
7470 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
7471 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7472 delay(50*1000);
7473
7474 /*
7475 * Gate automatic PHY configuration by hardware on non-managed
7476 * 82579
7477 */
7478 if (sc->sc_type == WM_T_PCH2)
7479 wm_gate_hw_phy_config_ich8lan(sc, 1);
7480 }
7481 }
7482
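/*
 * wm_set_pcie_completion_timeout:
 *
 *	If the PCIe completion timeout is at its default of 0, set it to
 *	10ms via GCR, or to 16ms through the Device Control 2 register on
 *	parts that report capability version 2.  Completion timeout
 *	resend is disabled in either case.
 */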
7483 static void
7484 wm_set_pcie_completion_timeout(struct wm_softc *sc)
7485 {
7486 uint32_t gcr;
7487 pcireg_t ctrl2;
7488
7489 gcr = CSR_READ(sc, WMREG_GCR);
7490
7491 /* Only take action if timeout value is defaulted to 0 */
7492 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
7493 goto out;
7494
7495 if ((gcr & GCR_CAP_VER2) == 0) {
7496 gcr |= GCR_CMPL_TMOUT_10MS;
7497 goto out;
7498 }
7499
7500 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
7501 sc->sc_pcixe_capoff + PCI_PCIE_DCSR2);
7502 ctrl2 |= WM_PCI_PCIE_DCSR2_16MS;
7503 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
7504 sc->sc_pcixe_capoff + PCI_PCIE_DCSR2, ctrl2);
7505
7506 out:
7507 /* Disable completion timeout resend */
7508 gcr &= ~GCR_CMPL_TMOUT_RESEND;
7509
7510 CSR_WRITE(sc, WMREG_GCR, gcr);
7511 }
7512
7513 /* special case - for 82575 - need to do manual init ... */
7514 static void
7515 wm_reset_init_script_82575(struct wm_softc *sc)
7516 {
7517 /*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
7520 */
7521
7522 /* SerDes configuration via SERDESCTRL */
7523 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
7524 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
7525 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
7526 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
7527
7528 /* CCM configuration via CCMCTL register */
7529 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
7530 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
7531
7532 /* PCIe lanes configuration */
7533 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
7534 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
7535 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
7536 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
7537
7538 /* PCIe PLL Configuration */
7539 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
7540 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
7541 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
7542 }
7543
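/*
 * wm_init_manageability:
 *
 *	Set up management packet filtering: hardware interception of ARP
 *	is disabled and, on 82571 and newer, management packets on ports
 *	623 and 624 are forwarded to the host.
 */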
7544 static void
7545 wm_init_manageability(struct wm_softc *sc)
7546 {
7547
7548 if (sc->sc_flags & WM_F_HAS_MANAGE) {
7549 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
7550 uint32_t manc = CSR_READ(sc, WMREG_MANC);
7551
		/* Disable hardware interception of ARP */
7553 manc &= ~MANC_ARP_EN;
7554
7555 /* enable receiving management packets to the host */
7556 if (sc->sc_type >= WM_T_82571) {
7557 manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
		}
7562
7563 CSR_WRITE(sc, WMREG_MANC, manc);
7564 }
7565 }
7566
7567 static void
7568 wm_release_manageability(struct wm_softc *sc)
7569 {
7570
7571 if (sc->sc_flags & WM_F_HAS_MANAGE) {
7572 uint32_t manc = CSR_READ(sc, WMREG_MANC);
7573
7574 if (sc->sc_type >= WM_T_82571)
7575 manc &= ~MANC_EN_MNG2HOST;
7576
7577 CSR_WRITE(sc, WMREG_MANC, manc);
7578 }
7579 }
7580
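/*
 * wm_get_wakeup:
 *
 *	Record the per-chip wakeup/manageability capabilities in
 *	sc_flags (HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES and,
 *	via wm_enable_mng_pass_thru(), HAS_MANAGE).
 */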
7581 static void
7582 wm_get_wakeup(struct wm_softc *sc)
7583 {
7584
7585 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
7586 switch (sc->sc_type) {
7587 case WM_T_82573:
7588 case WM_T_82583:
7589 sc->sc_flags |= WM_F_HAS_AMT;
7590 /* FALLTHROUGH */
7591 case WM_T_80003:
7592 case WM_T_82541:
7593 case WM_T_82547:
7594 case WM_T_82571:
7595 case WM_T_82572:
7596 case WM_T_82574:
7597 case WM_T_82575:
7598 case WM_T_82576:
7599 #if 0 /* XXX */
7600 case WM_T_82580:
7601 case WM_T_82580ER:
7602 #endif
7603 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
7604 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
7605 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
7606 break;
7607 case WM_T_ICH8:
7608 case WM_T_ICH9:
7609 case WM_T_ICH10:
7610 case WM_T_PCH:
7611 case WM_T_PCH2:
7612 sc->sc_flags |= WM_F_HAS_AMT;
7613 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
7614 break;
7615 default:
7616 break;
7617 }
7618
7619 /* 1: HAS_MANAGE */
7620 if (wm_enable_mng_pass_thru(sc) != 0)
7621 sc->sc_flags |= WM_F_HAS_MANAGE;
7622
7623 #ifdef WM_DEBUG
7624 printf("\n");
7625 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
7626 printf("HAS_AMT,");
7627 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
7628 printf("ARC_SUBSYS_VALID,");
7629 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
7630 printf("ASF_FIRMWARE_PRES,");
7631 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
7632 printf("HAS_MANAGE,");
7633 printf("\n");
7634 #endif
7635 /*
	 * Note that the WOL flag is set only after the EEPROM handling
	 * has been reset.
7638 */
7639 }
7640
7641 #ifdef WM_WOL
7642 /* WOL in the newer chipset interfaces (pchlan) */
7643 static void
7644 wm_enable_phy_wakeup(struct wm_softc *sc)
7645 {
7646 #if 0
7647 uint16_t preg;
7648
7649 /* Copy MAC RARs to PHY RARs */
7650
7651 /* Copy MAC MTA to PHY MTA */
7652
7653 /* Configure PHY Rx Control register */
7654
7655 /* Enable PHY wakeup in MAC register */
7656
7657 /* Configure and enable PHY wakeup in PHY registers */
7658
7659 /* Activate PHY wakeup */
7660
7661 /* XXX */
7662 #endif
7663 }
7664
7665 static void
7666 wm_enable_wakeup(struct wm_softc *sc)
7667 {
7668 uint32_t reg, pmreg;
7669 pcireg_t pmode;
7670
7671 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
7672 &pmreg, NULL) == 0)
7673 return;
7674
7675 /* Advertise the wakeup capability */
7676 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
7677 | CTRL_SWDPIN(3));
7678 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
7679
7680 /* ICH workaround */
7681 switch (sc->sc_type) {
7682 case WM_T_ICH8:
7683 case WM_T_ICH9:
7684 case WM_T_ICH10:
7685 case WM_T_PCH:
7686 case WM_T_PCH2:
7687 /* Disable gig during WOL */
7688 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7689 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
7690 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7691 if (sc->sc_type == WM_T_PCH)
7692 wm_gmii_reset(sc);
7693
7694 /* Power down workaround */
7695 if (sc->sc_phytype == WMPHY_82577) {
7696 struct mii_softc *child;
7697
7698 /* Assume that the PHY is copper */
7699 child = LIST_FIRST(&sc->sc_mii.mii_phys);
7700 if (child->mii_mpd_rev <= 2)
7701 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
7702 (768 << 5) | 25, 0x0444); /* magic num */
7703 }
7704 break;
7705 default:
7706 break;
7707 }
7708
7709 /* Keep the laser running on fiber adapters */
	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
	    || ((sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0)) {
7712 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7713 reg |= CTRL_EXT_SWDPIN(3);
7714 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7715 }
7716
7717 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
7718 #if 0 /* for the multicast packet */
7719 reg |= WUFC_MC;
7720 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
7721 #endif
7722
7723 if (sc->sc_type == WM_T_PCH) {
7724 wm_enable_phy_wakeup(sc);
7725 } else {
7726 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
7727 CSR_WRITE(sc, WMREG_WUFC, reg);
7728 }
7729
7730 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
7731 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
7732 || (sc->sc_type == WM_T_PCH2))
7733 && (sc->sc_phytype == WMPHY_IGP_3))
7734 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
7735
7736 /* Request PME */
7737 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
7738 #if 0
7739 /* Disable WOL */
7740 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
7741 #else
7742 /* For WOL */
7743 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
7744 #endif
7745 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
7746 }
7747 #endif /* WM_WOL */
7748
7749 static bool
7750 wm_suspend(device_t self, const pmf_qual_t *qual)
7751 {
7752 struct wm_softc *sc = device_private(self);
7753
7754 wm_release_manageability(sc);
7755 wm_release_hw_control(sc);
7756 #ifdef WM_WOL
7757 wm_enable_wakeup(sc);
7758 #endif
7759
7760 return true;
7761 }
7762
7763 static bool
7764 wm_resume(device_t self, const pmf_qual_t *qual)
7765 {
7766 struct wm_softc *sc = device_private(self);
7767
7768 wm_init_manageability(sc);
7769
7770 return true;
7771 }
7772