/*	$NetBSD: if_wm.c,v 1.215 2010/10/16 06:31:49 taca Exp $	*/
2
3 /*
4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*******************************************************************************
39
40 Copyright (c) 2001-2005, Intel Corporation
41 All rights reserved.
42
43 Redistribution and use in source and binary forms, with or without
44 modification, are permitted provided that the following conditions are met:
45
46 1. Redistributions of source code must retain the above copyright notice,
47 this list of conditions and the following disclaimer.
48
49 2. Redistributions in binary form must reproduce the above copyright
50 notice, this list of conditions and the following disclaimer in the
51 documentation and/or other materials provided with the distribution.
52
53 3. Neither the name of the Intel Corporation nor the names of its
54 contributors may be used to endorse or promote products derived from
55 this software without specific prior written permission.
56
57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67 POSSIBILITY OF SUCH DAMAGE.
68
69 *******************************************************************************/
70 /*
71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72 *
73 * TODO (in order of importance):
74 *
75 * - Rework how parameters are loaded from the EEPROM.
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.215 2010/10/16 06:31:49 taca Exp $");
80
81 #include "rnd.h"
82
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/callout.h>
86 #include <sys/mbuf.h>
87 #include <sys/malloc.h>
88 #include <sys/kernel.h>
89 #include <sys/socket.h>
90 #include <sys/ioctl.h>
91 #include <sys/errno.h>
92 #include <sys/device.h>
93 #include <sys/queue.h>
94 #include <sys/syslog.h>
95
96 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
97
98 #if NRND > 0
99 #include <sys/rnd.h>
100 #endif
101
102 #include <net/if.h>
103 #include <net/if_dl.h>
104 #include <net/if_media.h>
105 #include <net/if_ether.h>
106
107 #include <net/bpf.h>
108
109 #include <netinet/in.h> /* XXX for struct ip */
110 #include <netinet/in_systm.h> /* XXX for struct ip */
111 #include <netinet/ip.h> /* XXX for struct ip */
112 #include <netinet/ip6.h> /* XXX for struct ip6_hdr */
113 #include <netinet/tcp.h> /* XXX for struct tcphdr */
114
115 #include <sys/bus.h>
116 #include <sys/intr.h>
117 #include <machine/endian.h>
118
119 #include <dev/mii/mii.h>
120 #include <dev/mii/miivar.h>
121 #include <dev/mii/miidevs.h>
122 #include <dev/mii/mii_bitbang.h>
123 #include <dev/mii/ikphyreg.h>
124 #include <dev/mii/igphyreg.h>
125 #include <dev/mii/igphyvar.h>
126 #include <dev/mii/inbmphyreg.h>
127
128 #include <dev/pci/pcireg.h>
129 #include <dev/pci/pcivar.h>
130 #include <dev/pci/pcidevs.h>
131
132 #include <dev/pci/if_wmreg.h>
133 #include <dev/pci/if_wmvar.h>
134
#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE;

/*
 * Conditional debug printf; (x) is a WM_DEBUG_* mask, (y) is a
 * parenthesized printf argument list.  Wrapped in do/while(0) so the
 * macro expands to exactly one statement: the previous bare
 * "if (...) printf y" form would capture a following "else" when the
 * macro was used unbraced inside an if/else.
 */
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	do { } while (0)	/* nothing */
#endif /* WM_DEBUG */
148
149 /*
150 * Transmit descriptor list size. Due to errata, we can only have
151 * 256 hardware descriptors in the ring on < 82544, but we use 4096
152 * on >= 82544. We tell the upper layers that they can queue a lot
153 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
154 * of them at a time.
155 *
156 * We allow up to 256 (!) DMA segments per packet. Pathological packet
157 * chains containing many small mbufs have been observed in zero-copy
158 * situations with jumbo frames.
159 */
#define	WM_NTXSEGS		256	/* max DMA segments per Tx packet */
#define	WM_IFQUEUELEN		256	/* if_snd queue depth we advertise */
#define	WM_TXQUEUELEN_MAX	64	/* Tx jobs managed at once */
#define	WM_TXQUEUELEN_MAX_82547	16	/* ... but only 16 on the i82547 */
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)	/* sc_txnum is a power of 2 */
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)	/* Tx job GC threshold */
#define	WM_NTXDESC_82542	256	/* ring size on < 82544 (errata) */
#define	WM_NTXDESC_82544	4096	/* ring size on >= 82544 */
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)	/* sc_ntxdesc is a power of 2 */
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))	/* next desc index, wraps */
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))	/* next job index, wraps */

/* Largest per-packet DMA transfer we allow; sized for a full TSO burst. */
#define	WM_MAXTXDMA		round_page(IP_MAXPACKET)	/* for TSO */
176
177 /*
178 * Receive descriptor list size. We have one Rx buffer for normal
179 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
180 * packet. We allocate 256 receive descriptors, each with a 2k
181 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
182 */
#define	WM_NRXDESC		256	/* Rx ring size (power of 2) */
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)	/* next Rx index, wraps */
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)	/* previous Rx index, wraps */
187
188 /*
189 * Control structures are DMA'd to the i82542 chip. We allocate them in
190 * a single clump that maps to a single DMA segment to make several things
191 * easier.
192 */
/*
 * Layout of the single DMA-able clump of control data shared with the
 * chip: the Rx descriptor ring, followed by the (largest) Tx ring.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};
205
/*
 * Smaller control-data variant for chips limited to a 256-entry Tx
 * ring (< 82544); same field order as struct wm_control_data_82544
 * (Rx descriptors first), so the WM_CDOFF() offsets stay valid.
 */
struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};
210
/* Byte offset of descriptor (x) within the control-data DMA area. */
#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
214
/*
 * Software state for transmit jobs.  One job tracks the mbuf chain of
 * a queued packet and the span of hardware descriptors it occupies.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};
225
/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
235
#define	WM_LINKUP_TIMEOUT	50	/* link-up wait limit, in polling iterations;
					 * NOTE(review): confirm per-iteration delay
					 * at the use site */

/*
 * SW/FW semaphore bit for each PHY; presumably indexed by the chip
 * function number (sc_funcid, 0-3) — confirm at the use site.
 */
static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
244
/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;	/* PCI chipset tag */
	pcitag_t sc_pcitag;		/* our PCI tag */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;		/* Rx buffer offset (0 or 2) for payload
					 * alignment; see WM_INIT_RXDESC */

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;		/* ICH8 NVM region base in flash —
					 * NOTE(review): confirm units */
	int sc_ich8_flash_bank_size;	/* ICH8 NVM flash bank size —
					 * NOTE(review): confirm units */
	int sc_nvm_k1_enabled;		/* K1 setting from NVM — NOTE(review):
					 * confirm semantics in wm_k1_gig_workaround_hv */

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;		/* nonzero while discarding an Rx packet —
					 * NOTE(review): confirm in wm_rxintr */
	int sc_rxlen;			/* running length of the Rx chain below */
	struct mbuf *sc_rxhead;		/* head of chained Rx mbufs */
	struct mbuf *sc_rxtail;		/* tail of chained Rx mbufs */
	struct mbuf **sc_rxtailp;	/* where the next mbuf links in;
					 * see WM_RXCHAIN_LINK() */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};
387
/*
 * Reset the software Rx mbuf chain: head emptied, tail pointer back
 * at the head, accumulated length cleared.
 */
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

/* Append mbuf (m) to the tail of the software Rx chain. */
#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
400
#ifdef WM_EVENT_COUNTERS
/* Bump/add to an event counter; compiled out without WM_EVENT_COUNTERS. */
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

/* 32-bit access to the memory-mapped CSR (control/status register) space. */
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
/* Flush posted writes by reading back the STATUS register. */
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

/* 32- and 16-bit access to the ICH8 flash register space. */
#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
425
/* DMA (bus) address of Tx/Rx descriptor (x) within the control data. */
#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

/* Low/high 32 bits of a descriptor address; high word is 0 when
 * bus_addr_t is only 32 bits wide. */
#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
438
/*
 * Sync (n) Tx descriptors starting at ring index (x).  A range that
 * wraps past the end of the ring is split into two bus_dmamap_sync()
 * calls: end-of-ring first, then the remainder from index 0.
 */
#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

/* Sync the single Rx descriptor at ring index (x). */
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
465
/*
 * (Re)initialize Rx descriptor (x): point it at its mbuf's buffer
 * (offset by sc_align_tweak), clear the status fields, sync the
 * descriptor for the chip, then write (x) to the RDT register to
 * hand the descriptor to the hardware.
 */
#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
499
/* ifnet entry points. */
static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
/* pmf(9) suspend/resume hooks. */
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

/* Reset, Rx buffer and EEPROM helpers. */
static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

/* Receive filter setup. */
static void	wm_set_filter(struct wm_softc *);

/* Interrupt service routine and its Tx/Rx/link helpers. */
static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

/* TBI (1000BASE-X fiber) media support. */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

/* GMII (copper PHY) support; per-MAC-generation register accessors. */
static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(device_t);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

/* Kumeran (KMRN) interface register access. */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

/* Autoconfiguration, NVM and hardware-semaphore helpers. */
static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

/* ICH8 flash (NVM) read primitives. */
static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
/* Errata work-arounds, manageability and wake-up support. */
static void	wm_82547_txfifo_stall(void *);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);
608
/* Autoconfiguration glue: register the wm(4) driver's match/attach/detach. */
CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
611
612 /*
613 * Devices supported by this driver.
614 */
615 static const struct wm_product {
616 pci_vendor_id_t wmp_vendor;
617 pci_product_id_t wmp_product;
618 const char *wmp_name;
619 wm_chip_type wmp_type;
620 int wmp_flags;
621 #define WMP_F_1000X 0x01
622 #define WMP_F_1000T 0x02
623 #define WMP_F_SERDES 0x04
624 } wm_products[] = {
625 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
626 "Intel i82542 1000BASE-X Ethernet",
627 WM_T_82542_2_1, WMP_F_1000X },
628
629 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
630 "Intel i82543GC 1000BASE-X Ethernet",
631 WM_T_82543, WMP_F_1000X },
632
633 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
634 "Intel i82543GC 1000BASE-T Ethernet",
635 WM_T_82543, WMP_F_1000T },
636
637 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
638 "Intel i82544EI 1000BASE-T Ethernet",
639 WM_T_82544, WMP_F_1000T },
640
641 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
642 "Intel i82544EI 1000BASE-X Ethernet",
643 WM_T_82544, WMP_F_1000X },
644
645 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
646 "Intel i82544GC 1000BASE-T Ethernet",
647 WM_T_82544, WMP_F_1000T },
648
649 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
650 "Intel i82544GC (LOM) 1000BASE-T Ethernet",
651 WM_T_82544, WMP_F_1000T },
652
653 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
654 "Intel i82540EM 1000BASE-T Ethernet",
655 WM_T_82540, WMP_F_1000T },
656
657 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
658 "Intel i82540EM (LOM) 1000BASE-T Ethernet",
659 WM_T_82540, WMP_F_1000T },
660
661 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
662 "Intel i82540EP 1000BASE-T Ethernet",
663 WM_T_82540, WMP_F_1000T },
664
665 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
666 "Intel i82540EP 1000BASE-T Ethernet",
667 WM_T_82540, WMP_F_1000T },
668
669 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
670 "Intel i82540EP 1000BASE-T Ethernet",
671 WM_T_82540, WMP_F_1000T },
672
673 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
674 "Intel i82545EM 1000BASE-T Ethernet",
675 WM_T_82545, WMP_F_1000T },
676
677 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
678 "Intel i82545GM 1000BASE-T Ethernet",
679 WM_T_82545_3, WMP_F_1000T },
680
681 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
682 "Intel i82545GM 1000BASE-X Ethernet",
683 WM_T_82545_3, WMP_F_1000X },
684 #if 0
685 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
686 "Intel i82545GM Gigabit Ethernet (SERDES)",
687 WM_T_82545_3, WMP_F_SERDES },
688 #endif
689 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
690 "Intel i82546EB 1000BASE-T Ethernet",
691 WM_T_82546, WMP_F_1000T },
692
693 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
694 "Intel i82546EB 1000BASE-T Ethernet",
695 WM_T_82546, WMP_F_1000T },
696
697 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
698 "Intel i82545EM 1000BASE-X Ethernet",
699 WM_T_82545, WMP_F_1000X },
700
701 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
702 "Intel i82546EB 1000BASE-X Ethernet",
703 WM_T_82546, WMP_F_1000X },
704
705 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
706 "Intel i82546GB 1000BASE-T Ethernet",
707 WM_T_82546_3, WMP_F_1000T },
708
709 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
710 "Intel i82546GB 1000BASE-X Ethernet",
711 WM_T_82546_3, WMP_F_1000X },
712 #if 0
713 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
714 "Intel i82546GB Gigabit Ethernet (SERDES)",
715 WM_T_82546_3, WMP_F_SERDES },
716 #endif
717 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
718 "i82546GB quad-port Gigabit Ethernet",
719 WM_T_82546_3, WMP_F_1000T },
720
721 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
722 "i82546GB quad-port Gigabit Ethernet (KSP3)",
723 WM_T_82546_3, WMP_F_1000T },
724
725 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
726 "Intel PRO/1000MT (82546GB)",
727 WM_T_82546_3, WMP_F_1000T },
728
729 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
730 "Intel i82541EI 1000BASE-T Ethernet",
731 WM_T_82541, WMP_F_1000T },
732
733 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
734 "Intel i82541ER (LOM) 1000BASE-T Ethernet",
735 WM_T_82541, WMP_F_1000T },
736
737 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
738 "Intel i82541EI Mobile 1000BASE-T Ethernet",
739 WM_T_82541, WMP_F_1000T },
740
741 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
742 "Intel i82541ER 1000BASE-T Ethernet",
743 WM_T_82541_2, WMP_F_1000T },
744
745 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
746 "Intel i82541GI 1000BASE-T Ethernet",
747 WM_T_82541_2, WMP_F_1000T },
748
749 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
750 "Intel i82541GI Mobile 1000BASE-T Ethernet",
751 WM_T_82541_2, WMP_F_1000T },
752
753 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
754 "Intel i82541PI 1000BASE-T Ethernet",
755 WM_T_82541_2, WMP_F_1000T },
756
757 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
758 "Intel i82547EI 1000BASE-T Ethernet",
759 WM_T_82547, WMP_F_1000T },
760
761 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
762 "Intel i82547EI Mobile 1000BASE-T Ethernet",
763 WM_T_82547, WMP_F_1000T },
764
765 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
766 "Intel i82547GI 1000BASE-T Ethernet",
767 WM_T_82547_2, WMP_F_1000T },
768
769 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
770 "Intel PRO/1000 PT (82571EB)",
771 WM_T_82571, WMP_F_1000T },
772
773 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
774 "Intel PRO/1000 PF (82571EB)",
775 WM_T_82571, WMP_F_1000X },
776 #if 0
777 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
778 "Intel PRO/1000 PB (82571EB)",
779 WM_T_82571, WMP_F_SERDES },
780 #endif
781 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
782 "Intel PRO/1000 QT (82571EB)",
783 WM_T_82571, WMP_F_1000T },
784
785 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
786 "Intel i82572EI 1000baseT Ethernet",
787 WM_T_82572, WMP_F_1000T },
788
789 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
790 "Intel PRO/1000 PT Quad Port Server Adapter",
791 WM_T_82571, WMP_F_1000T, },
792
793 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
794 "Intel i82572EI 1000baseX Ethernet",
795 WM_T_82572, WMP_F_1000X },
796 #if 0
797 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
798 "Intel i82572EI Gigabit Ethernet (SERDES)",
799 WM_T_82572, WMP_F_SERDES },
800 #endif
801
802 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
803 "Intel i82572EI 1000baseT Ethernet",
804 WM_T_82572, WMP_F_1000T },
805
806 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
807 "Intel i82573E",
808 WM_T_82573, WMP_F_1000T },
809
810 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
811 "Intel i82573E IAMT",
812 WM_T_82573, WMP_F_1000T },
813
814 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
815 "Intel i82573L Gigabit Ethernet",
816 WM_T_82573, WMP_F_1000T },
817
818 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
819 "Intel i82574L",
820 WM_T_82574, WMP_F_1000T },
821
822 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
823 "Intel i82583V",
824 WM_T_82583, WMP_F_1000T },
825
826 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
827 "i80003 dual 1000baseT Ethernet",
828 WM_T_80003, WMP_F_1000T },
829
830 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
831 "i80003 dual 1000baseX Ethernet",
832 WM_T_80003, WMP_F_1000T },
833 #if 0
834 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
835 "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
836 WM_T_80003, WMP_F_SERDES },
837 #endif
838
839 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
840 "Intel i80003 1000baseT Ethernet",
841 WM_T_80003, WMP_F_1000T },
842 #if 0
843 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
844 "Intel i80003 Gigabit Ethernet (SERDES)",
845 WM_T_80003, WMP_F_SERDES },
846 #endif
847 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
848 "Intel i82801H (M_AMT) LAN Controller",
849 WM_T_ICH8, WMP_F_1000T },
850 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
851 "Intel i82801H (AMT) LAN Controller",
852 WM_T_ICH8, WMP_F_1000T },
853 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
854 "Intel i82801H LAN Controller",
855 WM_T_ICH8, WMP_F_1000T },
856 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
857 "Intel i82801H (IFE) LAN Controller",
858 WM_T_ICH8, WMP_F_1000T },
859 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
860 "Intel i82801H (M) LAN Controller",
861 WM_T_ICH8, WMP_F_1000T },
862 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
863 "Intel i82801H IFE (GT) LAN Controller",
864 WM_T_ICH8, WMP_F_1000T },
865 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
866 "Intel i82801H IFE (G) LAN Controller",
867 WM_T_ICH8, WMP_F_1000T },
868 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
869 "82801I (AMT) LAN Controller",
870 WM_T_ICH9, WMP_F_1000T },
871 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
872 "82801I LAN Controller",
873 WM_T_ICH9, WMP_F_1000T },
874 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
875 "82801I (G) LAN Controller",
876 WM_T_ICH9, WMP_F_1000T },
877 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
878 "82801I (GT) LAN Controller",
879 WM_T_ICH9, WMP_F_1000T },
880 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
881 "82801I (C) LAN Controller",
882 WM_T_ICH9, WMP_F_1000T },
883 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
884 "82801I mobile LAN Controller",
885 WM_T_ICH9, WMP_F_1000T },
886 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V,
887 "82801I mobile (V) LAN Controller",
888 WM_T_ICH9, WMP_F_1000T },
889 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
890 "82801I mobile (AMT) LAN Controller",
891 WM_T_ICH9, WMP_F_1000T },
892 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
893 "82567LM-4 LAN Controller",
894 WM_T_ICH9, WMP_F_1000T },
895 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3,
896 "82567V-3 LAN Controller",
897 WM_T_ICH9, WMP_F_1000T },
898 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
899 "82567LM-2 LAN Controller",
900 WM_T_ICH10, WMP_F_1000T },
901 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
902 "82567LF-2 LAN Controller",
903 WM_T_ICH10, WMP_F_1000T },
904 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
905 "82567LM-3 LAN Controller",
906 WM_T_ICH10, WMP_F_1000T },
907 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
908 "82567LF-3 LAN Controller",
909 WM_T_ICH10, WMP_F_1000T },
910 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
911 "82567V-2 LAN Controller",
912 WM_T_ICH10, WMP_F_1000T },
913 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
914 "PCH LAN (82577LM) Controller",
915 WM_T_PCH, WMP_F_1000T },
916 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
917 "PCH LAN (82577LC) Controller",
918 WM_T_PCH, WMP_F_1000T },
919 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
920 "PCH LAN (82578DM) Controller",
921 WM_T_PCH, WMP_F_1000T },
922 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
923 "PCH LAN (82578DC) Controller",
924 WM_T_PCH, WMP_F_1000T },
925 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
926 "82575EB dual-1000baseT Ethernet",
927 WM_T_82575, WMP_F_1000T },
928 #if 0
929 /*
930 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
931 * disabled for now ...
932 */
933 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
934 "82575EB dual-1000baseX Ethernet (SERDES)",
935 WM_T_82575, WMP_F_SERDES },
936 #endif
937 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
938 "82575GB quad-1000baseT Ethernet",
939 WM_T_82575, WMP_F_1000T },
940 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
941 "82575GB quad-1000baseT Ethernet (PM)",
942 WM_T_82575, WMP_F_1000T },
943 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
944 "82576 1000BaseT Ethernet",
945 WM_T_82576, WMP_F_1000T },
946 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
947 "82576 1000BaseX Ethernet",
948 WM_T_82576, WMP_F_1000X },
949 #if 0
950 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
951 "82576 gigabit Ethernet (SERDES)",
952 WM_T_82576, WMP_F_SERDES },
953 #endif
954 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
955 "82576 quad-1000BaseT Ethernet",
956 WM_T_82576, WMP_F_1000T },
957 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
958 "82576 gigabit Ethernet",
959 WM_T_82576, WMP_F_1000T },
960 #if 0
961 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
962 "82576 gigabit Ethernet (SERDES)",
963 WM_T_82576, WMP_F_SERDES },
964 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
965 "82576 quad-gigabit Ethernet (SERDES)",
966 WM_T_82576, WMP_F_SERDES },
967 #endif
968 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
969 "82580 1000BaseT Ethernet",
970 WM_T_82580, WMP_F_1000T },
971 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
972 "82580 1000BaseX Ethernet",
973 WM_T_82580, WMP_F_1000X },
974 #if 0
975 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
976 "82580 1000BaseT Ethernet (SERDES)",
977 WM_T_82580, WMP_F_SERDES },
978 #endif
979 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
980 "82580 gigabit Ethernet (SGMII)",
981 WM_T_82580, WMP_F_1000T },
982 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
983 "82580 dual-1000BaseT Ethernet",
984 WM_T_82580, WMP_F_1000T },
985 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER,
986 "82580 1000BaseT Ethernet",
987 WM_T_82580ER, WMP_F_1000T },
988 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER_DUAL,
989 "82580 dual-1000BaseT Ethernet",
990 WM_T_82580ER, WMP_F_1000T },
991 { 0, 0,
992 NULL,
993 0, 0 },
994 };
995
#ifdef WM_EVENT_COUNTERS
/*
 * Storage for the per-Tx-segment event counter names, one slot per
 * possible Tx DMA segment.  Each slot is sized to hold a name of the
 * form "txsegNNN" plus the terminating NUL; presumably the strings are
 * generated at attach time when the counters are registered -- TODO
 * confirm against the evcnt_attach_dynamic() calls in wm_attach().
 */
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */
999
#if 0 /* Not currently used */
/*
 * wm_io_read:
 *
 *	Read a device register through the I/O-mapped indirect access
 *	mechanism: write the register offset into the address window at
 *	I/O offset 0, then read the value back from the data window at
 *	I/O offset 4.  Requires that the I/O BAR was mapped successfully
 *	(WM_F_IOH_VALID) -- callers must check; this helper does not.
 */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif
1009
/*
 * wm_io_write:
 *
 *	Write a device register through the I/O-mapped indirect access
 *	mechanism: first write the register offset into the address
 *	window at I/O offset 0, then write the value into the data
 *	window at I/O offset 4.  The two writes must occur in this
 *	order.  Requires that the I/O BAR was mapped successfully
 *	(WM_F_IOH_VALID) -- callers must check; this helper does not.
 */
static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
1017
1018 static inline void
1019 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1020 uint32_t data)
1021 {
1022 uint32_t regval;
1023 int i;
1024
1025 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1026
1027 CSR_WRITE(sc, reg, regval);
1028
1029 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1030 delay(5);
1031 if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1032 break;
1033 }
1034 if (i == SCTL_CTL_POLL_TIMEOUT) {
1035 aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
1036 device_xname(sc->sc_dev), reg);
1037 }
1038 }
1039
1040 static inline void
1041 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1042 {
1043 wa->wa_low = htole32(v & 0xffffffffU);
1044 if (sizeof(bus_addr_t) == 8)
1045 wa->wa_high = htole32((uint64_t) v >> 32);
1046 else
1047 wa->wa_high = 0;
1048 }
1049
1050 static void
1051 wm_set_spiaddrbits(struct wm_softc *sc)
1052 {
1053 uint32_t reg;
1054
1055 sc->sc_flags |= WM_F_EEPROM_SPI;
1056 reg = CSR_READ(sc, WMREG_EECD);
1057 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1058 }
1059
1060 static const struct wm_product *
1061 wm_lookup(const struct pci_attach_args *pa)
1062 {
1063 const struct wm_product *wmp;
1064
1065 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1066 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1067 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1068 return wmp;
1069 }
1070 return NULL;
1071 }
1072
1073 static int
1074 wm_match(device_t parent, cfdata_t cf, void *aux)
1075 {
1076 struct pci_attach_args *pa = aux;
1077
1078 if (wm_lookup(pa) != NULL)
1079 return 1;
1080
1081 return 0;
1082 }
1083
1084 static void
1085 wm_attach(device_t parent, device_t self, void *aux)
1086 {
1087 struct wm_softc *sc = device_private(self);
1088 struct pci_attach_args *pa = aux;
1089 prop_dictionary_t dict;
1090 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1091 pci_chipset_tag_t pc = pa->pa_pc;
1092 pci_intr_handle_t ih;
1093 const char *intrstr = NULL;
1094 const char *eetype, *xname;
1095 bus_space_tag_t memt;
1096 bus_space_handle_t memh;
1097 bus_size_t memsize;
1098 int memh_valid;
1099 int i, error;
1100 const struct wm_product *wmp;
1101 prop_data_t ea;
1102 prop_number_t pn;
1103 uint8_t enaddr[ETHER_ADDR_LEN];
1104 uint16_t cfg1, cfg2, swdpin, io3;
1105 pcireg_t preg, memtype;
1106 uint16_t eeprom_data, apme_mask;
1107 uint32_t reg;
1108
1109 sc->sc_dev = self;
1110 callout_init(&sc->sc_tick_ch, 0);
1111
1112 sc->sc_wmp = wmp = wm_lookup(pa);
1113 if (wmp == NULL) {
1114 printf("\n");
1115 panic("wm_attach: impossible");
1116 }
1117
1118 sc->sc_pc = pa->pa_pc;
1119 sc->sc_pcitag = pa->pa_tag;
1120
1121 if (pci_dma64_available(pa))
1122 sc->sc_dmat = pa->pa_dmat64;
1123 else
1124 sc->sc_dmat = pa->pa_dmat;
1125
1126 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
1127 aprint_naive(": Ethernet controller\n");
1128 aprint_normal(": %s, rev. %d\n", wmp->wmp_name, sc->sc_rev);
1129
1130 sc->sc_type = wmp->wmp_type;
1131 if (sc->sc_type < WM_T_82543) {
1132 if (sc->sc_rev < 2) {
1133 aprint_error_dev(sc->sc_dev,
1134 "i82542 must be at least rev. 2\n");
1135 return;
1136 }
1137 if (sc->sc_rev < 3)
1138 sc->sc_type = WM_T_82542_2_0;
1139 }
1140
1141 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1142 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER))
1143 sc->sc_flags |= WM_F_NEWQUEUE;
1144
1145 /* Set device properties (mactype) */
1146 dict = device_properties(sc->sc_dev);
1147 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1148
1149 /*
1150 * Map the device. All devices support memory-mapped acccess,
1151 * and it is really required for normal operation.
1152 */
1153 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1154 switch (memtype) {
1155 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1156 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1157 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1158 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1159 break;
1160 default:
1161 memh_valid = 0;
1162 break;
1163 }
1164
1165 if (memh_valid) {
1166 sc->sc_st = memt;
1167 sc->sc_sh = memh;
1168 sc->sc_ss = memsize;
1169 } else {
1170 aprint_error_dev(sc->sc_dev,
1171 "unable to map device registers\n");
1172 return;
1173 }
1174
1175 wm_get_wakeup(sc);
1176
1177 /*
1178 * In addition, i82544 and later support I/O mapped indirect
1179 * register access. It is not desirable (nor supported in
1180 * this driver) to use it for normal operation, though it is
1181 * required to work around bugs in some chip versions.
1182 */
1183 if (sc->sc_type >= WM_T_82544) {
1184 /* First we have to find the I/O BAR. */
1185 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1186 if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
1187 PCI_MAPREG_TYPE_IO)
1188 break;
1189 }
1190 if (i == PCI_MAPREG_END)
1191 aprint_error_dev(sc->sc_dev,
1192 "WARNING: unable to find I/O BAR\n");
1193 else {
1194 /*
1195 * The i8254x doesn't apparently respond when the
1196 * I/O BAR is 0, which looks somewhat like it's not
1197 * been configured.
1198 */
1199 preg = pci_conf_read(pc, pa->pa_tag, i);
1200 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1201 aprint_error_dev(sc->sc_dev,
1202 "WARNING: I/O BAR at zero.\n");
1203 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1204 0, &sc->sc_iot, &sc->sc_ioh,
1205 NULL, &sc->sc_ios) == 0) {
1206 sc->sc_flags |= WM_F_IOH_VALID;
1207 } else {
1208 aprint_error_dev(sc->sc_dev,
1209 "WARNING: unable to map I/O space\n");
1210 }
1211 }
1212
1213 }
1214
1215 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1216 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1217 preg |= PCI_COMMAND_MASTER_ENABLE;
1218 if (sc->sc_type < WM_T_82542_2_1)
1219 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1220 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1221
1222 /* power up chip */
1223 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1224 NULL)) && error != EOPNOTSUPP) {
1225 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1226 return;
1227 }
1228
1229 /*
1230 * Map and establish our interrupt.
1231 */
1232 if (pci_intr_map(pa, &ih)) {
1233 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1234 return;
1235 }
1236 intrstr = pci_intr_string(pc, ih);
1237 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
1238 if (sc->sc_ih == NULL) {
1239 aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1240 if (intrstr != NULL)
1241 aprint_error(" at %s", intrstr);
1242 aprint_error("\n");
1243 return;
1244 }
1245 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1246
1247 /*
1248 * Check the function ID (unit number of the chip).
1249 */
1250 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1251 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1252 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1253 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER))
1254 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1255 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1256 else
1257 sc->sc_funcid = 0;
1258
1259 /*
1260 * Determine a few things about the bus we're connected to.
1261 */
1262 if (sc->sc_type < WM_T_82543) {
1263 /* We don't really know the bus characteristics here. */
1264 sc->sc_bus_speed = 33;
1265 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1266 /*
1267 * CSA (Communication Streaming Architecture) is about as fast
1268 * a 32-bit 66MHz PCI Bus.
1269 */
1270 sc->sc_flags |= WM_F_CSA;
1271 sc->sc_bus_speed = 66;
1272 aprint_verbose_dev(sc->sc_dev,
1273 "Communication Streaming Architecture\n");
1274 if (sc->sc_type == WM_T_82547) {
1275 callout_init(&sc->sc_txfifo_ch, 0);
1276 callout_setfunc(&sc->sc_txfifo_ch,
1277 wm_82547_txfifo_stall, sc);
1278 aprint_verbose_dev(sc->sc_dev,
1279 "using 82547 Tx FIFO stall work-around\n");
1280 }
1281 } else if (sc->sc_type >= WM_T_82571) {
1282 sc->sc_flags |= WM_F_PCIE;
1283 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1284 && (sc->sc_type != WM_T_ICH10)
1285 && (sc->sc_type != WM_T_PCH)) {
1286 sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
1287 /* ICH* and PCH have no PCIe capability registers */
1288 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1289 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1290 NULL) == 0)
1291 aprint_error_dev(sc->sc_dev,
1292 "unable to find PCIe capability\n");
1293 }
1294 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1295 } else {
1296 reg = CSR_READ(sc, WMREG_STATUS);
1297 if (reg & STATUS_BUS64)
1298 sc->sc_flags |= WM_F_BUS64;
1299 if ((reg & STATUS_PCIX_MODE) != 0) {
1300 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1301
1302 sc->sc_flags |= WM_F_PCIX;
1303 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1304 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1305 aprint_error_dev(sc->sc_dev,
1306 "unable to find PCIX capability\n");
1307 else if (sc->sc_type != WM_T_82545_3 &&
1308 sc->sc_type != WM_T_82546_3) {
1309 /*
1310 * Work around a problem caused by the BIOS
1311 * setting the max memory read byte count
1312 * incorrectly.
1313 */
1314 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1315 sc->sc_pcixe_capoff + PCI_PCIX_CMD);
1316 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1317 sc->sc_pcixe_capoff + PCI_PCIX_STATUS);
1318
1319 bytecnt =
1320 (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
1321 PCI_PCIX_CMD_BYTECNT_SHIFT;
1322 maxb =
1323 (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
1324 PCI_PCIX_STATUS_MAXB_SHIFT;
1325 if (bytecnt > maxb) {
1326 aprint_verbose_dev(sc->sc_dev,
1327 "resetting PCI-X MMRBC: %d -> %d\n",
1328 512 << bytecnt, 512 << maxb);
1329 pcix_cmd = (pcix_cmd &
1330 ~PCI_PCIX_CMD_BYTECNT_MASK) |
1331 (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
1332 pci_conf_write(pa->pa_pc, pa->pa_tag,
1333 sc->sc_pcixe_capoff + PCI_PCIX_CMD,
1334 pcix_cmd);
1335 }
1336 }
1337 }
1338 /*
1339 * The quad port adapter is special; it has a PCIX-PCIX
1340 * bridge on the board, and can run the secondary bus at
1341 * a higher speed.
1342 */
1343 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1344 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1345 : 66;
1346 } else if (sc->sc_flags & WM_F_PCIX) {
1347 switch (reg & STATUS_PCIXSPD_MASK) {
1348 case STATUS_PCIXSPD_50_66:
1349 sc->sc_bus_speed = 66;
1350 break;
1351 case STATUS_PCIXSPD_66_100:
1352 sc->sc_bus_speed = 100;
1353 break;
1354 case STATUS_PCIXSPD_100_133:
1355 sc->sc_bus_speed = 133;
1356 break;
1357 default:
1358 aprint_error_dev(sc->sc_dev,
1359 "unknown PCIXSPD %d; assuming 66MHz\n",
1360 reg & STATUS_PCIXSPD_MASK);
1361 sc->sc_bus_speed = 66;
1362 break;
1363 }
1364 } else
1365 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1366 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1367 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1368 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1369 }
1370
1371 /*
1372 * Allocate the control data structures, and create and load the
1373 * DMA map for it.
1374 *
1375 * NOTE: All Tx descriptors must be in the same 4G segment of
1376 * memory. So must Rx descriptors. We simplify by allocating
1377 * both sets within the same 4G segment.
1378 */
1379 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1380 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1381 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1382 sizeof(struct wm_control_data_82542) :
1383 sizeof(struct wm_control_data_82544);
1384 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1385 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1386 &sc->sc_cd_rseg, 0)) != 0) {
1387 aprint_error_dev(sc->sc_dev,
1388 "unable to allocate control data, error = %d\n",
1389 error);
1390 goto fail_0;
1391 }
1392
1393 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1394 sc->sc_cd_rseg, sc->sc_cd_size,
1395 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1396 aprint_error_dev(sc->sc_dev,
1397 "unable to map control data, error = %d\n", error);
1398 goto fail_1;
1399 }
1400
1401 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1402 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1403 aprint_error_dev(sc->sc_dev,
1404 "unable to create control data DMA map, error = %d\n",
1405 error);
1406 goto fail_2;
1407 }
1408
1409 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1410 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1411 aprint_error_dev(sc->sc_dev,
1412 "unable to load control data DMA map, error = %d\n",
1413 error);
1414 goto fail_3;
1415 }
1416
1417 /*
1418 * Create the transmit buffer DMA maps.
1419 */
1420 WM_TXQUEUELEN(sc) =
1421 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1422 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1423 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1424 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1425 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1426 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1427 aprint_error_dev(sc->sc_dev,
1428 "unable to create Tx DMA map %d, error = %d\n",
1429 i, error);
1430 goto fail_4;
1431 }
1432 }
1433
1434 /*
1435 * Create the receive buffer DMA maps.
1436 */
1437 for (i = 0; i < WM_NRXDESC; i++) {
1438 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1439 MCLBYTES, 0, 0,
1440 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1441 aprint_error_dev(sc->sc_dev,
1442 "unable to create Rx DMA map %d error = %d\n",
1443 i, error);
1444 goto fail_5;
1445 }
1446 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1447 }
1448
1449 /* clear interesting stat counters */
1450 CSR_READ(sc, WMREG_COLC);
1451 CSR_READ(sc, WMREG_RXERRC);
1452
1453 /*
1454 * Reset the chip to a known state.
1455 */
1456 wm_reset(sc);
1457
1458 switch (sc->sc_type) {
1459 case WM_T_82571:
1460 case WM_T_82572:
1461 case WM_T_82573:
1462 case WM_T_82574:
1463 case WM_T_82583:
1464 case WM_T_80003:
1465 case WM_T_ICH8:
1466 case WM_T_ICH9:
1467 case WM_T_ICH10:
1468 case WM_T_PCH:
1469 if (wm_check_mng_mode(sc) != 0)
1470 wm_get_hw_control(sc);
1471 break;
1472 default:
1473 break;
1474 }
1475
1476 /*
1477 * Get some information about the EEPROM.
1478 */
1479 switch (sc->sc_type) {
1480 case WM_T_82542_2_0:
1481 case WM_T_82542_2_1:
1482 case WM_T_82543:
1483 case WM_T_82544:
1484 /* Microwire */
1485 sc->sc_ee_addrbits = 6;
1486 break;
1487 case WM_T_82540:
1488 case WM_T_82545:
1489 case WM_T_82545_3:
1490 case WM_T_82546:
1491 case WM_T_82546_3:
1492 /* Microwire */
1493 reg = CSR_READ(sc, WMREG_EECD);
1494 if (reg & EECD_EE_SIZE)
1495 sc->sc_ee_addrbits = 8;
1496 else
1497 sc->sc_ee_addrbits = 6;
1498 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1499 break;
1500 case WM_T_82541:
1501 case WM_T_82541_2:
1502 case WM_T_82547:
1503 case WM_T_82547_2:
1504 reg = CSR_READ(sc, WMREG_EECD);
1505 if (reg & EECD_EE_TYPE) {
1506 /* SPI */
1507 wm_set_spiaddrbits(sc);
1508 } else
1509 /* Microwire */
1510 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1511 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1512 break;
1513 case WM_T_82571:
1514 case WM_T_82572:
1515 /* SPI */
1516 wm_set_spiaddrbits(sc);
1517 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1518 break;
1519 case WM_T_82573:
1520 case WM_T_82574:
1521 case WM_T_82583:
1522 if (wm_is_onboard_nvm_eeprom(sc) == 0)
1523 sc->sc_flags |= WM_F_EEPROM_FLASH;
1524 else {
1525 /* SPI */
1526 wm_set_spiaddrbits(sc);
1527 }
1528 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1529 break;
1530 case WM_T_82575:
1531 case WM_T_82576:
1532 case WM_T_82580:
1533 case WM_T_82580ER:
1534 case WM_T_80003:
1535 /* SPI */
1536 wm_set_spiaddrbits(sc);
1537 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1538 break;
1539 case WM_T_ICH8:
1540 case WM_T_ICH9:
1541 case WM_T_ICH10:
1542 case WM_T_PCH:
1543 /* FLASH */
1544 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
1545 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1546 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1547 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1548 aprint_error_dev(sc->sc_dev,
1549 "can't map FLASH registers\n");
1550 return;
1551 }
1552 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1553 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1554 ICH_FLASH_SECTOR_SIZE;
1555 sc->sc_ich8_flash_bank_size =
1556 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1557 sc->sc_ich8_flash_bank_size -=
1558 (reg & ICH_GFPREG_BASE_MASK);
1559 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1560 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1561 break;
1562 default:
1563 break;
1564 }
1565
1566 /*
1567 * Defer printing the EEPROM type until after verifying the checksum
1568 * This allows the EEPROM type to be printed correctly in the case
1569 * that no EEPROM is attached.
1570 */
1571 /*
1572 * Validate the EEPROM checksum. If the checksum fails, flag
1573 * this for later, so we can fail future reads from the EEPROM.
1574 */
1575 if (wm_validate_eeprom_checksum(sc)) {
1576 /*
1577 * Read twice again because some PCI-e parts fail the
1578 * first check due to the link being in sleep state.
1579 */
1580 if (wm_validate_eeprom_checksum(sc))
1581 sc->sc_flags |= WM_F_EEPROM_INVALID;
1582 }
1583
1584 /* Set device properties (macflags) */
1585 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1586
1587 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1588 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1589 else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1590 aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1591 } else {
1592 if (sc->sc_flags & WM_F_EEPROM_SPI)
1593 eetype = "SPI";
1594 else
1595 eetype = "MicroWire";
1596 aprint_verbose_dev(sc->sc_dev,
1597 "%u word (%d address bits) %s EEPROM\n",
1598 1U << sc->sc_ee_addrbits,
1599 sc->sc_ee_addrbits, eetype);
1600 }
1601
1602 /*
1603 * Read the Ethernet address from the EEPROM, if not first found
1604 * in device properties.
1605 */
1606 ea = prop_dictionary_get(dict, "mac-address");
1607 if (ea != NULL) {
1608 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1609 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1610 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1611 } else {
1612 if (wm_read_mac_addr(sc, enaddr) != 0) {
1613 aprint_error_dev(sc->sc_dev,
1614 "unable to read Ethernet address\n");
1615 return;
1616 }
1617 }
1618
1619 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1620 ether_sprintf(enaddr));
1621
1622 /*
1623 * Read the config info from the EEPROM, and set up various
1624 * bits in the control registers based on their contents.
1625 */
1626 pn = prop_dictionary_get(dict, "i82543-cfg1");
1627 if (pn != NULL) {
1628 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1629 cfg1 = (uint16_t) prop_number_integer_value(pn);
1630 } else {
1631 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1632 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1633 return;
1634 }
1635 }
1636
1637 pn = prop_dictionary_get(dict, "i82543-cfg2");
1638 if (pn != NULL) {
1639 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1640 cfg2 = (uint16_t) prop_number_integer_value(pn);
1641 } else {
1642 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1643 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1644 return;
1645 }
1646 }
1647
1648 /* check for WM_F_WOL */
1649 switch (sc->sc_type) {
1650 case WM_T_82542_2_0:
1651 case WM_T_82542_2_1:
1652 case WM_T_82543:
1653 /* dummy? */
1654 eeprom_data = 0;
1655 apme_mask = EEPROM_CFG3_APME;
1656 break;
1657 case WM_T_82544:
1658 apme_mask = EEPROM_CFG2_82544_APM_EN;
1659 eeprom_data = cfg2;
1660 break;
1661 case WM_T_82546:
1662 case WM_T_82546_3:
1663 case WM_T_82571:
1664 case WM_T_82572:
1665 case WM_T_82573:
1666 case WM_T_82574:
1667 case WM_T_82583:
1668 case WM_T_80003:
1669 default:
1670 apme_mask = EEPROM_CFG3_APME;
1671 wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1672 : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1673 break;
1674 case WM_T_82575:
1675 case WM_T_82576:
1676 case WM_T_82580:
1677 case WM_T_82580ER:
1678 case WM_T_ICH8:
1679 case WM_T_ICH9:
1680 case WM_T_ICH10:
1681 case WM_T_PCH:
1682 apme_mask = WUC_APME;
1683 eeprom_data = CSR_READ(sc, WMREG_WUC);
1684 break;
1685 }
1686
1687 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
1688 if ((eeprom_data & apme_mask) != 0)
1689 sc->sc_flags |= WM_F_WOL;
1690 #ifdef WM_DEBUG
1691 if ((sc->sc_flags & WM_F_WOL) != 0)
1692 printf("WOL\n");
1693 #endif
1694
1695 /*
1696 * XXX need special handling for some multiple port cards
1697 * to disable a paticular port.
1698 */
1699
1700 if (sc->sc_type >= WM_T_82544) {
1701 pn = prop_dictionary_get(dict, "i82543-swdpin");
1702 if (pn != NULL) {
1703 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1704 swdpin = (uint16_t) prop_number_integer_value(pn);
1705 } else {
1706 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1707 aprint_error_dev(sc->sc_dev,
1708 "unable to read SWDPIN\n");
1709 return;
1710 }
1711 }
1712 }
1713
1714 if (cfg1 & EEPROM_CFG1_ILOS)
1715 sc->sc_ctrl |= CTRL_ILOS;
1716 if (sc->sc_type >= WM_T_82544) {
1717 sc->sc_ctrl |=
1718 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1719 CTRL_SWDPIO_SHIFT;
1720 sc->sc_ctrl |=
1721 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1722 CTRL_SWDPINS_SHIFT;
1723 } else {
1724 sc->sc_ctrl |=
1725 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1726 CTRL_SWDPIO_SHIFT;
1727 }
1728
1729 #if 0
1730 if (sc->sc_type >= WM_T_82544) {
1731 if (cfg1 & EEPROM_CFG1_IPS0)
1732 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1733 if (cfg1 & EEPROM_CFG1_IPS1)
1734 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1735 sc->sc_ctrl_ext |=
1736 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1737 CTRL_EXT_SWDPIO_SHIFT;
1738 sc->sc_ctrl_ext |=
1739 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1740 CTRL_EXT_SWDPINS_SHIFT;
1741 } else {
1742 sc->sc_ctrl_ext |=
1743 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1744 CTRL_EXT_SWDPIO_SHIFT;
1745 }
1746 #endif
1747
1748 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1749 #if 0
1750 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1751 #endif
1752
1753 /*
1754 * Set up some register offsets that are different between
1755 * the i82542 and the i82543 and later chips.
1756 */
1757 if (sc->sc_type < WM_T_82543) {
1758 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1759 sc->sc_tdt_reg = WMREG_OLD_TDT;
1760 } else {
1761 sc->sc_rdt_reg = WMREG_RDT;
1762 sc->sc_tdt_reg = WMREG_TDT;
1763 }
1764
1765 if (sc->sc_type == WM_T_PCH) {
1766 uint16_t val;
1767
1768 /* Save the NVM K1 bit setting */
1769 wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1770
1771 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1772 sc->sc_nvm_k1_enabled = 1;
1773 else
1774 sc->sc_nvm_k1_enabled = 0;
1775 }
1776
1777 /*
1778 * Determine if we're TBI,GMII or SGMII mode, and initialize the
1779 * media structures accordingly.
1780 */
1781 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1782 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1783 || sc->sc_type == WM_T_82573
1784 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1785 /* STATUS_TBIMODE reserved/reused, can't rely on it */
1786 wm_gmii_mediainit(sc, wmp->wmp_product);
1787 } else if (sc->sc_type < WM_T_82543 ||
1788 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1789 if (wmp->wmp_flags & WMP_F_1000T)
1790 aprint_error_dev(sc->sc_dev,
1791 "WARNING: TBIMODE set on 1000BASE-T product!\n");
1792 wm_tbi_mediainit(sc);
1793 } else {
1794 switch (sc->sc_type) {
1795 case WM_T_82575:
1796 case WM_T_82576:
1797 case WM_T_82580:
1798 case WM_T_82580ER:
1799 reg = CSR_READ(sc, WMREG_CTRL_EXT);
1800 switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1801 case CTRL_EXT_LINK_MODE_SGMII:
1802 aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1803 sc->sc_flags |= WM_F_SGMII;
1804 CSR_WRITE(sc, WMREG_CTRL_EXT,
1805 reg | CTRL_EXT_I2C_ENA);
1806 wm_gmii_mediainit(sc, wmp->wmp_product);
1807 break;
1808 case CTRL_EXT_LINK_MODE_1000KX:
1809 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1810 aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1811 CSR_WRITE(sc, WMREG_CTRL_EXT,
1812 reg | CTRL_EXT_I2C_ENA);
1813 panic("not supported yet\n");
1814 break;
1815 case CTRL_EXT_LINK_MODE_GMII:
1816 default:
1817 CSR_WRITE(sc, WMREG_CTRL_EXT,
1818 reg & ~CTRL_EXT_I2C_ENA);
1819 wm_gmii_mediainit(sc, wmp->wmp_product);
1820 break;
1821 }
1822 break;
1823 default:
1824 if (wmp->wmp_flags & WMP_F_1000X)
1825 aprint_error_dev(sc->sc_dev,
1826 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1827 wm_gmii_mediainit(sc, wmp->wmp_product);
1828 }
1829 }
1830
1831 ifp = &sc->sc_ethercom.ec_if;
1832 xname = device_xname(sc->sc_dev);
1833 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1834 ifp->if_softc = sc;
1835 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1836 ifp->if_ioctl = wm_ioctl;
1837 ifp->if_start = wm_start;
1838 ifp->if_watchdog = wm_watchdog;
1839 ifp->if_init = wm_init;
1840 ifp->if_stop = wm_stop;
1841 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1842 IFQ_SET_READY(&ifp->if_snd);
1843
1844 /* Check for jumbo frame */
1845 switch (sc->sc_type) {
1846 case WM_T_82573:
1847 /* XXX limited to 9234 if ASPM is disabled */
1848 wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1849 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1850 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1851 break;
1852 case WM_T_82571:
1853 case WM_T_82572:
1854 case WM_T_82574:
1855 case WM_T_82575:
1856 case WM_T_82576:
1857 case WM_T_82580:
1858 case WM_T_82580ER:
1859 case WM_T_80003:
1860 case WM_T_ICH9:
1861 case WM_T_ICH10:
1862 /* XXX limited to 9234 */
1863 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1864 break;
1865 case WM_T_PCH:
1866 /* XXX limited to 4096 */
1867 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1868 break;
1869 case WM_T_82542_2_0:
1870 case WM_T_82542_2_1:
1871 case WM_T_82583:
1872 case WM_T_ICH8:
1873 /* No support for jumbo frame */
1874 break;
1875 default:
1876 /* ETHER_MAX_LEN_JUMBO */
1877 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1878 break;
1879 }
1880
1881 /*
1882 * If we're a i82543 or greater, we can support VLANs.
1883 */
1884 if (sc->sc_type >= WM_T_82543)
1885 sc->sc_ethercom.ec_capabilities |=
1886 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1887
1888 /*
1889 * We can perform TCPv4 and UDPv4 checkums in-bound. Only
1890 * on i82543 and later.
1891 */
1892 if (sc->sc_type >= WM_T_82543) {
1893 ifp->if_capabilities |=
1894 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1895 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1896 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1897 IFCAP_CSUM_TCPv6_Tx |
1898 IFCAP_CSUM_UDPv6_Tx;
1899 }
1900
1901 /*
1902 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1903 *
1904 * 82541GI (8086:1076) ... no
1905 * 82572EI (8086:10b9) ... yes
1906 */
1907 if (sc->sc_type >= WM_T_82571) {
1908 ifp->if_capabilities |=
1909 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1910 }
1911
1912 /*
1913 * If we're a i82544 or greater (except i82547), we can do
1914 * TCP segmentation offload.
1915 */
1916 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1917 ifp->if_capabilities |= IFCAP_TSOv4;
1918 }
1919
1920 if (sc->sc_type >= WM_T_82571) {
1921 ifp->if_capabilities |= IFCAP_TSOv6;
1922 }
1923
1924 /*
1925 * Attach the interface.
1926 */
1927 if_attach(ifp);
1928 ether_ifattach(ifp, enaddr);
1929 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
1930 #if NRND > 0
1931 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1932 #endif
1933
1934 #ifdef WM_EVENT_COUNTERS
1935 /* Attach event counters. */
1936 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1937 NULL, xname, "txsstall");
1938 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1939 NULL, xname, "txdstall");
1940 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1941 NULL, xname, "txfifo_stall");
1942 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1943 NULL, xname, "txdw");
1944 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1945 NULL, xname, "txqe");
1946 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1947 NULL, xname, "rxintr");
1948 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1949 NULL, xname, "linkintr");
1950
1951 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1952 NULL, xname, "rxipsum");
1953 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1954 NULL, xname, "rxtusum");
1955 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1956 NULL, xname, "txipsum");
1957 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1958 NULL, xname, "txtusum");
1959 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1960 NULL, xname, "txtusum6");
1961
1962 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1963 NULL, xname, "txtso");
1964 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1965 NULL, xname, "txtso6");
1966 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1967 NULL, xname, "txtsopain");
1968
1969 for (i = 0; i < WM_NTXSEGS; i++) {
1970 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1971 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1972 NULL, xname, wm_txseg_evcnt_names[i]);
1973 }
1974
1975 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1976 NULL, xname, "txdrop");
1977
1978 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1979 NULL, xname, "tu");
1980
1981 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1982 NULL, xname, "tx_xoff");
1983 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1984 NULL, xname, "tx_xon");
1985 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1986 NULL, xname, "rx_xoff");
1987 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1988 NULL, xname, "rx_xon");
1989 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1990 NULL, xname, "rx_macctl");
1991 #endif /* WM_EVENT_COUNTERS */
1992
1993 if (pmf_device_register(self, wm_suspend, wm_resume))
1994 pmf_class_network_register(self, ifp);
1995 else
1996 aprint_error_dev(self, "couldn't establish power handler\n");
1997
1998 return;
1999
2000 /*
2001 * Free any resources we've allocated during the failed attach
2002 * attempt. Do this in reverse order and fall through.
2003 */
2004 fail_5:
2005 for (i = 0; i < WM_NRXDESC; i++) {
2006 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2007 bus_dmamap_destroy(sc->sc_dmat,
2008 sc->sc_rxsoft[i].rxs_dmamap);
2009 }
2010 fail_4:
2011 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2012 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2013 bus_dmamap_destroy(sc->sc_dmat,
2014 sc->sc_txsoft[i].txs_dmamap);
2015 }
2016 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2017 fail_3:
2018 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2019 fail_2:
2020 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2021 sc->sc_cd_size);
2022 fail_1:
2023 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2024 fail_0:
2025 return;
2026 }
2027
/*
 * wm_detach:
 *
 *	Autoconfiguration detach routine.  Quiesce the chip, detach
 *	from the network stack, and release every resource acquired
 *	in wm_attach(), in reverse order of acquisition.
 *
 *	Returns 0 on success (this implementation cannot fail).
 */
static int
wm_detach(device_t self, int flags __unused)
{
	struct wm_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, s;

	s = splnet();
	/* Stop the interface. Callouts are stopped in it. */
	wm_stop(ifp, 1);
	splx(s);

	/* Undo the pmf registration done at the end of attach. */
	pmf_device_deregister(self);

	/* Tell the firmware about the release */
	wm_release_manageability(sc);
	wm_release_hw_control(sc);

	/* Detach any PHYs and tear down the MII bus. */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	/* Detach from the network stack before freeing DMA resources. */
	ether_ifdetach(ifp);
	if_detach(ifp);


	/* Unload RX dmamaps and free mbufs */
	wm_rxdrain(sc);

	/* Free dmamap. It's the same as the end of the wm_attach() function */
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	/* Control-data (descriptor ring) teardown: unload, destroy,
	 * unmap, then free the backing DMA segment. */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sc->sc_cd_size);
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);

	/* Disestablish the interrupt handler */
	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}

	/* Unmap the registers */
	if (sc->sc_ss) {
		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
		sc->sc_ss = 0;
	}

	/* Unmap the I/O space, if it was mapped. */
	if (sc->sc_ios) {
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
		sc->sc_ios = 0;
	}

	return 0;
}
2094
2095 /*
2096 * wm_tx_offload:
2097 *
2098 * Set up TCP/IP checksumming parameters for the
2099 * specified packet.
2100 */
2101 static int
2102 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2103 uint8_t *fieldsp)
2104 {
2105 struct mbuf *m0 = txs->txs_mbuf;
2106 struct livengood_tcpip_ctxdesc *t;
2107 uint32_t ipcs, tucs, cmd, cmdlen, seg;
2108 uint32_t ipcse;
2109 struct ether_header *eh;
2110 int offset, iphl;
2111 uint8_t fields;
2112
2113 /*
2114 * XXX It would be nice if the mbuf pkthdr had offset
2115 * fields for the protocol headers.
2116 */
2117
2118 eh = mtod(m0, struct ether_header *);
2119 switch (htons(eh->ether_type)) {
2120 case ETHERTYPE_IP:
2121 case ETHERTYPE_IPV6:
2122 offset = ETHER_HDR_LEN;
2123 break;
2124
2125 case ETHERTYPE_VLAN:
2126 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2127 break;
2128
2129 default:
2130 /*
2131 * Don't support this protocol or encapsulation.
2132 */
2133 *fieldsp = 0;
2134 *cmdp = 0;
2135 return 0;
2136 }
2137
2138 if ((m0->m_pkthdr.csum_flags &
2139 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2140 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2141 } else {
2142 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2143 }
2144 ipcse = offset + iphl - 1;
2145
2146 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2147 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2148 seg = 0;
2149 fields = 0;
2150
2151 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2152 int hlen = offset + iphl;
2153 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2154
2155 if (__predict_false(m0->m_len <
2156 (hlen + sizeof(struct tcphdr)))) {
2157 /*
2158 * TCP/IP headers are not in the first mbuf; we need
2159 * to do this the slow and painful way. Let's just
2160 * hope this doesn't happen very often.
2161 */
2162 struct tcphdr th;
2163
2164 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2165
2166 m_copydata(m0, hlen, sizeof(th), &th);
2167 if (v4) {
2168 struct ip ip;
2169
2170 m_copydata(m0, offset, sizeof(ip), &ip);
2171 ip.ip_len = 0;
2172 m_copyback(m0,
2173 offset + offsetof(struct ip, ip_len),
2174 sizeof(ip.ip_len), &ip.ip_len);
2175 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2176 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2177 } else {
2178 struct ip6_hdr ip6;
2179
2180 m_copydata(m0, offset, sizeof(ip6), &ip6);
2181 ip6.ip6_plen = 0;
2182 m_copyback(m0,
2183 offset + offsetof(struct ip6_hdr, ip6_plen),
2184 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2185 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2186 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2187 }
2188 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2189 sizeof(th.th_sum), &th.th_sum);
2190
2191 hlen += th.th_off << 2;
2192 } else {
2193 /*
2194 * TCP/IP headers are in the first mbuf; we can do
2195 * this the easy way.
2196 */
2197 struct tcphdr *th;
2198
2199 if (v4) {
2200 struct ip *ip =
2201 (void *)(mtod(m0, char *) + offset);
2202 th = (void *)(mtod(m0, char *) + hlen);
2203
2204 ip->ip_len = 0;
2205 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2206 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2207 } else {
2208 struct ip6_hdr *ip6 =
2209 (void *)(mtod(m0, char *) + offset);
2210 th = (void *)(mtod(m0, char *) + hlen);
2211
2212 ip6->ip6_plen = 0;
2213 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2214 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2215 }
2216 hlen += th->th_off << 2;
2217 }
2218
2219 if (v4) {
2220 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2221 cmdlen |= WTX_TCPIP_CMD_IP;
2222 } else {
2223 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2224 ipcse = 0;
2225 }
2226 cmd |= WTX_TCPIP_CMD_TSE;
2227 cmdlen |= WTX_TCPIP_CMD_TSE |
2228 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2229 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2230 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2231 }
2232
2233 /*
2234 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2235 * offload feature, if we load the context descriptor, we
2236 * MUST provide valid values for IPCSS and TUCSS fields.
2237 */
2238
2239 ipcs = WTX_TCPIP_IPCSS(offset) |
2240 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2241 WTX_TCPIP_IPCSE(ipcse);
2242 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2243 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2244 fields |= WTX_IXSM;
2245 }
2246
2247 offset += iphl;
2248
2249 if (m0->m_pkthdr.csum_flags &
2250 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2251 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2252 fields |= WTX_TXSM;
2253 tucs = WTX_TCPIP_TUCSS(offset) |
2254 WTX_TCPIP_TUCSO(offset +
2255 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2256 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2257 } else if ((m0->m_pkthdr.csum_flags &
2258 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2259 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2260 fields |= WTX_TXSM;
2261 tucs = WTX_TCPIP_TUCSS(offset) |
2262 WTX_TCPIP_TUCSO(offset +
2263 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2264 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2265 } else {
2266 /* Just initialize it to a valid TCP context. */
2267 tucs = WTX_TCPIP_TUCSS(offset) |
2268 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2269 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2270 }
2271
2272 /* Fill in the context descriptor. */
2273 t = (struct livengood_tcpip_ctxdesc *)
2274 &sc->sc_txdescs[sc->sc_txnext];
2275 t->tcpip_ipcs = htole32(ipcs);
2276 t->tcpip_tucs = htole32(tucs);
2277 t->tcpip_cmdlen = htole32(cmdlen);
2278 t->tcpip_seg = htole32(seg);
2279 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2280
2281 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2282 txs->txs_ndesc++;
2283
2284 *cmdp = cmd;
2285 *fieldsp = fields;
2286
2287 return 0;
2288 }
2289
2290 static void
2291 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2292 {
2293 struct mbuf *m;
2294 int i;
2295
2296 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2297 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2298 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2299 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2300 m->m_data, m->m_len, m->m_flags);
2301 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2302 i, i == 1 ? "" : "s");
2303 }
2304
2305 /*
2306 * wm_82547_txfifo_stall:
2307 *
2308 * Callout used to wait for the 82547 Tx FIFO to drain,
2309 * reset the FIFO pointers, and restart packet transmission.
2310 */
2311 static void
2312 wm_82547_txfifo_stall(void *arg)
2313 {
2314 struct wm_softc *sc = arg;
2315 int s;
2316
2317 s = splnet();
2318
2319 if (sc->sc_txfifo_stall) {
2320 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2321 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2322 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2323 /*
2324 * Packets have drained. Stop transmitter, reset
2325 * FIFO pointers, restart transmitter, and kick
2326 * the packet queue.
2327 */
2328 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2329 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2330 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2331 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2332 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2333 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2334 CSR_WRITE(sc, WMREG_TCTL, tctl);
2335 CSR_WRITE_FLUSH(sc);
2336
2337 sc->sc_txfifo_head = 0;
2338 sc->sc_txfifo_stall = 0;
2339 wm_start(&sc->sc_ethercom.ec_if);
2340 } else {
2341 /*
2342 * Still waiting for packets to drain; try again in
2343 * another tick.
2344 */
2345 callout_schedule(&sc->sc_txfifo_ch, 1);
2346 }
2347 }
2348
2349 splx(s);
2350 }
2351
2352 /*
2353 * wm_82547_txfifo_bugchk:
2354 *
2355 * Check for bug condition in the 82547 Tx FIFO. We need to
2356 * prevent enqueueing a packet that would wrap around the end
2357 * if the Tx FIFO ring buffer, otherwise the chip will croak.
2358 *
2359 * We do this by checking the amount of space before the end
2360 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2361 * the Tx FIFO, wait for all remaining packets to drain, reset
2362 * the internal FIFO pointers to the beginning, and restart
2363 * transmission on the interface.
2364 */
2365 #define WM_FIFO_HDR 0x10
2366 #define WM_82547_PAD_LEN 0x3e0
2367 static int
2368 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2369 {
2370 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2371 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2372
2373 /* Just return if already stalled. */
2374 if (sc->sc_txfifo_stall)
2375 return 1;
2376
2377 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2378 /* Stall only occurs in half-duplex mode. */
2379 goto send_packet;
2380 }
2381
2382 if (len >= WM_82547_PAD_LEN + space) {
2383 sc->sc_txfifo_stall = 1;
2384 callout_schedule(&sc->sc_txfifo_ch, 1);
2385 return 1;
2386 }
2387
2388 send_packet:
2389 sc->sc_txfifo_head += len;
2390 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2391 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2392
2393 return 0;
2394 }
2395
2396 /*
2397 * wm_start: [ifnet interface function]
2398 *
2399 * Start packet transmission on the interface.
2400 */
2401 static void
2402 wm_start(struct ifnet *ifp)
2403 {
2404 struct wm_softc *sc = ifp->if_softc;
2405 struct mbuf *m0;
2406 struct m_tag *mtag;
2407 struct wm_txsoft *txs;
2408 bus_dmamap_t dmamap;
2409 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2410 bus_addr_t curaddr;
2411 bus_size_t seglen, curlen;
2412 uint32_t cksumcmd;
2413 uint8_t cksumfields;
2414
2415 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2416 return;
2417
2418 /*
2419 * Remember the previous number of free descriptors.
2420 */
2421 ofree = sc->sc_txfree;
2422
2423 /*
2424 * Loop through the send queue, setting up transmit descriptors
2425 * until we drain the queue, or use up all available transmit
2426 * descriptors.
2427 */
2428 for (;;) {
2429 /* Grab a packet off the queue. */
2430 IFQ_POLL(&ifp->if_snd, m0);
2431 if (m0 == NULL)
2432 break;
2433
2434 DPRINTF(WM_DEBUG_TX,
2435 ("%s: TX: have packet to transmit: %p\n",
2436 device_xname(sc->sc_dev), m0));
2437
2438 /* Get a work queue entry. */
2439 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2440 wm_txintr(sc);
2441 if (sc->sc_txsfree == 0) {
2442 DPRINTF(WM_DEBUG_TX,
2443 ("%s: TX: no free job descriptors\n",
2444 device_xname(sc->sc_dev)));
2445 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2446 break;
2447 }
2448 }
2449
2450 txs = &sc->sc_txsoft[sc->sc_txsnext];
2451 dmamap = txs->txs_dmamap;
2452
2453 use_tso = (m0->m_pkthdr.csum_flags &
2454 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2455
2456 /*
2457 * So says the Linux driver:
2458 * The controller does a simple calculation to make sure
2459 * there is enough room in the FIFO before initiating the
2460 * DMA for each buffer. The calc is:
2461 * 4 = ceil(buffer len / MSS)
2462 * To make sure we don't overrun the FIFO, adjust the max
2463 * buffer len if the MSS drops.
2464 */
2465 dmamap->dm_maxsegsz =
2466 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2467 ? m0->m_pkthdr.segsz << 2
2468 : WTX_MAX_LEN;
2469
2470 /*
2471 * Load the DMA map. If this fails, the packet either
2472 * didn't fit in the allotted number of segments, or we
2473 * were short on resources. For the too-many-segments
2474 * case, we simply report an error and drop the packet,
2475 * since we can't sanely copy a jumbo packet to a single
2476 * buffer.
2477 */
2478 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2479 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2480 if (error) {
2481 if (error == EFBIG) {
2482 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2483 log(LOG_ERR, "%s: Tx packet consumes too many "
2484 "DMA segments, dropping...\n",
2485 device_xname(sc->sc_dev));
2486 IFQ_DEQUEUE(&ifp->if_snd, m0);
2487 wm_dump_mbuf_chain(sc, m0);
2488 m_freem(m0);
2489 continue;
2490 }
2491 /*
2492 * Short on resources, just stop for now.
2493 */
2494 DPRINTF(WM_DEBUG_TX,
2495 ("%s: TX: dmamap load failed: %d\n",
2496 device_xname(sc->sc_dev), error));
2497 break;
2498 }
2499
2500 segs_needed = dmamap->dm_nsegs;
2501 if (use_tso) {
2502 /* For sentinel descriptor; see below. */
2503 segs_needed++;
2504 }
2505
2506 /*
2507 * Ensure we have enough descriptors free to describe
2508 * the packet. Note, we always reserve one descriptor
2509 * at the end of the ring due to the semantics of the
2510 * TDT register, plus one more in the event we need
2511 * to load offload context.
2512 */
2513 if (segs_needed > sc->sc_txfree - 2) {
2514 /*
2515 * Not enough free descriptors to transmit this
2516 * packet. We haven't committed anything yet,
2517 * so just unload the DMA map, put the packet
2518 * pack on the queue, and punt. Notify the upper
2519 * layer that there are no more slots left.
2520 */
2521 DPRINTF(WM_DEBUG_TX,
2522 ("%s: TX: need %d (%d) descriptors, have %d\n",
2523 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2524 segs_needed, sc->sc_txfree - 1));
2525 ifp->if_flags |= IFF_OACTIVE;
2526 bus_dmamap_unload(sc->sc_dmat, dmamap);
2527 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2528 break;
2529 }
2530
2531 /*
2532 * Check for 82547 Tx FIFO bug. We need to do this
2533 * once we know we can transmit the packet, since we
2534 * do some internal FIFO space accounting here.
2535 */
2536 if (sc->sc_type == WM_T_82547 &&
2537 wm_82547_txfifo_bugchk(sc, m0)) {
2538 DPRINTF(WM_DEBUG_TX,
2539 ("%s: TX: 82547 Tx FIFO bug detected\n",
2540 device_xname(sc->sc_dev)));
2541 ifp->if_flags |= IFF_OACTIVE;
2542 bus_dmamap_unload(sc->sc_dmat, dmamap);
2543 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2544 break;
2545 }
2546
2547 IFQ_DEQUEUE(&ifp->if_snd, m0);
2548
2549 /*
2550 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2551 */
2552
2553 DPRINTF(WM_DEBUG_TX,
2554 ("%s: TX: packet has %d (%d) DMA segments\n",
2555 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2556
2557 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2558
2559 /*
2560 * Store a pointer to the packet so that we can free it
2561 * later.
2562 *
2563 * Initially, we consider the number of descriptors the
2564 * packet uses the number of DMA segments. This may be
2565 * incremented by 1 if we do checksum offload (a descriptor
2566 * is used to set the checksum context).
2567 */
2568 txs->txs_mbuf = m0;
2569 txs->txs_firstdesc = sc->sc_txnext;
2570 txs->txs_ndesc = segs_needed;
2571
2572 /* Set up offload parameters for this packet. */
2573 if (m0->m_pkthdr.csum_flags &
2574 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2575 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2576 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2577 if (wm_tx_offload(sc, txs, &cksumcmd,
2578 &cksumfields) != 0) {
2579 /* Error message already displayed. */
2580 bus_dmamap_unload(sc->sc_dmat, dmamap);
2581 continue;
2582 }
2583 } else {
2584 cksumcmd = 0;
2585 cksumfields = 0;
2586 }
2587
2588 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2589
2590 /* Sync the DMA map. */
2591 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2592 BUS_DMASYNC_PREWRITE);
2593
2594 /*
2595 * Initialize the transmit descriptor.
2596 */
2597 for (nexttx = sc->sc_txnext, seg = 0;
2598 seg < dmamap->dm_nsegs; seg++) {
2599 for (seglen = dmamap->dm_segs[seg].ds_len,
2600 curaddr = dmamap->dm_segs[seg].ds_addr;
2601 seglen != 0;
2602 curaddr += curlen, seglen -= curlen,
2603 nexttx = WM_NEXTTX(sc, nexttx)) {
2604 curlen = seglen;
2605
2606 /*
2607 * So says the Linux driver:
2608 * Work around for premature descriptor
2609 * write-backs in TSO mode. Append a
2610 * 4-byte sentinel descriptor.
2611 */
2612 if (use_tso &&
2613 seg == dmamap->dm_nsegs - 1 &&
2614 curlen > 8)
2615 curlen -= 4;
2616
2617 wm_set_dma_addr(
2618 &sc->sc_txdescs[nexttx].wtx_addr,
2619 curaddr);
2620 sc->sc_txdescs[nexttx].wtx_cmdlen =
2621 htole32(cksumcmd | curlen);
2622 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2623 0;
2624 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2625 cksumfields;
2626 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2627 lasttx = nexttx;
2628
2629 DPRINTF(WM_DEBUG_TX,
2630 ("%s: TX: desc %d: low %#" PRIxPADDR ", "
2631 "len %#04zx\n",
2632 device_xname(sc->sc_dev), nexttx,
2633 curaddr & 0xffffffffUL, curlen));
2634 }
2635 }
2636
2637 KASSERT(lasttx != -1);
2638
2639 /*
2640 * Set up the command byte on the last descriptor of
2641 * the packet. If we're in the interrupt delay window,
2642 * delay the interrupt.
2643 */
2644 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2645 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2646
2647 /*
2648 * If VLANs are enabled and the packet has a VLAN tag, set
2649 * up the descriptor to encapsulate the packet for us.
2650 *
2651 * This is only valid on the last descriptor of the packet.
2652 */
2653 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2654 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2655 htole32(WTX_CMD_VLE);
2656 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2657 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2658 }
2659
2660 txs->txs_lastdesc = lasttx;
2661
2662 DPRINTF(WM_DEBUG_TX,
2663 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2664 device_xname(sc->sc_dev),
2665 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2666
2667 /* Sync the descriptors we're using. */
2668 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2669 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2670
2671 /* Give the packet to the chip. */
2672 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2673
2674 DPRINTF(WM_DEBUG_TX,
2675 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2676
2677 DPRINTF(WM_DEBUG_TX,
2678 ("%s: TX: finished transmitting packet, job %d\n",
2679 device_xname(sc->sc_dev), sc->sc_txsnext));
2680
2681 /* Advance the tx pointer. */
2682 sc->sc_txfree -= txs->txs_ndesc;
2683 sc->sc_txnext = nexttx;
2684
2685 sc->sc_txsfree--;
2686 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2687
2688 /* Pass the packet to any BPF listeners. */
2689 bpf_mtap(ifp, m0);
2690 }
2691
2692 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2693 /* No more slots; notify upper layer. */
2694 ifp->if_flags |= IFF_OACTIVE;
2695 }
2696
2697 if (sc->sc_txfree != ofree) {
2698 /* Set a watchdog timer in case the chip flakes out. */
2699 ifp->if_timer = 5;
2700 }
2701 }
2702
2703 /*
2704 * wm_watchdog: [ifnet interface function]
2705 *
2706 * Watchdog timer handler.
2707 */
2708 static void
2709 wm_watchdog(struct ifnet *ifp)
2710 {
2711 struct wm_softc *sc = ifp->if_softc;
2712
2713 /*
2714 * Since we're using delayed interrupts, sweep up
2715 * before we report an error.
2716 */
2717 wm_txintr(sc);
2718
2719 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2720 log(LOG_ERR,
2721 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2722 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2723 sc->sc_txnext);
2724 ifp->if_oerrors++;
2725
2726 /* Reset the interface. */
2727 (void) wm_init(ifp);
2728 }
2729
2730 /* Try to get more packets going. */
2731 wm_start(ifp);
2732 }
2733
2734 static int
2735 wm_ifflags_cb(struct ethercom *ec)
2736 {
2737 struct ifnet *ifp = &ec->ec_if;
2738 struct wm_softc *sc = ifp->if_softc;
2739 int change = ifp->if_flags ^ sc->sc_if_flags;
2740
2741 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
2742 return ENETRESET;
2743 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0)
2744 return 0;
2745
2746 wm_set_filter(sc);
2747
2748 sc->sc_if_flags = ifp->if_flags;
2749 return 0;
2750 }
2751
2752 /*
2753 * wm_ioctl: [ifnet interface function]
2754 *
2755 * Handle control requests from the operator.
2756 */
2757 static int
2758 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2759 {
2760 struct wm_softc *sc = ifp->if_softc;
2761 struct ifreq *ifr = (struct ifreq *) data;
2762 struct ifaddr *ifa = (struct ifaddr *)data;
2763 struct sockaddr_dl *sdl;
2764 int s, error;
2765
2766 s = splnet();
2767
2768 switch (cmd) {
2769 case SIOCSIFMEDIA:
2770 case SIOCGIFMEDIA:
2771 /* Flow control requires full-duplex mode. */
2772 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2773 (ifr->ifr_media & IFM_FDX) == 0)
2774 ifr->ifr_media &= ~IFM_ETH_FMASK;
2775 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2776 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2777 /* We can do both TXPAUSE and RXPAUSE. */
2778 ifr->ifr_media |=
2779 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2780 }
2781 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2782 }
2783 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2784 break;
2785 case SIOCINITIFADDR:
2786 if (ifa->ifa_addr->sa_family == AF_LINK) {
2787 sdl = satosdl(ifp->if_dl->ifa_addr);
2788 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2789 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2790 /* unicast address is first multicast entry */
2791 wm_set_filter(sc);
2792 error = 0;
2793 break;
2794 }
2795 /* Fall through for rest */
2796 default:
2797 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2798 break;
2799
2800 error = 0;
2801
2802 if (cmd == SIOCSIFCAP)
2803 error = (*ifp->if_init)(ifp);
2804 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2805 ;
2806 else if (ifp->if_flags & IFF_RUNNING) {
2807 /*
2808 * Multicast list has changed; set the hardware filter
2809 * accordingly.
2810 */
2811 wm_set_filter(sc);
2812 }
2813 break;
2814 }
2815
2816 /* Try to get more packets going. */
2817 wm_start(ifp);
2818
2819 splx(s);
2820 return error;
2821 }
2822
2823 /*
2824 * wm_intr:
2825 *
2826 * Interrupt service routine.
2827 */
2828 static int
2829 wm_intr(void *arg)
2830 {
2831 struct wm_softc *sc = arg;
2832 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2833 uint32_t icr;
2834 int handled = 0;
2835
2836 while (1 /* CONSTCOND */) {
2837 icr = CSR_READ(sc, WMREG_ICR);
2838 if ((icr & sc->sc_icr) == 0)
2839 break;
2840 #if 0 /*NRND > 0*/
2841 if (RND_ENABLED(&sc->rnd_source))
2842 rnd_add_uint32(&sc->rnd_source, icr);
2843 #endif
2844
2845 handled = 1;
2846
2847 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2848 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2849 DPRINTF(WM_DEBUG_RX,
2850 ("%s: RX: got Rx intr 0x%08x\n",
2851 device_xname(sc->sc_dev),
2852 icr & (ICR_RXDMT0|ICR_RXT0)));
2853 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2854 }
2855 #endif
2856 wm_rxintr(sc);
2857
2858 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2859 if (icr & ICR_TXDW) {
2860 DPRINTF(WM_DEBUG_TX,
2861 ("%s: TX: got TXDW interrupt\n",
2862 device_xname(sc->sc_dev)));
2863 WM_EVCNT_INCR(&sc->sc_ev_txdw);
2864 }
2865 #endif
2866 wm_txintr(sc);
2867
2868 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2869 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2870 wm_linkintr(sc, icr);
2871 }
2872
2873 if (icr & ICR_RXO) {
2874 #if defined(WM_DEBUG)
2875 log(LOG_WARNING, "%s: Receive overrun\n",
2876 device_xname(sc->sc_dev));
2877 #endif /* defined(WM_DEBUG) */
2878 }
2879 }
2880
2881 if (handled) {
2882 /* Try to get more packets going. */
2883 wm_start(ifp);
2884 }
2885
2886 return handled;
2887 }
2888
2889 /*
2890 * wm_txintr:
2891 *
2892 * Helper; handle transmit interrupts.
2893 */
2894 static void
2895 wm_txintr(struct wm_softc *sc)
2896 {
2897 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2898 struct wm_txsoft *txs;
2899 uint8_t status;
2900 int i;
2901
2902 ifp->if_flags &= ~IFF_OACTIVE;
2903
2904 /*
2905 * Go through the Tx list and free mbufs for those
2906 * frames which have been transmitted.
2907 */
2908 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2909 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2910 txs = &sc->sc_txsoft[i];
2911
2912 DPRINTF(WM_DEBUG_TX,
2913 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2914
2915 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2916 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2917
2918 status =
2919 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2920 if ((status & WTX_ST_DD) == 0) {
2921 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2922 BUS_DMASYNC_PREREAD);
2923 break;
2924 }
2925
2926 DPRINTF(WM_DEBUG_TX,
2927 ("%s: TX: job %d done: descs %d..%d\n",
2928 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2929 txs->txs_lastdesc));
2930
2931 /*
2932 * XXX We should probably be using the statistics
2933 * XXX registers, but I don't know if they exist
2934 * XXX on chips before the i82544.
2935 */
2936
2937 #ifdef WM_EVENT_COUNTERS
2938 if (status & WTX_ST_TU)
2939 WM_EVCNT_INCR(&sc->sc_ev_tu);
2940 #endif /* WM_EVENT_COUNTERS */
2941
2942 if (status & (WTX_ST_EC|WTX_ST_LC)) {
2943 ifp->if_oerrors++;
2944 if (status & WTX_ST_LC)
2945 log(LOG_WARNING, "%s: late collision\n",
2946 device_xname(sc->sc_dev));
2947 else if (status & WTX_ST_EC) {
2948 ifp->if_collisions += 16;
2949 log(LOG_WARNING, "%s: excessive collisions\n",
2950 device_xname(sc->sc_dev));
2951 }
2952 } else
2953 ifp->if_opackets++;
2954
2955 sc->sc_txfree += txs->txs_ndesc;
2956 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2957 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2958 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2959 m_freem(txs->txs_mbuf);
2960 txs->txs_mbuf = NULL;
2961 }
2962
2963 /* Update the dirty transmit buffer pointer. */
2964 sc->sc_txsdirty = i;
2965 DPRINTF(WM_DEBUG_TX,
2966 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
2967
2968 /*
2969 * If there are no more pending transmissions, cancel the watchdog
2970 * timer.
2971 */
2972 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2973 ifp->if_timer = 0;
2974 }
2975
2976 /*
2977 * wm_rxintr:
2978 *
2979 * Helper; handle receive interrupts.
2980 */
static void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;
	uint16_t vlantag;

	/*
	 * Walk the receive ring starting at the last descriptor we
	 * processed (sc_rxptr), handing completed packets to the stack.
	 * We stop at the first descriptor the hardware has not yet
	 * marked done (WRX_ST_DD clear).
	 */
	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    device_xname(sc->sc_dev), i));

		/* Pull the descriptor contents out of DMA memory. */
		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);
		vlantag = sc->sc_rxdescs[i].wrx_special;

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		/*
		 * If a previous buffer of a multi-descriptor packet was
		 * dropped, keep discarding descriptors until we see the
		 * end-of-packet marker, then resume normal processing.
		 */
		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    device_xname(sc->sc_dev), i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    device_xname(sc->sc_dev)));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring, unless of
		 * course the length is zero. Treat the latter as a
		 * failed mapping.
		 */
		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", device_xname(sc->sc_dev),
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		m->m_len = len;
		sc->sc_rxlen += len;
		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    device_xname(sc->sc_dev), m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			WM_RXCHAIN_LINK(sc, m);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    device_xname(sc->sc_dev), sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now. The chip is
		 * configured to include the FCS (not all chips can
		 * be configured to strip it), so we need to trim it.
		 * May need to adjust length of previous mbuf in the
		 * chain if the current mbuf is too short.
		 */
		if (m->m_len < ETHER_CRC_LEN) {
			sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
			m->m_len = 0;
		} else {
			m->m_len -= ETHER_CRC_LEN;
		}
		len = sc->sc_rxlen - ETHER_CRC_LEN;

		WM_RXCHAIN_LINK(sc, m);

		/* Terminate the chain and take ownership of the head. */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    device_xname(sc->sc_dev), len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			if (errors & WRX_ER_SE)
				log(LOG_WARNING, "%s: symbol error\n",
				    device_xname(sc->sc_dev));
			else if (errors & WRX_ER_SEQ)
				log(LOG_WARNING, "%s: receive sequence error\n",
				    device_xname(sc->sc_dev));
			else if (errors & WRX_ER_CE)
				log(LOG_WARNING, "%s: CRC error\n",
				    device_xname(sc->sc_dev));
			m_freem(m);
			continue;
		}

		/*
		 * No errors. Receive the packet.
		 */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us. Associate the tag with the packet.
		 */
		if ((status & WRX_ST_VP) != 0) {
			VLAN_INPUT_TAG(ifp, m,
			    le16toh(vlantag),
			    continue);
		}

		/*
		 * Set up checksum info for this packet.
		 * WRX_ST_IXSM set means the chip did not compute checksums.
		 */
		if ((status & WRX_ST_IXSM) == 0) {
			if (status & WRX_ST_IPCS) {
				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				if (errors & WRX_ER_IPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
			}
			if (status & WRX_ST_TCPCS) {
				/*
				 * Note: we don't know if this was TCP or UDP,
				 * so we just set both bits, and expect the
				 * upper layers to deal.
				 */
				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
				m->m_pkthdr.csum_flags |=
				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
				if (errors & WRX_ER_TCPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			}
		}

		ifp->if_ipackets++;

		/* Pass this up to any BPF listeners. */
		bpf_mtap(ifp, m);

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
}
3179
3180 /*
3181 * wm_linkintr_gmii:
3182 *
3183 * Helper; handle link interrupts for GMII.
3184 */
static void
wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
{

	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
		__func__));

	if (icr & ICR_LSC) {
		/* Link status change: poll the PHY and apply fixups. */
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: LSC -> mii_tick\n",
			device_xname(sc->sc_dev)));
		mii_tick(&sc->sc_mii);
		if (sc->sc_type == WM_T_82543) {
			int miistatus, active;

			/*
			 * With 82543, we need to force speed and
			 * duplex on the MAC equal to what the PHY
			 * speed and duplex configuration is.
			 */
			miistatus = sc->sc_mii.mii_media_status;

			if (miistatus & IFM_ACTIVE) {
				active = sc->sc_mii.mii_media_active;
				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
				switch (IFM_SUBTYPE(active)) {
				case IFM_10_T:
					sc->sc_ctrl |= CTRL_SPEED_10;
					break;
				case IFM_100_TX:
					sc->sc_ctrl |= CTRL_SPEED_100;
					break;
				case IFM_1000_T:
					sc->sc_ctrl |= CTRL_SPEED_1000;
					break;
				default:
					/*
					 * fiber?
					 * Shoud not enter here.
					 */
					printf("unknown media (%x)\n",
					    active);
					break;
				}
				if (active & IFM_FDX)
					sc->sc_ctrl |= CTRL_FD;
				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			}
		} else if ((sc->sc_type == WM_T_ICH8)
		    && (sc->sc_phytype == WMPHY_IGP_3)) {
			/* ICH8 + IGP3 PHY needs the Kumeran lock-loss fix. */
			wm_kmrn_lock_loss_workaround_ich8lan(sc);
		} else if (sc->sc_type == WM_T_PCH) {
			wm_k1_gig_workaround_hv(sc,
			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
		}

		/* 82578 PHY link-stall workaround at gigabit speed. */
		if ((sc->sc_phytype == WMPHY_82578)
		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
			== IFM_1000_T)) {

			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
				delay(200*1000); /* XXX too big */

				/* Link stall fix for link up */
				wm_gmii_hv_writereg(sc->sc_dev, 1,
				    HV_MUX_DATA_CTRL,
				    HV_MUX_DATA_CTRL_GEN_TO_MAC
				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
				wm_gmii_hv_writereg(sc->sc_dev, 1,
				    HV_MUX_DATA_CTRL,
				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
			}
		}
	} else if (icr & ICR_RXSEQ) {
		/* Receive sequence error: log only, no recovery needed. */
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK Receive sequence error\n",
			device_xname(sc->sc_dev)));
	}
}
3264
3265 /*
3266 * wm_linkintr_tbi:
3267 *
3268 * Helper; handle link interrupts for TBI mode.
3269 */
static void
wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
		__func__));

	status = CSR_READ(sc, WMREG_STATUS);
	if (icr & ICR_LSC) {
		if (status & STATUS_LU) {
			/* Link came up: reprogram collision distance and
			 * flow control to match the negotiated duplex. */
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    device_xname(sc->sc_dev),
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */

			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (sc->sc_ctrl & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			/* Pre-82543 chips use the old FCRTL register offset. */
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
				      WMREG_OLD_FCRTL : WMREG_FCRTL,
				      sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
		}
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXCFG) {
		/* Received a /C/ ordered set: count it and re-check link. */
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    device_xname(sc->sc_dev)));
		sc->sc_tbi_nrxcfg++;
		wm_check_for_link(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    device_xname(sc->sc_dev)));
	}
}
3322
3323 /*
3324 * wm_linkintr:
3325 *
3326 * Helper; handle link interrupts.
3327 */
3328 static void
3329 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3330 {
3331
3332 if (sc->sc_flags & WM_F_HAS_MII)
3333 wm_linkintr_gmii(sc, icr);
3334 else
3335 wm_linkintr_tbi(sc, icr);
3336 }
3337
3338 /*
3339 * wm_tick:
3340 *
3341 * One second timer, used to check link status, sweep up
3342 * completed transmit jobs, etc.
3343 */
static void
wm_tick(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int s;

	s = splnet();

	/* Flow-control statistics registers exist from 82542_2_1 on. */
	if (sc->sc_type >= WM_T_82542_2_1) {
		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
	}

	/*
	 * Harvest the hardware statistics counters (they are
	 * clear-on-read) into the interface counters.
	 */
	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += 0ULL + /* ensure quad_t */
	    + CSR_READ(sc, WMREG_CRCERRS)
	    + CSR_READ(sc, WMREG_ALGNERRC)
	    + CSR_READ(sc, WMREG_SYMERRC)
	    + CSR_READ(sc, WMREG_RXERRC)
	    + CSR_READ(sc, WMREG_SEC)
	    + CSR_READ(sc, WMREG_CEXTERR)
	    + CSR_READ(sc, WMREG_RLEC);
	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);

	/* Check link: via the PHY if we have one, else TBI autoneg state. */
	if (sc->sc_flags & WM_F_HAS_MII)
		mii_tick(&sc->sc_mii);
	else
		wm_tbi_check_link(sc);

	splx(s);

	/* Re-arm for the next one-second tick. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
}
3381
3382 /*
3383 * wm_reset:
3384 *
3385 * Reset the i82542 chip.
3386 */
static void
wm_reset(struct wm_softc *sc)
{
	int phy_reset = 0;
	uint32_t reg, mask;
	int i;

	/*
	 * Allocate on-chip memory according to the MTU size.
	 * The Packet Buffer Allocation register must be written
	 * before the chip is reset.
	 */
	switch (sc->sc_type) {
	case WM_T_82547:
	case WM_T_82547_2:
		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
		    PBA_22K : PBA_30K;
		/* 82547 needs the TX FIFO stall workaround state set up. */
		sc->sc_txfifo_head = 0;
		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
		sc->sc_txfifo_size =
		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
		sc->sc_txfifo_stall = 0;
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82575:	/* XXX need special handing for jumbo frames */
	case WM_T_80003:
		sc->sc_pba = PBA_32K;
		break;
	case WM_T_82580:
	case WM_T_82580ER:
		sc->sc_pba = PBA_35K;
		break;
	case WM_T_82576:
		sc->sc_pba = PBA_64K;
		break;
	case WM_T_82573:
		sc->sc_pba = PBA_12K;
		break;
	case WM_T_82574:
	case WM_T_82583:
		sc->sc_pba = PBA_20K;
		break;
	case WM_T_ICH8:
		sc->sc_pba = PBA_8K;
		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
		break;
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		sc->sc_pba = PBA_10K;
		break;
	default:
		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
		    PBA_40K : PBA_48K;
		break;
	}
	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);

	/* Prevent the PCI-E bus from sticking */
	if (sc->sc_flags & WM_F_PCIE) {
		int timeout = 800;

		/* Disable GIO master access, then wait for it to drain. */
		sc->sc_ctrl |= CTRL_GIO_M_DIS;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

		while (timeout--) {
			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
				break;
			delay(100);
		}
	}

	/* Set the completion timeout for interface */
	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
		wm_set_pcie_completion_timeout(sc);

	/* Clear interrupt */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_RCTL, 0);
	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
	sc->sc_rctl &= ~RCTL_EN;

	/* XXX set_tbi_sbp_82543() */

	delay(10*1000);

	/* Must acquire the MDIO ownership before MAC reset */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		i = 0;
		reg = CSR_READ(sc, WMREG_EXTCNFCTR)
		    | EXTCNFCTR_MDIO_SW_OWNERSHIP;
		/* Retry until the ownership bit sticks or we time out. */
		do {
			CSR_WRITE(sc, WMREG_EXTCNFCTR,
			    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
			reg = CSR_READ(sc, WMREG_EXTCNFCTR);
			if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
				break;
			reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
			delay(2*1000);
			i++;
		} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
		break;
	default:
		break;
	}

	/*
	 * 82541 Errata 29? & 82547 Errata 28?
	 * See also the description about PHY_RST bit in CTRL register
	 * in 8254x_GBe_SDM.pdf.
	 */
	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
		CSR_WRITE(sc, WMREG_CTRL,
		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
		delay(5000);
	}

	/* Issue the MAC reset, using the chip-family-specific method. */
	switch (sc->sc_type) {
	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		/*
		 * On some chipsets, a reset through a memory-mapped write
		 * cycle can cause the chip to reset before completing the
		 * write cycle. This causes major headache that can be
		 * avoided by issuing the reset via indirect register writes
		 * through I/O space.
		 *
		 * So, if we successfully mapped the I/O BAR at attach time,
		 * use that. Otherwise, try our luck with a memory-mapped
		 * reset.
		 */
		if (sc->sc_flags & WM_F_IOH_VALID)
			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
		else
			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
		break;
	case WM_T_82545_3:
	case WM_T_82546_3:
		/* Use the shadow control register on these chips. */
		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
		break;
	case WM_T_80003:
		/* Reset must be done under the SW/FW PHY semaphore. */
		mask = swfwphysem[sc->sc_funcid];
		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
		wm_get_swfw_semaphore(sc, mask);
		CSR_WRITE(sc, WMREG_CTRL, reg);
		wm_put_swfw_semaphore(sc, mask);
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
		/* Also reset the PHY, unless firmware blocks PHY resets. */
		if (wm_check_reset_block(sc) == 0) {
			if (sc->sc_type >= WM_T_PCH) {
				uint32_t status;

				status = CSR_READ(sc, WMREG_STATUS);
				CSR_WRITE(sc, WMREG_STATUS,
				    status & ~STATUS_PHYRA);
			}

			reg |= CTRL_PHY_RESET;
			phy_reset = 1;
		}
		wm_get_swfwhw_semaphore(sc);
		CSR_WRITE(sc, WMREG_CTRL, reg);
		delay(20*1000);
		wm_put_swfwhw_semaphore(sc);
		break;
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82546:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_82583:
	default:
		/* Everything else can safely use the documented method. */
		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
		break;
	}

	if (phy_reset != 0)
		wm_get_cfg_done(sc);

	/* reload EEPROM */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		delay(10);
		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(2000);
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		delay(5*1000);
		/* XXX Disable HW ARPs on ASF enabled adapters */
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		delay(20000);
		/* XXX Disable HW ARPs on ASF enabled adapters */
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
			delay(10);
			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
		/* check EECD_EE_AUTORD */
		wm_get_auto_rd_done(sc);
		/*
		 * Phy configuration from NVM just starts after EECD_AUTO_RD
		 * is set.
		 */
		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
		    || (sc->sc_type == WM_T_82583))
			delay(25*1000);
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
		/* check EECD_EE_AUTORD */
		wm_get_auto_rd_done(sc);
		break;
	case WM_T_ICH10:
	case WM_T_PCH:
		/* These chips signal completion through LAN_INIT_DONE. */
		wm_lan_init_done(sc);
		break;
	default:
		panic("%s: unknown type\n", __func__);
	}

	/* Check whether EEPROM is present or not */
	switch (sc->sc_type) {
	case WM_T_82575:
	case WM_T_82576:
#if 0 /* XXX */
	case WM_T_82580:
	case WM_T_82580ER:
#endif
	case WM_T_ICH8:
	case WM_T_ICH9:
		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
			/* Not found */
			sc->sc_flags |= WM_F_EEPROM_INVALID;
			if ((sc->sc_type == WM_T_82575)
			    || (sc->sc_type == WM_T_82576)
			    || (sc->sc_type == WM_T_82580)
			    || (sc->sc_type == WM_T_82580ER))
				wm_reset_init_script_82575(sc);
		}
		break;
	default:
		break;
	}

	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)) {
		/* clear global device reset status bit */
		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
	}

	/* Clear any pending interrupt events. */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	reg = CSR_READ(sc, WMREG_ICR);

	/* reload sc_ctrl */
	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);

	/* dummy read from WUC */
	if (sc->sc_type == WM_T_PCH)
		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
	/*
	 * For PCH, this write will make sure that any noise will be detected
	 * as a CRC error and be dropped rather than show up as a bad packet
	 * to the DMA engine
	 */
	if (sc->sc_type == WM_T_PCH)
		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);

	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
		CSR_WRITE(sc, WMREG_WUC, 0);

	/* XXX need special handling for 82580 */
}
3706
3707 /*
3708 * wm_init: [ifnet interface function]
3709 *
3710 * Initialize the interface. Must be called at splnet().
3711 */
static int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
	 * There is a small but measurable benefit to avoiding the adjusment
	 * of the descriptor so that the headers are aligned, for normal mtu,
	 * on such platforms. One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
	 */
#ifdef __NO_STRICT_ALIGNMENT
	sc->sc_align_tweak = 0;
#else
	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
		sc->sc_align_tweak = 0;
	else
		sc->sc_align_tweak = 2;
#endif /* __NO_STRICT_ALIGNMENT */

	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* update statistics before reset */
	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	/* Take over the hardware from manageability firmware if needed. */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/* Reset the PHY. */
	if (sc->sc_flags & WM_F_HAS_MII)
		wm_gmii_reset(sc);

	reg = CSR_READ(sc, WMREG_CTRL_EXT);
	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
	if (sc->sc_type == WM_T_PCH)
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC(sc);
	sc->sc_txnext = 0;

	/* Program the TX ring base/length; pre-82543 uses old offsets. */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
	} else {
		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
		CSR_WRITE(sc, WMREG_TDH, 0);
		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */

		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
			/*
			 * Don't write TDT before TCTL.EN is set.
			 * See the document.
			 */
			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
			    | TXDCTL_WTHRESH(0));
		else {
			CSR_WRITE(sc, WMREG_TDT, 0);
			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
		}
	}
	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
	CSR_WRITE(sc, WMREG_TQSA_HI, 0);

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = WM_TXQUEUELEN(sc);
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);

		/* The second RX ring is unused on these chips. */
		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
	} else {
		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
			CSR_WRITE(sc, WMREG_EITR(0), 450);
			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
				panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES);
			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
			    | RXDCTL_WTHRESH(1));
		} else {
			CSR_WRITE(sc, WMREG_RDH, 0);
			CSR_WRITE(sc, WMREG_RDT, 0);
			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD);	/* ITR/4 */
			CSR_WRITE(sc, WMREG_RADV, 375);		/* MUST be same */
		}
	}
	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = wm_add_rxbuf(sc, i)) != 0) {
				log(LOG_ERR, "%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(sc);
				goto out;
			}
		} else {
			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
				WM_INIT_RXDESC(sc, i);
			/*
			 * For 82575 and newer device, the RX descriptors
			 * must be initialized after the setting of RCTL.EN in
			 * wm_set_filter()
			 */
		}
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	WM_RXCHAIN_RESET(sc);

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
	}

	sc->sc_fcrtl = FCRTL_DFLT;
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
	} else {
		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
	}

	if (sc->sc_type == WM_T_80003)
		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
	else
		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);

	/* Deal with VLAN enables. */
	if (VLAN_ATTACHED(&sc->sc_ethercom))
		sc->sc_ctrl |= CTRL_VME;
	else
		sc->sc_ctrl &= ~CTRL_VME;

	/* Write the control register. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	if (sc->sc_flags & WM_F_HAS_MII) {
		int val;

		switch (sc->sc_type) {
		case WM_T_80003:
		case WM_T_ICH8:
		case WM_T_ICH9:
		case WM_T_ICH10:
		case WM_T_PCH:
			/*
			 * Set the mac to wait the maximum time between each
			 * iteration and increase the max iterations when
			 * polling the phy; this fixes erroneous timeouts at
			 * 10Mbps.
			 */
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
			    0xFFFF);
			val = wm_kmrn_readreg(sc,
			    KUMCTRLSTA_OFFSET_INB_PARAM);
			val |= 0x3F;
			wm_kmrn_writereg(sc,
			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
			break;
		default:
			break;
		}

		if (sc->sc_type == WM_T_80003) {
			val = CSR_READ(sc, WMREG_CTRL_EXT);
			val &= ~CTRL_EXT_LINK_MODE_MASK;
			CSR_WRITE(sc, WMREG_CTRL_EXT, val);

			/* Bypass RX and TX FIFO's */
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
		}
	}
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up checksum offload parameters.
	 */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
		reg |= RXCSUM_IPOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/* Reset TBI's RXCFG count */
	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;

	/*
	 * Set up the interrupt registers.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
	/* In TBI mode we also want the receive-config-symbol interrupt. */
	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
		sc->sc_icr |= ICR_RXCFG;
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
		reg = CSR_READ(sc, WMREG_KABGTXD);
		reg |= KABGTXD_BGSQLBIAS;
		CSR_WRITE(sc, WMREG_KABGTXD, reg);
	}

	/* Set up the inter-packet gap. */
	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

	if (sc->sc_type >= WM_T_82543) {
		/*
		 * Set up the interrupt throttling register (units of 256ns)
		 * Note that a footnote in Intel's documentation says this
		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
		 * or 10Mbit mode. Empirically, it appears to be the case
		 * that that is also true for the 1024ns units of the other
		 * interrupt-related timer registers -- so, really, we ought
		 * to divide this value by 4 when the link speed is low.
		 *
		 * XXX implement this division at link speed change!
		 */

		/*
		 * For N interrupts/sec, set this value to:
		 * 1000000000 / (N * 256). Note that we set the
		 * absolute and packet timer values to this value
		 * divided by 4 to get "simple timer" behavior.
		 */

		sc->sc_itr = 1500;		/* 2604 ints/sec */
		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
	}

	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it whe
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
	    | TCTL_CT(TX_COLLISION_THRESHOLD)
	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	if (sc->sc_type >= WM_T_82571)
		sc->sc_tctl |= TCTL_MULR;
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/*
		 * Write TDT after TCTL.EN is set.
		 * See the document.
		 */
		CSR_WRITE(sc, WMREG_TDT, 0);
	}

	if (sc->sc_type == WM_T_80003) {
		reg = CSR_READ(sc, WMREG_TCTL_EXT);
		reg &= ~TCTL_EXT_GCEX_MASK;
		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
	}

	/* Set the media. */
	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
		goto out;

	/* Configure for OS presence */
	wm_init_manageability(sc);

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter. Use multicast
	 * address offset type 0.
	 *
	 * Only the i82544 has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
	    | RCTL_MO(sc->sc_mchash_type);

	/* Enable long-packet reception when jumbo MTU is in effect. */
	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
	    && (ifp->if_mtu > ETHERMTU)) {
		sc->sc_rctl |= RCTL_LPE;
		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
	}

	/* Tell the chip the receive buffer size (derived from MCLBYTES). */
	if (MCLBYTES == 2048) {
		sc->sc_rctl |= RCTL_2k;
	} else {
		if (sc->sc_type >= WM_T_82543) {
			switch (MCLBYTES) {
			case 4096:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
				break;
			case 8192:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
				break;
			case 16384:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
				break;
			default:
				panic("wm_init: MCLBYTES %d unsupported",
				    MCLBYTES);
				break;
			}
		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
	}

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* On 575 and later set RDT only if RX enabled */
	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
		for (i = 0; i < WM_NRXDESC; i++)
			WM_INIT_RXDESC(sc, i);

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	sc->sc_if_flags = ifp->if_flags;
	if (error)
		log(LOG_ERR, "%s: interface not running\n",
		    device_xname(sc->sc_dev));
	return error;
}
4138
4139 /*
4140 * wm_rxdrain:
4141 *
4142 * Drain the receive queue.
4143 */
4144 static void
4145 wm_rxdrain(struct wm_softc *sc)
4146 {
4147 struct wm_rxsoft *rxs;
4148 int i;
4149
4150 for (i = 0; i < WM_NRXDESC; i++) {
4151 rxs = &sc->sc_rxsoft[i];
4152 if (rxs->rxs_mbuf != NULL) {
4153 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4154 m_freem(rxs->rxs_mbuf);
4155 rxs->rxs_mbuf = NULL;
4156 }
4157 }
4158 }
4159
/*
 * wm_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 *
 *	ifp:		interface to stop
 *	disable:	if non-zero, also free all queued receive buffers
 *			via wm_rxdrain()
 */
static void
wm_stop(struct ifnet *ifp, int disable)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_txsoft *txs;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_ch);

	/* Stop the 82547 Tx FIFO stall check timer. */
	if (sc->sc_type == WM_T_82547)
		callout_stop(&sc->sc_txfifo_ch);

	if (sc->sc_flags & WM_F_HAS_MII) {
		/* Down the MII. */
		mii_down(&sc->sc_mii);
	} else {
#if 0
		/* Should we clear PHY's status properly? */
		wm_reset(sc);
#endif
	}

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_TCTL, 0);
	CSR_WRITE(sc, WMREG_RCTL, 0);
	/*
	 * Keep the cached RCTL in sync; wm_add_rxbuf() consults
	 * sc_rctl & RCTL_EN on NEWQUEUE (82575+) parts to decide
	 * whether descriptors may be handed back to the hardware.
	 */
	sc->sc_rctl &= ~RCTL_EN;

	/*
	 * Clear the interrupt mask to ensure the device cannot assert its
	 * interrupt line.
	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
	 * any currently pending or shared interrupt.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = 0;

	/* Release any queued transmit buffers. */
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		wm_rxdrain(sc);

#if 0 /* notyet */
	if (sc->sc_type >= WM_T_82544)
		CSR_WRITE(sc, WMREG_WUC, 0);
#endif
}
4225
/*
 * wm_get_auto_rd_done:
 *
 *	On chip families that report NVM auto-read completion via
 *	EECD_EE_AUTORD, poll for up to 10ms (10 x 1ms) until the EEPROM
 *	has been reloaded.  On timeout an error is logged but execution
 *	continues; other chip types are a no-op.
 */
void
wm_get_auto_rd_done(struct wm_softc *sc)
{
	int i;

	/* wait for eeprom to reload */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
		for (i = 0; i < 10; i++) {
			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
				break;
			delay(1000);
		}
		if (i == 10) {
			log(LOG_ERR, "%s: auto read from eeprom failed to "
			    "complete\n", device_xname(sc->sc_dev));
		}
		break;
	default:
		/* Other chips have no auto-read-done indication. */
		break;
	}
}
4259
/*
 * wm_lan_init_done:
 *
 *	On ICH10/PCH parts, poll STATUS_LAN_INIT_DONE (for up to
 *	WM_ICH8_LAN_INIT_TIMEOUT * 100us) until LAN initialization has
 *	completed after a reset, then acknowledge by clearing the bit.
 *	Panics if called for any other chip type.
 */
void
wm_lan_init_done(struct wm_softc *sc)
{
	uint32_t reg = 0;
	int i;

	/* wait for eeprom to reload */
	switch (sc->sc_type) {
	case WM_T_ICH10:
	case WM_T_PCH:
		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
			reg = CSR_READ(sc, WMREG_STATUS);
			if ((reg & STATUS_LAN_INIT_DONE) != 0)
				break;
			delay(100);
		}
		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
			/* Timed out; log but fall through to the ack below. */
			log(LOG_ERR, "%s: %s: lan_init_done failed to "
			    "complete\n", device_xname(sc->sc_dev), __func__);
		}
		break;
	default:
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}

	/* Acknowledge the event by clearing the bit in STATUS. */
	reg &= ~STATUS_LAN_INIT_DONE;
	CSR_WRITE(sc, WMREG_STATUS, reg);
}
4290
/*
 * wm_get_cfg_done:
 *
 *	Wait until the hardware/firmware has finished loading the PHY
 *	configuration after a reset.  The wait mechanism depends on the
 *	chip family: nothing (82542), a fixed 10ms delay, or polling a
 *	per-function CFGDONE bit in EEMNGCTL.
 */
void
wm_get_cfg_done(struct wm_softc *sc)
{
	int mask;
	uint32_t reg;
	int i;

	/* wait for eeprom to reload */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
		/* null */
		break;
	case WM_T_82543:
	case WM_T_82544:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* generic */
		delay(10*1000);
		break;
	case WM_T_80003:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
		if (sc->sc_type == WM_T_82571) {
			/* Only 82571 shares port 0 */
			mask = EEMNGCTL_CFGDONE_0;
		} else
			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
		/* Poll for up to WM_PHY_CFG_TIMEOUT * 1ms. */
		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
				break;
			delay(1000);
		}
		if (i >= WM_PHY_CFG_TIMEOUT) {
			/* Timed out; debug-log only and continue. */
			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
				device_xname(sc->sc_dev), __func__));
		}
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		if (sc->sc_type >= WM_T_PCH) {
			/* Clear the PHY Reset Asserted indication first. */
			reg = CSR_READ(sc, WMREG_STATUS);
			if ((reg & STATUS_PHYRA) != 0)
				CSR_WRITE(sc, WMREG_STATUS,
				    reg & ~STATUS_PHYRA);
		}
		delay(10*1000);
		break;
	default:
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}
}
4361
4362 /*
4363 * wm_acquire_eeprom:
4364 *
4365 * Perform the EEPROM handshake required on some chips.
4366 */
4367 static int
4368 wm_acquire_eeprom(struct wm_softc *sc)
4369 {
4370 uint32_t reg;
4371 int x;
4372 int ret = 0;
4373
4374 /* always success */
4375 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4376 return 0;
4377
4378 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
4379 ret = wm_get_swfwhw_semaphore(sc);
4380 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
4381 /* this will also do wm_get_swsm_semaphore() if needed */
4382 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
4383 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4384 ret = wm_get_swsm_semaphore(sc);
4385 }
4386
4387 if (ret) {
4388 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4389 __func__);
4390 return 1;
4391 }
4392
4393 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4394 reg = CSR_READ(sc, WMREG_EECD);
4395
4396 /* Request EEPROM access. */
4397 reg |= EECD_EE_REQ;
4398 CSR_WRITE(sc, WMREG_EECD, reg);
4399
4400 /* ..and wait for it to be granted. */
4401 for (x = 0; x < 1000; x++) {
4402 reg = CSR_READ(sc, WMREG_EECD);
4403 if (reg & EECD_EE_GNT)
4404 break;
4405 delay(5);
4406 }
4407 if ((reg & EECD_EE_GNT) == 0) {
4408 aprint_error_dev(sc->sc_dev,
4409 "could not acquire EEPROM GNT\n");
4410 reg &= ~EECD_EE_REQ;
4411 CSR_WRITE(sc, WMREG_EECD, reg);
4412 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4413 wm_put_swfwhw_semaphore(sc);
4414 if (sc->sc_flags & WM_F_SWFW_SYNC)
4415 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4416 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4417 wm_put_swsm_semaphore(sc);
4418 return 1;
4419 }
4420 }
4421
4422 return 0;
4423 }
4424
4425 /*
4426 * wm_release_eeprom:
4427 *
4428 * Release the EEPROM mutex.
4429 */
4430 static void
4431 wm_release_eeprom(struct wm_softc *sc)
4432 {
4433 uint32_t reg;
4434
4435 /* always success */
4436 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4437 return;
4438
4439 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4440 reg = CSR_READ(sc, WMREG_EECD);
4441 reg &= ~EECD_EE_REQ;
4442 CSR_WRITE(sc, WMREG_EECD, reg);
4443 }
4444
4445 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4446 wm_put_swfwhw_semaphore(sc);
4447 if (sc->sc_flags & WM_F_SWFW_SYNC)
4448 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4449 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4450 wm_put_swsm_semaphore(sc);
4451 }
4452
4453 /*
4454 * wm_eeprom_sendbits:
4455 *
4456 * Send a series of bits to the EEPROM.
4457 */
4458 static void
4459 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
4460 {
4461 uint32_t reg;
4462 int x;
4463
4464 reg = CSR_READ(sc, WMREG_EECD);
4465
4466 for (x = nbits; x > 0; x--) {
4467 if (bits & (1U << (x - 1)))
4468 reg |= EECD_DI;
4469 else
4470 reg &= ~EECD_DI;
4471 CSR_WRITE(sc, WMREG_EECD, reg);
4472 delay(2);
4473 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4474 delay(2);
4475 CSR_WRITE(sc, WMREG_EECD, reg);
4476 delay(2);
4477 }
4478 }
4479
4480 /*
4481 * wm_eeprom_recvbits:
4482 *
4483 * Receive a series of bits from the EEPROM.
4484 */
4485 static void
4486 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
4487 {
4488 uint32_t reg, val;
4489 int x;
4490
4491 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
4492
4493 val = 0;
4494 for (x = nbits; x > 0; x--) {
4495 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4496 delay(2);
4497 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
4498 val |= (1U << (x - 1));
4499 CSR_WRITE(sc, WMREG_EECD, reg);
4500 delay(2);
4501 }
4502 *valp = val;
4503 }
4504
/*
 * wm_read_eeprom_uwire:
 *
 *	Read a word from the EEPROM using the MicroWire protocol.
 *
 *	word:		first EEPROM word offset to read
 *	wordcnt:	number of 16-bit words to read
 *	data:		caller-supplied buffer of at least wordcnt words
 *
 *	Always returns 0; the bit-bang protocol has no error report.
 */
static int
wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;

	for (i = 0; i < wordcnt; i++) {
		/* Clear SK and DI. */
		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* Set CHIP SELECT. */
		reg |= EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);

		/* Shift in the READ command. */
		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);

		/* Shift in address (width depends on the EEPROM part). */
		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);

		/* Shift out the data. */
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = val & 0xffff;

		/* Clear CHIP SELECT to end this word's transaction. */
		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}

	return 0;
}
4544
4545 /*
4546 * wm_spi_eeprom_ready:
4547 *
4548 * Wait for a SPI EEPROM to be ready for commands.
4549 */
4550 static int
4551 wm_spi_eeprom_ready(struct wm_softc *sc)
4552 {
4553 uint32_t val;
4554 int usec;
4555
4556 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
4557 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
4558 wm_eeprom_recvbits(sc, &val, 8);
4559 if ((val & SPI_SR_RDY) == 0)
4560 break;
4561 }
4562 if (usec >= SPI_MAX_RETRIES) {
4563 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
4564 return 1;
4565 }
4566 return 0;
4567 }
4568
/*
 * wm_read_eeprom_spi:
 *
 *	Read a word from the EEPROM using the SPI protocol.
 *
 *	word:		first EEPROM word offset to read
 *	wordcnt:	number of 16-bit words to read
 *	data:		caller-supplied buffer of at least wordcnt words
 *
 *	Returns 0 on success, 1 if the EEPROM never became ready.
 */
static int
wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;
	uint8_t opc;

	/* Clear SK and CS. */
	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	if (wm_spi_eeprom_ready(sc))
		return 1;

	/* Toggle CS to flush commands. */
	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
	delay(2);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	/*
	 * Small parts with 8 address bits encode the 9th address bit
	 * (for words >= 128, i.e. byte address >= 256) in the opcode.
	 */
	opc = SPI_OPC_READ;
	if (sc->sc_ee_addrbits == 8 && word >= 128)
		opc |= SPI_OPC_A8;

	wm_eeprom_sendbits(sc, opc, 8);
	/* SPI addresses are in bytes; word offset << 1. */
	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);

	for (i = 0; i < wordcnt; i++) {
		/* Data arrives big-endian; swap to host word order. */
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
	}

	/* Raise CS and clear SK to end the transaction. */
	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	return 0;
}
4614
4615 #define EEPROM_CHECKSUM 0xBABA
4616 #define EEPROM_SIZE 0x0040
4617
4618 /*
4619 * wm_validate_eeprom_checksum
4620 *
4621 * The checksum is defined as the sum of the first 64 (16 bit) words.
4622 */
4623 static int
4624 wm_validate_eeprom_checksum(struct wm_softc *sc)
4625 {
4626 uint16_t checksum;
4627 uint16_t eeprom_data;
4628 int i;
4629
4630 checksum = 0;
4631
4632 for (i = 0; i < EEPROM_SIZE; i++) {
4633 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
4634 return 1;
4635 checksum += eeprom_data;
4636 }
4637
4638 if (checksum != (uint16_t) EEPROM_CHECKSUM)
4639 return 1;
4640
4641 return 0;
4642 }
4643
4644 /*
4645 * wm_read_eeprom:
4646 *
4647 * Read data from the serial EEPROM.
4648 */
4649 static int
4650 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4651 {
4652 int rv;
4653
4654 if (sc->sc_flags & WM_F_EEPROM_INVALID)
4655 return 1;
4656
4657 if (wm_acquire_eeprom(sc))
4658 return 1;
4659
4660 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4661 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4662 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
4663 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
4664 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
4665 else if (sc->sc_flags & WM_F_EEPROM_SPI)
4666 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
4667 else
4668 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
4669
4670 wm_release_eeprom(sc);
4671 return rv;
4672 }
4673
4674 static int
4675 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
4676 uint16_t *data)
4677 {
4678 int i, eerd = 0;
4679 int error = 0;
4680
4681 for (i = 0; i < wordcnt; i++) {
4682 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
4683
4684 CSR_WRITE(sc, WMREG_EERD, eerd);
4685 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
4686 if (error != 0)
4687 break;
4688
4689 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
4690 }
4691
4692 return error;
4693 }
4694
4695 static int
4696 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
4697 {
4698 uint32_t attempts = 100000;
4699 uint32_t i, reg = 0;
4700 int32_t done = -1;
4701
4702 for (i = 0; i < attempts; i++) {
4703 reg = CSR_READ(sc, rw);
4704
4705 if (reg & EERD_DONE) {
4706 done = 0;
4707 break;
4708 }
4709 delay(5);
4710 }
4711
4712 return done;
4713 }
4714
/*
 * wm_read_mac_addr:
 *
 *	Read the Ethernet address for this function from the EEPROM
 *	into enaddr[].  On multi-port/multi-function parts the address
 *	may live at a per-LAN offset, or be derived from the LAN0
 *	address by toggling its low bit.  Returns 0 on success, -1 on
 *	any EEPROM read failure or unsupported function id.
 */
static int
wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
{
	uint16_t myea[ETHER_ADDR_LEN / 2];
	uint16_t offset = EEPROM_OFF_MACADDR;
	int do_invert = 0;

	if (sc->sc_funcid != 0)
		switch (sc->sc_type) {
		case WM_T_82580:
		case WM_T_82580ER:
			/* 82580-class: fixed per-LAN address offsets. */
			switch (sc->sc_funcid) {
			case 1:
				offset = EEPROM_OFF_LAN1;
				break;
			case 2:
				offset = EEPROM_OFF_LAN2;
				break;
			case 3:
				offset = EEPROM_OFF_LAN3;
				break;
			default:
				goto bad;
				/* NOTREACHED */
				break;
			}
			break;
		case WM_T_82571:
		case WM_T_82575:
		case WM_T_82576:
		case WM_T_80003:
			/*
			 * These parts store a pointer to an alternate
			 * MAC address block in the EEPROM.
			 */
			if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1,
				&offset) != 0) {
				goto bad;
			}

			/* no pointer */
			if (offset == 0xffff) {
				/* reset the offset to LAN0 */
				offset = EEPROM_OFF_MACADDR;
				do_invert = 1;
				goto do_read;
			}

			/* Index into the alternate block by function id. */
			switch (sc->sc_funcid) {
			case 1:
				offset += EEPROM_OFF_MACADDR_LAN1;
				break;
			case 2:
				offset += EEPROM_OFF_MACADDR_LAN2;
				break;
			case 3:
				offset += EEPROM_OFF_MACADDR_LAN3;
				break;
			default:
				goto bad;
				/* NOTREACHED */
				break;
			}
			break;
		default:
			/* Derive this port's address from LAN0's. */
			do_invert = 1;
			break;
		}

 do_read:
	if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
		myea) != 0) {
		goto bad;
	}

	/* Unpack three little-endian EEPROM words into six bytes. */
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of some dual port cards.
	 */
	if (do_invert != 0)
		enaddr[5] ^= 1;

	return 0;

 bad:
	aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");

	return -1;
}
4807
/*
 * wm_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 *
 *	Allocates a cluster mbuf, DMA-maps it into the slot's map and,
 *	when permitted, hands the descriptor back to the hardware.
 *	Returns 0 on success, ENOBUFS if mbuf/cluster allocation fails.
 */
static int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/* Unload any previous mapping before reusing the DMA map. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	/* Offer the full cluster to the hardware. */
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		/* XXX XXX XXX */
		aprint_error_dev(sc->sc_dev,
		    "unable to load rx DMA map %d, error = %d\n",
		    idx, error);
		panic("wm_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * On NEWQUEUE (82575+) parts only give the descriptor to the
	 * hardware when the receiver is actually enabled; older parts
	 * always initialize the descriptor here.
	 */
	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		if ((sc->sc_rctl & RCTL_EN) != 0)
			WM_INIT_RXDESC(sc, idx);
	} else
		WM_INIT_RXDESC(sc, idx);

	return 0;
}
4857
4858 /*
4859 * wm_set_ral:
4860 *
4861 * Set an entery in the receive address list.
4862 */
4863 static void
4864 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4865 {
4866 uint32_t ral_lo, ral_hi;
4867
4868 if (enaddr != NULL) {
4869 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
4870 (enaddr[3] << 24);
4871 ral_hi = enaddr[4] | (enaddr[5] << 8);
4872 ral_hi |= RAL_AV;
4873 } else {
4874 ral_lo = 0;
4875 ral_hi = 0;
4876 }
4877
4878 if (sc->sc_type >= WM_T_82544) {
4879 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
4880 ral_lo);
4881 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
4882 ral_hi);
4883 } else {
4884 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
4885 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
4886 }
4887 }
4888
4889 /*
4890 * wm_mchash:
4891 *
4892 * Compute the hash of the multicast address for the 4096-bit
4893 * multicast filter.
4894 */
4895 static uint32_t
4896 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4897 {
4898 static const int lo_shift[4] = { 4, 3, 2, 0 };
4899 static const int hi_shift[4] = { 4, 5, 6, 8 };
4900 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4901 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4902 uint32_t hash;
4903
4904 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4905 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
4906 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4907 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4908 return (hash & 0x3ff);
4909 }
4910 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4911 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4912
4913 return (hash & 0xfff);
4914 }
4915
/*
 * wm_set_filter:
 *
 *	Set up the receive filter: broadcast/promiscuous bits in RCTL,
 *	the receive address list (station address in slot 0), and the
 *	multicast hash table.  Falls back to ALLMULTI on promiscuous
 *	mode or when a multicast address range is configured.
 */
static void
wm_set_filter(struct wm_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_addr_t mta_reg;
	uint32_t hash, reg, bit;
	int i, size;

	/* The MTA moved to the "Cordova" location on 82544 and later. */
	if (sc->sc_type >= WM_T_82544)
		mta_reg = WMREG_CORDOVA_MTA;
	else
		mta_reg = WMREG_MTA;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
		size = WM_ICH8_RAL_TABSIZE;
	else
		size = WM_RAL_TABSIZE;
	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
	for (i = 1; i < size; i++)
		wm_set_ral(sc, NULL, i);

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
		size = WM_ICH8_MC_TABSIZE;
	else
		size = WM_MC_TABSIZE;
	/* Clear out the multicast table. */
	for (i = 0; i < size; i++)
		CSR_WRITE(sc, mta_reg + (i << 2), 0);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range. (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = wm_mchash(sc, enm->enm_addrlo);

		/* Split the hash into MTA word index and bit number. */
		reg = (hash >> 5);
		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
			reg &= 0x1f;
		else
			reg &= 0x7f;
		bit = hash & 0x1f;

		/* Read-modify-write the MTA word (reuses 'hash'). */
		hash = CSR_READ(sc, mta_reg + (reg << 2));
		hash |= 1U << bit;

		/*
		 * XXX Hardware bug??
		 * Looks like an 82544 MTA-write erratum workaround:
		 * re-write the previous MTA word (saved in 'bit')
		 * after updating certain word indices.
		 */
		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
}
5016
/*
 * wm_tbi_mediainit:
 *
 *	Initialize media for use on 1000BASE-X devices: set the TX IPG,
 *	configure the SWD pins used for the link LED / loss-of-signal,
 *	and register the supported ifmedia entries (SX, SX-FDX, auto).
 */
static void
wm_tbi_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	const char *sep = "";

	/* Pre-82543 parts use a different default transmit IPG. */
	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	/* Ticks (seconds) before autonegotiation is retried. */
	sc->sc_tbi_anegticks = 5;

	/* Initialize our media structures */
	sc->sc_mii.mii_ifp = ifp;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

/* Announce and register one media type; 'sep' makes a comma list. */
#define	ADD(ss, mm, dd)							\
do {									\
	aprint_normal("%s%s", sep, ss);					\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	aprint_normal_dev(sc->sc_dev, "");
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	aprint_normal("\n");

#undef ADD

	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
}
5070
/*
 * wm_tbi_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status on a 1000BASE-X device:
 *	link, duplex, and flow-control state, read from STATUS/CTRL.
 */
static void
wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct wm_softc *sc = ifp->if_softc;
	uint32_t ctrl, status;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report media NONE and stop. */
	status = CSR_READ(sc, WMREG_STATUS);
	if ((status & STATUS_LU) == 0) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= IFM_1000_SX;
	/* NB: duplex is sampled from a fresh STATUS read. */
	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
		ifmr->ifm_active |= IFM_FDX;
	/* Report flow-control state from CTRL's RFCE/TFCE bits. */
	ctrl = CSR_READ(sc, WMREG_CTRL);
	if (ctrl & CTRL_RFCE)
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
	if (ctrl & CTRL_TFCE)
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
}
5101
/*
 * wm_tbi_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-X device.
 *	Programs TXCW for autonegotiation or forced full-duplex, then
 *	waits for link-up and updates collision distance / flow control
 *	accordingly.  Always returns 0.
 */
static int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	/* Advertise symmetric/asymmetric pause for auto or explicit FLOW. */
	sc->sc_txcw = 0;
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
		sc->sc_txcw |= TXCW_ANE;
	} else {
		/*
		 * If autonegotiation is turned off, force link up and turn on
		 * full duplex
		 */
		sc->sc_txcw &= ~TXCW_ANE;
		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(1000);
	}

	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
		    device_xname(sc->sc_dev),sc->sc_txcw));
	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	/* Sample the signal-detect pin. */
	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));

	/*
	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
	 * optics detect a signal, 0 if they don't.
	 */
	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
		/* Have signal; wait for the link to come up. */

		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/*
			 * Reset the link, and let autonegotiation do its thing
			 */
			sc->sc_ctrl |= CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			delay(1000);
			sc->sc_ctrl &= ~CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			delay(1000);
		}

		/* Poll for link-up, 10ms per try. */
		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
			    device_xname(sc->sc_dev),i));

		status = CSR_READ(sc, WMREG_STATUS);
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
			device_xname(sc->sc_dev),status, STATUS_LU));
		if (status & STATUS_LU) {
			/* Link is up. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    device_xname(sc->sc_dev),
			    (status & STATUS_FD) ? "FDX" : "HDX"));

			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */
			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			/* Set collision distance by resolved duplex. */
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			/* Pre-82543 parts use the old FCRTL location. */
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
				      WMREG_OLD_FCRTL : WMREG_FCRTL,
				      sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			if (i == WM_LINKUP_TIMEOUT)
				wm_check_for_link(sc);
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    device_xname(sc->sc_dev)));
		sc->sc_tbi_linkup = 0;
	}

	wm_tbi_set_linkled(sc);

	return 0;
}
5219
5220 /*
5221 * wm_tbi_set_linkled:
5222 *
5223 * Update the link LED on 1000BASE-X devices.
5224 */
5225 static void
5226 wm_tbi_set_linkled(struct wm_softc *sc)
5227 {
5228
5229 if (sc->sc_tbi_linkup)
5230 sc->sc_ctrl |= CTRL_SWDPIN(0);
5231 else
5232 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
5233
5234 /* 82540 or newer devices are active low */
5235 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
5236
5237 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5238 }
5239
/*
 * wm_tbi_check_link:
 *
 *	Check the link on 1000BASE-X devices (called from the periodic
 *	tick).  Tracks link state, reinitializes the interface on an
 *	RXCFG storm, and re-kicks autonegotiation when the retry timer
 *	expires with the link still down.
 */
static void
wm_tbi_check_link(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw, ctrl, status;

	status = CSR_READ(sc, WMREG_STATUS);

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);

	/* set link status */
	if ((status & STATUS_LU) == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
		sc->sc_tbi_linkup = 0;
	} else if (sc->sc_tbi_linkup == 0) {
		/* Link just came up. */
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		sc->sc_tbi_linkup = 1;
	}

	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
	    && ((status & STATUS_LU) == 0)) {
		sc->sc_tbi_linkup = 0;
		/* >100 RXCFG ordered sets since the last tick: reinit. */
		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
			/* RXCFG storm! */
			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
			wm_init(ifp);
			wm_start(ifp);
		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/* If the timer expired, retry autonegotiation */
			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
				sc->sc_tbi_ticks = 0;
				/*
				 * Reset the link, and let autonegotiation do
				 * its thing
				 */
				sc->sc_ctrl |= CTRL_LRST;
				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
				delay(1000);
				sc->sc_ctrl &= ~CTRL_LRST;
				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
				delay(1000);
				/* Toggle ANE to restart negotiation. */
				CSR_WRITE(sc, WMREG_TXCW,
				    sc->sc_txcw & ~TXCW_ANE);
				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
			}
		}
	}

	wm_tbi_set_linkled(sc);
}
5302
5303 /*
5304 * wm_gmii_reset:
5305 *
5306 * Reset the PHY.
5307 */
static void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;
	int rv;

	/* get phy semaphore */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* XXX should get sw semaphore, too */
		rv = wm_get_swsm_semaphore(sc);
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_80003:
		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		rv = wm_get_swfwhw_semaphore(sc);
		break;
	default:
		/* nothing to do*/
		rv = 0;
		break;
	}
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}

	/* Reset the PHY; the exact method depends on the chip generation. */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
		/* null */
		break;
	case WM_T_82543:
		/*
		 * With 82543, we need to force speed and duplex on the MAC
		 * equal to what the PHY speed and duplex configuration is.
		 * In addition, we need to perform a hardware reset on the PHY
		 * to take it out of reset.
		 */
		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

		/* The PHY reset pin is active-low. */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);

		/* Assert reset (pin low), wait, then deassert (pin high). */
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10*1000);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(150);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
		delay(20*1000);	/* XXX extra delay to get PHY ID? */
		break;
	case WM_T_82544:	/* reset 10000us */
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
	case WM_T_82571:	/* reset 100us */
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_82583:
	case WM_T_80003:
		/* generic reset */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay((sc->sc_type >= WM_T_82571) ? 100 : 10*1000);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(150);

		if ((sc->sc_type == WM_T_82541)
		    || (sc->sc_type == WM_T_82541_2)
		    || (sc->sc_type == WM_T_82547)
		    || (sc->sc_type == WM_T_82547_2)) {
			/* workaround for igp are done in igp_reset() */
			/* XXX add code to set LED after phy reset */
		}
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		/* generic reset */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(100);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(150);
		break;
	default:
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}

	/* release PHY semaphore */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* XXX should put sw semaphore, too */
		wm_put_swsm_semaphore(sc);
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_80003:
		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		wm_put_swfwhw_semaphore(sc);
		break;
	default:
		/* nothing to do*/
		/* NOTE(review): dead store; rv is not read after this point */
		rv = 0;
		break;
	}

	/* get_cfg_done */
	wm_get_cfg_done(sc);

	/* extra setup */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541_2:
	case WM_T_82547_2:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_82583:
	case WM_T_80003:
		/* null */
		break;
	case WM_T_82541:
	case WM_T_82547:
		/* XXX Configure actively LED after PHY reset */
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		/* Allow time for h/w to get to a quiescent state afer reset */
		delay(10*1000);

		if (sc->sc_type == WM_T_PCH) {
			wm_hv_phy_workaround_ich8lan(sc);

			/*
			 * dummy read to clear the phy wakeup bit after lcd
			 * reset
			 */
			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
		}

		/*
		 * XXX Configure the LCD with th extended configuration region
		 * in NVM
		 */

		/* Configure the LCD with the OEM bits in NVM */
		if (sc->sc_type == WM_T_PCH) {
			/*
			 * Disable LPLU.
			 * XXX It seems that 82567 has LPLU, too.
			 */
			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
			reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
			reg |= HV_OEM_BITS_ANEGNOW;
			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
		}
		break;
	default:
		panic("%s: unknown type\n", __func__);
		break;
	}
}
5529
5530 /*
5531 * wm_gmii_mediainit:
5532 *
5533 * Initialize media for use on 1000BASE-T devices.
5534 */
static void
wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* We have MII. */
	sc->sc_flags |= WM_F_HAS_MII;

	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
	else
		sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 * XXXbouyer - I'm not sure this is right for the 80003,
	 * the em driver only sets CTRL_SLU here - but it seems to work.
	 */
	sc->sc_ctrl |= CTRL_SLU;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	/*
	 * Pick the MII register access routines.  PCH (82577/82578) and
	 * 82567 (BM) PHYs need their own accessors; everything else is
	 * chosen by MAC generation.
	 */
	switch (prodid) {
	case PCI_PRODUCT_INTEL_PCH_M_LM:
	case PCI_PRODUCT_INTEL_PCH_M_LC:
		/* 82577 */
		sc->sc_phytype = WMPHY_82577;
		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
		break;
	case PCI_PRODUCT_INTEL_PCH_D_DM:
	case PCI_PRODUCT_INTEL_PCH_D_DC:
		/* 82578 */
		sc->sc_phytype = WMPHY_82578;
		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
		break;
	case PCI_PRODUCT_INTEL_82801I_BM:
	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
		/* 82567 */
		sc->sc_phytype = WMPHY_BM;
		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
		break;
	default:
		if ((sc->sc_flags & WM_F_SGMII) != 0) {
			sc->sc_mii.mii_readreg = wm_sgmii_readreg;
			sc->sc_mii.mii_writereg = wm_sgmii_writereg;
		} else if (sc->sc_type >= WM_T_80003) {
			sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
			sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
		} else if (sc->sc_type >= WM_T_82544) {
			sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
			sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
		} else {
			sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
			sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
		}
		break;
	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	wm_gmii_reset(sc);

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)) {
		if ((sc->sc_flags & WM_F_SGMII) == 0) {
			/* Attach only one port */
			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
			    MII_OFFSET_ANY, MIIF_DOPAUSE);
		} else {
			int i;
			uint32_t ctrl_ext;

			/* Power on sgmii phy if it is disabled */
			ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
			CSR_WRITE(sc, WMREG_CTRL_EXT,
			    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
			CSR_WRITE_FLUSH(sc);
			delay(300*1000); /* XXX too long */

			/* from 1 to 8 */
			for (i = 1; i < 8; i++)
				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
				    i, MII_OFFSET_ANY, MIIF_DOPAUSE);

			/* restore previous sfp cage power state */
			CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
		}
	} else {
		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
	}

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* if failed, retry with *_bm_* */
		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;

		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
	}
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found at all: mark media as "none". */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
		sc->sc_phytype = WMPHY_NONE;
	} else {
		/* Check PHY type */
		uint32_t model;
		struct mii_softc *child;

		child = LIST_FIRST(&sc->sc_mii.mii_phys);
		if (device_is_a(child->mii_dev, "igphy")) {
			struct igphy_softc *isc = (struct igphy_softc *)child;

			model = isc->sc_mii.mii_mpd_model;
			if (model == MII_MODEL_yyINTEL_I82566)
				sc->sc_phytype = WMPHY_IGP_3;
		}

		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
	}
}
5669
5670 /*
5671 * wm_gmii_mediastatus: [ifmedia interface function]
5672 *
5673 * Get the current interface media status on a 1000BASE-T device.
5674 */
5675 static void
5676 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5677 {
5678 struct wm_softc *sc = ifp->if_softc;
5679
5680 ether_mediastatus(ifp, ifmr);
5681 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
5682 | sc->sc_flowflags;
5683 }
5684
5685 /*
5686 * wm_gmii_mediachange: [ifmedia interface function]
5687 *
5688 * Set hardware to newly-selected media on a 1000BASE-T device.
5689 */
static int
wm_gmii_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	int rc;

	/* Nothing to do unless the interface is up. */
	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
	sc->sc_ctrl |= CTRL_SLU;
	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
	    || (sc->sc_type > WM_T_82543)) {
		/* Let the MAC follow the PHY's resolved speed/duplex. */
		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
	} else {
		/* 82543 with fixed media: force speed/duplex on the MAC. */
		sc->sc_ctrl &= ~CTRL_ASDE;
		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
		if (ife->ifm_media & IFM_FDX)
			sc->sc_ctrl |= CTRL_FD;
		switch (IFM_SUBTYPE(ife->ifm_media)) {
		case IFM_10_T:
			sc->sc_ctrl |= CTRL_SPEED_10;
			break;
		case IFM_100_TX:
			sc->sc_ctrl |= CTRL_SPEED_100;
			break;
		case IFM_1000_T:
			sc->sc_ctrl |= CTRL_SPEED_1000;
			break;
		default:
			panic("wm_gmii_mediachange: bad media 0x%x",
			    ife->ifm_media);
		}
	}
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	/* On <= 82543 the PHY must be reset for the new setting to take. */
	if (sc->sc_type <= WM_T_82543)
		wm_gmii_reset(sc);

	/* ENXIO from the PHY layer (no PHY) is not an error for us. */
	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
		return 0;
	return rc;
}
5733
5734 #define MDI_IO CTRL_SWDPIN(2)
5735 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
5736 #define MDI_CLK CTRL_SWDPIN(3)
5737
5738 static void
5739 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
5740 {
5741 uint32_t i, v;
5742
5743 v = CSR_READ(sc, WMREG_CTRL);
5744 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5745 v |= MDI_DIR | CTRL_SWDPIO(3);
5746
5747 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
5748 if (data & i)
5749 v |= MDI_IO;
5750 else
5751 v &= ~MDI_IO;
5752 CSR_WRITE(sc, WMREG_CTRL, v);
5753 delay(10);
5754 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5755 delay(10);
5756 CSR_WRITE(sc, WMREG_CTRL, v);
5757 delay(10);
5758 }
5759 }
5760
5761 static uint32_t
5762 i82543_mii_recvbits(struct wm_softc *sc)
5763 {
5764 uint32_t v, i, data = 0;
5765
5766 v = CSR_READ(sc, WMREG_CTRL);
5767 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5768 v |= CTRL_SWDPIO(3);
5769
5770 CSR_WRITE(sc, WMREG_CTRL, v);
5771 delay(10);
5772 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5773 delay(10);
5774 CSR_WRITE(sc, WMREG_CTRL, v);
5775 delay(10);
5776
5777 for (i = 0; i < 16; i++) {
5778 data <<= 1;
5779 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5780 delay(10);
5781 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
5782 data |= 1;
5783 CSR_WRITE(sc, WMREG_CTRL, v);
5784 delay(10);
5785 }
5786
5787 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5788 delay(10);
5789 CSR_WRITE(sc, WMREG_CTRL, v);
5790 delay(10);
5791
5792 return data;
5793 }
5794
5795 #undef MDI_IO
5796 #undef MDI_DIR
5797 #undef MDI_CLK
5798
5799 /*
5800 * wm_gmii_i82543_readreg: [mii interface function]
5801 *
5802 * Read a PHY register on the GMII (i82543 version).
5803 */
5804 static int
5805 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
5806 {
5807 struct wm_softc *sc = device_private(self);
5808 int rv;
5809
5810 i82543_mii_sendbits(sc, 0xffffffffU, 32);
5811 i82543_mii_sendbits(sc, reg | (phy << 5) |
5812 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
5813 rv = i82543_mii_recvbits(sc) & 0xffff;
5814
5815 DPRINTF(WM_DEBUG_GMII,
5816 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
5817 device_xname(sc->sc_dev), phy, reg, rv));
5818
5819 return rv;
5820 }
5821
5822 /*
5823 * wm_gmii_i82543_writereg: [mii interface function]
5824 *
5825 * Write a PHY register on the GMII (i82543 version).
5826 */
5827 static void
5828 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
5829 {
5830 struct wm_softc *sc = device_private(self);
5831
5832 i82543_mii_sendbits(sc, 0xffffffffU, 32);
5833 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
5834 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
5835 (MII_COMMAND_START << 30), 32);
5836 }
5837
5838 /*
5839 * wm_gmii_i82544_readreg: [mii interface function]
5840 *
5841 * Read a PHY register on the GMII.
5842 */
5843 static int
5844 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
5845 {
5846 struct wm_softc *sc = device_private(self);
5847 uint32_t mdic = 0;
5848 int i, rv;
5849
5850 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
5851 MDIC_REGADD(reg));
5852
5853 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
5854 mdic = CSR_READ(sc, WMREG_MDIC);
5855 if (mdic & MDIC_READY)
5856 break;
5857 delay(50);
5858 }
5859
5860 if ((mdic & MDIC_READY) == 0) {
5861 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
5862 device_xname(sc->sc_dev), phy, reg);
5863 rv = 0;
5864 } else if (mdic & MDIC_E) {
5865 #if 0 /* This is normal if no PHY is present. */
5866 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
5867 device_xname(sc->sc_dev), phy, reg);
5868 #endif
5869 rv = 0;
5870 } else {
5871 rv = MDIC_DATA(mdic);
5872 if (rv == 0xffff)
5873 rv = 0;
5874 }
5875
5876 return rv;
5877 }
5878
5879 /*
5880 * wm_gmii_i82544_writereg: [mii interface function]
5881 *
5882 * Write a PHY register on the GMII.
5883 */
5884 static void
5885 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
5886 {
5887 struct wm_softc *sc = device_private(self);
5888 uint32_t mdic = 0;
5889 int i;
5890
5891 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
5892 MDIC_REGADD(reg) | MDIC_DATA(val));
5893
5894 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
5895 mdic = CSR_READ(sc, WMREG_MDIC);
5896 if (mdic & MDIC_READY)
5897 break;
5898 delay(50);
5899 }
5900
5901 if ((mdic & MDIC_READY) == 0)
5902 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
5903 device_xname(sc->sc_dev), phy, reg);
5904 else if (mdic & MDIC_E)
5905 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
5906 device_xname(sc->sc_dev), phy, reg);
5907 }
5908
5909 /*
5910 * wm_gmii_i80003_readreg: [mii interface function]
5911 *
5912 * Read a PHY register on the kumeran
5913 * This could be handled by the PHY layer if we didn't have to lock the
5914 * ressource ...
5915 */
5916 static int
5917 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
5918 {
5919 struct wm_softc *sc = device_private(self);
5920 int sem;
5921 int rv;
5922
5923 if (phy != 1) /* only one PHY on kumeran bus */
5924 return 0;
5925
5926 sem = swfwphysem[sc->sc_funcid];
5927 if (wm_get_swfw_semaphore(sc, sem)) {
5928 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5929 __func__);
5930 return 0;
5931 }
5932
5933 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5934 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5935 reg >> GG82563_PAGE_SHIFT);
5936 } else {
5937 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5938 reg >> GG82563_PAGE_SHIFT);
5939 }
5940 /* Wait more 200us for a bug of the ready bit in the MDIC register */
5941 delay(200);
5942 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5943 delay(200);
5944
5945 wm_put_swfw_semaphore(sc, sem);
5946 return rv;
5947 }
5948
5949 /*
5950 * wm_gmii_i80003_writereg: [mii interface function]
5951 *
5952 * Write a PHY register on the kumeran.
5953 * This could be handled by the PHY layer if we didn't have to lock the
5954 * ressource ...
5955 */
5956 static void
5957 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
5958 {
5959 struct wm_softc *sc = device_private(self);
5960 int sem;
5961
5962 if (phy != 1) /* only one PHY on kumeran bus */
5963 return;
5964
5965 sem = swfwphysem[sc->sc_funcid];
5966 if (wm_get_swfw_semaphore(sc, sem)) {
5967 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5968 __func__);
5969 return;
5970 }
5971
5972 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5973 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5974 reg >> GG82563_PAGE_SHIFT);
5975 } else {
5976 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5977 reg >> GG82563_PAGE_SHIFT);
5978 }
5979 /* Wait more 200us for a bug of the ready bit in the MDIC register */
5980 delay(200);
5981 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5982 delay(200);
5983
5984 wm_put_swfw_semaphore(sc, sem);
5985 }
5986
5987 /*
5988 * wm_gmii_bm_readreg: [mii interface function]
5989 *
5990 * Read a PHY register on the kumeran
5991 * This could be handled by the PHY layer if we didn't have to lock the
5992 * ressource ...
5993 */
5994 static int
5995 wm_gmii_bm_readreg(device_t self, int phy, int reg)
5996 {
5997 struct wm_softc *sc = device_private(self);
5998 int sem;
5999 int rv;
6000
6001 sem = swfwphysem[sc->sc_funcid];
6002 if (wm_get_swfw_semaphore(sc, sem)) {
6003 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6004 __func__);
6005 return 0;
6006 }
6007
6008 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6009 if (phy == 1)
6010 wm_gmii_i82544_writereg(self, phy, 0x1f,
6011 reg);
6012 else
6013 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6014 reg >> GG82563_PAGE_SHIFT);
6015
6016 }
6017
6018 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6019 wm_put_swfw_semaphore(sc, sem);
6020 return rv;
6021 }
6022
6023 /*
6024 * wm_gmii_bm_writereg: [mii interface function]
6025 *
6026 * Write a PHY register on the kumeran.
6027 * This could be handled by the PHY layer if we didn't have to lock the
6028 * ressource ...
6029 */
6030 static void
6031 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6032 {
6033 struct wm_softc *sc = device_private(self);
6034 int sem;
6035
6036 sem = swfwphysem[sc->sc_funcid];
6037 if (wm_get_swfw_semaphore(sc, sem)) {
6038 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6039 __func__);
6040 return;
6041 }
6042
6043 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6044 if (phy == 1)
6045 wm_gmii_i82544_writereg(self, phy, 0x1f,
6046 reg);
6047 else
6048 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6049 reg >> GG82563_PAGE_SHIFT);
6050
6051 }
6052
6053 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6054 wm_put_swfw_semaphore(sc, sem);
6055 }
6056
/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write (rd == 0) a BM PHY wakeup register.
 *	The wakeup registers live on page 800 (BM_WUC_PAGE); access has
 *	to be bracketed by enabling the wakeup-register window through
 *	page 769 (BM_WUC_ENABLE_PAGE) and restoring it afterwards.
 */
static void
wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
{
	struct wm_softc *sc = device_private(self);
	uint16_t regnum = BM_PHY_REG_NUM(offset);
	uint16_t wuce;

	/* XXX Gig must be disabled for MDIO accesses to page 800 */
	if (sc->sc_type == WM_T_PCH) {
		/* XXX e1000 driver do nothing... why? */
	}

	/* Set page 769 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);

	/* Save the current enable bits so they can be restored below. */
	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);

	wuce &= ~BM_WUC_HOST_WU_BIT;
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
	    wuce | BM_WUC_ENABLE_BIT);

	/* Select page 800 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);

	/* Write page 800 */
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);

	/* Perform the actual wakeup-register access. */
	if (rd)
		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
	else
		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);

	/* Set page 769 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);

	/* Restore the saved wakeup-enable bits. */
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
}
6097
6098 /*
6099 * wm_gmii_hv_readreg: [mii interface function]
6100 *
6101 * Read a PHY register on the kumeran
6102 * This could be handled by the PHY layer if we didn't have to lock the
6103 * ressource ...
6104 */
6105 static int
6106 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6107 {
6108 struct wm_softc *sc = device_private(self);
6109 uint16_t page = BM_PHY_REG_PAGE(reg);
6110 uint16_t regnum = BM_PHY_REG_NUM(reg);
6111 uint16_t val;
6112 int rv;
6113
6114 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6115 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6116 __func__);
6117 return 0;
6118 }
6119
6120 /* XXX Workaround failure in MDIO access while cable is disconnected */
6121 if (sc->sc_phytype == WMPHY_82577) {
6122 /* XXX must write */
6123 }
6124
6125 /* Page 800 works differently than the rest so it has its own func */
6126 if (page == BM_WUC_PAGE) {
6127 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
6128 return val;
6129 }
6130
6131 /*
6132 * Lower than page 768 works differently than the rest so it has its
6133 * own func
6134 */
6135 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6136 printf("gmii_hv_readreg!!!\n");
6137 return 0;
6138 }
6139
6140 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6141 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6142 page << BME1000_PAGE_SHIFT);
6143 }
6144
6145 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6146 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6147 return rv;
6148 }
6149
6150 /*
6151 * wm_gmii_hv_writereg: [mii interface function]
6152 *
6153 * Write a PHY register on the kumeran.
6154 * This could be handled by the PHY layer if we didn't have to lock the
6155 * ressource ...
6156 */
6157 static void
6158 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6159 {
6160 struct wm_softc *sc = device_private(self);
6161 uint16_t page = BM_PHY_REG_PAGE(reg);
6162 uint16_t regnum = BM_PHY_REG_NUM(reg);
6163
6164 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6165 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6166 __func__);
6167 return;
6168 }
6169
6170 /* XXX Workaround failure in MDIO access while cable is disconnected */
6171
6172 /* Page 800 works differently than the rest so it has its own func */
6173 if (page == BM_WUC_PAGE) {
6174 uint16_t tmp;
6175
6176 tmp = val;
6177 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
6178 return;
6179 }
6180
6181 /*
6182 * Lower than page 768 works differently than the rest so it has its
6183 * own func
6184 */
6185 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6186 printf("gmii_hv_writereg!!!\n");
6187 return;
6188 }
6189
6190 /*
6191 * XXX Workaround MDIO accesses being disabled after entering IEEE
6192 * Power Down (whenever bit 11 of the PHY control register is set)
6193 */
6194
6195 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6196 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6197 page << BME1000_PAGE_SHIFT);
6198 }
6199
6200 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6201 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6202 }
6203
6204 /*
6205 * wm_gmii_hv_readreg: [mii interface function]
6206 *
6207 * Read a PHY register on the kumeran
6208 * This could be handled by the PHY layer if we didn't have to lock the
6209 * ressource ...
6210 */
6211 static int
6212 wm_sgmii_readreg(device_t self, int phy, int reg)
6213 {
6214 struct wm_softc *sc = device_private(self);
6215 uint32_t i2ccmd;
6216 int i, rv;
6217
6218 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6219 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6220 __func__);
6221 return 0;
6222 }
6223
6224 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6225 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6226 | I2CCMD_OPCODE_READ;
6227 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6228
6229 /* Poll the ready bit */
6230 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6231 delay(50);
6232 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6233 if (i2ccmd & I2CCMD_READY)
6234 break;
6235 }
6236 if ((i2ccmd & I2CCMD_READY) == 0)
6237 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
6238 if ((i2ccmd & I2CCMD_ERROR) != 0)
6239 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6240
6241 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
6242
6243 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6244 return rv;
6245 }
6246
6247 /*
6248 * wm_gmii_hv_writereg: [mii interface function]
6249 *
6250 * Write a PHY register on the kumeran.
6251 * This could be handled by the PHY layer if we didn't have to lock the
6252 * ressource ...
6253 */
6254 static void
6255 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
6256 {
6257 struct wm_softc *sc = device_private(self);
6258 uint32_t i2ccmd;
6259 int i;
6260
6261 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6262 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6263 __func__);
6264 return;
6265 }
6266
6267 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6268 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6269 | I2CCMD_OPCODE_WRITE;
6270 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6271
6272 /* Poll the ready bit */
6273 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6274 delay(50);
6275 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6276 if (i2ccmd & I2CCMD_READY)
6277 break;
6278 }
6279 if ((i2ccmd & I2CCMD_READY) == 0)
6280 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
6281 if ((i2ccmd & I2CCMD_ERROR) != 0)
6282 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6283
6284 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6285 }
6286
6287 /*
6288 * wm_gmii_statchg: [mii interface function]
6289 *
6290 * Callback from MII layer when media changes.
6291 */
6292 static void
6293 wm_gmii_statchg(device_t self)
6294 {
6295 struct wm_softc *sc = device_private(self);
6296 struct mii_data *mii = &sc->sc_mii;
6297
6298 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6299 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6300 sc->sc_fcrtl &= ~FCRTL_XONE;
6301
6302 /*
6303 * Get flow control negotiation result.
6304 */
6305 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
6306 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
6307 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
6308 mii->mii_media_active &= ~IFM_ETH_FMASK;
6309 }
6310
6311 if (sc->sc_flowflags & IFM_FLOW) {
6312 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
6313 sc->sc_ctrl |= CTRL_TFCE;
6314 sc->sc_fcrtl |= FCRTL_XONE;
6315 }
6316 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
6317 sc->sc_ctrl |= CTRL_RFCE;
6318 }
6319
6320 if (sc->sc_mii.mii_media_active & IFM_FDX) {
6321 DPRINTF(WM_DEBUG_LINK,
6322 ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
6323 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6324 } else {
6325 DPRINTF(WM_DEBUG_LINK,
6326 ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
6327 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6328 }
6329
6330 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6331 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6332 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
6333 : WMREG_FCRTL, sc->sc_fcrtl);
6334 if (sc->sc_type == WM_T_80003) {
6335 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
6336 case IFM_1000_T:
6337 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6338 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
6339 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6340 break;
6341 default:
6342 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6343 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
6344 sc->sc_tipg = TIPG_10_100_80003_DFLT;
6345 break;
6346 }
6347 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6348 }
6349 }
6350
6351 /*
6352 * wm_kmrn_readreg:
6353 *
6354 * Read a kumeran register
6355 */
6356 static int
6357 wm_kmrn_readreg(struct wm_softc *sc, int reg)
6358 {
6359 int rv;
6360
6361 if (sc->sc_flags == WM_F_SWFW_SYNC) {
6362 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6363 aprint_error_dev(sc->sc_dev,
6364 "%s: failed to get semaphore\n", __func__);
6365 return 0;
6366 }
6367 } else if (sc->sc_flags == WM_F_SWFWHW_SYNC) {
6368 if (wm_get_swfwhw_semaphore(sc)) {
6369 aprint_error_dev(sc->sc_dev,
6370 "%s: failed to get semaphore\n", __func__);
6371 return 0;
6372 }
6373 }
6374
6375 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6376 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6377 KUMCTRLSTA_REN);
6378 delay(2);
6379
6380 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
6381
6382 if (sc->sc_flags == WM_F_SWFW_SYNC)
6383 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
6384 else if (sc->sc_flags == WM_F_SWFWHW_SYNC)
6385 wm_put_swfwhw_semaphore(sc);
6386
6387 return rv;
6388 }
6389
6390 /*
6391 * wm_kmrn_writereg:
6392 *
6393 * Write a kumeran register
6394 */
6395 static void
6396 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
6397 {
6398
6399 if (sc->sc_flags == WM_F_SWFW_SYNC) {
6400 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6401 aprint_error_dev(sc->sc_dev,
6402 "%s: failed to get semaphore\n", __func__);
6403 return;
6404 }
6405 } else if (sc->sc_flags == WM_F_SWFWHW_SYNC) {
6406 if (wm_get_swfwhw_semaphore(sc)) {
6407 aprint_error_dev(sc->sc_dev,
6408 "%s: failed to get semaphore\n", __func__);
6409 return;
6410 }
6411 }
6412
6413 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6414 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6415 (val & KUMCTRLSTA_MASK));
6416
6417 if (sc->sc_flags == WM_F_SWFW_SYNC)
6418 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
6419 else if (sc->sc_flags == WM_F_SWFWHW_SYNC)
6420 wm_put_swfwhw_semaphore(sc);
6421 }
6422
6423 static int
6424 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
6425 {
6426 uint32_t eecd = 0;
6427
6428 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
6429 || sc->sc_type == WM_T_82583) {
6430 eecd = CSR_READ(sc, WMREG_EECD);
6431
6432 /* Isolate bits 15 & 16 */
6433 eecd = ((eecd >> 15) & 0x03);
6434
6435 /* If both bits are set, device is Flash type */
6436 if (eecd == 0x03)
6437 return 0;
6438 }
6439 return 1;
6440 }
6441
6442 static int
6443 wm_get_swsm_semaphore(struct wm_softc *sc)
6444 {
6445 int32_t timeout;
6446 uint32_t swsm;
6447
6448 /* Get the FW semaphore. */
6449 timeout = 1000 + 1; /* XXX */
6450 while (timeout) {
6451 swsm = CSR_READ(sc, WMREG_SWSM);
6452 swsm |= SWSM_SWESMBI;
6453 CSR_WRITE(sc, WMREG_SWSM, swsm);
6454 /* if we managed to set the bit we got the semaphore. */
6455 swsm = CSR_READ(sc, WMREG_SWSM);
6456 if (swsm & SWSM_SWESMBI)
6457 break;
6458
6459 delay(50);
6460 timeout--;
6461 }
6462
6463 if (timeout == 0) {
6464 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
6465 /* Release semaphores */
6466 wm_put_swsm_semaphore(sc);
6467 return 1;
6468 }
6469 return 0;
6470 }
6471
6472 static void
6473 wm_put_swsm_semaphore(struct wm_softc *sc)
6474 {
6475 uint32_t swsm;
6476
6477 swsm = CSR_READ(sc, WMREG_SWSM);
6478 swsm &= ~(SWSM_SWESMBI);
6479 CSR_WRITE(sc, WMREG_SWSM, swsm);
6480 }
6481
6482 static int
6483 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6484 {
6485 uint32_t swfw_sync;
6486 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
6487 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
6488 int timeout = 200;
6489
6490 for (timeout = 0; timeout < 200; timeout++) {
6491 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6492 if (wm_get_swsm_semaphore(sc)) {
6493 aprint_error_dev(sc->sc_dev,
6494 "%s: failed to get semaphore\n",
6495 __func__);
6496 return 1;
6497 }
6498 }
6499 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6500 if ((swfw_sync & (swmask | fwmask)) == 0) {
6501 swfw_sync |= swmask;
6502 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6503 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6504 wm_put_swsm_semaphore(sc);
6505 return 0;
6506 }
6507 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6508 wm_put_swsm_semaphore(sc);
6509 delay(5000);
6510 }
6511 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
6512 device_xname(sc->sc_dev), mask, swfw_sync);
6513 return 1;
6514 }
6515
6516 static void
6517 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6518 {
6519 uint32_t swfw_sync;
6520
6521 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6522 while (wm_get_swsm_semaphore(sc) != 0)
6523 continue;
6524 }
6525 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6526 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
6527 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6528 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6529 wm_put_swsm_semaphore(sc);
6530 }
6531
6532 static int
6533 wm_get_swfwhw_semaphore(struct wm_softc *sc)
6534 {
6535 uint32_t ext_ctrl;
6536 int timeout = 200;
6537
6538 for (timeout = 0; timeout < 200; timeout++) {
6539 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6540 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
6541 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6542
6543 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6544 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
6545 return 0;
6546 delay(5000);
6547 }
6548 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
6549 device_xname(sc->sc_dev), ext_ctrl);
6550 return 1;
6551 }
6552
6553 static void
6554 wm_put_swfwhw_semaphore(struct wm_softc *sc)
6555 {
6556 uint32_t ext_ctrl;
6557 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6558 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
6559 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6560 }
6561
6562 static int
6563 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
6564 {
6565 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
6566 uint8_t bank_high_byte;
6567 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
6568
6569 if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
6570 /* Value of bit 22 corresponds to the flash bank we're on. */
6571 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
6572 } else {
6573 wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
6574 if ((bank_high_byte & 0xc0) == 0x80)
6575 *bank = 0;
6576 else {
6577 wm_read_ich8_byte(sc, act_offset + bank1_offset,
6578 &bank_high_byte);
6579 if ((bank_high_byte & 0xc0) == 0x80)
6580 *bank = 1;
6581 else {
6582 aprint_error_dev(sc->sc_dev,
6583 "EEPROM not present\n");
6584 return -1;
6585 }
6586 }
6587 }
6588
6589 return 0;
6590 }
6591
6592 /******************************************************************************
6593 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
6594 * register.
6595 *
6596 * sc - Struct containing variables accessed by shared code
6597 * offset - offset of word in the EEPROM to read
6598 * data - word read from the EEPROM
6599 * words - number of words to read
6600 *****************************************************************************/
6601 static int
6602 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
6603 {
6604 int32_t error = 0;
6605 uint32_t flash_bank = 0;
6606 uint32_t act_offset = 0;
6607 uint32_t bank_offset = 0;
6608 uint16_t word = 0;
6609 uint16_t i = 0;
6610
6611 /* We need to know which is the valid flash bank. In the event
6612 * that we didn't allocate eeprom_shadow_ram, we may not be
6613 * managing flash_bank. So it cannot be trusted and needs
6614 * to be updated with each read.
6615 */
6616 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
6617 if (error) {
6618 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
6619 __func__);
6620 return error;
6621 }
6622
6623 /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
6624 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
6625
6626 error = wm_get_swfwhw_semaphore(sc);
6627 if (error) {
6628 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6629 __func__);
6630 return error;
6631 }
6632
6633 for (i = 0; i < words; i++) {
6634 /* The NVM part needs a byte offset, hence * 2 */
6635 act_offset = bank_offset + ((offset + i) * 2);
6636 error = wm_read_ich8_word(sc, act_offset, &word);
6637 if (error) {
6638 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
6639 __func__);
6640 break;
6641 }
6642 data[i] = word;
6643 }
6644
6645 wm_put_swfwhw_semaphore(sc);
6646 return error;
6647 }
6648
/******************************************************************************
 * This function does initial flash setup so that a new read/write/erase cycle
 * can be started.
 *
 * sc - The pointer to the hw structure
 *
 * Returns 0 when the flash is ready for a new cycle, 1 when the flash
 * descriptor is invalid or a previous cycle never finished.
 ****************************************************************************/
static int32_t
wm_ich8_cycle_init(struct wm_softc *sc)
{
	uint16_t hsfsts;
	int32_t error = 1;
	int32_t i = 0;

	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);

	/* Check the Flash Descriptor Valid bit; without it the flash
	 * region cannot be used at all. */
	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
		return error;
	}

	/* Clear FCERR in Hw status by writing 1 */
	/* Clear DAEL in Hw status by writing a 1 */
	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;

	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);

	/*
	 * Either we should have a hardware SPI cycle in progress bit to check
	 * against, in order to start a new cycle or FDONE bit should be
	 * changed in the hardware so that it is 1 after hardware reset, which
	 * can then be used as an indication whether a cycle is in progress or
	 * has been completed .. we should also have some software semaphore
	 * mechanism to guard FDONE or the cycle in progress bit so that two
	 * threads access to those bits can be sequentialized or a way so that
	 * 2 threads don't start the cycle at the same time
	 */

	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
		/*
		 * There is no cycle running at present, so we can start a
		 * cycle
		 */

		/* Begin by setting Flash Cycle Done. */
		hsfsts |= HSFSTS_DONE;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		error = 0;
	} else {
		/*
		 * otherwise poll for sometime so the current cycle has a
		 * chance to end before giving up.
		 */
		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
				/* Previous cycle finished in time. */
				error = 0;
				break;
			}
			delay(1);
		}
		if (error == 0) {
			/*
			 * Successfully waited for the previous cycle to
			 * finish; now set the Flash Cycle Done bit.
			 */
			hsfsts |= HSFSTS_DONE;
			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		}
	}
	return error;
}
6720
/******************************************************************************
 * This function starts a flash cycle and waits for its completion
 *
 * sc - The pointer to the hw structure
 *
 * Returns 0 when the cycle completed without a flash error before the
 * 'timeout' (microsecond) budget expired, 1 otherwise.
 ****************************************************************************/
static int32_t
wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
{
	uint16_t hsflctl;
	uint16_t hsfsts;
	int32_t error = 1;
	uint32_t i = 0;

	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
	hsflctl |= HSFCTL_GO;
	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);

	/* wait till FDONE bit is set to 1 */
	do {
		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
		if (hsfsts & HSFSTS_DONE)
			break;
		delay(1);
		i++;
	} while (i < timeout);
	/*
	 * NOTE(review): the "== 1" below is only correct if HSFSTS_DONE
	 * is bit 0; "!= 0" would be the robust spelling -- confirm the
	 * macro value in wmreg.h before changing.
	 */
	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
		error = 0;

	return error;
}
6752
/******************************************************************************
 * Reads a byte or word from the NVM using the ICH8 flash access registers.
 *
 * sc - The pointer to the hw structure
 * index - The index of the byte or word to read.
 * size - Size of data to read, 1=byte 2=word
 * data - Pointer to the word to store the value read.
 *
 * Returns 0 on success, 1 on bad arguments or when the flash cycle kept
 * failing for ICH_FLASH_CYCLE_REPEAT_COUNT attempts.
 *****************************************************************************/
static int32_t
wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
    uint32_t size, uint16_t* data)
{
	uint16_t hsfsts;
	uint16_t hsflctl;
	uint32_t flash_linear_address;
	uint32_t flash_data = 0;
	int32_t error = 1;
	int32_t count = 0;

	/* Sanity check arguments before touching the hardware. */
	if (size < 1 || size > 2 || data == 0x0 ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
		return error;

	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
	    sc->sc_ich8_flash_base;

	do {
		delay(1);
		/* Steps */
		error = wm_ich8_cycle_init(sc);
		if (error)
			break;

		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
		    & HSFCTL_BCOUNT_MASK;
		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);

		/*
		 * Write the last 24 bits of index into Flash Linear address
		 * field in Flash Address
		 */
		/* TODO: TBD maybe check the index against the size of flash */

		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);

		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);

		/*
		 * Check if FCERR is set to 1, if set to 1, clear it and try
		 * the whole sequence a few more times, else read in (shift in)
		 * the Flash Data0, the order is least significant byte first
		 * msb to lsb
		 */
		if (error == 0) {
			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
			if (size == 1)
				*data = (uint8_t)(flash_data & 0x000000FF);
			else if (size == 2)
				*data = (uint16_t)(flash_data & 0x0000FFFF);
			break;
		} else {
			/*
			 * If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
			if (hsfsts & HSFSTS_ERR) {
				/* Repeat for some time before giving up. */
				continue;
			} else if ((hsfsts & HSFSTS_DONE) == 0)
				break;
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return error;
}
6834
/******************************************************************************
 * Reads a single byte from the NVM using the ICH8 flash access registers.
 *
 * sc - pointer to wm_hw structure
 * index - The index of the byte to read.
 * data - Pointer to a byte to store the value read.
 *****************************************************************************/
static int32_t
wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
{
	uint16_t word = 0;
	int32_t rv;

	/* A byte read is a 1-byte flash data access. */
	rv = wm_read_ich8_data(sc, index, 1, &word);
	if (rv == 0)
		*data = (uint8_t)word;

	return rv;
}
6854
/******************************************************************************
 * Reads a word from the NVM using the ICH8 flash access registers.
 *
 * sc - pointer to wm_hw structure
 * index - The starting byte index of the word to read.
 * data - Pointer to a word to store the value read.
 *****************************************************************************/
static int32_t
wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
{

	/* A word read is simply a 2-byte flash data access. */
	return wm_read_ich8_data(sc, index, 2, data);
}
6870
6871 static int
6872 wm_check_mng_mode(struct wm_softc *sc)
6873 {
6874 int rv;
6875
6876 switch (sc->sc_type) {
6877 case WM_T_ICH8:
6878 case WM_T_ICH9:
6879 case WM_T_ICH10:
6880 case WM_T_PCH:
6881 rv = wm_check_mng_mode_ich8lan(sc);
6882 break;
6883 case WM_T_82574:
6884 case WM_T_82583:
6885 rv = wm_check_mng_mode_82574(sc);
6886 break;
6887 case WM_T_82571:
6888 case WM_T_82572:
6889 case WM_T_82573:
6890 case WM_T_80003:
6891 rv = wm_check_mng_mode_generic(sc);
6892 break;
6893 default:
6894 /* noting to do */
6895 rv = 0;
6896 break;
6897 }
6898
6899 return rv;
6900 }
6901
6902 static int
6903 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
6904 {
6905 uint32_t fwsm;
6906
6907 fwsm = CSR_READ(sc, WMREG_FWSM);
6908
6909 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
6910 return 1;
6911
6912 return 0;
6913 }
6914
6915 static int
6916 wm_check_mng_mode_82574(struct wm_softc *sc)
6917 {
6918 uint16_t data;
6919
6920 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
6921
6922 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
6923 return 1;
6924
6925 return 0;
6926 }
6927
6928 static int
6929 wm_check_mng_mode_generic(struct wm_softc *sc)
6930 {
6931 uint32_t fwsm;
6932
6933 fwsm = CSR_READ(sc, WMREG_FWSM);
6934
6935 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
6936 return 1;
6937
6938 return 0;
6939 }
6940
6941 static int
6942 wm_enable_mng_pass_thru(struct wm_softc *sc)
6943 {
6944 uint32_t manc, fwsm, factps;
6945
6946 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
6947 return 0;
6948
6949 manc = CSR_READ(sc, WMREG_MANC);
6950
6951 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
6952 device_xname(sc->sc_dev), manc));
6953 if (((manc & MANC_RECV_TCO_EN) == 0)
6954 || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
6955 return 0;
6956
6957 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
6958 fwsm = CSR_READ(sc, WMREG_FWSM);
6959 factps = CSR_READ(sc, WMREG_FACTPS);
6960 if (((factps & FACTPS_MNGCG) == 0)
6961 && ((fwsm & FWSM_MODE_MASK)
6962 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
6963 return 1;
6964 } else if (((manc & MANC_SMBUS_EN) != 0)
6965 && ((manc & MANC_ASF_EN) == 0))
6966 return 1;
6967
6968 return 0;
6969 }
6970
6971 static int
6972 wm_check_reset_block(struct wm_softc *sc)
6973 {
6974 uint32_t reg;
6975
6976 switch (sc->sc_type) {
6977 case WM_T_ICH8:
6978 case WM_T_ICH9:
6979 case WM_T_ICH10:
6980 case WM_T_PCH:
6981 reg = CSR_READ(sc, WMREG_FWSM);
6982 if ((reg & FWSM_RSPCIPHY) != 0)
6983 return 0;
6984 else
6985 return -1;
6986 break;
6987 case WM_T_82571:
6988 case WM_T_82572:
6989 case WM_T_82573:
6990 case WM_T_82574:
6991 case WM_T_82583:
6992 case WM_T_80003:
6993 reg = CSR_READ(sc, WMREG_MANC);
6994 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
6995 return -1;
6996 else
6997 return 0;
6998 break;
6999 default:
7000 /* no problem */
7001 break;
7002 }
7003
7004 return 0;
7005 }
7006
7007 static void
7008 wm_get_hw_control(struct wm_softc *sc)
7009 {
7010 uint32_t reg;
7011
7012 switch (sc->sc_type) {
7013 case WM_T_82573:
7014 reg = CSR_READ(sc, WMREG_SWSM);
7015 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
7016 break;
7017 case WM_T_82571:
7018 case WM_T_82572:
7019 case WM_T_82574:
7020 case WM_T_82583:
7021 case WM_T_80003:
7022 case WM_T_ICH8:
7023 case WM_T_ICH9:
7024 case WM_T_ICH10:
7025 case WM_T_PCH:
7026 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7027 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
7028 break;
7029 default:
7030 break;
7031 }
7032 }
7033
7034 static void
7035 wm_release_hw_control(struct wm_softc *sc)
7036 {
7037 uint32_t reg;
7038
7039 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
7040 return;
7041
7042 if (sc->sc_type == WM_T_82573) {
7043 reg = CSR_READ(sc, WMREG_SWSM);
7044 reg &= ~SWSM_DRV_LOAD;
7045 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
7046 } else {
7047 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7048 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
7049 }
7050 }
7051
/* XXX Currently TBI only */
static int
wm_check_for_link(struct wm_softc *sc)
{
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw;
	uint32_t ctrl;
	uint32_t status;
	uint32_t sig;

	/* Snapshot the three registers the link decision is based on. */
	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	/* Expected signal-detect value of SWDPIN(1); pre-82545 parts
	 * use inverted polarity, hence 0 for older chips. */
	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;

	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
		device_xname(sc->sc_dev), __func__,
		((ctrl & CTRL_SWDPIN(1)) == sig),
		((status & STATUS_LU) != 0),
		((rxcw & RXCW_C) != 0)
		    ));

	/*
	 * SWDPIN   LU  RXCW
	 *	0    0	  0
	 *	0    0	  1	(should not happen)
	 *	0    1	  0	(should not happen)
	 *	0    1	  1	(should not happen)
	 *	1    0	  0	Disable autonego and force linkup
	 *	1    0	  1	got /C/ but not linkup yet
	 *	1    1	  0	(linkup)
	 *	1    1	  1	If IFM_AUTO, back to autonego
	 *
	 */
	if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((status & STATUS_LU) == 0)
	    && ((rxcw & RXCW_C) == 0)) {
		/* Signal present, no link, no /C/ ordered sets: the peer
		 * is not autonegotiating, so force the link up. */
		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
			__func__));
		sc->sc_tbi_linkup = 0;
		/* Disable auto-negotiation in the TXCW register */
		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));

		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: CTRL was updated TFCE and RFCE automatically,
		 * so we should update sc->sc_ctrl
		 */
		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	} else if (((status & STATUS_LU) != 0)
	    && ((rxcw & RXCW_C) != 0)
	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
		/* Link up and receiving /C/: re-enable autonegotiation. */
		sc->sc_tbi_linkup = 1;
		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
			__func__));
		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((rxcw & RXCW_C) != 0)) {
		/* /C/ seen but no link yet; nothing to do. */
		DPRINTF(WM_DEBUG_LINK, ("/C/"));
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
			status));
	}

	return 0;
}
7122
7123 /* Work-around for 82566 Kumeran PCS lock loss */
7124 static void
7125 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
7126 {
7127 int miistatus, active, i;
7128 int reg;
7129
7130 miistatus = sc->sc_mii.mii_media_status;
7131
7132 /* If the link is not up, do nothing */
7133 if ((miistatus & IFM_ACTIVE) != 0)
7134 return;
7135
7136 active = sc->sc_mii.mii_media_active;
7137
7138 /* Nothing to do if the link is other than 1Gbps */
7139 if (IFM_SUBTYPE(active) != IFM_1000_T)
7140 return;
7141
7142 for (i = 0; i < 10; i++) {
7143 /* read twice */
7144 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7145 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7146 if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) != 0)
7147 goto out; /* GOOD! */
7148
7149 /* Reset the PHY */
7150 wm_gmii_reset(sc);
7151 delay(5*1000);
7152 }
7153
7154 /* Disable GigE link negotiation */
7155 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7156 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7157 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7158
7159 /*
7160 * Call gig speed drop workaround on Gig disable before accessing
7161 * any PHY registers.
7162 */
7163 wm_gig_downshift_workaround_ich8lan(sc);
7164
7165 out:
7166 return;
7167 }
7168
7169 /* WOL from S5 stops working */
7170 static void
7171 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
7172 {
7173 uint16_t kmrn_reg;
7174
7175 /* Only for igp3 */
7176 if (sc->sc_phytype == WMPHY_IGP_3) {
7177 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
7178 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
7179 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7180 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
7181 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7182 }
7183 }
7184
7185 #ifdef WM_WOL
/* Power down workaround on D3 */
static void
wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Try the power-down sequence at most twice. */
	for (i = 0; i < 2; i++) {
		/* Disable link */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (sc->sc_type == WM_T_ICH8)
			wm_gig_downshift_workaround_ich8lan(sc);

		/* Write VR power-down enable */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);

		/* Read it back and test */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		/* Done if shutdown took, or this was already the retry. */
		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
			break;

		/* Issue PHY reset and repeat at most one more time */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	}
}
7222 #endif /* WM_WOL */
7223
/*
 * Workaround for pch's PHYs
 * XXX should be moved to new PHY driver?
 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */

	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		/* PCH rev. < 3 */
		if (sc->sc_rev < 3) {
			/* XXX 6 bit shift? Why? Is it page2? */
			/*
			 * NOTE(review): the register numbers and values
			 * below are undocumented magic from the vendor
			 * workaround; do not change without a datasheet.
			 */
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
			    0x66c0);
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
			    0xffff);
		}

		/* XXX phy rev. < 2 */
	}

	/* Select page 0 */

	/* XXX acquire semaphore */
	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	/* XXX release semaphore */

	/*
	 * Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}
7262
7263 static void
7264 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
7265 {
7266 int k1_enable = sc->sc_nvm_k1_enabled;
7267
7268 /* XXX acquire semaphore */
7269
7270 if (link) {
7271 k1_enable = 0;
7272
7273 /* Link stall fix for link up */
7274 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
7275 } else {
7276 /* Link stall fix for link down */
7277 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
7278 }
7279
7280 wm_configure_k1_ich8lan(sc, k1_enable);
7281
7282 /* XXX release semaphore */
7283 }
7284
/*
 * wm_configure_k1_ich8lan:
 *
 *	Enable or disable K1 in the Kumeran K1_CONFIG register, then
 *	bounce the MAC speed-forcing bits so the change takes effect.
 *	The delay(20) calls between register writes are required
 *	settling time; do not reorder this sequence.
 */
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

	/* Save CTRL/CTRL_EXT so they can be restored afterwards. */
	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	/* Temporarily force the speed with the speed bits cleared. */
	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	delay(20);

	/* Restore the original control registers. */
	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	delay(20);
}
7316
7317 static void
7318 wm_set_pcie_completion_timeout(struct wm_softc *sc)
7319 {
7320 uint32_t gcr;
7321 pcireg_t ctrl2;
7322
7323 gcr = CSR_READ(sc, WMREG_GCR);
7324
7325 /* Only take action if timeout value is defaulted to 0 */
7326 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
7327 goto out;
7328
7329 if ((gcr & GCR_CAP_VER2) == 0) {
7330 gcr |= GCR_CMPL_TMOUT_10MS;
7331 goto out;
7332 }
7333
7334 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
7335 sc->sc_pcixe_capoff + PCI_PCIE_DCSR2);
7336 ctrl2 |= WM_PCI_PCIE_DCSR2_16MS;
7337 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
7338 sc->sc_pcixe_capoff + PCI_PCIE_DCSR2, ctrl2);
7339
7340 out:
7341 /* Disable completion timeout resend */
7342 gcr &= ~GCR_CMPL_TMOUT_RESEND;
7343
7344 CSR_WRITE(sc, WMREG_GCR, gcr);
7345 }
7346
/* special case - for 82575 - need to do manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * remark: this is untested code - we have no board without EEPROM
	 * same setup as mentioned int the freeBSD driver for the i82575
	 *
	 * NOTE(review): all register index/value pairs below are
	 * undocumented magic taken from the vendor init script;
	 * do not change them without a reference.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}
7377
7378 static void
7379 wm_init_manageability(struct wm_softc *sc)
7380 {
7381
7382 if (sc->sc_flags & WM_F_HAS_MANAGE) {
7383 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
7384 uint32_t manc = CSR_READ(sc, WMREG_MANC);
7385
7386 /* disabl hardware interception of ARP */
7387 manc &= ~MANC_ARP_EN;
7388
7389 /* enable receiving management packets to the host */
7390 if (sc->sc_type >= WM_T_82571) {
7391 manc |= MANC_EN_MNG2HOST;
7392 manc2h |= MANC2H_PORT_623| MANC2H_PORT_624;
7393 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
7394
7395 }
7396
7397 CSR_WRITE(sc, WMREG_MANC, manc);
7398 }
7399 }
7400
7401 static void
7402 wm_release_manageability(struct wm_softc *sc)
7403 {
7404
7405 if (sc->sc_flags & WM_F_HAS_MANAGE) {
7406 uint32_t manc = CSR_READ(sc, WMREG_MANC);
7407
7408 if (sc->sc_type >= WM_T_82571)
7409 manc &= ~MANC_EN_MNG2HOST;
7410
7411 CSR_WRITE(sc, WMREG_MANC, manc);
7412 }
7413 }
7414
/*
 * wm_get_wakeup:
 *
 *	Derive the per-chip manageability/wakeup capability flags
 *	(WM_F_HAS_AMT, WM_F_ARC_SUBSYS_VALID, WM_F_ASF_FIRMWARE_PRES,
 *	WM_F_HAS_MANAGE) and store them in sc_flags.
 */
static void
wm_get_wakeup(struct wm_softc *sc)
{

	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82583:
		sc->sc_flags |= WM_F_HAS_AMT;
		/* FALLTHROUGH */
	case WM_T_80003:
	case WM_T_82541:
	case WM_T_82547:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
#if 0 /* XXX */
	case WM_T_82580:
	case WM_T_82580ER:
#endif
		/* A non-zero FWSM mode means an ARC subsystem is present. */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		sc->sc_flags |= WM_F_HAS_AMT;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	default:
		break;
	}

	/* 1: HAS_MANAGE */
	if (wm_enable_mng_pass_thru(sc) != 0)
		sc->sc_flags |= WM_F_HAS_MANAGE;

#ifdef WM_DEBUG
	printf("\n");
	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
		printf("HAS_AMT,");
	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
		printf("ARC_SUBSYS_VALID,");
	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
		printf("ASF_FIRMWARE_PRES,");
	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
		printf("HAS_MANAGE,");
	printf("\n");
#endif
	/*
	 * Note that the WOL flags are set after the resetting of the eeprom
	 * stuff
	 */
}
7473
7474 #ifdef WM_WOL
/* WOL in the newer chipset interfaces (pchlan) */
static void
wm_enable_phy_wakeup(struct wm_softc *sc)
{
	/*
	 * NOTE(review): this is an unimplemented stub; the PCH PHY
	 * wakeup sequence below is sketched but entirely disabled,
	 * so PHY-based WOL does not actually work on PCH yet.
	 */
#if 0
	uint16_t preg;

	/* Copy MAC RARs to PHY RARs */

	/* Copy MAC MTA to PHY MTA */

	/* Configure PHY Rx Control register */

	/* Enable PHY wakeup in MAC register */

	/* Configure and enable PHY wakeup in PHY registers */

	/* Activate PHY wakeup */

	/* XXX */
#endif
}
7497
7498 static void
7499 wm_enable_wakeup(struct wm_softc *sc)
7500 {
7501 uint32_t reg, pmreg;
7502 pcireg_t pmode;
7503
7504 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
7505 &pmreg, NULL) == 0)
7506 return;
7507
7508 /* Advertise the wakeup capability */
7509 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
7510 | CTRL_SWDPIN(3));
7511 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
7512
7513 /* ICH workaround */
7514 switch (sc->sc_type) {
7515 case WM_T_ICH8:
7516 case WM_T_ICH9:
7517 case WM_T_ICH10:
7518 case WM_T_PCH:
7519 /* Disable gig during WOL */
7520 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7521 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
7522 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7523 if (sc->sc_type == WM_T_PCH)
7524 wm_gmii_reset(sc);
7525
7526 /* Power down workaround */
7527 if (sc->sc_phytype == WMPHY_82577) {
7528 struct mii_softc *child;
7529
7530 /* Assume that the PHY is copper */
7531 child = LIST_FIRST(&sc->sc_mii.mii_phys);
7532 if (child->mii_mpd_rev <= 2)
7533 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
7534 (768 << 5) | 25, 0x0444); /* magic num */
7535 }
7536 break;
7537 default:
7538 break;
7539 }
7540
7541 /* Keep the laser running on fiber adapters */
7542 if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
7543 || (sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0) {
7544 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7545 reg |= CTRL_EXT_SWDPIN(3);
7546 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7547 }
7548
7549 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
7550 #if 0 /* for the multicast packet */
7551 reg |= WUFC_MC;
7552 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
7553 #endif
7554
7555 if (sc->sc_type == WM_T_PCH) {
7556 wm_enable_phy_wakeup(sc);
7557 } else {
7558 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
7559 CSR_WRITE(sc, WMREG_WUFC, reg);
7560 }
7561
7562 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
7563 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
7564 && (sc->sc_phytype == WMPHY_IGP_3))
7565 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
7566
7567 /* Request PME */
7568 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
7569 #if 0
7570 /* Disable WOL */
7571 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
7572 #else
7573 /* For WOL */
7574 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
7575 #endif
7576 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
7577 }
7578 #endif /* WM_WOL */
7579
7580 static bool
7581 wm_suspend(device_t self, const pmf_qual_t *qual)
7582 {
7583 struct wm_softc *sc = device_private(self);
7584
7585 wm_release_manageability(sc);
7586 wm_release_hw_control(sc);
7587 #ifdef WM_WOL
7588 wm_enable_wakeup(sc);
7589 #endif
7590
7591 return true;
7592 }
7593
7594 static bool
7595 wm_resume(device_t self, const pmf_qual_t *qual)
7596 {
7597 struct wm_softc *sc = device_private(self);
7598
7599 wm_init_manageability(sc);
7600
7601 return true;
7602 }
7603